Commit e08e4c85d4735a3296266cccc4594c23c14cd333

Authored by Santosh Shilimkar
Committed by Kishon Vijay Abraham I
1 parent 2001f67e47

drivers: net: cpsw: convert tx completion to NAPI

The CPSW driver's TX and RX interrupt handling is not optimal. The driver
handles both IRQs together and relies on the RX NAPI to process TX packets.
Let's separate the interrupt handling and convert TX completion to NAPI as
well.

With these changes the network performance shoots up by almost 40%
for UDP. For TCP we also get a pretty good boost.

Signed-off-by: Santosh Shilimkar <santosh.shilimkar@ti.com>
Signed-off-by: Mugunthan V N <mugunthanvnm@ti.com>
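
For readers unfamiliar with the pattern, the sketch below shows the general shape of the split this patch introduces: the hard IRQ only masks its own interrupt source and schedules a dedicated TX napi_struct, and the poll callback reaps completed TX descriptors and re-arms the interrupt once it runs out of work. This is a minimal illustration, not the driver's code; struct example_priv and the example_* helpers are hypothetical stand-ins for the cpsw-specific interrupt masking and descriptor processing visible in the diff below.

#include <linux/interrupt.h>
#include <linux/netdevice.h>

struct example_priv {
	struct napi_struct napi_tx;	/* dedicated TX-completion NAPI context */
	/* ... device state ... */
};

/* Hard IRQ: do no work here, just silence the TX interrupt and defer. */
static irqreturn_t example_tx_irq(int irq, void *dev_id)
{
	struct example_priv *priv = dev_id;

	example_mask_tx_irq(priv);		/* hypothetical: mask the TX interrupt source */
	napi_schedule(&priv->napi_tx);
	return IRQ_HANDLED;
}

/* NAPI poll: free up to 'budget' completed TX descriptors in softirq context. */
static int example_tx_poll(struct napi_struct *napi, int budget)
{
	struct example_priv *priv = container_of(napi, struct example_priv, napi_tx);
	int done;

	done = example_reap_tx_descs(priv, budget);	/* hypothetical: analogous to cpdma_chan_process() */
	if (done < budget) {
		/* All pending completions handled; stop polling and re-arm the IRQ. */
		napi_complete(napi);
		example_unmask_tx_irq(priv);	/* hypothetical: re-enable the TX interrupt */
	}
	return done;
}

The TX context is registered with netif_napi_add() alongside the existing RX one, so TX and RX completions are throttled independently by their own NAPI budgets instead of TX piggybacking on the RX poll.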

Showing 1 changed file with 87 additions and 20 deletions

drivers/net/ethernet/ti/cpsw.c
1 /* 1 /*
2 * Texas Instruments Ethernet Switch Driver 2 * Texas Instruments Ethernet Switch Driver
3 * 3 *
4 * Copyright (C) 2012 Texas Instruments 4 * Copyright (C) 2012 Texas Instruments
5 * 5 *
6 * This program is free software; you can redistribute it and/or 6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License as 7 * modify it under the terms of the GNU General Public License as
8 * published by the Free Software Foundation version 2. 8 * published by the Free Software Foundation version 2.
9 * 9 *
10 * This program is distributed "as is" WITHOUT ANY WARRANTY of any 10 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
11 * kind, whether express or implied; without even the implied warranty 11 * kind, whether express or implied; without even the implied warranty
12 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details. 13 * GNU General Public License for more details.
14 */ 14 */
15 15
16 #include <linux/kernel.h> 16 #include <linux/kernel.h>
17 #include <linux/io.h> 17 #include <linux/io.h>
18 #include <linux/clk.h> 18 #include <linux/clk.h>
19 #include <linux/timer.h> 19 #include <linux/timer.h>
20 #include <linux/module.h> 20 #include <linux/module.h>
21 #include <linux/platform_device.h> 21 #include <linux/platform_device.h>
22 #include <linux/irqreturn.h> 22 #include <linux/irqreturn.h>
23 #include <linux/interrupt.h> 23 #include <linux/interrupt.h>
24 #include <linux/if_ether.h> 24 #include <linux/if_ether.h>
25 #include <linux/etherdevice.h> 25 #include <linux/etherdevice.h>
26 #include <linux/netdevice.h> 26 #include <linux/netdevice.h>
27 #include <linux/net_tstamp.h> 27 #include <linux/net_tstamp.h>
28 #include <linux/phy.h> 28 #include <linux/phy.h>
29 #include <linux/workqueue.h> 29 #include <linux/workqueue.h>
30 #include <linux/delay.h> 30 #include <linux/delay.h>
31 #include <linux/pm_runtime.h> 31 #include <linux/pm_runtime.h>
32 #include <linux/of.h> 32 #include <linux/of.h>
33 #include <linux/of_net.h> 33 #include <linux/of_net.h>
34 #include <linux/of_device.h> 34 #include <linux/of_device.h>
35 #include <linux/if_vlan.h> 35 #include <linux/if_vlan.h>
36 36
37 #include <linux/pinctrl/consumer.h> 37 #include <linux/pinctrl/consumer.h>
38 38
39 #include "cpsw.h" 39 #include "cpsw.h"
40 #include "cpsw_ale.h" 40 #include "cpsw_ale.h"
41 #include "cpts.h" 41 #include "cpts.h"
42 #include "davinci_cpdma.h" 42 #include "davinci_cpdma.h"
43 43
44 #define CPSW_DEBUG (NETIF_MSG_HW | NETIF_MSG_WOL | \ 44 #define CPSW_DEBUG (NETIF_MSG_HW | NETIF_MSG_WOL | \
45 NETIF_MSG_DRV | NETIF_MSG_LINK | \ 45 NETIF_MSG_DRV | NETIF_MSG_LINK | \
46 NETIF_MSG_IFUP | NETIF_MSG_INTR | \ 46 NETIF_MSG_IFUP | NETIF_MSG_INTR | \
47 NETIF_MSG_PROBE | NETIF_MSG_TIMER | \ 47 NETIF_MSG_PROBE | NETIF_MSG_TIMER | \
48 NETIF_MSG_IFDOWN | NETIF_MSG_RX_ERR | \ 48 NETIF_MSG_IFDOWN | NETIF_MSG_RX_ERR | \
49 NETIF_MSG_TX_ERR | NETIF_MSG_TX_DONE | \ 49 NETIF_MSG_TX_ERR | NETIF_MSG_TX_DONE | \
50 NETIF_MSG_PKTDATA | NETIF_MSG_TX_QUEUED | \ 50 NETIF_MSG_PKTDATA | NETIF_MSG_TX_QUEUED | \
51 NETIF_MSG_RX_STATUS) 51 NETIF_MSG_RX_STATUS)
52 52
53 #define cpsw_info(priv, type, format, ...) \ 53 #define cpsw_info(priv, type, format, ...) \
54 do { \ 54 do { \
55 if (netif_msg_##type(priv) && net_ratelimit()) \ 55 if (netif_msg_##type(priv) && net_ratelimit()) \
56 dev_info(priv->dev, format, ## __VA_ARGS__); \ 56 dev_info(priv->dev, format, ## __VA_ARGS__); \
57 } while (0) 57 } while (0)
58 58
59 #define cpsw_err(priv, type, format, ...) \ 59 #define cpsw_err(priv, type, format, ...) \
60 do { \ 60 do { \
61 if (netif_msg_##type(priv) && net_ratelimit()) \ 61 if (netif_msg_##type(priv) && net_ratelimit()) \
62 dev_err(priv->dev, format, ## __VA_ARGS__); \ 62 dev_err(priv->dev, format, ## __VA_ARGS__); \
63 } while (0) 63 } while (0)
64 64
65 #define cpsw_dbg(priv, type, format, ...) \ 65 #define cpsw_dbg(priv, type, format, ...) \
66 do { \ 66 do { \
67 if (netif_msg_##type(priv) && net_ratelimit()) \ 67 if (netif_msg_##type(priv) && net_ratelimit()) \
68 dev_dbg(priv->dev, format, ## __VA_ARGS__); \ 68 dev_dbg(priv->dev, format, ## __VA_ARGS__); \
69 } while (0) 69 } while (0)
70 70
71 #define cpsw_notice(priv, type, format, ...) \ 71 #define cpsw_notice(priv, type, format, ...) \
72 do { \ 72 do { \
73 if (netif_msg_##type(priv) && net_ratelimit()) \ 73 if (netif_msg_##type(priv) && net_ratelimit()) \
74 dev_notice(priv->dev, format, ## __VA_ARGS__); \ 74 dev_notice(priv->dev, format, ## __VA_ARGS__); \
75 } while (0) 75 } while (0)
76 76
77 #define ALE_ALL_PORTS 0x7 77 #define ALE_ALL_PORTS 0x7
78 78
79 #define CPSW_MAJOR_VERSION(reg) (reg >> 8 & 0x7) 79 #define CPSW_MAJOR_VERSION(reg) (reg >> 8 & 0x7)
80 #define CPSW_MINOR_VERSION(reg) (reg & 0xff) 80 #define CPSW_MINOR_VERSION(reg) (reg & 0xff)
81 #define CPSW_RTL_VERSION(reg) ((reg >> 11) & 0x1f) 81 #define CPSW_RTL_VERSION(reg) ((reg >> 11) & 0x1f)
82 82
83 #define CPSW_VERSION_1 0x19010a 83 #define CPSW_VERSION_1 0x19010a
84 #define CPSW_VERSION_2 0x19010c 84 #define CPSW_VERSION_2 0x19010c
85 #define CPSW_VERSION_3 0x19010f 85 #define CPSW_VERSION_3 0x19010f
86 #define CPSW_VERSION_4 0x190112 86 #define CPSW_VERSION_4 0x190112
87 87
88 #define HOST_PORT_NUM 0 88 #define HOST_PORT_NUM 0
89 #define SLIVER_SIZE 0x40 89 #define SLIVER_SIZE 0x40
90 90
91 #define CPSW1_HOST_PORT_OFFSET 0x028 91 #define CPSW1_HOST_PORT_OFFSET 0x028
92 #define CPSW1_SLAVE_OFFSET 0x050 92 #define CPSW1_SLAVE_OFFSET 0x050
93 #define CPSW1_SLAVE_SIZE 0x040 93 #define CPSW1_SLAVE_SIZE 0x040
94 #define CPSW1_CPDMA_OFFSET 0x100 94 #define CPSW1_CPDMA_OFFSET 0x100
95 #define CPSW1_STATERAM_OFFSET 0x200 95 #define CPSW1_STATERAM_OFFSET 0x200
96 #define CPSW1_HW_STATS 0x400 96 #define CPSW1_HW_STATS 0x400
97 #define CPSW1_CPTS_OFFSET 0x500 97 #define CPSW1_CPTS_OFFSET 0x500
98 #define CPSW1_ALE_OFFSET 0x600 98 #define CPSW1_ALE_OFFSET 0x600
99 #define CPSW1_SLIVER_OFFSET 0x700 99 #define CPSW1_SLIVER_OFFSET 0x700
100 100
101 #define CPSW2_HOST_PORT_OFFSET 0x108 101 #define CPSW2_HOST_PORT_OFFSET 0x108
102 #define CPSW2_SLAVE_OFFSET 0x200 102 #define CPSW2_SLAVE_OFFSET 0x200
103 #define CPSW2_SLAVE_SIZE 0x100 103 #define CPSW2_SLAVE_SIZE 0x100
104 #define CPSW2_CPDMA_OFFSET 0x800 104 #define CPSW2_CPDMA_OFFSET 0x800
105 #define CPSW2_HW_STATS 0x900 105 #define CPSW2_HW_STATS 0x900
106 #define CPSW2_STATERAM_OFFSET 0xa00 106 #define CPSW2_STATERAM_OFFSET 0xa00
107 #define CPSW2_CPTS_OFFSET 0xc00 107 #define CPSW2_CPTS_OFFSET 0xc00
108 #define CPSW2_ALE_OFFSET 0xd00 108 #define CPSW2_ALE_OFFSET 0xd00
109 #define CPSW2_SLIVER_OFFSET 0xd80 109 #define CPSW2_SLIVER_OFFSET 0xd80
110 #define CPSW2_BD_OFFSET 0x2000 110 #define CPSW2_BD_OFFSET 0x2000
111 111
112 #define CPDMA_RXTHRESH 0x0c0 112 #define CPDMA_RXTHRESH 0x0c0
113 #define CPDMA_RXFREE 0x0e0 113 #define CPDMA_RXFREE 0x0e0
114 #define CPDMA_TXHDP 0x00 114 #define CPDMA_TXHDP 0x00
115 #define CPDMA_RXHDP 0x20 115 #define CPDMA_RXHDP 0x20
116 #define CPDMA_TXCP 0x40 116 #define CPDMA_TXCP 0x40
117 #define CPDMA_RXCP 0x60 117 #define CPDMA_RXCP 0x60
118 118
119 #define CPSW_POLL_WEIGHT 64 119 #define CPSW_POLL_WEIGHT 64
120 #define CPSW_MIN_PACKET_SIZE 60 120 #define CPSW_MIN_PACKET_SIZE 60
121 #define CPSW_MAX_PACKET_SIZE (1500 + 14 + 4 + 4) 121 #define CPSW_MAX_PACKET_SIZE (1500 + 14 + 4 + 4)
122 122
123 #define RX_PRIORITY_MAPPING 0x76543210 123 #define RX_PRIORITY_MAPPING 0x76543210
124 #define TX_PRIORITY_MAPPING 0x33221100 124 #define TX_PRIORITY_MAPPING 0x33221100
125 #define CPDMA_TX_PRIORITY_MAP 0x76543210 125 #define CPDMA_TX_PRIORITY_MAP 0x76543210
126 126
127 #define CPSW_VLAN_AWARE BIT(1) 127 #define CPSW_VLAN_AWARE BIT(1)
128 #define CPSW_ALE_VLAN_AWARE 1 128 #define CPSW_ALE_VLAN_AWARE 1
129 129
130 #define CPSW_FIFO_NORMAL_MODE (0 << 15) 130 #define CPSW_FIFO_NORMAL_MODE (0 << 15)
131 #define CPSW_FIFO_DUAL_MAC_MODE (1 << 15) 131 #define CPSW_FIFO_DUAL_MAC_MODE (1 << 15)
132 #define CPSW_FIFO_RATE_LIMIT_MODE (2 << 15) 132 #define CPSW_FIFO_RATE_LIMIT_MODE (2 << 15)
133 133
134 #define CPSW_INTPACEEN (0x3f << 16) 134 #define CPSW_INTPACEEN (0x3f << 16)
135 #define CPSW_INTPRESCALE_MASK (0x7FF << 0) 135 #define CPSW_INTPRESCALE_MASK (0x7FF << 0)
136 #define CPSW_CMINTMAX_CNT 63 136 #define CPSW_CMINTMAX_CNT 63
137 #define CPSW_CMINTMIN_CNT 2 137 #define CPSW_CMINTMIN_CNT 2
138 #define CPSW_CMINTMAX_INTVL (1000 / CPSW_CMINTMIN_CNT) 138 #define CPSW_CMINTMAX_INTVL (1000 / CPSW_CMINTMIN_CNT)
139 #define CPSW_CMINTMIN_INTVL ((1000 / CPSW_CMINTMAX_CNT) + 1) 139 #define CPSW_CMINTMIN_INTVL ((1000 / CPSW_CMINTMAX_CNT) + 1)
140 140
141 #define cpsw_enable_irq(priv) \ 141 #define cpsw_enable_irq(priv) \
142 do { \ 142 do { \
143 u32 i; \ 143 u32 i; \
144 for (i = 0; i < priv->num_irqs; i++) \ 144 for (i = 0; i < priv->num_irqs; i++) \
145 enable_irq(priv->irqs_table[i]); \ 145 enable_irq(priv->irqs_table[i]); \
146 } while (0); 146 } while (0);
147 #define cpsw_disable_irq(priv) \ 147 #define cpsw_disable_irq(priv) \
148 do { \ 148 do { \
149 u32 i; \ 149 u32 i; \
150 for (i = 0; i < priv->num_irqs; i++) \ 150 for (i = 0; i < priv->num_irqs; i++) \
151 disable_irq_nosync(priv->irqs_table[i]); \ 151 disable_irq_nosync(priv->irqs_table[i]); \
152 } while (0); 152 } while (0);
153 153
154 #define cpsw_slave_index(priv) \ 154 #define cpsw_slave_index(priv) \
155 ((priv->data.dual_emac) ? priv->emac_port : \ 155 ((priv->data.dual_emac) ? priv->emac_port : \
156 priv->data.active_slave) 156 priv->data.active_slave)
157 157
158 static int debug_level; 158 static int debug_level;
159 module_param(debug_level, int, 0); 159 module_param(debug_level, int, 0);
160 MODULE_PARM_DESC(debug_level, "cpsw debug level (NETIF_MSG bits)"); 160 MODULE_PARM_DESC(debug_level, "cpsw debug level (NETIF_MSG bits)");
161 161
162 static int ale_ageout = 10; 162 static int ale_ageout = 10;
163 module_param(ale_ageout, int, 0); 163 module_param(ale_ageout, int, 0);
164 MODULE_PARM_DESC(ale_ageout, "cpsw ale ageout interval (seconds)"); 164 MODULE_PARM_DESC(ale_ageout, "cpsw ale ageout interval (seconds)");
165 165
166 static int rx_packet_max = CPSW_MAX_PACKET_SIZE; 166 static int rx_packet_max = CPSW_MAX_PACKET_SIZE;
167 module_param(rx_packet_max, int, 0); 167 module_param(rx_packet_max, int, 0);
168 MODULE_PARM_DESC(rx_packet_max, "maximum receive packet size (bytes)"); 168 MODULE_PARM_DESC(rx_packet_max, "maximum receive packet size (bytes)");
169 169
170 struct cpsw_wr_regs { 170 struct cpsw_wr_regs {
171 u32 id_ver; 171 u32 id_ver;
172 u32 soft_reset; 172 u32 soft_reset;
173 u32 control; 173 u32 control;
174 u32 int_control; 174 u32 int_control;
175 u32 rx_thresh_en; 175 u32 rx_thresh_en;
176 u32 rx_en; 176 u32 rx_en;
177 u32 tx_en; 177 u32 tx_en;
178 u32 misc_en; 178 u32 misc_en;
179 u32 mem_allign1[8]; 179 u32 mem_allign1[8];
180 u32 rx_thresh_stat; 180 u32 rx_thresh_stat;
181 u32 rx_stat; 181 u32 rx_stat;
182 u32 tx_stat; 182 u32 tx_stat;
183 u32 misc_stat; 183 u32 misc_stat;
184 u32 mem_allign2[8]; 184 u32 mem_allign2[8];
185 u32 rx_imax; 185 u32 rx_imax;
186 u32 tx_imax; 186 u32 tx_imax;
187 187
188 }; 188 };
189 189
190 struct cpsw_ss_regs { 190 struct cpsw_ss_regs {
191 u32 id_ver; 191 u32 id_ver;
192 u32 control; 192 u32 control;
193 u32 soft_reset; 193 u32 soft_reset;
194 u32 stat_port_en; 194 u32 stat_port_en;
195 u32 ptype; 195 u32 ptype;
196 u32 soft_idle; 196 u32 soft_idle;
197 u32 thru_rate; 197 u32 thru_rate;
198 u32 gap_thresh; 198 u32 gap_thresh;
199 u32 tx_start_wds; 199 u32 tx_start_wds;
200 u32 flow_control; 200 u32 flow_control;
201 u32 vlan_ltype; 201 u32 vlan_ltype;
202 u32 ts_ltype; 202 u32 ts_ltype;
203 u32 dlr_ltype; 203 u32 dlr_ltype;
204 }; 204 };
205 205
206 /* CPSW_PORT_V1 */ 206 /* CPSW_PORT_V1 */
207 #define CPSW1_MAX_BLKS 0x00 /* Maximum FIFO Blocks */ 207 #define CPSW1_MAX_BLKS 0x00 /* Maximum FIFO Blocks */
208 #define CPSW1_BLK_CNT 0x04 /* FIFO Block Usage Count (Read Only) */ 208 #define CPSW1_BLK_CNT 0x04 /* FIFO Block Usage Count (Read Only) */
209 #define CPSW1_TX_IN_CTL 0x08 /* Transmit FIFO Control */ 209 #define CPSW1_TX_IN_CTL 0x08 /* Transmit FIFO Control */
210 #define CPSW1_PORT_VLAN 0x0c /* VLAN Register */ 210 #define CPSW1_PORT_VLAN 0x0c /* VLAN Register */
211 #define CPSW1_TX_PRI_MAP 0x10 /* Tx Header Priority to Switch Pri Mapping */ 211 #define CPSW1_TX_PRI_MAP 0x10 /* Tx Header Priority to Switch Pri Mapping */
212 #define CPSW1_TS_CTL 0x14 /* Time Sync Control */ 212 #define CPSW1_TS_CTL 0x14 /* Time Sync Control */
213 #define CPSW1_TS_SEQ_LTYPE 0x18 /* Time Sync Sequence ID Offset and Msg Type */ 213 #define CPSW1_TS_SEQ_LTYPE 0x18 /* Time Sync Sequence ID Offset and Msg Type */
214 #define CPSW1_TS_VLAN 0x1c /* Time Sync VLAN1 and VLAN2 */ 214 #define CPSW1_TS_VLAN 0x1c /* Time Sync VLAN1 and VLAN2 */
215 215
216 /* CPSW_PORT_V2 */ 216 /* CPSW_PORT_V2 */
217 #define CPSW2_CONTROL 0x00 /* Control Register */ 217 #define CPSW2_CONTROL 0x00 /* Control Register */
218 #define CPSW2_MAX_BLKS 0x08 /* Maximum FIFO Blocks */ 218 #define CPSW2_MAX_BLKS 0x08 /* Maximum FIFO Blocks */
219 #define CPSW2_BLK_CNT 0x0c /* FIFO Block Usage Count (Read Only) */ 219 #define CPSW2_BLK_CNT 0x0c /* FIFO Block Usage Count (Read Only) */
220 #define CPSW2_TX_IN_CTL 0x10 /* Transmit FIFO Control */ 220 #define CPSW2_TX_IN_CTL 0x10 /* Transmit FIFO Control */
221 #define CPSW2_PORT_VLAN 0x14 /* VLAN Register */ 221 #define CPSW2_PORT_VLAN 0x14 /* VLAN Register */
222 #define CPSW2_TX_PRI_MAP 0x18 /* Tx Header Priority to Switch Pri Mapping */ 222 #define CPSW2_TX_PRI_MAP 0x18 /* Tx Header Priority to Switch Pri Mapping */
223 #define CPSW2_TS_SEQ_MTYPE 0x1c /* Time Sync Sequence ID Offset and Msg Type */ 223 #define CPSW2_TS_SEQ_MTYPE 0x1c /* Time Sync Sequence ID Offset and Msg Type */
224 224
225 /* CPSW_PORT_V1 and V2 */ 225 /* CPSW_PORT_V1 and V2 */
226 #define SA_LO 0x20 /* CPGMAC_SL Source Address Low */ 226 #define SA_LO 0x20 /* CPGMAC_SL Source Address Low */
227 #define SA_HI 0x24 /* CPGMAC_SL Source Address High */ 227 #define SA_HI 0x24 /* CPGMAC_SL Source Address High */
228 #define SEND_PERCENT 0x28 /* Transmit Queue Send Percentages */ 228 #define SEND_PERCENT 0x28 /* Transmit Queue Send Percentages */
229 229
230 /* CPSW_PORT_V2 only */ 230 /* CPSW_PORT_V2 only */
231 #define RX_DSCP_PRI_MAP0 0x30 /* Rx DSCP Priority to Rx Packet Mapping */ 231 #define RX_DSCP_PRI_MAP0 0x30 /* Rx DSCP Priority to Rx Packet Mapping */
232 #define RX_DSCP_PRI_MAP1 0x34 /* Rx DSCP Priority to Rx Packet Mapping */ 232 #define RX_DSCP_PRI_MAP1 0x34 /* Rx DSCP Priority to Rx Packet Mapping */
233 #define RX_DSCP_PRI_MAP2 0x38 /* Rx DSCP Priority to Rx Packet Mapping */ 233 #define RX_DSCP_PRI_MAP2 0x38 /* Rx DSCP Priority to Rx Packet Mapping */
234 #define RX_DSCP_PRI_MAP3 0x3c /* Rx DSCP Priority to Rx Packet Mapping */ 234 #define RX_DSCP_PRI_MAP3 0x3c /* Rx DSCP Priority to Rx Packet Mapping */
235 #define RX_DSCP_PRI_MAP4 0x40 /* Rx DSCP Priority to Rx Packet Mapping */ 235 #define RX_DSCP_PRI_MAP4 0x40 /* Rx DSCP Priority to Rx Packet Mapping */
236 #define RX_DSCP_PRI_MAP5 0x44 /* Rx DSCP Priority to Rx Packet Mapping */ 236 #define RX_DSCP_PRI_MAP5 0x44 /* Rx DSCP Priority to Rx Packet Mapping */
237 #define RX_DSCP_PRI_MAP6 0x48 /* Rx DSCP Priority to Rx Packet Mapping */ 237 #define RX_DSCP_PRI_MAP6 0x48 /* Rx DSCP Priority to Rx Packet Mapping */
238 #define RX_DSCP_PRI_MAP7 0x4c /* Rx DSCP Priority to Rx Packet Mapping */ 238 #define RX_DSCP_PRI_MAP7 0x4c /* Rx DSCP Priority to Rx Packet Mapping */
239 239
240 /* Bit definitions for the CPSW2_CONTROL register */ 240 /* Bit definitions for the CPSW2_CONTROL register */
241 #define PASS_PRI_TAGGED (1<<24) /* Pass Priority Tagged */ 241 #define PASS_PRI_TAGGED (1<<24) /* Pass Priority Tagged */
242 #define VLAN_LTYPE2_EN (1<<21) /* VLAN LTYPE 2 enable */ 242 #define VLAN_LTYPE2_EN (1<<21) /* VLAN LTYPE 2 enable */
243 #define VLAN_LTYPE1_EN (1<<20) /* VLAN LTYPE 1 enable */ 243 #define VLAN_LTYPE1_EN (1<<20) /* VLAN LTYPE 1 enable */
244 #define DSCP_PRI_EN (1<<16) /* DSCP Priority Enable */ 244 #define DSCP_PRI_EN (1<<16) /* DSCP Priority Enable */
245 #define TS_320 (1<<14) /* Time Sync Dest Port 320 enable */ 245 #define TS_320 (1<<14) /* Time Sync Dest Port 320 enable */
246 #define TS_319 (1<<13) /* Time Sync Dest Port 319 enable */ 246 #define TS_319 (1<<13) /* Time Sync Dest Port 319 enable */
247 #define TS_132 (1<<12) /* Time Sync Dest IP Addr 132 enable */ 247 #define TS_132 (1<<12) /* Time Sync Dest IP Addr 132 enable */
248 #define TS_131 (1<<11) /* Time Sync Dest IP Addr 131 enable */ 248 #define TS_131 (1<<11) /* Time Sync Dest IP Addr 131 enable */
249 #define TS_130 (1<<10) /* Time Sync Dest IP Addr 130 enable */ 249 #define TS_130 (1<<10) /* Time Sync Dest IP Addr 130 enable */
250 #define TS_129 (1<<9) /* Time Sync Dest IP Addr 129 enable */ 250 #define TS_129 (1<<9) /* Time Sync Dest IP Addr 129 enable */
251 #define TS_BIT8 (1<<8) /* ts_ttl_nonzero? */ 251 #define TS_BIT8 (1<<8) /* ts_ttl_nonzero? */
252 #define TS_ANNEX_D_EN (1<<4) /* Time Sync Annex D enable */ 252 #define TS_ANNEX_D_EN (1<<4) /* Time Sync Annex D enable */
253 #define TS_LTYPE2_EN (1<<3) /* Time Sync LTYPE 2 enable */ 253 #define TS_LTYPE2_EN (1<<3) /* Time Sync LTYPE 2 enable */
254 #define TS_LTYPE1_EN (1<<2) /* Time Sync LTYPE 1 enable */ 254 #define TS_LTYPE1_EN (1<<2) /* Time Sync LTYPE 1 enable */
255 #define TS_TX_EN (1<<1) /* Time Sync Transmit Enable */ 255 #define TS_TX_EN (1<<1) /* Time Sync Transmit Enable */
256 #define TS_RX_EN (1<<0) /* Time Sync Receive Enable */ 256 #define TS_RX_EN (1<<0) /* Time Sync Receive Enable */
257 257
258 #define CTRL_TS_BITS \ 258 #define CTRL_TS_BITS \
259 (TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 | TS_BIT8 | \ 259 (TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 | TS_BIT8 | \
260 TS_ANNEX_D_EN | TS_LTYPE1_EN) 260 TS_ANNEX_D_EN | TS_LTYPE1_EN)
261 261
262 #define CTRL_ALL_TS_MASK (CTRL_TS_BITS | TS_TX_EN | TS_RX_EN) 262 #define CTRL_ALL_TS_MASK (CTRL_TS_BITS | TS_TX_EN | TS_RX_EN)
263 #define CTRL_TX_TS_BITS (CTRL_TS_BITS | TS_TX_EN) 263 #define CTRL_TX_TS_BITS (CTRL_TS_BITS | TS_TX_EN)
264 #define CTRL_RX_TS_BITS (CTRL_TS_BITS | TS_RX_EN) 264 #define CTRL_RX_TS_BITS (CTRL_TS_BITS | TS_RX_EN)
265 265
266 /* Bit definitions for the CPSW2_TS_SEQ_MTYPE register */ 266 /* Bit definitions for the CPSW2_TS_SEQ_MTYPE register */
267 #define TS_SEQ_ID_OFFSET_SHIFT (16) /* Time Sync Sequence ID Offset */ 267 #define TS_SEQ_ID_OFFSET_SHIFT (16) /* Time Sync Sequence ID Offset */
268 #define TS_SEQ_ID_OFFSET_MASK (0x3f) 268 #define TS_SEQ_ID_OFFSET_MASK (0x3f)
269 #define TS_MSG_TYPE_EN_SHIFT (0) /* Time Sync Message Type Enable */ 269 #define TS_MSG_TYPE_EN_SHIFT (0) /* Time Sync Message Type Enable */
270 #define TS_MSG_TYPE_EN_MASK (0xffff) 270 #define TS_MSG_TYPE_EN_MASK (0xffff)
271 271
272 /* The PTP event messages - Sync, Delay_Req, Pdelay_Req, and Pdelay_Resp. */ 272 /* The PTP event messages - Sync, Delay_Req, Pdelay_Req, and Pdelay_Resp. */
273 #define EVENT_MSG_BITS ((1<<0) | (1<<1) | (1<<2) | (1<<3)) 273 #define EVENT_MSG_BITS ((1<<0) | (1<<1) | (1<<2) | (1<<3))
274 274
275 /* Bit definitions for the CPSW1_TS_CTL register */ 275 /* Bit definitions for the CPSW1_TS_CTL register */
276 #define CPSW_V1_TS_RX_EN BIT(0) 276 #define CPSW_V1_TS_RX_EN BIT(0)
277 #define CPSW_V1_TS_TX_EN BIT(4) 277 #define CPSW_V1_TS_TX_EN BIT(4)
278 #define CPSW_V1_MSG_TYPE_OFS 16 278 #define CPSW_V1_MSG_TYPE_OFS 16
279 279
280 /* Bit definitions for the CPSW1_TS_SEQ_LTYPE register */ 280 /* Bit definitions for the CPSW1_TS_SEQ_LTYPE register */
281 #define CPSW_V1_SEQ_ID_OFS_SHIFT 16 281 #define CPSW_V1_SEQ_ID_OFS_SHIFT 16
282 282
283 struct cpsw_host_regs { 283 struct cpsw_host_regs {
284 u32 max_blks; 284 u32 max_blks;
285 u32 blk_cnt; 285 u32 blk_cnt;
286 u32 tx_in_ctl; 286 u32 tx_in_ctl;
287 u32 port_vlan; 287 u32 port_vlan;
288 u32 tx_pri_map; 288 u32 tx_pri_map;
289 u32 cpdma_tx_pri_map; 289 u32 cpdma_tx_pri_map;
290 u32 cpdma_rx_chan_map; 290 u32 cpdma_rx_chan_map;
291 }; 291 };
292 292
293 struct cpsw_sliver_regs { 293 struct cpsw_sliver_regs {
294 u32 id_ver; 294 u32 id_ver;
295 u32 mac_control; 295 u32 mac_control;
296 u32 mac_status; 296 u32 mac_status;
297 u32 soft_reset; 297 u32 soft_reset;
298 u32 rx_maxlen; 298 u32 rx_maxlen;
299 u32 __reserved_0; 299 u32 __reserved_0;
300 u32 rx_pause; 300 u32 rx_pause;
301 u32 tx_pause; 301 u32 tx_pause;
302 u32 __reserved_1; 302 u32 __reserved_1;
303 u32 rx_pri_map; 303 u32 rx_pri_map;
304 }; 304 };
305 305
306 struct cpsw_hw_stats { 306 struct cpsw_hw_stats {
307 u32 rxgoodframes; 307 u32 rxgoodframes;
308 u32 rxbroadcastframes; 308 u32 rxbroadcastframes;
309 u32 rxmulticastframes; 309 u32 rxmulticastframes;
310 u32 rxpauseframes; 310 u32 rxpauseframes;
311 u32 rxcrcerrors; 311 u32 rxcrcerrors;
312 u32 rxaligncodeerrors; 312 u32 rxaligncodeerrors;
313 u32 rxoversizedframes; 313 u32 rxoversizedframes;
314 u32 rxjabberframes; 314 u32 rxjabberframes;
315 u32 rxundersizedframes; 315 u32 rxundersizedframes;
316 u32 rxfragments; 316 u32 rxfragments;
317 u32 __pad_0[2]; 317 u32 __pad_0[2];
318 u32 rxoctets; 318 u32 rxoctets;
319 u32 txgoodframes; 319 u32 txgoodframes;
320 u32 txbroadcastframes; 320 u32 txbroadcastframes;
321 u32 txmulticastframes; 321 u32 txmulticastframes;
322 u32 txpauseframes; 322 u32 txpauseframes;
323 u32 txdeferredframes; 323 u32 txdeferredframes;
324 u32 txcollisionframes; 324 u32 txcollisionframes;
325 u32 txsinglecollframes; 325 u32 txsinglecollframes;
326 u32 txmultcollframes; 326 u32 txmultcollframes;
327 u32 txexcessivecollisions; 327 u32 txexcessivecollisions;
328 u32 txlatecollisions; 328 u32 txlatecollisions;
329 u32 txunderrun; 329 u32 txunderrun;
330 u32 txcarriersenseerrors; 330 u32 txcarriersenseerrors;
331 u32 txoctets; 331 u32 txoctets;
332 u32 octetframes64; 332 u32 octetframes64;
333 u32 octetframes65t127; 333 u32 octetframes65t127;
334 u32 octetframes128t255; 334 u32 octetframes128t255;
335 u32 octetframes256t511; 335 u32 octetframes256t511;
336 u32 octetframes512t1023; 336 u32 octetframes512t1023;
337 u32 octetframes1024tup; 337 u32 octetframes1024tup;
338 u32 netoctets; 338 u32 netoctets;
339 u32 rxsofoverruns; 339 u32 rxsofoverruns;
340 u32 rxmofoverruns; 340 u32 rxmofoverruns;
341 u32 rxdmaoverruns; 341 u32 rxdmaoverruns;
342 }; 342 };
343 343
344 struct cpsw_slave { 344 struct cpsw_slave {
345 void __iomem *regs; 345 void __iomem *regs;
346 struct cpsw_sliver_regs __iomem *sliver; 346 struct cpsw_sliver_regs __iomem *sliver;
347 int slave_num; 347 int slave_num;
348 u32 mac_control; 348 u32 mac_control;
349 struct cpsw_slave_data *data; 349 struct cpsw_slave_data *data;
350 struct phy_device *phy; 350 struct phy_device *phy;
351 struct net_device *ndev; 351 struct net_device *ndev;
352 u32 port_vlan; 352 u32 port_vlan;
353 u32 open_stat; 353 u32 open_stat;
354 }; 354 };
355 355
356 static inline u32 slave_read(struct cpsw_slave *slave, u32 offset) 356 static inline u32 slave_read(struct cpsw_slave *slave, u32 offset)
357 { 357 {
358 return __raw_readl(slave->regs + offset); 358 return __raw_readl(slave->regs + offset);
359 } 359 }
360 360
361 static inline void slave_write(struct cpsw_slave *slave, u32 val, u32 offset) 361 static inline void slave_write(struct cpsw_slave *slave, u32 val, u32 offset)
362 { 362 {
363 __raw_writel(val, slave->regs + offset); 363 __raw_writel(val, slave->regs + offset);
364 } 364 }
365 365
366 struct cpsw_priv { 366 struct cpsw_priv {
367 spinlock_t lock; 367 spinlock_t lock;
368 struct platform_device *pdev; 368 struct platform_device *pdev;
369 struct net_device *ndev; 369 struct net_device *ndev;
370 struct napi_struct napi; 370 struct napi_struct napi;
371 struct napi_struct napi_tx;
372 bool irq_enabled;
373 bool irq_tx_enabled;
371 struct device *dev; 374 struct device *dev;
372 struct cpsw_platform_data data; 375 struct cpsw_platform_data data;
373 struct cpsw_ss_regs __iomem *regs; 376 struct cpsw_ss_regs __iomem *regs;
374 struct cpsw_wr_regs __iomem *wr_regs; 377 struct cpsw_wr_regs __iomem *wr_regs;
375 u8 __iomem *hw_stats; 378 u8 __iomem *hw_stats;
376 struct cpsw_host_regs __iomem *host_port_regs; 379 struct cpsw_host_regs __iomem *host_port_regs;
377 u32 msg_enable; 380 u32 msg_enable;
378 u32 version; 381 u32 version;
379 u32 coal_intvl; 382 u32 coal_intvl;
380 u32 bus_freq_mhz; 383 u32 bus_freq_mhz;
381 struct net_device_stats stats; 384 struct net_device_stats stats;
382 int rx_packet_max; 385 int rx_packet_max;
383 int host_port; 386 int host_port;
384 struct clk *clk; 387 struct clk *clk;
385 u8 mac_addr[ETH_ALEN]; 388 u8 mac_addr[ETH_ALEN];
386 struct cpsw_slave *slaves; 389 struct cpsw_slave *slaves;
387 struct cpdma_ctlr *dma; 390 struct cpdma_ctlr *dma;
388 struct cpdma_chan *txch, *rxch; 391 struct cpdma_chan *txch, *rxch;
389 struct cpsw_ale *ale; 392 struct cpsw_ale *ale;
390 /* snapshot of IRQ numbers */ 393 /* snapshot of IRQ numbers */
391 u32 irqs_table[4]; 394 u32 irqs_table[4];
392 u32 num_irqs; 395 u32 num_irqs;
393 bool irq_enabled;
394 struct cpts *cpts; 396 struct cpts *cpts;
395 u32 emac_port; 397 u32 emac_port;
396 }; 398 };
397 399
398 struct cpsw_stats { 400 struct cpsw_stats {
399 char stat_string[ETH_GSTRING_LEN]; 401 char stat_string[ETH_GSTRING_LEN];
400 int type; 402 int type;
401 int sizeof_stat; 403 int sizeof_stat;
402 int stat_offset; 404 int stat_offset;
403 }; 405 };
404 406
405 enum { 407 enum {
406 CPSW_STATS, 408 CPSW_STATS,
407 CPDMA_RX_STATS, 409 CPDMA_RX_STATS,
408 CPDMA_TX_STATS, 410 CPDMA_TX_STATS,
409 }; 411 };
410 412
411 #define CPSW_STAT(m) CPSW_STATS, \ 413 #define CPSW_STAT(m) CPSW_STATS, \
412 sizeof(((struct cpsw_hw_stats *)0)->m), \ 414 sizeof(((struct cpsw_hw_stats *)0)->m), \
413 offsetof(struct cpsw_hw_stats, m) 415 offsetof(struct cpsw_hw_stats, m)
414 #define CPDMA_RX_STAT(m) CPDMA_RX_STATS, \ 416 #define CPDMA_RX_STAT(m) CPDMA_RX_STATS, \
415 sizeof(((struct cpdma_chan_stats *)0)->m), \ 417 sizeof(((struct cpdma_chan_stats *)0)->m), \
416 offsetof(struct cpdma_chan_stats, m) 418 offsetof(struct cpdma_chan_stats, m)
417 #define CPDMA_TX_STAT(m) CPDMA_TX_STATS, \ 419 #define CPDMA_TX_STAT(m) CPDMA_TX_STATS, \
418 sizeof(((struct cpdma_chan_stats *)0)->m), \ 420 sizeof(((struct cpdma_chan_stats *)0)->m), \
419 offsetof(struct cpdma_chan_stats, m) 421 offsetof(struct cpdma_chan_stats, m)
420 422
421 static const struct cpsw_stats cpsw_gstrings_stats[] = { 423 static const struct cpsw_stats cpsw_gstrings_stats[] = {
422 { "Good Rx Frames", CPSW_STAT(rxgoodframes) }, 424 { "Good Rx Frames", CPSW_STAT(rxgoodframes) },
423 { "Broadcast Rx Frames", CPSW_STAT(rxbroadcastframes) }, 425 { "Broadcast Rx Frames", CPSW_STAT(rxbroadcastframes) },
424 { "Multicast Rx Frames", CPSW_STAT(rxmulticastframes) }, 426 { "Multicast Rx Frames", CPSW_STAT(rxmulticastframes) },
425 { "Pause Rx Frames", CPSW_STAT(rxpauseframes) }, 427 { "Pause Rx Frames", CPSW_STAT(rxpauseframes) },
426 { "Rx CRC Errors", CPSW_STAT(rxcrcerrors) }, 428 { "Rx CRC Errors", CPSW_STAT(rxcrcerrors) },
427 { "Rx Align/Code Errors", CPSW_STAT(rxaligncodeerrors) }, 429 { "Rx Align/Code Errors", CPSW_STAT(rxaligncodeerrors) },
428 { "Oversize Rx Frames", CPSW_STAT(rxoversizedframes) }, 430 { "Oversize Rx Frames", CPSW_STAT(rxoversizedframes) },
429 { "Rx Jabbers", CPSW_STAT(rxjabberframes) }, 431 { "Rx Jabbers", CPSW_STAT(rxjabberframes) },
430 { "Undersize (Short) Rx Frames", CPSW_STAT(rxundersizedframes) }, 432 { "Undersize (Short) Rx Frames", CPSW_STAT(rxundersizedframes) },
431 { "Rx Fragments", CPSW_STAT(rxfragments) }, 433 { "Rx Fragments", CPSW_STAT(rxfragments) },
432 { "Rx Octets", CPSW_STAT(rxoctets) }, 434 { "Rx Octets", CPSW_STAT(rxoctets) },
433 { "Good Tx Frames", CPSW_STAT(txgoodframes) }, 435 { "Good Tx Frames", CPSW_STAT(txgoodframes) },
434 { "Broadcast Tx Frames", CPSW_STAT(txbroadcastframes) }, 436 { "Broadcast Tx Frames", CPSW_STAT(txbroadcastframes) },
435 { "Multicast Tx Frames", CPSW_STAT(txmulticastframes) }, 437 { "Multicast Tx Frames", CPSW_STAT(txmulticastframes) },
436 { "Pause Tx Frames", CPSW_STAT(txpauseframes) }, 438 { "Pause Tx Frames", CPSW_STAT(txpauseframes) },
437 { "Deferred Tx Frames", CPSW_STAT(txdeferredframes) }, 439 { "Deferred Tx Frames", CPSW_STAT(txdeferredframes) },
438 { "Collisions", CPSW_STAT(txcollisionframes) }, 440 { "Collisions", CPSW_STAT(txcollisionframes) },
439 { "Single Collision Tx Frames", CPSW_STAT(txsinglecollframes) }, 441 { "Single Collision Tx Frames", CPSW_STAT(txsinglecollframes) },
440 { "Multiple Collision Tx Frames", CPSW_STAT(txmultcollframes) }, 442 { "Multiple Collision Tx Frames", CPSW_STAT(txmultcollframes) },
441 { "Excessive Collisions", CPSW_STAT(txexcessivecollisions) }, 443 { "Excessive Collisions", CPSW_STAT(txexcessivecollisions) },
442 { "Late Collisions", CPSW_STAT(txlatecollisions) }, 444 { "Late Collisions", CPSW_STAT(txlatecollisions) },
443 { "Tx Underrun", CPSW_STAT(txunderrun) }, 445 { "Tx Underrun", CPSW_STAT(txunderrun) },
444 { "Carrier Sense Errors", CPSW_STAT(txcarriersenseerrors) }, 446 { "Carrier Sense Errors", CPSW_STAT(txcarriersenseerrors) },
445 { "Tx Octets", CPSW_STAT(txoctets) }, 447 { "Tx Octets", CPSW_STAT(txoctets) },
446 { "Rx + Tx 64 Octet Frames", CPSW_STAT(octetframes64) }, 448 { "Rx + Tx 64 Octet Frames", CPSW_STAT(octetframes64) },
447 { "Rx + Tx 65-127 Octet Frames", CPSW_STAT(octetframes65t127) }, 449 { "Rx + Tx 65-127 Octet Frames", CPSW_STAT(octetframes65t127) },
448 { "Rx + Tx 128-255 Octet Frames", CPSW_STAT(octetframes128t255) }, 450 { "Rx + Tx 128-255 Octet Frames", CPSW_STAT(octetframes128t255) },
449 { "Rx + Tx 256-511 Octet Frames", CPSW_STAT(octetframes256t511) }, 451 { "Rx + Tx 256-511 Octet Frames", CPSW_STAT(octetframes256t511) },
450 { "Rx + Tx 512-1023 Octet Frames", CPSW_STAT(octetframes512t1023) }, 452 { "Rx + Tx 512-1023 Octet Frames", CPSW_STAT(octetframes512t1023) },
451 { "Rx + Tx 1024-Up Octet Frames", CPSW_STAT(octetframes1024tup) }, 453 { "Rx + Tx 1024-Up Octet Frames", CPSW_STAT(octetframes1024tup) },
452 { "Net Octets", CPSW_STAT(netoctets) }, 454 { "Net Octets", CPSW_STAT(netoctets) },
453 { "Rx Start of Frame Overruns", CPSW_STAT(rxsofoverruns) }, 455 { "Rx Start of Frame Overruns", CPSW_STAT(rxsofoverruns) },
454 { "Rx Middle of Frame Overruns", CPSW_STAT(rxmofoverruns) }, 456 { "Rx Middle of Frame Overruns", CPSW_STAT(rxmofoverruns) },
455 { "Rx DMA Overruns", CPSW_STAT(rxdmaoverruns) }, 457 { "Rx DMA Overruns", CPSW_STAT(rxdmaoverruns) },
456 { "Rx DMA chan: head_enqueue", CPDMA_RX_STAT(head_enqueue) }, 458 { "Rx DMA chan: head_enqueue", CPDMA_RX_STAT(head_enqueue) },
457 { "Rx DMA chan: tail_enqueue", CPDMA_RX_STAT(tail_enqueue) }, 459 { "Rx DMA chan: tail_enqueue", CPDMA_RX_STAT(tail_enqueue) },
458 { "Rx DMA chan: pad_enqueue", CPDMA_RX_STAT(pad_enqueue) }, 460 { "Rx DMA chan: pad_enqueue", CPDMA_RX_STAT(pad_enqueue) },
459 { "Rx DMA chan: misqueued", CPDMA_RX_STAT(misqueued) }, 461 { "Rx DMA chan: misqueued", CPDMA_RX_STAT(misqueued) },
460 { "Rx DMA chan: desc_alloc_fail", CPDMA_RX_STAT(desc_alloc_fail) }, 462 { "Rx DMA chan: desc_alloc_fail", CPDMA_RX_STAT(desc_alloc_fail) },
461 { "Rx DMA chan: pad_alloc_fail", CPDMA_RX_STAT(pad_alloc_fail) }, 463 { "Rx DMA chan: pad_alloc_fail", CPDMA_RX_STAT(pad_alloc_fail) },
462 { "Rx DMA chan: runt_receive_buf", CPDMA_RX_STAT(runt_receive_buff) }, 464 { "Rx DMA chan: runt_receive_buf", CPDMA_RX_STAT(runt_receive_buff) },
463 { "Rx DMA chan: runt_transmit_buf", CPDMA_RX_STAT(runt_transmit_buff) }, 465 { "Rx DMA chan: runt_transmit_buf", CPDMA_RX_STAT(runt_transmit_buff) },
464 { "Rx DMA chan: empty_dequeue", CPDMA_RX_STAT(empty_dequeue) }, 466 { "Rx DMA chan: empty_dequeue", CPDMA_RX_STAT(empty_dequeue) },
465 { "Rx DMA chan: busy_dequeue", CPDMA_RX_STAT(busy_dequeue) }, 467 { "Rx DMA chan: busy_dequeue", CPDMA_RX_STAT(busy_dequeue) },
466 { "Rx DMA chan: good_dequeue", CPDMA_RX_STAT(good_dequeue) }, 468 { "Rx DMA chan: good_dequeue", CPDMA_RX_STAT(good_dequeue) },
467 { "Rx DMA chan: requeue", CPDMA_RX_STAT(requeue) }, 469 { "Rx DMA chan: requeue", CPDMA_RX_STAT(requeue) },
468 { "Rx DMA chan: teardown_dequeue", CPDMA_RX_STAT(teardown_dequeue) }, 470 { "Rx DMA chan: teardown_dequeue", CPDMA_RX_STAT(teardown_dequeue) },
469 { "Tx DMA chan: head_enqueue", CPDMA_TX_STAT(head_enqueue) }, 471 { "Tx DMA chan: head_enqueue", CPDMA_TX_STAT(head_enqueue) },
470 { "Tx DMA chan: tail_enqueue", CPDMA_TX_STAT(tail_enqueue) }, 472 { "Tx DMA chan: tail_enqueue", CPDMA_TX_STAT(tail_enqueue) },
471 { "Tx DMA chan: pad_enqueue", CPDMA_TX_STAT(pad_enqueue) }, 473 { "Tx DMA chan: pad_enqueue", CPDMA_TX_STAT(pad_enqueue) },
472 { "Tx DMA chan: misqueued", CPDMA_TX_STAT(misqueued) }, 474 { "Tx DMA chan: misqueued", CPDMA_TX_STAT(misqueued) },
473 { "Tx DMA chan: desc_alloc_fail", CPDMA_TX_STAT(desc_alloc_fail) }, 475 { "Tx DMA chan: desc_alloc_fail", CPDMA_TX_STAT(desc_alloc_fail) },
474 { "Tx DMA chan: pad_alloc_fail", CPDMA_TX_STAT(pad_alloc_fail) }, 476 { "Tx DMA chan: pad_alloc_fail", CPDMA_TX_STAT(pad_alloc_fail) },
475 { "Tx DMA chan: runt_receive_buf", CPDMA_TX_STAT(runt_receive_buff) }, 477 { "Tx DMA chan: runt_receive_buf", CPDMA_TX_STAT(runt_receive_buff) },
476 { "Tx DMA chan: runt_transmit_buf", CPDMA_TX_STAT(runt_transmit_buff) }, 478 { "Tx DMA chan: runt_transmit_buf", CPDMA_TX_STAT(runt_transmit_buff) },
477 { "Tx DMA chan: empty_dequeue", CPDMA_TX_STAT(empty_dequeue) }, 479 { "Tx DMA chan: empty_dequeue", CPDMA_TX_STAT(empty_dequeue) },
478 { "Tx DMA chan: busy_dequeue", CPDMA_TX_STAT(busy_dequeue) }, 480 { "Tx DMA chan: busy_dequeue", CPDMA_TX_STAT(busy_dequeue) },
479 { "Tx DMA chan: good_dequeue", CPDMA_TX_STAT(good_dequeue) }, 481 { "Tx DMA chan: good_dequeue", CPDMA_TX_STAT(good_dequeue) },
480 { "Tx DMA chan: requeue", CPDMA_TX_STAT(requeue) }, 482 { "Tx DMA chan: requeue", CPDMA_TX_STAT(requeue) },
481 { "Tx DMA chan: teardown_dequeue", CPDMA_TX_STAT(teardown_dequeue) }, 483 { "Tx DMA chan: teardown_dequeue", CPDMA_TX_STAT(teardown_dequeue) },
482 }; 484 };
483 485
484 #define CPSW_STATS_LEN ARRAY_SIZE(cpsw_gstrings_stats) 486 #define CPSW_STATS_LEN ARRAY_SIZE(cpsw_gstrings_stats)
485 487
486 #define napi_to_priv(napi) container_of(napi, struct cpsw_priv, napi) 488 #define napi_to_priv(napi) container_of(napi, struct cpsw_priv, napi)
489 #define napi_tx_to_priv(napi) container_of(napi, struct cpsw_priv, napi_tx)
487 #define for_each_slave(priv, func, arg...) \ 490 #define for_each_slave(priv, func, arg...) \
488 do { \ 491 do { \
489 struct cpsw_slave *slave; \ 492 struct cpsw_slave *slave; \
490 int n; \ 493 int n; \
491 if (priv->data.dual_emac) \ 494 if (priv->data.dual_emac) \
492 (func)((priv)->slaves + priv->emac_port, ##arg);\ 495 (func)((priv)->slaves + priv->emac_port, ##arg);\
493 else \ 496 else \
494 for (n = (priv)->data.slaves, \ 497 for (n = (priv)->data.slaves, \
495 slave = (priv)->slaves; \ 498 slave = (priv)->slaves; \
496 n; n--) \ 499 n; n--) \
497 (func)(slave++, ##arg); \ 500 (func)(slave++, ##arg); \
498 } while (0) 501 } while (0)
499 #define cpsw_get_slave_ndev(priv, __slave_no__) \ 502 #define cpsw_get_slave_ndev(priv, __slave_no__) \
500 (priv->slaves[__slave_no__].ndev) 503 (priv->slaves[__slave_no__].ndev)
501 #define cpsw_get_slave_priv(priv, __slave_no__) \ 504 #define cpsw_get_slave_priv(priv, __slave_no__) \
502 ((priv->slaves[__slave_no__].ndev) ? \ 505 ((priv->slaves[__slave_no__].ndev) ? \
503 netdev_priv(priv->slaves[__slave_no__].ndev) : NULL) \ 506 netdev_priv(priv->slaves[__slave_no__].ndev) : NULL) \
504 507
505 #define cpsw_dual_emac_src_port_detect(status, priv, ndev, skb) \ 508 #define cpsw_dual_emac_src_port_detect(status, priv, ndev, skb) \
506 do { \ 509 do { \
507 if (!priv->data.dual_emac) \ 510 if (!priv->data.dual_emac) \
508 break; \ 511 break; \
509 if (CPDMA_RX_SOURCE_PORT(status) == 1) { \ 512 if (CPDMA_RX_SOURCE_PORT(status) == 1) { \
510 ndev = cpsw_get_slave_ndev(priv, 0); \ 513 ndev = cpsw_get_slave_ndev(priv, 0); \
511 priv = netdev_priv(ndev); \ 514 priv = netdev_priv(ndev); \
512 skb->dev = ndev; \ 515 skb->dev = ndev; \
513 } else if (CPDMA_RX_SOURCE_PORT(status) == 2) { \ 516 } else if (CPDMA_RX_SOURCE_PORT(status) == 2) { \
514 ndev = cpsw_get_slave_ndev(priv, 1); \ 517 ndev = cpsw_get_slave_ndev(priv, 1); \
515 priv = netdev_priv(ndev); \ 518 priv = netdev_priv(ndev); \
516 skb->dev = ndev; \ 519 skb->dev = ndev; \
517 } \ 520 } \
518 } while (0) 521 } while (0)
519 #define cpsw_add_mcast(priv, addr) \ 522 #define cpsw_add_mcast(priv, addr) \
520 do { \ 523 do { \
521 if (priv->data.dual_emac) { \ 524 if (priv->data.dual_emac) { \
522 struct cpsw_slave *slave = priv->slaves + \ 525 struct cpsw_slave *slave = priv->slaves + \
523 priv->emac_port; \ 526 priv->emac_port; \
524 int slave_port = cpsw_get_slave_port(priv, \ 527 int slave_port = cpsw_get_slave_port(priv, \
525 slave->slave_num); \ 528 slave->slave_num); \
526 cpsw_ale_add_mcast(priv->ale, addr, \ 529 cpsw_ale_add_mcast(priv->ale, addr, \
527 1 << slave_port | 1 << priv->host_port, \ 530 1 << slave_port | 1 << priv->host_port, \
528 ALE_VLAN, slave->port_vlan, 0); \ 531 ALE_VLAN, slave->port_vlan, 0); \
529 } else { \ 532 } else { \
530 cpsw_ale_add_mcast(priv->ale, addr, \ 533 cpsw_ale_add_mcast(priv->ale, addr, \
531 ALE_ALL_PORTS << priv->host_port, \ 534 ALE_ALL_PORTS << priv->host_port, \
532 0, 0, 0); \ 535 0, 0, 0); \
533 } \ 536 } \
534 } while (0) 537 } while (0)
535 538
536 static inline int cpsw_get_slave_port(struct cpsw_priv *priv, u32 slave_num) 539 static inline int cpsw_get_slave_port(struct cpsw_priv *priv, u32 slave_num)
537 { 540 {
538 if (priv->host_port == 0) 541 if (priv->host_port == 0)
539 return slave_num + 1; 542 return slave_num + 1;
540 else 543 else
541 return slave_num; 544 return slave_num;
542 } 545 }
543 546
544 static void cpsw_set_promiscious(struct net_device *ndev, bool enable) 547 static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
545 { 548 {
546 struct cpsw_priv *priv = netdev_priv(ndev); 549 struct cpsw_priv *priv = netdev_priv(ndev);
547 struct cpsw_ale *ale = priv->ale; 550 struct cpsw_ale *ale = priv->ale;
548 551
549 if (priv->data.dual_emac) { 552 if (priv->data.dual_emac) {
550 /* Enabling promiscuous mode for one interface will be 553 /* Enabling promiscuous mode for one interface will be
551 * common for both the interface as the interface shares 554 * common for both the interface as the interface shares
552 * the same hardware resource. 555 * the same hardware resource.
553 */ 556 */
554 if (!enable && ((priv->slaves[0].ndev->flags & IFF_PROMISC) || 557 if (!enable && ((priv->slaves[0].ndev->flags & IFF_PROMISC) ||
555 (priv->slaves[1].ndev->flags & IFF_PROMISC))) { 558 (priv->slaves[1].ndev->flags & IFF_PROMISC))) {
556 enable = true; 559 enable = true;
557 dev_err(&ndev->dev, "promiscuity not disabled as the other interface is still in promiscuity mode\n"); 560 dev_err(&ndev->dev, "promiscuity not disabled as the other interface is still in promiscuity mode\n");
558 } 561 }
559 562
560 if (enable) { 563 if (enable) {
561 /* Enable Bypass */ 564 /* Enable Bypass */
562 cpsw_ale_control_set(ale, 0, ALE_BYPASS, 1); 565 cpsw_ale_control_set(ale, 0, ALE_BYPASS, 1);
563 566
564 dev_dbg(&ndev->dev, "promiscuity enabled\n"); 567 dev_dbg(&ndev->dev, "promiscuity enabled\n");
565 } else { 568 } else {
566 /* Disable Bypass */ 569 /* Disable Bypass */
567 cpsw_ale_control_set(ale, 0, ALE_BYPASS, 0); 570 cpsw_ale_control_set(ale, 0, ALE_BYPASS, 0);
568 dev_dbg(&ndev->dev, "promiscuity disabled\n"); 571 dev_dbg(&ndev->dev, "promiscuity disabled\n");
569 } 572 }
570 } else { 573 } else {
571 int i; 574 int i;
572 575
573 if (enable) { 576 if (enable) {
574 unsigned long timeout = jiffies + HZ; 577 unsigned long timeout = jiffies + HZ;
575 578
576 /* Disable Learn for all ports */ 579 /* Disable Learn for all ports */
577 for (i = 0; i <= priv->data.slaves; i++) { 580 for (i = 0; i <= priv->data.slaves; i++) {
578 cpsw_ale_control_set(ale, i, 581 cpsw_ale_control_set(ale, i,
579 ALE_PORT_NOLEARN, 1); 582 ALE_PORT_NOLEARN, 1);
580 cpsw_ale_control_set(ale, i, 583 cpsw_ale_control_set(ale, i,
581 ALE_PORT_NO_SA_UPDATE, 1); 584 ALE_PORT_NO_SA_UPDATE, 1);
582 } 585 }
583 586
584 /* Clear All Untouched entries */ 587 /* Clear All Untouched entries */
585 cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1); 588 cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1);
586 do { 589 do {
587 cpu_relax(); 590 cpu_relax();
588 if (cpsw_ale_control_get(ale, 0, ALE_AGEOUT)) 591 if (cpsw_ale_control_get(ale, 0, ALE_AGEOUT))
589 break; 592 break;
590 } while (time_after(timeout, jiffies)); 593 } while (time_after(timeout, jiffies));
591 cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1); 594 cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1);
592 595
593 /* Clear all mcast from ALE */ 596 /* Clear all mcast from ALE */
594 cpsw_ale_flush_multicast(ale, 597 cpsw_ale_flush_multicast(ale,
595 ALE_ALL_PORTS << priv->host_port); 598 ALE_ALL_PORTS << priv->host_port);
596 599
597 /* Flood All Unicast Packets to Host port */ 600 /* Flood All Unicast Packets to Host port */
598 cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 1); 601 cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 1);
599 dev_dbg(&ndev->dev, "promiscuity enabled\n"); 602 dev_dbg(&ndev->dev, "promiscuity enabled\n");
600 } else { 603 } else {
601 /* Flood All Unicast Packets to Host port */ 604 /* Flood All Unicast Packets to Host port */
602 cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 0); 605 cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 0);
603 606
604 /* Enable Learn for all ports */ 607 /* Enable Learn for all ports */
605 for (i = 0; i <= priv->data.slaves; i++) { 608 for (i = 0; i <= priv->data.slaves; i++) {
606 cpsw_ale_control_set(ale, i, 609 cpsw_ale_control_set(ale, i,
607 ALE_PORT_NOLEARN, 0); 610 ALE_PORT_NOLEARN, 0);
608 cpsw_ale_control_set(ale, i, 611 cpsw_ale_control_set(ale, i,
609 ALE_PORT_NO_SA_UPDATE, 0); 612 ALE_PORT_NO_SA_UPDATE, 0);
610 } 613 }
611 dev_dbg(&ndev->dev, "promiscuity disabled\n"); 614 dev_dbg(&ndev->dev, "promiscuity disabled\n");
612 } 615 }
613 } 616 }
614 } 617 }
615 618
616 static void cpsw_ndo_set_rx_mode(struct net_device *ndev) 619 static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
617 { 620 {
618 struct cpsw_priv *priv = netdev_priv(ndev); 621 struct cpsw_priv *priv = netdev_priv(ndev);
619 622
620 if (ndev->flags & IFF_PROMISC) { 623 if (ndev->flags & IFF_PROMISC) {
621 /* Enable promiscuous mode */ 624 /* Enable promiscuous mode */
622 cpsw_set_promiscious(ndev, true); 625 cpsw_set_promiscious(ndev, true);
623 return; 626 return;
624 } else { 627 } else {
625 /* Disable promiscuous mode */ 628 /* Disable promiscuous mode */
626 cpsw_set_promiscious(ndev, false); 629 cpsw_set_promiscious(ndev, false);
627 } 630 }
628 631
629 /* Clear all mcast from ALE */ 632 /* Clear all mcast from ALE */
630 cpsw_ale_flush_multicast(priv->ale, ALE_ALL_PORTS << priv->host_port); 633 cpsw_ale_flush_multicast(priv->ale, ALE_ALL_PORTS << priv->host_port);
631 634
632 if (!netdev_mc_empty(ndev)) { 635 if (!netdev_mc_empty(ndev)) {
633 struct netdev_hw_addr *ha; 636 struct netdev_hw_addr *ha;
634 637
635 /* program multicast address list into ALE register */ 638 /* program multicast address list into ALE register */
636 netdev_for_each_mc_addr(ha, ndev) { 639 netdev_for_each_mc_addr(ha, ndev) {
637 cpsw_add_mcast(priv, (u8 *)ha->addr); 640 cpsw_add_mcast(priv, (u8 *)ha->addr);
638 } 641 }
639 } 642 }
640 } 643 }
641 644
642 static void cpsw_intr_enable(struct cpsw_priv *priv) 645 static void cpsw_intr_enable(struct cpsw_priv *priv)
643 { 646 {
644 __raw_writel(0xFF, &priv->wr_regs->tx_en); 647 __raw_writel(0xFF, &priv->wr_regs->tx_en);
645 __raw_writel(0xFF, &priv->wr_regs->rx_en); 648 __raw_writel(0xFF, &priv->wr_regs->rx_en);
646 649
647 cpdma_ctlr_int_ctrl(priv->dma, true); 650 cpdma_ctlr_int_ctrl(priv->dma, true);
648 return; 651 return;
649 } 652 }
650 653
651 static void cpsw_intr_disable(struct cpsw_priv *priv) 654 static void cpsw_intr_disable(struct cpsw_priv *priv)
652 { 655 {
653 __raw_writel(0, &priv->wr_regs->tx_en); 656 __raw_writel(0, &priv->wr_regs->tx_en);
654 __raw_writel(0, &priv->wr_regs->rx_en); 657 __raw_writel(0, &priv->wr_regs->rx_en);
655 658
656 cpdma_ctlr_int_ctrl(priv->dma, false); 659 cpdma_ctlr_int_ctrl(priv->dma, false);
657 return; 660 return;
658 } 661 }
659 662
660 void cpsw_tx_handler(void *token, int len, int status) 663 void cpsw_tx_handler(void *token, int len, int status)
661 { 664 {
662 struct sk_buff *skb = token; 665 struct sk_buff *skb = token;
663 struct net_device *ndev = skb->dev; 666 struct net_device *ndev = skb->dev;
664 struct cpsw_priv *priv = netdev_priv(ndev); 667 struct cpsw_priv *priv = netdev_priv(ndev);
665 668
666 /* Check whether the queue is stopped due to stalled tx dma, if the 669 /* Check whether the queue is stopped due to stalled tx dma, if the
667 * queue is stopped then start the queue as we have free desc for tx 670 * queue is stopped then start the queue as we have free desc for tx
668 */ 671 */
669 if (unlikely(netif_queue_stopped(ndev))) 672 if (unlikely(netif_queue_stopped(ndev)))
670 netif_wake_queue(ndev); 673 netif_wake_queue(ndev);
671 cpts_tx_timestamp(priv->cpts, skb); 674 cpts_tx_timestamp(priv->cpts, skb);
672 priv->stats.tx_packets++; 675 priv->stats.tx_packets++;
673 priv->stats.tx_bytes += len; 676 priv->stats.tx_bytes += len;
674 dev_kfree_skb_any(skb); 677 dev_kfree_skb_any(skb);
675 } 678 }
676 679
677 void cpsw_rx_handler(void *token, int len, int status) 680 void cpsw_rx_handler(void *token, int len, int status)
678 { 681 {
679 struct sk_buff *skb = token; 682 struct sk_buff *skb = token;
680 struct sk_buff *new_skb; 683 struct sk_buff *new_skb;
681 struct net_device *ndev = skb->dev; 684 struct net_device *ndev = skb->dev;
682 struct cpsw_priv *priv = netdev_priv(ndev); 685 struct cpsw_priv *priv = netdev_priv(ndev);
683 int ret = 0; 686 int ret = 0;
684 687
685 cpsw_dual_emac_src_port_detect(status, priv, ndev, skb); 688 cpsw_dual_emac_src_port_detect(status, priv, ndev, skb);
686 689
687 if (unlikely(status < 0) || unlikely(!netif_running(ndev))) { 690 if (unlikely(status < 0) || unlikely(!netif_running(ndev))) {
688 /* the interface is going down, skbs are purged */ 691 /* the interface is going down, skbs are purged */
689 dev_kfree_skb_any(skb); 692 dev_kfree_skb_any(skb);
690 return; 693 return;
691 } 694 }
692 695
693 new_skb = netdev_alloc_skb_ip_align(ndev, priv->rx_packet_max); 696 new_skb = netdev_alloc_skb_ip_align(ndev, priv->rx_packet_max);
694 if (new_skb) { 697 if (new_skb) {
695 skb_put(skb, len); 698 skb_put(skb, len);
696 cpts_rx_timestamp(priv->cpts, skb); 699 cpts_rx_timestamp(priv->cpts, skb);
697 skb->protocol = eth_type_trans(skb, ndev); 700 skb->protocol = eth_type_trans(skb, ndev);
698 netif_receive_skb(skb); 701 netif_receive_skb(skb);
699 priv->stats.rx_bytes += len; 702 priv->stats.rx_bytes += len;
700 priv->stats.rx_packets++; 703 priv->stats.rx_packets++;
701 } else { 704 } else {
702 priv->stats.rx_dropped++; 705 priv->stats.rx_dropped++;
703 new_skb = skb; 706 new_skb = skb;
704 } 707 }
705 708
706 ret = cpdma_chan_submit(priv->rxch, new_skb, new_skb->data, 709 ret = cpdma_chan_submit(priv->rxch, new_skb, new_skb->data,
707 skb_tailroom(new_skb), 0); 710 skb_tailroom(new_skb), 0);
708 if (WARN_ON(ret < 0)) 711 if (WARN_ON(ret < 0))
709 dev_kfree_skb_any(new_skb); 712 dev_kfree_skb_any(new_skb);
710 } 713 }
711 714
712 static irqreturn_t cpsw_interrupt(int irq, void *dev_id) 715 static irqreturn_t cpsw_interrupt(int irq, void *dev_id)
713 { 716 {
714 struct cpsw_priv *priv = dev_id; 717 struct cpsw_priv *priv = dev_id;
715 718
716 cpsw_intr_disable(priv); 719 __raw_writel(0, &priv->wr_regs->rx_en);
717 if (priv->irq_enabled == true) { 720 if (priv->irq_enabled) {
718 cpsw_disable_irq(priv); 721 disable_irq_nosync(priv->irqs_table[0]);
722 disable_irq_nosync(priv->irqs_table[1]);
723 disable_irq_nosync(priv->irqs_table[3]);
719 priv->irq_enabled = false; 724 priv->irq_enabled = false;
720 } 725 }
721 726
722 if (netif_running(priv->ndev)) { 727 if (netif_running(priv->ndev)) {
723 napi_schedule(&priv->napi); 728 napi_schedule(&priv->napi);
724 return IRQ_HANDLED; 729 return IRQ_HANDLED;
725 } 730 }
726 731
727 priv = cpsw_get_slave_priv(priv, 1); 732 priv = cpsw_get_slave_priv(priv, 1);
728 if (!priv) 733 if (!priv)
729 return IRQ_NONE; 734 return IRQ_NONE;
730 735
731 if (netif_running(priv->ndev)) { 736 if (netif_running(priv->ndev)) {
732 napi_schedule(&priv->napi); 737 napi_schedule(&priv->napi);
733 return IRQ_HANDLED; 738 return IRQ_HANDLED;
734 } 739 }
735 return IRQ_NONE; 740 return IRQ_NONE;
736 } 741 }
737 742
743 static irqreturn_t cpsw_tx_interrupt(int irq, void *dev_id)
744 {
745 struct cpsw_priv *priv = dev_id;
746
747 __raw_writel(0, &priv->wr_regs->tx_en);
748 if (priv->irq_tx_enabled) {
749 disable_irq_nosync(priv->irqs_table[2]);
750 priv->irq_tx_enabled = false;
751 }
752
753 if (netif_running(priv->ndev)) {
754 napi_schedule(&priv->napi_tx);
755 return IRQ_HANDLED;
756 }
757
758 priv = cpsw_get_slave_priv(priv, 1);
759 if (!priv)
760 return IRQ_NONE;
761
762 if (netif_running(priv->ndev)) {
763 napi_schedule(&priv->napi_tx);
764 return IRQ_HANDLED;
765 }
766 return IRQ_NONE;
767 }
768
738 static int cpsw_poll(struct napi_struct *napi, int budget) 769 static int cpsw_poll(struct napi_struct *napi, int budget)
739 { 770 {
740 struct cpsw_priv *priv = napi_to_priv(napi); 771 struct cpsw_priv *priv = napi_to_priv(napi);
741 int num_tx, num_rx; 772 int num_rx;
742 773
743 num_tx = cpdma_chan_process(priv->txch, 128);
744 if (num_tx)
745 cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
746
747 num_rx = cpdma_chan_process(priv->rxch, budget); 774 num_rx = cpdma_chan_process(priv->rxch, budget);
748 if (num_rx < budget) { 775 if (num_rx < budget) {
749 struct cpsw_priv *prim_cpsw; 776 struct cpsw_priv *prim_cpsw;
750 777
751 napi_complete(napi); 778 napi_complete(napi);
752 cpsw_intr_enable(priv); 779 __raw_writel(0xFF, &priv->wr_regs->rx_en);
753 cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX); 780 cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
754 prim_cpsw = cpsw_get_slave_priv(priv, 0); 781 prim_cpsw = cpsw_get_slave_priv(priv, 0);
755 if (prim_cpsw->irq_enabled == false) { 782 if (!prim_cpsw->irq_enabled) {
756 prim_cpsw->irq_enabled = true; 783 prim_cpsw->irq_enabled = true;
757 cpsw_enable_irq(priv); 784 enable_irq(priv->irqs_table[0]);
785 enable_irq(priv->irqs_table[1]);
786 enable_irq(priv->irqs_table[3]);
758 } 787 }
759 } 788 }
760 789
761 if (num_rx || num_tx) 790 if (num_rx)
762 cpsw_dbg(priv, intr, "poll %d rx, %d tx pkts\n", 791 cpsw_dbg(priv, intr, "poll %d rx pkts\n", num_rx);
763 num_rx, num_tx);
764 792
765 return num_rx; 793 return num_rx;
766 } 794 }
767 795
796 static int cpsw_tx_poll(struct napi_struct *napi, int budget)
797 {
798 struct cpsw_priv *priv = napi_tx_to_priv(napi);
799 int num_tx;
800
801 num_tx = cpdma_chan_process(priv->txch, budget);
802 if (num_tx < budget) {
803 struct cpsw_priv *prim_cpsw;
804
805 napi_complete(napi);
806 __raw_writel(0xFF, &priv->wr_regs->tx_en);
807 cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
808 prim_cpsw = cpsw_get_slave_priv(priv, 0);
809 if (!prim_cpsw->irq_tx_enabled) {
810 prim_cpsw->irq_tx_enabled = true;
811 enable_irq(priv->irqs_table[2]);
812 }
813 }
814
815 if (num_tx)
816 cpsw_dbg(priv, intr, "poll %d tx pkts\n", num_tx);
817
818 return num_tx;
819 }
820
768 static inline void soft_reset(const char *module, void __iomem *reg) 821 static inline void soft_reset(const char *module, void __iomem *reg)
769 { 822 {
770 unsigned long timeout = jiffies + HZ; 823 unsigned long timeout = jiffies + HZ;
771 824
772 __raw_writel(1, reg); 825 __raw_writel(1, reg);
773 do { 826 do {
774 cpu_relax(); 827 cpu_relax();
775 } while ((__raw_readl(reg) & 1) && time_after(timeout, jiffies)); 828 } while ((__raw_readl(reg) & 1) && time_after(timeout, jiffies));
776 829
777 WARN(__raw_readl(reg) & 1, "failed to soft-reset %s\n", module); 830 WARN(__raw_readl(reg) & 1, "failed to soft-reset %s\n", module);
778 } 831 }
779 832
780 #define mac_hi(mac) (((mac)[0] << 0) | ((mac)[1] << 8) | \ 833 #define mac_hi(mac) (((mac)[0] << 0) | ((mac)[1] << 8) | \
781 ((mac)[2] << 16) | ((mac)[3] << 24)) 834 ((mac)[2] << 16) | ((mac)[3] << 24))
782 #define mac_lo(mac) (((mac)[4] << 0) | ((mac)[5] << 8)) 835 #define mac_lo(mac) (((mac)[4] << 0) | ((mac)[5] << 8))
783 836
784 static void cpsw_set_slave_mac(struct cpsw_slave *slave, 837 static void cpsw_set_slave_mac(struct cpsw_slave *slave,
785 struct cpsw_priv *priv) 838 struct cpsw_priv *priv)
786 { 839 {
787 slave_write(slave, mac_hi(priv->mac_addr), SA_HI); 840 slave_write(slave, mac_hi(priv->mac_addr), SA_HI);
788 slave_write(slave, mac_lo(priv->mac_addr), SA_LO); 841 slave_write(slave, mac_lo(priv->mac_addr), SA_LO);
789 } 842 }
790 843
791 static void _cpsw_adjust_link(struct cpsw_slave *slave, 844 static void _cpsw_adjust_link(struct cpsw_slave *slave,
792 struct cpsw_priv *priv, bool *link) 845 struct cpsw_priv *priv, bool *link)
793 { 846 {
794 struct phy_device *phy = slave->phy; 847 struct phy_device *phy = slave->phy;
795 u32 mac_control = 0; 848 u32 mac_control = 0;
796 u32 slave_port; 849 u32 slave_port;
797 850
798 if (!phy) 851 if (!phy)
799 return; 852 return;
800 853
801 slave_port = cpsw_get_slave_port(priv, slave->slave_num); 854 slave_port = cpsw_get_slave_port(priv, slave->slave_num);
802 855
803 if (phy->link) { 856 if (phy->link) {
804 mac_control = priv->data.mac_control; 857 mac_control = priv->data.mac_control;
805 858
806 /* enable forwarding */ 859 /* enable forwarding */
807 cpsw_ale_control_set(priv->ale, slave_port, 860 cpsw_ale_control_set(priv->ale, slave_port,
808 ALE_PORT_STATE, ALE_PORT_STATE_FORWARD); 861 ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
809 862
810 if (phy->speed == 1000) 863 if (phy->speed == 1000)
811 mac_control |= BIT(7); /* GIGABITEN */ 864 mac_control |= BIT(7); /* GIGABITEN */
812 if (phy->duplex) 865 if (phy->duplex)
813 mac_control |= BIT(0); /* FULLDUPLEXEN */ 866 mac_control |= BIT(0); /* FULLDUPLEXEN */
814 867
815 /* set speed_in input in case RMII mode is used in 100Mbps */ 868 /* set speed_in input in case RMII mode is used in 100Mbps */
816 if (phy->speed == 100) 869 if (phy->speed == 100)
817 mac_control |= BIT(15); 870 mac_control |= BIT(15);
818 else if (phy->speed == 10) 871 else if (phy->speed == 10)
819 mac_control |= BIT(18); /* In Band mode */ 872 mac_control |= BIT(18); /* In Band mode */
820 873
821 *link = true; 874 *link = true;
822 } else { 875 } else {
823 mac_control = 0; 876 mac_control = 0;
824 /* disable forwarding */ 877 /* disable forwarding */
825 cpsw_ale_control_set(priv->ale, slave_port, 878 cpsw_ale_control_set(priv->ale, slave_port,
826 ALE_PORT_STATE, ALE_PORT_STATE_DISABLE); 879 ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
827 } 880 }
828 881
829 if (mac_control != slave->mac_control) { 882 if (mac_control != slave->mac_control) {
830 phy_print_status(phy); 883 phy_print_status(phy);
831 __raw_writel(mac_control, &slave->sliver->mac_control); 884 __raw_writel(mac_control, &slave->sliver->mac_control);
832 } 885 }
833 886
834 slave->mac_control = mac_control; 887 slave->mac_control = mac_control;
835 } 888 }
836 889
837 static void cpsw_adjust_link(struct net_device *ndev) 890 static void cpsw_adjust_link(struct net_device *ndev)
838 { 891 {
839 struct cpsw_priv *priv = netdev_priv(ndev); 892 struct cpsw_priv *priv = netdev_priv(ndev);
840 bool link = false; 893 bool link = false;
841 894
842 for_each_slave(priv, _cpsw_adjust_link, priv, &link); 895 for_each_slave(priv, _cpsw_adjust_link, priv, &link);
843 896
844 if (link) { 897 if (link) {
845 netif_carrier_on(ndev); 898 netif_carrier_on(ndev);
846 if (netif_running(ndev)) 899 if (netif_running(ndev))
847 netif_wake_queue(ndev); 900 netif_wake_queue(ndev);
848 } else { 901 } else {
849 netif_carrier_off(ndev); 902 netif_carrier_off(ndev);
850 netif_stop_queue(ndev); 903 netif_stop_queue(ndev);
851 } 904 }
852 } 905 }
853 906
854 static int cpsw_get_coalesce(struct net_device *ndev, 907 static int cpsw_get_coalesce(struct net_device *ndev,
855 struct ethtool_coalesce *coal) 908 struct ethtool_coalesce *coal)
856 { 909 {
857 struct cpsw_priv *priv = netdev_priv(ndev); 910 struct cpsw_priv *priv = netdev_priv(ndev);
858 911
859 coal->rx_coalesce_usecs = priv->coal_intvl; 912 coal->rx_coalesce_usecs = priv->coal_intvl;
860 return 0; 913 return 0;
861 } 914 }
862 915
863 static int cpsw_set_coalesce(struct net_device *ndev, 916 static int cpsw_set_coalesce(struct net_device *ndev,
864 struct ethtool_coalesce *coal) 917 struct ethtool_coalesce *coal)
865 { 918 {
866 struct cpsw_priv *priv = netdev_priv(ndev); 919 struct cpsw_priv *priv = netdev_priv(ndev);
867 u32 int_ctrl; 920 u32 int_ctrl;
868 u32 num_interrupts = 0; 921 u32 num_interrupts = 0;
869 u32 prescale = 0; 922 u32 prescale = 0;
870 u32 addnl_dvdr = 1; 923 u32 addnl_dvdr = 1;
871 u32 coal_intvl = 0; 924 u32 coal_intvl = 0;
872 925
873 if (!coal->rx_coalesce_usecs) 926 if (!coal->rx_coalesce_usecs)
874 return -EINVAL; 927 return -EINVAL;
875 928
876 coal_intvl = coal->rx_coalesce_usecs; 929 coal_intvl = coal->rx_coalesce_usecs;
877 930
878 int_ctrl = readl(&priv->wr_regs->int_control); 931 int_ctrl = readl(&priv->wr_regs->int_control);
879 prescale = priv->bus_freq_mhz * 4; 932 prescale = priv->bus_freq_mhz * 4;
880 933
881 if (coal_intvl < CPSW_CMINTMIN_INTVL) 934 if (coal_intvl < CPSW_CMINTMIN_INTVL)
882 coal_intvl = CPSW_CMINTMIN_INTVL; 935 coal_intvl = CPSW_CMINTMIN_INTVL;
883 936
884 if (coal_intvl > CPSW_CMINTMAX_INTVL) { 937 if (coal_intvl > CPSW_CMINTMAX_INTVL) {
885 /* Interrupt pacer works with 4us Pulse, we can 938 /* Interrupt pacer works with 4us Pulse, we can
886 * throttle further by dilating the 4us pulse. 939 * throttle further by dilating the 4us pulse.
887 */ 940 */
888 addnl_dvdr = CPSW_INTPRESCALE_MASK / prescale; 941 addnl_dvdr = CPSW_INTPRESCALE_MASK / prescale;
889 942
890 if (addnl_dvdr > 1) { 943 if (addnl_dvdr > 1) {
891 prescale *= addnl_dvdr; 944 prescale *= addnl_dvdr;
892 if (coal_intvl > (CPSW_CMINTMAX_INTVL * addnl_dvdr)) 945 if (coal_intvl > (CPSW_CMINTMAX_INTVL * addnl_dvdr))
893 coal_intvl = (CPSW_CMINTMAX_INTVL 946 coal_intvl = (CPSW_CMINTMAX_INTVL
894 * addnl_dvdr); 947 * addnl_dvdr);
895 } else { 948 } else {
896 addnl_dvdr = 1; 949 addnl_dvdr = 1;
897 coal_intvl = CPSW_CMINTMAX_INTVL; 950 coal_intvl = CPSW_CMINTMAX_INTVL;
898 } 951 }
899 } 952 }
900 953
901 num_interrupts = (1000 * addnl_dvdr) / coal_intvl; 954 num_interrupts = (1000 * addnl_dvdr) / coal_intvl;
902 writel(num_interrupts, &priv->wr_regs->rx_imax); 955 writel(num_interrupts, &priv->wr_regs->rx_imax);
903 writel(num_interrupts, &priv->wr_regs->tx_imax); 956 writel(num_interrupts, &priv->wr_regs->tx_imax);
904 957
905 int_ctrl |= CPSW_INTPACEEN; 958 int_ctrl |= CPSW_INTPACEEN;
906 int_ctrl &= (~CPSW_INTPRESCALE_MASK); 959 int_ctrl &= (~CPSW_INTPRESCALE_MASK);
907 int_ctrl |= (prescale & CPSW_INTPRESCALE_MASK); 960 int_ctrl |= (prescale & CPSW_INTPRESCALE_MASK);
908 writel(int_ctrl, &priv->wr_regs->int_control); 961 writel(int_ctrl, &priv->wr_regs->int_control);
909 962
910 cpsw_notice(priv, timer, "Set coalesce to %d usecs.\n", coal_intvl); 963 cpsw_notice(priv, timer, "Set coalesce to %d usecs.\n", coal_intvl);
911 if (priv->data.dual_emac) { 964 if (priv->data.dual_emac) {
912 int i; 965 int i;
913 966
914 for (i = 0; i < priv->data.slaves; i++) { 967 for (i = 0; i < priv->data.slaves; i++) {
915 priv = netdev_priv(priv->slaves[i].ndev); 968 priv = netdev_priv(priv->slaves[i].ndev);
916 priv->coal_intvl = coal_intvl; 969 priv->coal_intvl = coal_intvl;
917 } 970 }
918 } else { 971 } else {
919 priv->coal_intvl = coal_intvl; 972 priv->coal_intvl = coal_intvl;
920 } 973 }
921 974
922 return 0; 975 return 0;
923 } 976 }
924 977
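For a concrete feel of the pacing math in cpsw_set_coalesce() above, the stand-alone sketch below repeats the same arithmetic in user space. The limit and mask values are placeholders picked only for illustration (the driver's own CPSW_CMINT*_INTVL and CPSW_INTPRESCALE_MASK definitions apply), and the 250 MHz bus clock is an assumed figure, not something read from hardware.

#include <stdio.h>

/* Placeholder limits, for illustration only; cpsw.c defines the real ones. */
#define CMINTMIN_INTVL		16
#define CMINTMAX_INTVL		500
#define INTPRESCALE_MASK	0x7ff

int main(void)
{
	unsigned int bus_freq_mhz = 250;	/* assumed bus clock (MHz) */
	unsigned int coal_intvl = 250;		/* requested rx-usecs */
	unsigned int prescale = bus_freq_mhz * 4;
	unsigned int addnl_dvdr = 1;
	unsigned int num_interrupts;

	if (coal_intvl < CMINTMIN_INTVL)
		coal_intvl = CMINTMIN_INTVL;

	if (coal_intvl > CMINTMAX_INTVL) {
		/* throttle further by dilating the 4us pulse */
		addnl_dvdr = INTPRESCALE_MASK / prescale;
		if (addnl_dvdr > 1) {
			prescale *= addnl_dvdr;
			if (coal_intvl > CMINTMAX_INTVL * addnl_dvdr)
				coal_intvl = CMINTMAX_INTVL * addnl_dvdr;
		} else {
			addnl_dvdr = 1;
			coal_intvl = CMINTMAX_INTVL;
		}
	}

	num_interrupts = (1000 * addnl_dvdr) / coal_intvl;
	printf("coal_intvl=%u us -> rx_imax/tx_imax=%u, prescale=0x%x\n",
	       coal_intvl, num_interrupts, prescale & INTPRESCALE_MASK);
	return 0;
}

At run time the interval is requested through the .set_coalesce ethtool op registered further down, e.g. ethtool -C eth0 rx-usecs 250 on a system where eth0 is the cpsw interface.
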
925 static int cpsw_get_sset_count(struct net_device *ndev, int sset) 978 static int cpsw_get_sset_count(struct net_device *ndev, int sset)
926 { 979 {
927 switch (sset) { 980 switch (sset) {
928 case ETH_SS_STATS: 981 case ETH_SS_STATS:
929 return CPSW_STATS_LEN; 982 return CPSW_STATS_LEN;
930 default: 983 default:
931 return -EOPNOTSUPP; 984 return -EOPNOTSUPP;
932 } 985 }
933 } 986 }
934 987
935 static void cpsw_get_strings(struct net_device *ndev, u32 stringset, u8 *data) 988 static void cpsw_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
936 { 989 {
937 u8 *p = data; 990 u8 *p = data;
938 int i; 991 int i;
939 992
940 switch (stringset) { 993 switch (stringset) {
941 case ETH_SS_STATS: 994 case ETH_SS_STATS:
942 for (i = 0; i < CPSW_STATS_LEN; i++) { 995 for (i = 0; i < CPSW_STATS_LEN; i++) {
943 memcpy(p, cpsw_gstrings_stats[i].stat_string, 996 memcpy(p, cpsw_gstrings_stats[i].stat_string,
944 ETH_GSTRING_LEN); 997 ETH_GSTRING_LEN);
945 p += ETH_GSTRING_LEN; 998 p += ETH_GSTRING_LEN;
946 } 999 }
947 break; 1000 break;
948 } 1001 }
949 } 1002 }
950 1003
951 static void cpsw_get_ethtool_stats(struct net_device *ndev, 1004 static void cpsw_get_ethtool_stats(struct net_device *ndev,
952 struct ethtool_stats *stats, u64 *data) 1005 struct ethtool_stats *stats, u64 *data)
953 { 1006 {
954 struct cpsw_priv *priv = netdev_priv(ndev); 1007 struct cpsw_priv *priv = netdev_priv(ndev);
955 struct cpdma_chan_stats rx_stats; 1008 struct cpdma_chan_stats rx_stats;
956 struct cpdma_chan_stats tx_stats; 1009 struct cpdma_chan_stats tx_stats;
957 u32 val; 1010 u32 val;
958 u8 *p; 1011 u8 *p;
959 int i; 1012 int i;
960 1013
961 /* Collect Davinci CPDMA stats for Rx and Tx Channel */ 1014 /* Collect Davinci CPDMA stats for Rx and Tx Channel */
962 cpdma_chan_get_stats(priv->rxch, &rx_stats); 1015 cpdma_chan_get_stats(priv->rxch, &rx_stats);
963 cpdma_chan_get_stats(priv->txch, &tx_stats); 1016 cpdma_chan_get_stats(priv->txch, &tx_stats);
964 1017
965 for (i = 0; i < CPSW_STATS_LEN; i++) { 1018 for (i = 0; i < CPSW_STATS_LEN; i++) {
966 switch (cpsw_gstrings_stats[i].type) { 1019 switch (cpsw_gstrings_stats[i].type) {
967 case CPSW_STATS: 1020 case CPSW_STATS:
968 val = readl(priv->hw_stats + 1021 val = readl(priv->hw_stats +
969 cpsw_gstrings_stats[i].stat_offset); 1022 cpsw_gstrings_stats[i].stat_offset);
970 data[i] = val; 1023 data[i] = val;
971 break; 1024 break;
972 1025
973 case CPDMA_RX_STATS: 1026 case CPDMA_RX_STATS:
974 p = (u8 *)&rx_stats + 1027 p = (u8 *)&rx_stats +
975 cpsw_gstrings_stats[i].stat_offset; 1028 cpsw_gstrings_stats[i].stat_offset;
976 data[i] = *(u32 *)p; 1029 data[i] = *(u32 *)p;
977 break; 1030 break;
978 1031
979 case CPDMA_TX_STATS: 1032 case CPDMA_TX_STATS:
980 p = (u8 *)&tx_stats + 1033 p = (u8 *)&tx_stats +
981 cpsw_gstrings_stats[i].stat_offset; 1034 cpsw_gstrings_stats[i].stat_offset;
982 data[i] = *(u32 *)p; 1035 data[i] = *(u32 *)p;
983 break; 1036 break;
984 } 1037 }
985 } 1038 }
986 } 1039 }
987 1040
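The strings and counters exposed by cpsw_get_strings() and cpsw_get_ethtool_stats() above are what ethtool -S <iface> prints: the switch's hardware statistics read from priv->hw_stats, plus the per-channel CPDMA RX and TX counters fetched with cpdma_chan_get_stats().
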
988 static inline int __show_stat(char *buf, int maxlen, const char *name, u32 val) 1041 static inline int __show_stat(char *buf, int maxlen, const char *name, u32 val)
989 { 1042 {
990 static char *leader = "........................................"; 1043 static char *leader = "........................................";
991 1044
992 if (!val) 1045 if (!val)
993 return 0; 1046 return 0;
994 else 1047 else
995 return snprintf(buf, maxlen, "%s %s %10d\n", name, 1048 return snprintf(buf, maxlen, "%s %s %10d\n", name,
996 leader + strlen(name), val); 1049 leader + strlen(name), val);
997 } 1050 }
998 1051
999 static int cpsw_common_res_usage_state(struct cpsw_priv *priv) 1052 static int cpsw_common_res_usage_state(struct cpsw_priv *priv)
1000 { 1053 {
1001 u32 i; 1054 u32 i;
1002 u32 usage_count = 0; 1055 u32 usage_count = 0;
1003 1056
1004 if (!priv->data.dual_emac) 1057 if (!priv->data.dual_emac)
1005 return 0; 1058 return 0;
1006 1059
1007 for (i = 0; i < priv->data.slaves; i++) 1060 for (i = 0; i < priv->data.slaves; i++)
1008 if (priv->slaves[i].open_stat) 1061 if (priv->slaves[i].open_stat)
1009 usage_count++; 1062 usage_count++;
1010 1063
1011 return usage_count; 1064 return usage_count;
1012 } 1065 }
1013 1066
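A note on how the helper above is used: for dual EMAC builds it counts how many of the slave net_devices are currently open (open_stat), so the shared resources, the CPDMA controller, the ALE and the interrupts, are initialised only by the first cpsw_ndo_open() and torn down only when the last interface closes (the cpsw_common_res_usage_state(priv) <= 1 check in cpsw_ndo_stop()). In the single EMAC case it always returns 0, so the init paths in cpsw_ndo_open() run unconditionally.
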
1014 static inline int cpsw_tx_packet_submit(struct net_device *ndev, 1067 static inline int cpsw_tx_packet_submit(struct net_device *ndev,
1015 struct cpsw_priv *priv, struct sk_buff *skb) 1068 struct cpsw_priv *priv, struct sk_buff *skb)
1016 { 1069 {
1017 if (!priv->data.dual_emac) 1070 if (!priv->data.dual_emac)
1018 return cpdma_chan_submit(priv->txch, skb, skb->data, 1071 return cpdma_chan_submit(priv->txch, skb, skb->data,
1019 skb->len, 0); 1072 skb->len, 0);
1020 1073
1021 if (ndev == cpsw_get_slave_ndev(priv, 0)) 1074 if (ndev == cpsw_get_slave_ndev(priv, 0))
1022 return cpdma_chan_submit(priv->txch, skb, skb->data, 1075 return cpdma_chan_submit(priv->txch, skb, skb->data,
1023 skb->len, 1); 1076 skb->len, 1);
1024 else 1077 else
1025 return cpdma_chan_submit(priv->txch, skb, skb->data, 1078 return cpdma_chan_submit(priv->txch, skb, skb->data,
1026 skb->len, 2); 1079 skb->len, 2);
1027 } 1080 }
1028 1081
1029 static inline void cpsw_add_dual_emac_def_ale_entries( 1082 static inline void cpsw_add_dual_emac_def_ale_entries(
1030 struct cpsw_priv *priv, struct cpsw_slave *slave, 1083 struct cpsw_priv *priv, struct cpsw_slave *slave,
1031 u32 slave_port) 1084 u32 slave_port)
1032 { 1085 {
1033 u32 port_mask = 1 << slave_port | 1 << priv->host_port; 1086 u32 port_mask = 1 << slave_port | 1 << priv->host_port;
1034 1087
1035 if (priv->version == CPSW_VERSION_1) 1088 if (priv->version == CPSW_VERSION_1)
1036 slave_write(slave, slave->port_vlan, CPSW1_PORT_VLAN); 1089 slave_write(slave, slave->port_vlan, CPSW1_PORT_VLAN);
1037 else 1090 else
1038 slave_write(slave, slave->port_vlan, CPSW2_PORT_VLAN); 1091 slave_write(slave, slave->port_vlan, CPSW2_PORT_VLAN);
1039 cpsw_ale_add_vlan(priv->ale, slave->port_vlan, port_mask, 1092 cpsw_ale_add_vlan(priv->ale, slave->port_vlan, port_mask,
1040 port_mask, port_mask, 0); 1093 port_mask, port_mask, 0);
1041 cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast, 1094 cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
1042 port_mask, ALE_VLAN, slave->port_vlan, 0); 1095 port_mask, ALE_VLAN, slave->port_vlan, 0);
1043 cpsw_ale_add_ucast(priv->ale, priv->mac_addr, 1096 cpsw_ale_add_ucast(priv->ale, priv->mac_addr,
1044 priv->host_port, ALE_VLAN, slave->port_vlan); 1097 priv->host_port, ALE_VLAN, slave->port_vlan);
1045 } 1098 }
1046 1099
1047 static void soft_reset_slave(struct cpsw_slave *slave) 1100 static void soft_reset_slave(struct cpsw_slave *slave)
1048 { 1101 {
1049 char name[32]; 1102 char name[32];
1050 1103
1051 snprintf(name, sizeof(name), "slave-%d", slave->slave_num); 1104 snprintf(name, sizeof(name), "slave-%d", slave->slave_num);
1052 soft_reset(name, &slave->sliver->soft_reset); 1105 soft_reset(name, &slave->sliver->soft_reset);
1053 } 1106 }
1054 1107
1055 static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv) 1108 static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
1056 { 1109 {
1057 u32 slave_port; 1110 u32 slave_port;
1058 1111
1059 soft_reset_slave(slave); 1112 soft_reset_slave(slave);
1060 1113
1061 /* setup priority mapping */ 1114 /* setup priority mapping */
1062 __raw_writel(RX_PRIORITY_MAPPING, &slave->sliver->rx_pri_map); 1115 __raw_writel(RX_PRIORITY_MAPPING, &slave->sliver->rx_pri_map);
1063 1116
1064 switch (priv->version) { 1117 switch (priv->version) {
1065 case CPSW_VERSION_1: 1118 case CPSW_VERSION_1:
1066 slave_write(slave, TX_PRIORITY_MAPPING, CPSW1_TX_PRI_MAP); 1119 slave_write(slave, TX_PRIORITY_MAPPING, CPSW1_TX_PRI_MAP);
1067 break; 1120 break;
1068 case CPSW_VERSION_2: 1121 case CPSW_VERSION_2:
1069 case CPSW_VERSION_3: 1122 case CPSW_VERSION_3:
1070 case CPSW_VERSION_4: 1123 case CPSW_VERSION_4:
1071 slave_write(slave, TX_PRIORITY_MAPPING, CPSW2_TX_PRI_MAP); 1124 slave_write(slave, TX_PRIORITY_MAPPING, CPSW2_TX_PRI_MAP);
1072 break; 1125 break;
1073 } 1126 }
1074 1127
1075 /* setup max packet size, and mac address */ 1128 /* setup max packet size, and mac address */
1076 __raw_writel(priv->rx_packet_max, &slave->sliver->rx_maxlen); 1129 __raw_writel(priv->rx_packet_max, &slave->sliver->rx_maxlen);
1077 cpsw_set_slave_mac(slave, priv); 1130 cpsw_set_slave_mac(slave, priv);
1078 1131
1079 slave->mac_control = 0; /* no link yet */ 1132 slave->mac_control = 0; /* no link yet */
1080 1133
1081 slave_port = cpsw_get_slave_port(priv, slave->slave_num); 1134 slave_port = cpsw_get_slave_port(priv, slave->slave_num);
1082 1135
1083 if (priv->data.dual_emac) 1136 if (priv->data.dual_emac)
1084 cpsw_add_dual_emac_def_ale_entries(priv, slave, slave_port); 1137 cpsw_add_dual_emac_def_ale_entries(priv, slave, slave_port);
1085 else 1138 else
1086 cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast, 1139 cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
1087 1 << slave_port, 0, 0, ALE_MCAST_FWD_2); 1140 1 << slave_port, 0, 0, ALE_MCAST_FWD_2);
1088 1141
1089 slave->phy = phy_connect(priv->ndev, slave->data->phy_id, 1142 slave->phy = phy_connect(priv->ndev, slave->data->phy_id,
1090 &cpsw_adjust_link, slave->data->phy_if); 1143 &cpsw_adjust_link, slave->data->phy_if);
1091 if (IS_ERR(slave->phy)) { 1144 if (IS_ERR(slave->phy)) {
1092 dev_err(priv->dev, "phy %s not found on slave %d\n", 1145 dev_err(priv->dev, "phy %s not found on slave %d\n",
1093 slave->data->phy_id, slave->slave_num); 1146 slave->data->phy_id, slave->slave_num);
1094 slave->phy = NULL; 1147 slave->phy = NULL;
1095 } else { 1148 } else {
1096 dev_info(priv->dev, "phy found : id is : 0x%x\n", 1149 dev_info(priv->dev, "phy found : id is : 0x%x\n",
1097 slave->phy->phy_id); 1150 slave->phy->phy_id);
1098 phy_start(slave->phy); 1151 phy_start(slave->phy);
1099 1152
1100 /* Configure GMII_SEL register */ 1153 /* Configure GMII_SEL register */
1101 cpsw_phy_sel(&priv->pdev->dev, slave->phy->interface, 1154 cpsw_phy_sel(&priv->pdev->dev, slave->phy->interface,
1102 slave->slave_num); 1155 slave->slave_num);
1103 } 1156 }
1104 } 1157 }
1105 1158
1106 static inline void cpsw_add_default_vlan(struct cpsw_priv *priv) 1159 static inline void cpsw_add_default_vlan(struct cpsw_priv *priv)
1107 { 1160 {
1108 const int vlan = priv->data.default_vlan; 1161 const int vlan = priv->data.default_vlan;
1109 const int port = priv->host_port; 1162 const int port = priv->host_port;
1110 u32 reg; 1163 u32 reg;
1111 int i; 1164 int i;
1112 1165
1113 reg = (priv->version == CPSW_VERSION_1) ? CPSW1_PORT_VLAN : 1166 reg = (priv->version == CPSW_VERSION_1) ? CPSW1_PORT_VLAN :
1114 CPSW2_PORT_VLAN; 1167 CPSW2_PORT_VLAN;
1115 1168
1116 writel(vlan, &priv->host_port_regs->port_vlan); 1169 writel(vlan, &priv->host_port_regs->port_vlan);
1117 1170
1118 for (i = 0; i < priv->data.slaves; i++) 1171 for (i = 0; i < priv->data.slaves; i++)
1119 slave_write(priv->slaves + i, vlan, reg); 1172 slave_write(priv->slaves + i, vlan, reg);
1120 1173
1121 cpsw_ale_add_vlan(priv->ale, vlan, ALE_ALL_PORTS << port, 1174 cpsw_ale_add_vlan(priv->ale, vlan, ALE_ALL_PORTS << port,
1122 ALE_ALL_PORTS << port, ALE_ALL_PORTS << port, 1175 ALE_ALL_PORTS << port, ALE_ALL_PORTS << port,
1123 (ALE_PORT_1 | ALE_PORT_2) << port); 1176 (ALE_PORT_1 | ALE_PORT_2) << port);
1124 } 1177 }
1125 1178
1126 static void cpsw_init_host_port(struct cpsw_priv *priv) 1179 static void cpsw_init_host_port(struct cpsw_priv *priv)
1127 { 1180 {
1128 u32 control_reg; 1181 u32 control_reg;
1129 u32 fifo_mode; 1182 u32 fifo_mode;
1130 1183
1131 /* soft reset the controller and initialize ale */ 1184 /* soft reset the controller and initialize ale */
1132 soft_reset("cpsw", &priv->regs->soft_reset); 1185 soft_reset("cpsw", &priv->regs->soft_reset);
1133 cpsw_ale_start(priv->ale); 1186 cpsw_ale_start(priv->ale);
1134 1187
1135 /* switch to vlan unaware mode */ 1188 /* switch to vlan unaware mode */
1136 cpsw_ale_control_set(priv->ale, priv->host_port, ALE_VLAN_AWARE, 1189 cpsw_ale_control_set(priv->ale, priv->host_port, ALE_VLAN_AWARE,
1137 CPSW_ALE_VLAN_AWARE); 1190 CPSW_ALE_VLAN_AWARE);
1138 control_reg = readl(&priv->regs->control); 1191 control_reg = readl(&priv->regs->control);
1139 control_reg |= CPSW_VLAN_AWARE; 1192 control_reg |= CPSW_VLAN_AWARE;
1140 writel(control_reg, &priv->regs->control); 1193 writel(control_reg, &priv->regs->control);
1141 fifo_mode = (priv->data.dual_emac) ? CPSW_FIFO_DUAL_MAC_MODE : 1194 fifo_mode = (priv->data.dual_emac) ? CPSW_FIFO_DUAL_MAC_MODE :
1142 CPSW_FIFO_NORMAL_MODE; 1195 CPSW_FIFO_NORMAL_MODE;
1143 writel(fifo_mode, &priv->host_port_regs->tx_in_ctl); 1196 writel(fifo_mode, &priv->host_port_regs->tx_in_ctl);
1144 1197
1145 /* setup host port priority mapping */ 1198 /* setup host port priority mapping */
1146 __raw_writel(CPDMA_TX_PRIORITY_MAP, 1199 __raw_writel(CPDMA_TX_PRIORITY_MAP,
1147 &priv->host_port_regs->cpdma_tx_pri_map); 1200 &priv->host_port_regs->cpdma_tx_pri_map);
1148 __raw_writel(0, &priv->host_port_regs->cpdma_rx_chan_map); 1201 __raw_writel(0, &priv->host_port_regs->cpdma_rx_chan_map);
1149 1202
1150 cpsw_ale_control_set(priv->ale, priv->host_port, 1203 cpsw_ale_control_set(priv->ale, priv->host_port,
1151 ALE_PORT_STATE, ALE_PORT_STATE_FORWARD); 1204 ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
1152 1205
1153 if (!priv->data.dual_emac) { 1206 if (!priv->data.dual_emac) {
1154 cpsw_ale_add_ucast(priv->ale, priv->mac_addr, priv->host_port, 1207 cpsw_ale_add_ucast(priv->ale, priv->mac_addr, priv->host_port,
1155 0, 0); 1208 0, 0);
1156 cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast, 1209 cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
1157 1 << priv->host_port, 0, 0, ALE_MCAST_FWD_2); 1210 1 << priv->host_port, 0, 0, ALE_MCAST_FWD_2);
1158 } 1211 }
1159 } 1212 }
1160 1213
1161 static void cpsw_slave_stop(struct cpsw_slave *slave, struct cpsw_priv *priv) 1214 static void cpsw_slave_stop(struct cpsw_slave *slave, struct cpsw_priv *priv)
1162 { 1215 {
1163 if (!slave->phy) 1216 if (!slave->phy)
1164 return; 1217 return;
1165 phy_stop(slave->phy); 1218 phy_stop(slave->phy);
1166 phy_disconnect(slave->phy); 1219 phy_disconnect(slave->phy);
1167 slave->phy = NULL; 1220 slave->phy = NULL;
1168 } 1221 }
1169 1222
1170 static int cpsw_ndo_open(struct net_device *ndev) 1223 static int cpsw_ndo_open(struct net_device *ndev)
1171 { 1224 {
1172 struct cpsw_priv *priv = netdev_priv(ndev); 1225 struct cpsw_priv *priv = netdev_priv(ndev);
1173 struct cpsw_priv *prim_cpsw; 1226 struct cpsw_priv *prim_cpsw;
1174 int i, ret; 1227 int i, ret;
1175 u32 reg; 1228 u32 reg;
1176 1229
1177 if (!cpsw_common_res_usage_state(priv)) 1230 if (!cpsw_common_res_usage_state(priv))
1178 cpsw_intr_disable(priv); 1231 cpsw_intr_disable(priv);
1179 netif_carrier_off(ndev); 1232 netif_carrier_off(ndev);
1180 1233
1181 pm_runtime_get_sync(&priv->pdev->dev); 1234 pm_runtime_get_sync(&priv->pdev->dev);
1182 1235
1183 reg = priv->version; 1236 reg = priv->version;
1184 1237
1185 dev_info(priv->dev, "initializing cpsw version %d.%d (%d)\n", 1238 dev_info(priv->dev, "initializing cpsw version %d.%d (%d)\n",
1186 CPSW_MAJOR_VERSION(reg), CPSW_MINOR_VERSION(reg), 1239 CPSW_MAJOR_VERSION(reg), CPSW_MINOR_VERSION(reg),
1187 CPSW_RTL_VERSION(reg)); 1240 CPSW_RTL_VERSION(reg));
1188 1241
1189 /* initialize host and slave ports */ 1242 /* initialize host and slave ports */
1190 if (!cpsw_common_res_usage_state(priv)) 1243 if (!cpsw_common_res_usage_state(priv))
1191 cpsw_init_host_port(priv); 1244 cpsw_init_host_port(priv);
1192 for_each_slave(priv, cpsw_slave_open, priv); 1245 for_each_slave(priv, cpsw_slave_open, priv);
1193 1246
1194 /* Add default VLAN */ 1247 /* Add default VLAN */
1195 if (!priv->data.dual_emac) 1248 if (!priv->data.dual_emac)
1196 cpsw_add_default_vlan(priv); 1249 cpsw_add_default_vlan(priv);
1197 1250
1198 if (!cpsw_common_res_usage_state(priv)) { 1251 if (!cpsw_common_res_usage_state(priv)) {
1199 /* setup tx dma to fixed prio and zero offset */ 1252 /* setup tx dma to fixed prio and zero offset */
1200 cpdma_control_set(priv->dma, CPDMA_TX_PRIO_FIXED, 1); 1253 cpdma_control_set(priv->dma, CPDMA_TX_PRIO_FIXED, 1);
1201 cpdma_control_set(priv->dma, CPDMA_RX_BUFFER_OFFSET, 0); 1254 cpdma_control_set(priv->dma, CPDMA_RX_BUFFER_OFFSET, 0);
1202 1255
1203 /* disable priority elevation */ 1256 /* disable priority elevation */
1204 __raw_writel(0, &priv->regs->ptype); 1257 __raw_writel(0, &priv->regs->ptype);
1205 1258
1206 /* enable statistics collection only on all ports */ 1259 /* enable statistics collection only on all ports */
1207 __raw_writel(0x7, &priv->regs->stat_port_en); 1260 __raw_writel(0x7, &priv->regs->stat_port_en);
1208 1261
1209 if (WARN_ON(!priv->data.rx_descs)) 1262 if (WARN_ON(!priv->data.rx_descs))
1210 priv->data.rx_descs = 128; 1263 priv->data.rx_descs = 128;
1211 1264
1212 for (i = 0; i < priv->data.rx_descs; i++) { 1265 for (i = 0; i < priv->data.rx_descs; i++) {
1213 struct sk_buff *skb; 1266 struct sk_buff *skb;
1214 1267
1215 ret = -ENOMEM; 1268 ret = -ENOMEM;
1216 skb = __netdev_alloc_skb_ip_align(priv->ndev, 1269 skb = __netdev_alloc_skb_ip_align(priv->ndev,
1217 priv->rx_packet_max, GFP_KERNEL); 1270 priv->rx_packet_max, GFP_KERNEL);
1218 if (!skb) 1271 if (!skb)
1219 goto err_cleanup; 1272 goto err_cleanup;
1220 ret = cpdma_chan_submit(priv->rxch, skb, skb->data, 1273 ret = cpdma_chan_submit(priv->rxch, skb, skb->data,
1221 skb_tailroom(skb), 0); 1274 skb_tailroom(skb), 0);
1222 if (ret < 0) { 1275 if (ret < 0) {
1223 kfree_skb(skb); 1276 kfree_skb(skb);
1224 goto err_cleanup; 1277 goto err_cleanup;
1225 } 1278 }
1226 } 1279 }
1227 /* continue even if we didn't manage to submit all 1280 /* continue even if we didn't manage to submit all
1228 * receive descs 1281 * receive descs
1229 */ 1282 */
1230 cpsw_info(priv, ifup, "submitted %d rx descriptors\n", i); 1283 cpsw_info(priv, ifup, "submitted %d rx descriptors\n", i);
1231 1284
1232 if (cpts_register(&priv->pdev->dev, priv->cpts, 1285 if (cpts_register(&priv->pdev->dev, priv->cpts,
1233 priv->data.cpts_clock_mult, 1286 priv->data.cpts_clock_mult,
1234 priv->data.cpts_clock_shift)) 1287 priv->data.cpts_clock_shift))
1235 dev_err(priv->dev, "error registering cpts device\n"); 1288 dev_err(priv->dev, "error registering cpts device\n");
1236 1289
1237 } 1290 }
1238 1291
1239 /* Enable Interrupt pacing if configured */ 1292 /* Enable Interrupt pacing if configured */
1240 if (priv->coal_intvl != 0) { 1293 if (priv->coal_intvl != 0) {
1241 struct ethtool_coalesce coal; 1294 struct ethtool_coalesce coal;
1242 1295
1243 coal.rx_coalesce_usecs = (priv->coal_intvl << 4); 1296 coal.rx_coalesce_usecs = (priv->coal_intvl << 4);
1244 cpsw_set_coalesce(ndev, &coal); 1297 cpsw_set_coalesce(ndev, &coal);
1245 } 1298 }
1246 1299
1247 napi_enable(&priv->napi); 1300 napi_enable(&priv->napi);
1301 napi_enable(&priv->napi_tx);
1248 cpdma_ctlr_start(priv->dma); 1302 cpdma_ctlr_start(priv->dma);
1249 cpsw_intr_enable(priv); 1303 cpsw_intr_enable(priv);
1250 cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX); 1304 cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
1251 cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX); 1305 cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
1252 1306
1253 prim_cpsw = cpsw_get_slave_priv(priv, 0); 1307 prim_cpsw = cpsw_get_slave_priv(priv, 0);
1254 if (prim_cpsw->irq_enabled == false) { 1308 if (!prim_cpsw->irq_enabled && !prim_cpsw->irq_tx_enabled) {
1255 if ((priv == prim_cpsw) || !netif_running(prim_cpsw->ndev)) { 1309 if ((priv == prim_cpsw) || !netif_running(prim_cpsw->ndev)) {
1256 prim_cpsw->irq_enabled = true; 1310 prim_cpsw->irq_enabled = true;
1311 prim_cpsw->irq_tx_enabled = true;
1257 cpsw_enable_irq(prim_cpsw); 1312 cpsw_enable_irq(prim_cpsw);
1258 } 1313 }
1259 } 1314 }
1260 1315
1261 if (priv->data.dual_emac) 1316 if (priv->data.dual_emac)
1262 priv->slaves[priv->emac_port].open_stat = true; 1317 priv->slaves[priv->emac_port].open_stat = true;
1263 return 0; 1318 return 0;
1264 1319
1265 err_cleanup: 1320 err_cleanup:
1266 cpdma_ctlr_stop(priv->dma); 1321 cpdma_ctlr_stop(priv->dma);
1267 for_each_slave(priv, cpsw_slave_stop, priv); 1322 for_each_slave(priv, cpsw_slave_stop, priv);
1268 pm_runtime_put_sync(&priv->pdev->dev); 1323 pm_runtime_put_sync(&priv->pdev->dev);
1269 netif_carrier_off(priv->ndev); 1324 netif_carrier_off(priv->ndev);
1270 return ret; 1325 return ret;
1271 } 1326 }
1272 1327
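The napi_tx instance enabled in cpsw_ndo_open() above (and disabled again in cpsw_ndo_stop() just below) is the heart of this patch: TX completion gets its own NAPI context instead of piggybacking on the RX poll. The cpsw TX poll handler itself sits outside this hunk, so the following is only a generic sketch of the usual TX-completion poll shape; struct xyz_priv and the xyz_* helpers are hypothetical stand-ins, while napi_complete() and the budget contract are the real NAPI API.

#include <linux/kernel.h>
#include <linux/netdevice.h>

/* Hypothetical driver context; only the NAPI plumbing below is real API. */
struct xyz_priv {
	struct napi_struct	napi_tx;
};

int xyz_tx_complete(struct xyz_priv *priv, int budget);	/* reclaim TX descs */
void xyz_tx_irq_enable(struct xyz_priv *priv);		/* unmask the TX irq */

static int xyz_tx_poll(struct napi_struct *napi_tx, int budget)
{
	struct xyz_priv *priv = container_of(napi_tx, struct xyz_priv, napi_tx);
	int work_done = xyz_tx_complete(priv, budget);

	if (work_done < budget) {
		/* All pending TX completions handled: stop polling and let
		 * the re-enabled TX interrupt schedule this NAPI again.
		 */
		napi_complete(napi_tx);
		xyz_tx_irq_enable(priv);
	}
	return work_done;
}

A handler like this is registered with netif_napi_add(ndev, &priv->napi_tx, xyz_tx_poll, weight); the open and stop paths above and below then only need to toggle it with napi_enable()/napi_disable().
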
1273 static int cpsw_ndo_stop(struct net_device *ndev) 1328 static int cpsw_ndo_stop(struct net_device *ndev)
1274 { 1329 {
1275 struct cpsw_priv *priv = netdev_priv(ndev); 1330 struct cpsw_priv *priv = netdev_priv(ndev);
1276 1331
1277 cpsw_info(priv, ifdown, "shutting down cpsw device\n"); 1332 cpsw_info(priv, ifdown, "shutting down cpsw device\n");
1278 netif_stop_queue(priv->ndev); 1333 netif_stop_queue(priv->ndev);
1279 napi_disable(&priv->napi); 1334 napi_disable(&priv->napi);
1335 napi_disable(&priv->napi_tx);
1280 netif_carrier_off(priv->ndev); 1336 netif_carrier_off(priv->ndev);
1281 1337
1282 if (cpsw_common_res_usage_state(priv) <= 1) { 1338 if (cpsw_common_res_usage_state(priv) <= 1) {
1283 cpts_unregister(priv->cpts); 1339 cpts_unregister(priv->cpts);
1284 cpsw_intr_disable(priv); 1340 cpsw_intr_disable(priv);
1285 cpdma_ctlr_int_ctrl(priv->dma, false); 1341 cpdma_ctlr_int_ctrl(priv->dma, false);
1286 cpdma_ctlr_stop(priv->dma); 1342 cpdma_ctlr_stop(priv->dma);
1287 cpsw_ale_stop(priv->ale); 1343 cpsw_ale_stop(priv->ale);
1288 } 1344 }
1289 for_each_slave(priv, cpsw_slave_stop, priv); 1345 for_each_slave(priv, cpsw_slave_stop, priv);
1290 pm_runtime_put_sync(&priv->pdev->dev); 1346 pm_runtime_put_sync(&priv->pdev->dev);
1291 if (priv->data.dual_emac) 1347 if (priv->data.dual_emac)
1292 priv->slaves[priv->emac_port].open_stat = false; 1348 priv->slaves[priv->emac_port].open_stat = false;
1293 return 0; 1349 return 0;
1294 } 1350 }
1295 1351
1296 static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb, 1352 static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
1297 struct net_device *ndev) 1353 struct net_device *ndev)
1298 { 1354 {
1299 struct cpsw_priv *priv = netdev_priv(ndev); 1355 struct cpsw_priv *priv = netdev_priv(ndev);
1300 int ret; 1356 int ret;
1301 1357
1302 ndev->trans_start = jiffies; 1358 ndev->trans_start = jiffies;
1303 1359
1304 if (skb_padto(skb, CPSW_MIN_PACKET_SIZE)) { 1360 if (skb_padto(skb, CPSW_MIN_PACKET_SIZE)) {
1305 cpsw_err(priv, tx_err, "packet pad failed\n"); 1361 cpsw_err(priv, tx_err, "packet pad failed\n");
1306 priv->stats.tx_dropped++; 1362 priv->stats.tx_dropped++;
1307 return NETDEV_TX_OK; 1363 return NETDEV_TX_OK;
1308 } 1364 }
1309 1365
1310 if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP && 1366 if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
1311 priv->cpts->tx_enable) 1367 priv->cpts->tx_enable)
1312 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 1368 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
1313 1369
1314 skb_tx_timestamp(skb); 1370 skb_tx_timestamp(skb);
1315 1371
1316 ret = cpsw_tx_packet_submit(ndev, priv, skb); 1372 ret = cpsw_tx_packet_submit(ndev, priv, skb);
1317 if (unlikely(ret != 0)) { 1373 if (unlikely(ret != 0)) {
1318 cpsw_err(priv, tx_err, "desc submit failed\n"); 1374 cpsw_err(priv, tx_err, "desc submit failed\n");
1319 goto fail; 1375 goto fail;
1320 } 1376 }
1321 1377
1322 /* If there is no more tx desc left free then we need to 1378 /* If there is no more tx desc left free then we need to
1323 * tell the kernel to stop sending us tx frames. 1379 * tell the kernel to stop sending us tx frames.
1324 */ 1380 */
1325 if (unlikely(!cpdma_check_free_tx_desc(priv->txch))) 1381 if (unlikely(!cpdma_check_free_tx_desc(priv->txch)))
1326 netif_stop_queue(ndev); 1382 netif_stop_queue(ndev);
1327 1383
1328 return NETDEV_TX_OK; 1384 return NETDEV_TX_OK;
1329 fail: 1385 fail:
1330 priv->stats.tx_dropped++; 1386 priv->stats.tx_dropped++;
1331 netif_stop_queue(ndev); 1387 netif_stop_queue(ndev);
1332 return NETDEV_TX_BUSY; 1388 return NETDEV_TX_BUSY;
1333 } 1389 }
1334 1390
1335 #ifdef CONFIG_TI_CPTS 1391 #ifdef CONFIG_TI_CPTS
1336 1392
1337 static void cpsw_hwtstamp_v1(struct cpsw_priv *priv) 1393 static void cpsw_hwtstamp_v1(struct cpsw_priv *priv)
1338 { 1394 {
1339 struct cpsw_slave *slave = &priv->slaves[priv->data.active_slave]; 1395 struct cpsw_slave *slave = &priv->slaves[priv->data.active_slave];
1340 u32 ts_en, seq_id; 1396 u32 ts_en, seq_id;
1341 1397
1342 if (!priv->cpts->tx_enable && !priv->cpts->rx_enable) { 1398 if (!priv->cpts->tx_enable && !priv->cpts->rx_enable) {
1343 slave_write(slave, 0, CPSW1_TS_CTL); 1399 slave_write(slave, 0, CPSW1_TS_CTL);
1344 return; 1400 return;
1345 } 1401 }
1346 1402
1347 seq_id = (30 << CPSW_V1_SEQ_ID_OFS_SHIFT) | ETH_P_1588; 1403 seq_id = (30 << CPSW_V1_SEQ_ID_OFS_SHIFT) | ETH_P_1588;
1348 ts_en = EVENT_MSG_BITS << CPSW_V1_MSG_TYPE_OFS; 1404 ts_en = EVENT_MSG_BITS << CPSW_V1_MSG_TYPE_OFS;
1349 1405
1350 if (priv->cpts->tx_enable) 1406 if (priv->cpts->tx_enable)
1351 ts_en |= CPSW_V1_TS_TX_EN; 1407 ts_en |= CPSW_V1_TS_TX_EN;
1352 1408
1353 if (priv->cpts->rx_enable) 1409 if (priv->cpts->rx_enable)
1354 ts_en |= CPSW_V1_TS_RX_EN; 1410 ts_en |= CPSW_V1_TS_RX_EN;
1355 1411
1356 slave_write(slave, ts_en, CPSW1_TS_CTL); 1412 slave_write(slave, ts_en, CPSW1_TS_CTL);
1357 slave_write(slave, seq_id, CPSW1_TS_SEQ_LTYPE); 1413 slave_write(slave, seq_id, CPSW1_TS_SEQ_LTYPE);
1358 } 1414 }
1359 1415
1360 static void cpsw_hwtstamp_v2(struct cpsw_priv *priv) 1416 static void cpsw_hwtstamp_v2(struct cpsw_priv *priv)
1361 { 1417 {
1362 struct cpsw_slave *slave; 1418 struct cpsw_slave *slave;
1363 u32 ctrl, mtype; 1419 u32 ctrl, mtype;
1364 1420
1365 if (priv->data.dual_emac) 1421 if (priv->data.dual_emac)
1366 slave = &priv->slaves[priv->emac_port]; 1422 slave = &priv->slaves[priv->emac_port];
1367 else 1423 else
1368 slave = &priv->slaves[priv->data.active_slave]; 1424 slave = &priv->slaves[priv->data.active_slave];
1369 1425
1370 ctrl = slave_read(slave, CPSW2_CONTROL); 1426 ctrl = slave_read(slave, CPSW2_CONTROL);
1371 ctrl &= ~CTRL_ALL_TS_MASK; 1427 ctrl &= ~CTRL_ALL_TS_MASK;
1372 1428
1373 if (priv->cpts->tx_enable) 1429 if (priv->cpts->tx_enable)
1374 ctrl |= CTRL_TX_TS_BITS; 1430 ctrl |= CTRL_TX_TS_BITS;
1375 1431
1376 if (priv->cpts->rx_enable) 1432 if (priv->cpts->rx_enable)
1377 ctrl |= CTRL_RX_TS_BITS; 1433 ctrl |= CTRL_RX_TS_BITS;
1378 1434
1379 mtype = (30 << TS_SEQ_ID_OFFSET_SHIFT) | EVENT_MSG_BITS; 1435 mtype = (30 << TS_SEQ_ID_OFFSET_SHIFT) | EVENT_MSG_BITS;
1380 1436
1381 slave_write(slave, mtype, CPSW2_TS_SEQ_MTYPE); 1437 slave_write(slave, mtype, CPSW2_TS_SEQ_MTYPE);
1382 slave_write(slave, ctrl, CPSW2_CONTROL); 1438 slave_write(slave, ctrl, CPSW2_CONTROL);
1383 __raw_writel(ETH_P_1588, &priv->regs->ts_ltype); 1439 __raw_writel(ETH_P_1588, &priv->regs->ts_ltype);
1384 } 1440 }
1385 1441
1386 static int cpsw_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr) 1442 static int cpsw_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
1387 { 1443 {
1388 struct cpsw_priv *priv = netdev_priv(dev); 1444 struct cpsw_priv *priv = netdev_priv(dev);
1389 struct cpts *cpts = priv->cpts; 1445 struct cpts *cpts = priv->cpts;
1390 struct hwtstamp_config cfg; 1446 struct hwtstamp_config cfg;
1391 1447
1392 if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg))) 1448 if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
1393 return -EFAULT; 1449 return -EFAULT;
1394 1450
1395 /* reserved for future extensions */ 1451 /* reserved for future extensions */
1396 if (cfg.flags) 1452 if (cfg.flags)
1397 return -EINVAL; 1453 return -EINVAL;
1398 1454
1399 switch (cfg.tx_type) { 1455 switch (cfg.tx_type) {
1400 case HWTSTAMP_TX_OFF: 1456 case HWTSTAMP_TX_OFF:
1401 cpts->tx_enable = 0; 1457 cpts->tx_enable = 0;
1402 break; 1458 break;
1403 case HWTSTAMP_TX_ON: 1459 case HWTSTAMP_TX_ON:
1404 cpts->tx_enable = 1; 1460 cpts->tx_enable = 1;
1405 break; 1461 break;
1406 default: 1462 default:
1407 return -ERANGE; 1463 return -ERANGE;
1408 } 1464 }
1409 1465
1410 switch (cfg.rx_filter) { 1466 switch (cfg.rx_filter) {
1411 case HWTSTAMP_FILTER_NONE: 1467 case HWTSTAMP_FILTER_NONE:
1412 cpts->rx_enable = 0; 1468 cpts->rx_enable = 0;
1413 break; 1469 break;
1414 case HWTSTAMP_FILTER_ALL: 1470 case HWTSTAMP_FILTER_ALL:
1415 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: 1471 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
1416 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: 1472 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
1417 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: 1473 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
1418 return -ERANGE; 1474 return -ERANGE;
1419 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: 1475 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
1420 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: 1476 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
1421 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: 1477 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
1422 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: 1478 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
1423 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: 1479 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
1424 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: 1480 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
1425 case HWTSTAMP_FILTER_PTP_V2_EVENT: 1481 case HWTSTAMP_FILTER_PTP_V2_EVENT:
1426 case HWTSTAMP_FILTER_PTP_V2_SYNC: 1482 case HWTSTAMP_FILTER_PTP_V2_SYNC:
1427 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: 1483 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
1428 cpts->rx_enable = 1; 1484 cpts->rx_enable = 1;
1429 cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; 1485 cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
1430 break; 1486 break;
1431 default: 1487 default:
1432 return -ERANGE; 1488 return -ERANGE;
1433 } 1489 }
1434 1490
1435 switch (priv->version) { 1491 switch (priv->version) {
1436 case CPSW_VERSION_1: 1492 case CPSW_VERSION_1:
1437 cpsw_hwtstamp_v1(priv); 1493 cpsw_hwtstamp_v1(priv);
1438 break; 1494 break;
1439 case CPSW_VERSION_2: 1495 case CPSW_VERSION_2:
1440 case CPSW_VERSION_3: 1496 case CPSW_VERSION_3:
1441 cpsw_hwtstamp_v2(priv); 1497 cpsw_hwtstamp_v2(priv);
1442 break; 1498 break;
1443 default: 1499 default:
1444 return -ENOTSUPP; 1500 return -ENOTSUPP;
1445 } 1501 }
1446 1502
1447 return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0; 1503 return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
1448 } 1504 }
1449 1505
1450 #endif /*CONFIG_TI_CPTS*/ 1506 #endif /*CONFIG_TI_CPTS*/
1451 1507
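cpsw_hwtstamp_ioctl() above is reached through the generic SIOCSHWTSTAMP path, so enabling CPTS timestamping is an ordinary ioctl from user space. Below is a minimal sketch of such a request; eth0 is only an example interface name.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/sockios.h>
#include <linux/net_tstamp.h>

int main(void)
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	memset(&cfg, 0, sizeof(cfg));
	cfg.tx_type = HWTSTAMP_TX_ON;			/* timestamp outgoing packets */
	cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;	/* PTPv2 event messages */

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* example interface */
	ifr.ifr_data = (void *)&cfg;

	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0) {
		perror("SIOCSHWTSTAMP");
		close(fd);
		return 1;
	}

	/* the driver may rewrite rx_filter to the mode it actually enabled */
	printf("tx_type=%d rx_filter=%d\n", cfg.tx_type, cfg.rx_filter);
	close(fd);
	return 0;
}

PTP stacks such as linuxptp's ptp4l issue the same request internally, and cpsw_get_ts_info() further down advertises which tx_types and rx_filters the driver accepts.
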
1452 static int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd) 1508 static int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
1453 { 1509 {
1454 struct cpsw_priv *priv = netdev_priv(dev); 1510 struct cpsw_priv *priv = netdev_priv(dev);
1455 struct mii_ioctl_data *data = if_mii(req); 1511 struct mii_ioctl_data *data = if_mii(req);
1456 int slave_no = cpsw_slave_index(priv); 1512 int slave_no = cpsw_slave_index(priv);
1457 1513
1458 if (!netif_running(dev)) 1514 if (!netif_running(dev))
1459 return -EINVAL; 1515 return -EINVAL;
1460 1516
1461 switch (cmd) { 1517 switch (cmd) {
1462 #ifdef CONFIG_TI_CPTS 1518 #ifdef CONFIG_TI_CPTS
1463 case SIOCSHWTSTAMP: 1519 case SIOCSHWTSTAMP:
1464 return cpsw_hwtstamp_ioctl(dev, req); 1520 return cpsw_hwtstamp_ioctl(dev, req);
1465 #endif 1521 #endif
1466 case SIOCGMIIPHY: 1522 case SIOCGMIIPHY:
1467 data->phy_id = priv->slaves[slave_no].phy->addr; 1523 data->phy_id = priv->slaves[slave_no].phy->addr;
1468 break; 1524 break;
1469 default: 1525 default:
1470 return -ENOTSUPP; 1526 return -ENOTSUPP;
1471 } 1527 }
1472 1528
1473 return 0; 1529 return 0;
1474 } 1530 }
1475 1531
1476 static void cpsw_ndo_tx_timeout(struct net_device *ndev) 1532 static void cpsw_ndo_tx_timeout(struct net_device *ndev)
1477 { 1533 {
1478 struct cpsw_priv *priv = netdev_priv(ndev); 1534 struct cpsw_priv *priv = netdev_priv(ndev);
1479 1535
1480 cpsw_err(priv, tx_err, "transmit timeout, restarting dma\n"); 1536 cpsw_err(priv, tx_err, "transmit timeout, restarting dma\n");
1481 priv->stats.tx_errors++; 1537 priv->stats.tx_errors++;
1482 cpsw_intr_disable(priv); 1538 cpsw_intr_disable(priv);
1483 cpdma_ctlr_int_ctrl(priv->dma, false); 1539 cpdma_ctlr_int_ctrl(priv->dma, false);
1484 cpdma_chan_stop(priv->txch); 1540 cpdma_chan_stop(priv->txch);
1485 cpdma_chan_start(priv->txch); 1541 cpdma_chan_start(priv->txch);
1486 cpdma_ctlr_int_ctrl(priv->dma, true); 1542 cpdma_ctlr_int_ctrl(priv->dma, true);
1487 cpsw_intr_enable(priv); 1543 cpsw_intr_enable(priv);
1488 cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX); 1544 cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
1489 cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX); 1545 cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
1490 1546
1491 } 1547 }
1492 1548
1493 static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p) 1549 static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p)
1494 { 1550 {
1495 struct cpsw_priv *priv = netdev_priv(ndev); 1551 struct cpsw_priv *priv = netdev_priv(ndev);
1496 struct sockaddr *addr = (struct sockaddr *)p; 1552 struct sockaddr *addr = (struct sockaddr *)p;
1497 int flags = 0; 1553 int flags = 0;
1498 u16 vid = 0; 1554 u16 vid = 0;
1499 1555
1500 if (!is_valid_ether_addr(addr->sa_data)) 1556 if (!is_valid_ether_addr(addr->sa_data))
1501 return -EADDRNOTAVAIL; 1557 return -EADDRNOTAVAIL;
1502 1558
1503 if (priv->data.dual_emac) { 1559 if (priv->data.dual_emac) {
1504 vid = priv->slaves[priv->emac_port].port_vlan; 1560 vid = priv->slaves[priv->emac_port].port_vlan;
1505 flags = ALE_VLAN; 1561 flags = ALE_VLAN;
1506 } 1562 }
1507 1563
1508 cpsw_ale_del_ucast(priv->ale, priv->mac_addr, priv->host_port, 1564 cpsw_ale_del_ucast(priv->ale, priv->mac_addr, priv->host_port,
1509 flags, vid); 1565 flags, vid);
1510 cpsw_ale_add_ucast(priv->ale, addr->sa_data, priv->host_port, 1566 cpsw_ale_add_ucast(priv->ale, addr->sa_data, priv->host_port,
1511 flags, vid); 1567 flags, vid);
1512 1568
1513 memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN); 1569 memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN);
1514 memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN); 1570 memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN);
1515 for_each_slave(priv, cpsw_set_slave_mac, priv); 1571 for_each_slave(priv, cpsw_set_slave_mac, priv);
1516 1572
1517 return 0; 1573 return 0;
1518 } 1574 }
1519 1575
1520 static struct net_device_stats *cpsw_ndo_get_stats(struct net_device *ndev) 1576 static struct net_device_stats *cpsw_ndo_get_stats(struct net_device *ndev)
1521 { 1577 {
1522 struct cpsw_priv *priv = netdev_priv(ndev); 1578 struct cpsw_priv *priv = netdev_priv(ndev);
1523 return &priv->stats; 1579 return &priv->stats;
1524 } 1580 }
1525 1581
1526 #ifdef CONFIG_NET_POLL_CONTROLLER 1582 #ifdef CONFIG_NET_POLL_CONTROLLER
1527 static void cpsw_ndo_poll_controller(struct net_device *ndev) 1583 static void cpsw_ndo_poll_controller(struct net_device *ndev)
1528 { 1584 {
1529 struct cpsw_priv *priv = netdev_priv(ndev); 1585 struct cpsw_priv *priv = netdev_priv(ndev);
1530 1586
1531 cpsw_intr_disable(priv); 1587 cpsw_intr_disable(priv);
1532 cpdma_ctlr_int_ctrl(priv->dma, false); 1588 cpdma_ctlr_int_ctrl(priv->dma, false);
1533 cpsw_interrupt(ndev->irq, priv); 1589 cpsw_interrupt(ndev->irq, priv);
1534 cpdma_ctlr_int_ctrl(priv->dma, true); 1590 cpdma_ctlr_int_ctrl(priv->dma, true);
1535 cpsw_intr_enable(priv); 1591 cpsw_intr_enable(priv);
1536 cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX); 1592 cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
1537 cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX); 1593 cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
1538 1594
1539 } 1595 }
1540 #endif 1596 #endif
1541 1597
1542 static inline int cpsw_add_vlan_ale_entry(struct cpsw_priv *priv, 1598 static inline int cpsw_add_vlan_ale_entry(struct cpsw_priv *priv,
1543 unsigned short vid) 1599 unsigned short vid)
1544 { 1600 {
1545 int ret; 1601 int ret;
1546 1602
1547 ret = cpsw_ale_add_vlan(priv->ale, vid, 1603 ret = cpsw_ale_add_vlan(priv->ale, vid,
1548 ALE_ALL_PORTS << priv->host_port, 1604 ALE_ALL_PORTS << priv->host_port,
1549 0, ALE_ALL_PORTS << priv->host_port, 1605 0, ALE_ALL_PORTS << priv->host_port,
1550 (ALE_PORT_1 | ALE_PORT_2) << priv->host_port); 1606 (ALE_PORT_1 | ALE_PORT_2) << priv->host_port);
1551 if (ret != 0) 1607 if (ret != 0)
1552 return ret; 1608 return ret;
1553 1609
1554 ret = cpsw_ale_add_ucast(priv->ale, priv->mac_addr, 1610 ret = cpsw_ale_add_ucast(priv->ale, priv->mac_addr,
1555 priv->host_port, ALE_VLAN, vid); 1611 priv->host_port, ALE_VLAN, vid);
1556 if (ret != 0) 1612 if (ret != 0)
1557 goto clean_vid; 1613 goto clean_vid;
1558 1614
1559 ret = cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast, 1615 ret = cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
1560 ALE_ALL_PORTS << priv->host_port, 1616 ALE_ALL_PORTS << priv->host_port,
1561 ALE_VLAN, vid, 0); 1617 ALE_VLAN, vid, 0);
1562 if (ret != 0) 1618 if (ret != 0)
1563 goto clean_vlan_ucast; 1619 goto clean_vlan_ucast;
1564 return 0; 1620 return 0;
1565 1621
1566 clean_vlan_ucast: 1622 clean_vlan_ucast:
1567 cpsw_ale_del_ucast(priv->ale, priv->mac_addr, 1623 cpsw_ale_del_ucast(priv->ale, priv->mac_addr,
1568 priv->host_port, ALE_VLAN, vid); 1624 priv->host_port, ALE_VLAN, vid);
1569 clean_vid: 1625 clean_vid:
1570 cpsw_ale_del_vlan(priv->ale, vid, 0); 1626 cpsw_ale_del_vlan(priv->ale, vid, 0);
1571 return ret; 1627 return ret;
1572 } 1628 }
1573 1629
1574 static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev, 1630 static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
1575 __be16 proto, u16 vid) 1631 __be16 proto, u16 vid)
1576 { 1632 {
1577 struct cpsw_priv *priv = netdev_priv(ndev); 1633 struct cpsw_priv *priv = netdev_priv(ndev);
1578 1634
1579 if (vid == priv->data.default_vlan) 1635 if (vid == priv->data.default_vlan)
1580 return 0; 1636 return 0;
1581 1637
1582 dev_info(priv->dev, "Adding vlanid %d to vlan filter\n", vid); 1638 dev_info(priv->dev, "Adding vlanid %d to vlan filter\n", vid);
1583 return cpsw_add_vlan_ale_entry(priv, vid); 1639 return cpsw_add_vlan_ale_entry(priv, vid);
1584 } 1640 }
1585 1641
1586 static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev, 1642 static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
1587 __be16 proto, u16 vid) 1643 __be16 proto, u16 vid)
1588 { 1644 {
1589 struct cpsw_priv *priv = netdev_priv(ndev); 1645 struct cpsw_priv *priv = netdev_priv(ndev);
1590 int ret; 1646 int ret;
1591 1647
1592 if (vid == priv->data.default_vlan) 1648 if (vid == priv->data.default_vlan)
1593 return 0; 1649 return 0;
1594 1650
1595 dev_info(priv->dev, "removing vlanid %d from vlan filter\n", vid); 1651 dev_info(priv->dev, "removing vlanid %d from vlan filter\n", vid);
1596 ret = cpsw_ale_del_vlan(priv->ale, vid, 0); 1652 ret = cpsw_ale_del_vlan(priv->ale, vid, 0);
1597 if (ret != 0) 1653 if (ret != 0)
1598 return ret; 1654 return ret;
1599 1655
1600 ret = cpsw_ale_del_ucast(priv->ale, priv->mac_addr, 1656 ret = cpsw_ale_del_ucast(priv->ale, priv->mac_addr,
1601 priv->host_port, ALE_VLAN, vid); 1657 priv->host_port, ALE_VLAN, vid);
1602 if (ret != 0) 1658 if (ret != 0)
1603 return ret; 1659 return ret;
1604 1660
1605 return cpsw_ale_del_mcast(priv->ale, priv->ndev->broadcast, 1661 return cpsw_ale_del_mcast(priv->ale, priv->ndev->broadcast,
1606 0, ALE_VLAN, vid); 1662 0, ALE_VLAN, vid);
1607 } 1663 }
1608 1664
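A brief usage note on the two callbacks above: provided the device advertises hardware VLAN filtering, creating a VLAN interface on top of the cpsw port, for example ip link add link eth0 name eth0.100 type vlan id 100, ends up in cpsw_ndo_vlan_rx_add_vid(), which programs the ALE VLAN entry plus the matching unicast and broadcast entries via cpsw_add_vlan_ale_entry(); tearing the VLAN interface down takes the cpsw_ndo_vlan_rx_kill_vid() path and removes them again. The interface names here are examples only.
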
1609 static const struct net_device_ops cpsw_netdev_ops = { 1665 static const struct net_device_ops cpsw_netdev_ops = {
1610 .ndo_open = cpsw_ndo_open, 1666 .ndo_open = cpsw_ndo_open,
1611 .ndo_stop = cpsw_ndo_stop, 1667 .ndo_stop = cpsw_ndo_stop,
1612 .ndo_start_xmit = cpsw_ndo_start_xmit, 1668 .ndo_start_xmit = cpsw_ndo_start_xmit,
1613 .ndo_set_mac_address = cpsw_ndo_set_mac_address, 1669 .ndo_set_mac_address = cpsw_ndo_set_mac_address,
1614 .ndo_do_ioctl = cpsw_ndo_ioctl, 1670 .ndo_do_ioctl = cpsw_ndo_ioctl,
1615 .ndo_validate_addr = eth_validate_addr, 1671 .ndo_validate_addr = eth_validate_addr,
1616 .ndo_change_mtu = eth_change_mtu, 1672 .ndo_change_mtu = eth_change_mtu,
1617 .ndo_tx_timeout = cpsw_ndo_tx_timeout, 1673 .ndo_tx_timeout = cpsw_ndo_tx_timeout,
1618 .ndo_get_stats = cpsw_ndo_get_stats, 1674 .ndo_get_stats = cpsw_ndo_get_stats,
1619 .ndo_set_rx_mode = cpsw_ndo_set_rx_mode, 1675 .ndo_set_rx_mode = cpsw_ndo_set_rx_mode,
1620 #ifdef CONFIG_NET_POLL_CONTROLLER 1676 #ifdef CONFIG_NET_POLL_CONTROLLER
1621 .ndo_poll_controller = cpsw_ndo_poll_controller, 1677 .ndo_poll_controller = cpsw_ndo_poll_controller,
1622 #endif 1678 #endif
1623 .ndo_vlan_rx_add_vid = cpsw_ndo_vlan_rx_add_vid, 1679 .ndo_vlan_rx_add_vid = cpsw_ndo_vlan_rx_add_vid,
1624 .ndo_vlan_rx_kill_vid = cpsw_ndo_vlan_rx_kill_vid, 1680 .ndo_vlan_rx_kill_vid = cpsw_ndo_vlan_rx_kill_vid,
1625 }; 1681 };
1626 1682
1627 static void cpsw_get_drvinfo(struct net_device *ndev, 1683 static void cpsw_get_drvinfo(struct net_device *ndev,
1628 struct ethtool_drvinfo *info) 1684 struct ethtool_drvinfo *info)
1629 { 1685 {
1630 struct cpsw_priv *priv = netdev_priv(ndev); 1686 struct cpsw_priv *priv = netdev_priv(ndev);
1631 1687
1632 strlcpy(info->driver, "TI CPSW Driver v1.0", sizeof(info->driver)); 1688 strlcpy(info->driver, "TI CPSW Driver v1.0", sizeof(info->driver));
1633 strlcpy(info->version, "1.0", sizeof(info->version)); 1689 strlcpy(info->version, "1.0", sizeof(info->version));
1634 strlcpy(info->bus_info, priv->pdev->name, sizeof(info->bus_info)); 1690 strlcpy(info->bus_info, priv->pdev->name, sizeof(info->bus_info));
1635 } 1691 }
1636 1692
1637 static u32 cpsw_get_msglevel(struct net_device *ndev) 1693 static u32 cpsw_get_msglevel(struct net_device *ndev)
1638 { 1694 {
1639 struct cpsw_priv *priv = netdev_priv(ndev); 1695 struct cpsw_priv *priv = netdev_priv(ndev);
1640 return priv->msg_enable; 1696 return priv->msg_enable;
1641 } 1697 }
1642 1698
1643 static void cpsw_set_msglevel(struct net_device *ndev, u32 value) 1699 static void cpsw_set_msglevel(struct net_device *ndev, u32 value)
1644 { 1700 {
1645 struct cpsw_priv *priv = netdev_priv(ndev); 1701 struct cpsw_priv *priv = netdev_priv(ndev);
1646 priv->msg_enable = value; 1702 priv->msg_enable = value;
1647 } 1703 }
1648 1704
1649 static int cpsw_get_ts_info(struct net_device *ndev, 1705 static int cpsw_get_ts_info(struct net_device *ndev,
1650 struct ethtool_ts_info *info) 1706 struct ethtool_ts_info *info)
1651 { 1707 {
1652 #ifdef CONFIG_TI_CPTS 1708 #ifdef CONFIG_TI_CPTS
1653 struct cpsw_priv *priv = netdev_priv(ndev); 1709 struct cpsw_priv *priv = netdev_priv(ndev);
1654 1710
1655 info->so_timestamping = 1711 info->so_timestamping =
1656 SOF_TIMESTAMPING_TX_HARDWARE | 1712 SOF_TIMESTAMPING_TX_HARDWARE |
1657 SOF_TIMESTAMPING_TX_SOFTWARE | 1713 SOF_TIMESTAMPING_TX_SOFTWARE |
1658 SOF_TIMESTAMPING_RX_HARDWARE | 1714 SOF_TIMESTAMPING_RX_HARDWARE |
1659 SOF_TIMESTAMPING_RX_SOFTWARE | 1715 SOF_TIMESTAMPING_RX_SOFTWARE |
1660 SOF_TIMESTAMPING_SOFTWARE | 1716 SOF_TIMESTAMPING_SOFTWARE |
1661 SOF_TIMESTAMPING_RAW_HARDWARE; 1717 SOF_TIMESTAMPING_RAW_HARDWARE;
1662 info->phc_index = priv->cpts->phc_index; 1718 info->phc_index = priv->cpts->phc_index;
1663 info->tx_types = 1719 info->tx_types =
1664 (1 << HWTSTAMP_TX_OFF) | 1720 (1 << HWTSTAMP_TX_OFF) |
1665 (1 << HWTSTAMP_TX_ON); 1721 (1 << HWTSTAMP_TX_ON);
1666 info->rx_filters = 1722 info->rx_filters =
1667 (1 << HWTSTAMP_FILTER_NONE) | 1723 (1 << HWTSTAMP_FILTER_NONE) |
1668 (1 << HWTSTAMP_FILTER_PTP_V2_EVENT); 1724 (1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
1669 #else 1725 #else
1670 info->so_timestamping = 1726 info->so_timestamping =
1671 SOF_TIMESTAMPING_TX_SOFTWARE | 1727 SOF_TIMESTAMPING_TX_SOFTWARE |
1672 SOF_TIMESTAMPING_RX_SOFTWARE | 1728 SOF_TIMESTAMPING_RX_SOFTWARE |
1673 SOF_TIMESTAMPING_SOFTWARE; 1729 SOF_TIMESTAMPING_SOFTWARE;
1674 info->phc_index = -1; 1730 info->phc_index = -1;
1675 info->tx_types = 0; 1731 info->tx_types = 0;
1676 info->rx_filters = 0; 1732 info->rx_filters = 0;
1677 #endif 1733 #endif
1678 return 0; 1734 return 0;
1679 } 1735 }
1680 1736
1681 static int cpsw_get_settings(struct net_device *ndev, 1737 static int cpsw_get_settings(struct net_device *ndev,
1682 struct ethtool_cmd *ecmd) 1738 struct ethtool_cmd *ecmd)
1683 { 1739 {
1684 struct cpsw_priv *priv = netdev_priv(ndev); 1740 struct cpsw_priv *priv = netdev_priv(ndev);
1685 int slave_no = cpsw_slave_index(priv); 1741 int slave_no = cpsw_slave_index(priv);
1686 1742
1687 if (priv->slaves[slave_no].phy) 1743 if (priv->slaves[slave_no].phy)
1688 return phy_ethtool_gset(priv->slaves[slave_no].phy, ecmd); 1744 return phy_ethtool_gset(priv->slaves[slave_no].phy, ecmd);
1689 else 1745 else
1690 return -EOPNOTSUPP; 1746 return -EOPNOTSUPP;
1691 } 1747 }
1692 1748
1693 static int cpsw_set_settings(struct net_device *ndev, struct ethtool_cmd *ecmd) 1749 static int cpsw_set_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
1694 { 1750 {
1695 struct cpsw_priv *priv = netdev_priv(ndev); 1751 struct cpsw_priv *priv = netdev_priv(ndev);
1696 int slave_no = cpsw_slave_index(priv); 1752 int slave_no = cpsw_slave_index(priv);
1697 1753
1698 if (priv->slaves[slave_no].phy) 1754 if (priv->slaves[slave_no].phy)
1699 return phy_ethtool_sset(priv->slaves[slave_no].phy, ecmd); 1755 return phy_ethtool_sset(priv->slaves[slave_no].phy, ecmd);
1700 else 1756 else
1701 return -EOPNOTSUPP; 1757 return -EOPNOTSUPP;
1702 } 1758 }
1703 1759
1704 static void cpsw_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) 1760 static void cpsw_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
1705 { 1761 {
1706 struct cpsw_priv *priv = netdev_priv(ndev); 1762 struct cpsw_priv *priv = netdev_priv(ndev);
1707 int slave_no = cpsw_slave_index(priv); 1763 int slave_no = cpsw_slave_index(priv);
1708 1764
1709 wol->supported = 0; 1765 wol->supported = 0;
1710 wol->wolopts = 0; 1766 wol->wolopts = 0;
1711 1767
1712 if (priv->slaves[slave_no].phy) 1768 if (priv->slaves[slave_no].phy)
1713 phy_ethtool_get_wol(priv->slaves[slave_no].phy, wol); 1769 phy_ethtool_get_wol(priv->slaves[slave_no].phy, wol);
1714 } 1770 }
1715 1771
1716 static int cpsw_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) 1772 static int cpsw_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
1717 { 1773 {
1718 struct cpsw_priv *priv = netdev_priv(ndev); 1774 struct cpsw_priv *priv = netdev_priv(ndev);
1719 int slave_no = cpsw_slave_index(priv); 1775 int slave_no = cpsw_slave_index(priv);
1720 1776
1721 if (priv->slaves[slave_no].phy) 1777 if (priv->slaves[slave_no].phy)
1722 return phy_ethtool_set_wol(priv->slaves[slave_no].phy, wol); 1778 return phy_ethtool_set_wol(priv->slaves[slave_no].phy, wol);
1723 else 1779 else
1724 return -EOPNOTSUPP; 1780 return -EOPNOTSUPP;
1725 } 1781 }
1726 1782
1727 static const struct ethtool_ops cpsw_ethtool_ops = { 1783 static const struct ethtool_ops cpsw_ethtool_ops = {
1728 .get_drvinfo = cpsw_get_drvinfo, 1784 .get_drvinfo = cpsw_get_drvinfo,
1729 .get_msglevel = cpsw_get_msglevel, 1785 .get_msglevel = cpsw_get_msglevel,
1730 .set_msglevel = cpsw_set_msglevel, 1786 .set_msglevel = cpsw_set_msglevel,
1731 .get_link = ethtool_op_get_link, 1787 .get_link = ethtool_op_get_link,
1732 .get_ts_info = cpsw_get_ts_info, 1788 .get_ts_info = cpsw_get_ts_info,
1733 .get_settings = cpsw_get_settings, 1789 .get_settings = cpsw_get_settings,
1734 .set_settings = cpsw_set_settings, 1790 .set_settings = cpsw_set_settings,
1735 .get_coalesce = cpsw_get_coalesce, 1791 .get_coalesce = cpsw_get_coalesce,
1736 .set_coalesce = cpsw_set_coalesce, 1792 .set_coalesce = cpsw_set_coalesce,
1737 .get_sset_count = cpsw_get_sset_count, 1793 .get_sset_count = cpsw_get_sset_count,
1738 .get_strings = cpsw_get_strings, 1794 .get_strings = cpsw_get_strings,
1739 .get_ethtool_stats = cpsw_get_ethtool_stats, 1795 .get_ethtool_stats = cpsw_get_ethtool_stats,
1740 .get_wol = cpsw_get_wol, 1796 .get_wol = cpsw_get_wol,
1741 .set_wol = cpsw_set_wol, 1797 .set_wol = cpsw_set_wol,
1742 }; 1798 };
1743 1799
1744 static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv, 1800 static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv,
1745 u32 slave_reg_ofs, u32 sliver_reg_ofs) 1801 u32 slave_reg_ofs, u32 sliver_reg_ofs)
1746 { 1802 {
1747 void __iomem *regs = priv->regs; 1803 void __iomem *regs = priv->regs;
1748 int slave_num = slave->slave_num; 1804 int slave_num = slave->slave_num;
1749 struct cpsw_slave_data *data = priv->data.slave_data + slave_num; 1805 struct cpsw_slave_data *data = priv->data.slave_data + slave_num;
1750 1806
1751 slave->data = data; 1807 slave->data = data;
1752 slave->regs = regs + slave_reg_ofs; 1808 slave->regs = regs + slave_reg_ofs;
1753 slave->sliver = regs + sliver_reg_ofs; 1809 slave->sliver = regs + sliver_reg_ofs;
1754 slave->port_vlan = data->dual_emac_res_vlan; 1810 slave->port_vlan = data->dual_emac_res_vlan;
1755 } 1811 }
1756 1812
1757 static int cpsw_probe_dt(struct cpsw_platform_data *data, 1813 static int cpsw_probe_dt(struct cpsw_platform_data *data,
1758 struct platform_device *pdev) 1814 struct platform_device *pdev)
1759 { 1815 {
1760 struct device_node *node = pdev->dev.of_node; 1816 struct device_node *node = pdev->dev.of_node;
1761 struct device_node *slave_node; 1817 struct device_node *slave_node;
1762 int i = 0, ret; 1818 int i = 0, ret;
1763 u32 prop; 1819 u32 prop;
1764 1820
1765 if (!node) 1821 if (!node)
1766 return -EINVAL; 1822 return -EINVAL;
1767 1823
1768 if (of_property_read_u32(node, "slaves", &prop)) { 1824 if (of_property_read_u32(node, "slaves", &prop)) {
1769 pr_err("Missing slaves property in the DT.\n"); 1825 pr_err("Missing slaves property in the DT.\n");
1770 return -EINVAL; 1826 return -EINVAL;
1771 } 1827 }
1772 data->slaves = prop; 1828 data->slaves = prop;
1773 1829
1774 if (of_property_read_u32(node, "active_slave", &prop)) { 1830 if (of_property_read_u32(node, "active_slave", &prop)) {
1775 pr_err("Missing active_slave property in the DT.\n"); 1831 pr_err("Missing active_slave property in the DT.\n");
1776 return -EINVAL; 1832 return -EINVAL;
1777 } 1833 }
1778 data->active_slave = prop; 1834 data->active_slave = prop;
1779 1835
1780 if (of_property_read_u32(node, "cpts_clock_mult", &prop)) { 1836 if (of_property_read_u32(node, "cpts_clock_mult", &prop)) {
1781 pr_err("Missing cpts_clock_mult property in the DT.\n"); 1837 pr_err("Missing cpts_clock_mult property in the DT.\n");
1782 return -EINVAL; 1838 return -EINVAL;
1783 } 1839 }
1784 data->cpts_clock_mult = prop; 1840 data->cpts_clock_mult = prop;
1785 1841
1786 if (of_property_read_u32(node, "cpts_clock_shift", &prop)) { 1842 if (of_property_read_u32(node, "cpts_clock_shift", &prop)) {
1787 pr_err("Missing cpts_clock_shift property in the DT.\n"); 1843 pr_err("Missing cpts_clock_shift property in the DT.\n");
1788 return -EINVAL; 1844 return -EINVAL;
1789 } 1845 }
1790 data->cpts_clock_shift = prop; 1846 data->cpts_clock_shift = prop;
1791 1847
1792 data->slave_data = devm_kzalloc(&pdev->dev, data->slaves 1848 data->slave_data = devm_kzalloc(&pdev->dev, data->slaves
1793 * sizeof(struct cpsw_slave_data), 1849 * sizeof(struct cpsw_slave_data),
1794 GFP_KERNEL); 1850 GFP_KERNEL);
1795 if (!data->slave_data) 1851 if (!data->slave_data)
1796 return -ENOMEM; 1852 return -ENOMEM;
1797 1853
1798 if (of_property_read_u32(node, "cpdma_channels", &prop)) { 1854 if (of_property_read_u32(node, "cpdma_channels", &prop)) {
1799 pr_err("Missing cpdma_channels property in the DT.\n"); 1855 pr_err("Missing cpdma_channels property in the DT.\n");
1800 return -EINVAL; 1856 return -EINVAL;
1801 } 1857 }
1802 data->channels = prop; 1858 data->channels = prop;
1803 1859
1804 if (of_property_read_u32(node, "ale_entries", &prop)) { 1860 if (of_property_read_u32(node, "ale_entries", &prop)) {
1805 pr_err("Missing ale_entries property in the DT.\n"); 1861 pr_err("Missing ale_entries property in the DT.\n");
1806 return -EINVAL; 1862 return -EINVAL;
1807 } 1863 }
1808 data->ale_entries = prop; 1864 data->ale_entries = prop;
1809 1865
1810 if (of_property_read_u32(node, "bd_ram_size", &prop)) { 1866 if (of_property_read_u32(node, "bd_ram_size", &prop)) {
1811 pr_err("Missing bd_ram_size property in the DT.\n"); 1867 pr_err("Missing bd_ram_size property in the DT.\n");
1812 return -EINVAL; 1868 return -EINVAL;
1813 } 1869 }
1814 data->bd_ram_size = prop; 1870 data->bd_ram_size = prop;
1815 1871
1816 if (of_property_read_u32(node, "rx_descs", &prop)) { 1872 if (of_property_read_u32(node, "rx_descs", &prop)) {
1817 pr_err("Missing rx_descs property in the DT.\n"); 1873 pr_err("Missing rx_descs property in the DT.\n");
1818 return -EINVAL; 1874 return -EINVAL;
1819 } 1875 }
1820 data->rx_descs = prop; 1876 data->rx_descs = prop;
1821 1877
1822 if (of_property_read_u32(node, "mac_control", &prop)) { 1878 if (of_property_read_u32(node, "mac_control", &prop)) {
1823 pr_err("Missing mac_control property in the DT.\n"); 1879 pr_err("Missing mac_control property in the DT.\n");
1824 return -EINVAL; 1880 return -EINVAL;
1825 } 1881 }
1826 data->mac_control = prop; 1882 data->mac_control = prop;
1827 1883
1828 if (of_property_read_bool(node, "dual_emac")) 1884 if (of_property_read_bool(node, "dual_emac"))
1829 data->dual_emac = 1; 1885 data->dual_emac = 1;
1830 1886
1831 /* 1887 /*
1832 * Populate all the child nodes here... 1888 * Populate all the child nodes here...
1833 */ 1889 */
1834 ret = of_platform_populate(node, NULL, NULL, &pdev->dev); 1890 ret = of_platform_populate(node, NULL, NULL, &pdev->dev);
1835 /* We do not want to force this, as in some cases may not have child */ 1891 /* We do not want to force this, as in some cases may not have child */
1836 if (ret) 1892 if (ret)
1837 pr_warn("Doesn't have any child node\n"); 1893 pr_warn("Doesn't have any child node\n");
1838 1894
1839 for_each_child_of_node(node, slave_node) { 1895 for_each_child_of_node(node, slave_node) {
1840 struct cpsw_slave_data *slave_data = data->slave_data + i; 1896 struct cpsw_slave_data *slave_data = data->slave_data + i;
1841 const void *mac_addr = NULL; 1897 const void *mac_addr = NULL;
1842 u32 phyid; 1898 u32 phyid;
1843 int lenp; 1899 int lenp;
1844 const __be32 *parp; 1900 const __be32 *parp;
1845 struct device_node *mdio_node; 1901 struct device_node *mdio_node;
1846 struct platform_device *mdio; 1902 struct platform_device *mdio;
1847 1903
1848 /* This is no slave child node, continue */ 1904 /* This is no slave child node, continue */
1849 if (strcmp(slave_node->name, "slave")) 1905 if (strcmp(slave_node->name, "slave"))
1850 continue; 1906 continue;
1851 1907
1852 parp = of_get_property(slave_node, "phy_id", &lenp); 1908 parp = of_get_property(slave_node, "phy_id", &lenp);
1853 if ((parp == NULL) || (lenp != (sizeof(void *) * 2))) { 1909 if ((parp == NULL) || (lenp != (sizeof(void *) * 2))) {
1854 pr_err("Missing slave[%d] phy_id property\n", i); 1910 pr_err("Missing slave[%d] phy_id property\n", i);
1855 return -EINVAL; 1911 return -EINVAL;
1856 } 1912 }
1857 mdio_node = of_find_node_by_phandle(be32_to_cpup(parp)); 1913 mdio_node = of_find_node_by_phandle(be32_to_cpup(parp));
1858 phyid = be32_to_cpup(parp+1); 1914 phyid = be32_to_cpup(parp+1);
1859 mdio = of_find_device_by_node(mdio_node); 1915 mdio = of_find_device_by_node(mdio_node);
1860 snprintf(slave_data->phy_id, sizeof(slave_data->phy_id), 1916 snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
1861 PHY_ID_FMT, mdio->name, phyid); 1917 PHY_ID_FMT, mdio->name, phyid);
1862 1918
1863 mac_addr = of_get_mac_address(slave_node); 1919 mac_addr = of_get_mac_address(slave_node);
1864 if (mac_addr) 1920 if (mac_addr)
1865 memcpy(slave_data->mac_addr, mac_addr, ETH_ALEN); 1921 memcpy(slave_data->mac_addr, mac_addr, ETH_ALEN);
1866 1922
1867 slave_data->phy_if = of_get_phy_mode(slave_node); 1923 slave_data->phy_if = of_get_phy_mode(slave_node);
1868 1924
1869 if (data->dual_emac) { 1925 if (data->dual_emac) {
1870 if (of_property_read_u32(slave_node, "dual_emac_res_vlan", 1926 if (of_property_read_u32(slave_node, "dual_emac_res_vlan",
1871 &prop)) { 1927 &prop)) {
1872 pr_err("Missing dual_emac_res_vlan in DT.\n"); 1928 pr_err("Missing dual_emac_res_vlan in DT.\n");
1873 slave_data->dual_emac_res_vlan = i+1; 1929 slave_data->dual_emac_res_vlan = i+1;
1874 pr_err("Using %d as Reserved VLAN for %d slave\n", 1930 pr_err("Using %d as Reserved VLAN for %d slave\n",
1875 slave_data->dual_emac_res_vlan, i); 1931 slave_data->dual_emac_res_vlan, i);
1876 } else { 1932 } else {
1877 slave_data->dual_emac_res_vlan = prop; 1933 slave_data->dual_emac_res_vlan = prop;
1878 } 1934 }
1879 } 1935 }
1880 1936
1881 i++; 1937 i++;
1882 if (i == data->slaves) 1938 if (i == data->slaves)
1883 break; 1939 break;
1884 } 1940 }
1885 1941
1886 return 0; 1942 return 0;
1887 } 1943 }
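
Note: the DT parsing above repeats the same read-and-check sequence for every mandatory u32 property. Purely as an illustration of that of_property_read_u32() pattern (this helper is not part of cpsw.c or of this patch), it could be factored as:

/* Illustrative helper only, not part of the driver: read a mandatory u32
 * DT property or fail with the same style of error used in cpsw_probe_dt().
 */
static int cpsw_of_read_u32(struct device_node *node, const char *name,
			    u32 *out)
{
	if (of_property_read_u32(node, name, out)) {
		pr_err("Missing %s property in the DT.\n", name);
		return -EINVAL;
	}
	return 0;
}
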
1888 1944
1889 static int cpsw_probe_dual_emac(struct platform_device *pdev, 1945 static int cpsw_probe_dual_emac(struct platform_device *pdev,
1890 struct cpsw_priv *priv) 1946 struct cpsw_priv *priv)
1891 { 1947 {
1892 struct cpsw_platform_data *data = &priv->data; 1948 struct cpsw_platform_data *data = &priv->data;
1893 struct net_device *ndev; 1949 struct net_device *ndev;
1894 struct cpsw_priv *priv_sl2; 1950 struct cpsw_priv *priv_sl2;
1895 int ret = 0, i; 1951 int ret = 0, i;
1896 1952
1897 ndev = alloc_etherdev(sizeof(struct cpsw_priv)); 1953 ndev = alloc_etherdev(sizeof(struct cpsw_priv));
1898 if (!ndev) { 1954 if (!ndev) {
1899 pr_err("cpsw: error allocating net_device\n"); 1955 pr_err("cpsw: error allocating net_device\n");
1900 return -ENOMEM; 1956 return -ENOMEM;
1901 } 1957 }
1902 1958
1903 priv_sl2 = netdev_priv(ndev); 1959 priv_sl2 = netdev_priv(ndev);
1904 spin_lock_init(&priv_sl2->lock); 1960 spin_lock_init(&priv_sl2->lock);
1905 priv_sl2->data = *data; 1961 priv_sl2->data = *data;
1906 priv_sl2->pdev = pdev; 1962 priv_sl2->pdev = pdev;
1907 priv_sl2->ndev = ndev; 1963 priv_sl2->ndev = ndev;
1908 priv_sl2->dev = &ndev->dev; 1964 priv_sl2->dev = &ndev->dev;
1909 priv_sl2->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG); 1965 priv_sl2->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG);
1910 priv_sl2->rx_packet_max = max(rx_packet_max, 128); 1966 priv_sl2->rx_packet_max = max(rx_packet_max, 128);
1911 1967
1912 if (is_valid_ether_addr(data->slave_data[1].mac_addr)) { 1968 if (is_valid_ether_addr(data->slave_data[1].mac_addr)) {
1913 memcpy(priv_sl2->mac_addr, data->slave_data[1].mac_addr, 1969 memcpy(priv_sl2->mac_addr, data->slave_data[1].mac_addr,
1914 ETH_ALEN); 1970 ETH_ALEN);
1915 pr_info("cpsw: Detected MACID = %pM\n", priv_sl2->mac_addr); 1971 pr_info("cpsw: Detected MACID = %pM\n", priv_sl2->mac_addr);
1916 } else { 1972 } else {
1917 random_ether_addr(priv_sl2->mac_addr); 1973 random_ether_addr(priv_sl2->mac_addr);
1918 pr_info("cpsw: Random MACID = %pM\n", priv_sl2->mac_addr); 1974 pr_info("cpsw: Random MACID = %pM\n", priv_sl2->mac_addr);
1919 } 1975 }
1920 memcpy(ndev->dev_addr, priv_sl2->mac_addr, ETH_ALEN); 1976 memcpy(ndev->dev_addr, priv_sl2->mac_addr, ETH_ALEN);
1921 1977
1922 priv_sl2->slaves = priv->slaves; 1978 priv_sl2->slaves = priv->slaves;
1923 priv_sl2->clk = priv->clk; 1979 priv_sl2->clk = priv->clk;
1924 1980
1925 priv_sl2->coal_intvl = 0; 1981 priv_sl2->coal_intvl = 0;
1926 priv_sl2->bus_freq_mhz = priv->bus_freq_mhz; 1982 priv_sl2->bus_freq_mhz = priv->bus_freq_mhz;
1927 1983
1928 priv_sl2->regs = priv->regs; 1984 priv_sl2->regs = priv->regs;
1929 priv_sl2->host_port = priv->host_port; 1985 priv_sl2->host_port = priv->host_port;
1930 priv_sl2->host_port_regs = priv->host_port_regs; 1986 priv_sl2->host_port_regs = priv->host_port_regs;
1931 priv_sl2->wr_regs = priv->wr_regs; 1987 priv_sl2->wr_regs = priv->wr_regs;
1932 priv_sl2->hw_stats = priv->hw_stats; 1988 priv_sl2->hw_stats = priv->hw_stats;
1933 priv_sl2->dma = priv->dma; 1989 priv_sl2->dma = priv->dma;
1934 priv_sl2->txch = priv->txch; 1990 priv_sl2->txch = priv->txch;
1935 priv_sl2->rxch = priv->rxch; 1991 priv_sl2->rxch = priv->rxch;
1936 priv_sl2->ale = priv->ale; 1992 priv_sl2->ale = priv->ale;
1937 priv_sl2->emac_port = 1; 1993 priv_sl2->emac_port = 1;
1938 priv->slaves[1].ndev = ndev; 1994 priv->slaves[1].ndev = ndev;
1939 priv_sl2->cpts = priv->cpts; 1995 priv_sl2->cpts = priv->cpts;
1940 priv_sl2->version = priv->version; 1996 priv_sl2->version = priv->version;
1941 1997
1942 for (i = 0; i < priv->num_irqs; i++) { 1998 for (i = 0; i < priv->num_irqs; i++) {
1943 priv_sl2->irqs_table[i] = priv->irqs_table[i]; 1999 priv_sl2->irqs_table[i] = priv->irqs_table[i];
1944 priv_sl2->num_irqs = priv->num_irqs; 2000 priv_sl2->num_irqs = priv->num_irqs;
1945 } 2001 }
1946 ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; 2002 ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
1947 2003
1948 ndev->netdev_ops = &cpsw_netdev_ops; 2004 ndev->netdev_ops = &cpsw_netdev_ops;
1949 SET_ETHTOOL_OPS(ndev, &cpsw_ethtool_ops); 2005 SET_ETHTOOL_OPS(ndev, &cpsw_ethtool_ops);
1950 netif_napi_add(ndev, &priv_sl2->napi, cpsw_poll, CPSW_POLL_WEIGHT); 2006 netif_napi_add(ndev, &priv_sl2->napi, cpsw_poll, CPSW_POLL_WEIGHT);
2007 netif_napi_add(ndev, &priv_sl2->napi_tx, cpsw_tx_poll,
2008 CPSW_POLL_WEIGHT);
1951 2009
1952 /* register the network device */ 2010 /* register the network device */
1953 SET_NETDEV_DEV(ndev, &pdev->dev); 2011 SET_NETDEV_DEV(ndev, &pdev->dev);
1954 ret = register_netdev(ndev); 2012 ret = register_netdev(ndev);
1955 if (ret) { 2013 if (ret) {
1956 pr_err("cpsw: error registering net device\n"); 2014 pr_err("cpsw: error registering net device\n");
1957 free_netdev(ndev); 2015 free_netdev(ndev);
1958 ret = -ENODEV; 2016 ret = -ENODEV;
1959 } 2017 }
1960 2018
1961 return ret; 2019 return ret;
1962 } 2020 }
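
For context: the cpsw_tx_poll() routine registered just above via netif_napi_add() is defined in an earlier hunk of this patch and is not shown here. A minimal sketch of such a TX-completion NAPI poll, assuming the driver's existing cpdma_chan_process() helper and a hypothetical cpsw_intr_enable_tx() re-arm helper, might look like:

/* Sketch only: the real cpsw_tx_poll() from this patch lives in an earlier
 * hunk; cpsw_intr_enable_tx() is an assumed helper name, not a real symbol.
 */
static int cpsw_tx_poll(struct napi_struct *napi_tx, int budget)
{
	struct cpsw_priv *priv = container_of(napi_tx, struct cpsw_priv,
					      napi_tx);
	int num_tx;

	/* Reap completed TX descriptors, bounded by the NAPI budget */
	num_tx = cpdma_chan_process(priv->txch, budget);
	if (num_tx < budget) {
		/* All pending completions handled: leave polling mode and
		 * re-arm the TX interrupt.
		 */
		napi_complete(napi_tx);
		cpsw_intr_enable_tx(priv);	/* assumed helper */
	}

	return num_tx;
}
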
1963 2021
1964 static int cpsw_probe(struct platform_device *pdev) 2022 static int cpsw_probe(struct platform_device *pdev)
1965 { 2023 {
1966 struct cpsw_platform_data *data; 2024 struct cpsw_platform_data *data;
1967 struct net_device *ndev; 2025 struct net_device *ndev;
1968 struct cpsw_priv *priv; 2026 struct cpsw_priv *priv;
1969 struct cpdma_params dma_params; 2027 struct cpdma_params dma_params;
1970 struct cpsw_ale_params ale_params; 2028 struct cpsw_ale_params ale_params;
1971 void __iomem *ss_regs; 2029 void __iomem *ss_regs;
1972 struct resource *res, *ss_res; 2030 struct resource *res, *ss_res;
1973 u32 slave_offset, sliver_offset, slave_size; 2031 u32 slave_offset, sliver_offset, slave_size;
1974 int ret = 0, i, k = 0; 2032 int ret = 0, i, j = 0, k = 0;
1975 2033
1976 ndev = alloc_etherdev(sizeof(struct cpsw_priv)); 2034 ndev = alloc_etherdev(sizeof(struct cpsw_priv));
1977 if (!ndev) { 2035 if (!ndev) {
1978 pr_err("error allocating net_device\n"); 2036 pr_err("error allocating net_device\n");
1979 return -ENOMEM; 2037 return -ENOMEM;
1980 } 2038 }
1981 2039
1982 platform_set_drvdata(pdev, ndev); 2040 platform_set_drvdata(pdev, ndev);
1983 priv = netdev_priv(ndev); 2041 priv = netdev_priv(ndev);
1984 spin_lock_init(&priv->lock); 2042 spin_lock_init(&priv->lock);
1985 priv->pdev = pdev; 2043 priv->pdev = pdev;
1986 priv->ndev = ndev; 2044 priv->ndev = ndev;
1987 priv->dev = &ndev->dev; 2045 priv->dev = &ndev->dev;
1988 priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG); 2046 priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG);
1989 priv->rx_packet_max = max(rx_packet_max, 128); 2047 priv->rx_packet_max = max(rx_packet_max, 128);
1990 priv->cpts = devm_kzalloc(&pdev->dev, sizeof(struct cpts), GFP_KERNEL); 2048 priv->cpts = devm_kzalloc(&pdev->dev, sizeof(struct cpts), GFP_KERNEL);
1991 priv->irq_enabled = true; 2049 priv->irq_enabled = true;
2050 priv->irq_tx_enabled = true;
1992 if (!priv->cpts) { 2051 if (!priv->cpts) {
1993 pr_err("error allocating cpts\n"); 2052 pr_err("error allocating cpts\n");
1994 goto clean_ndev_ret; 2053 goto clean_ndev_ret;
1995 } 2054 }
1996 2055
1997 /* 2056 /*
1998 * This may be required here for child devices. 2057 * This may be required here for child devices.
1999 */ 2058 */
2000 pm_runtime_enable(&pdev->dev); 2059 pm_runtime_enable(&pdev->dev);
2001 2060
2002 /* Select default pin state */ 2061 /* Select default pin state */
2003 pinctrl_pm_select_default_state(&pdev->dev); 2062 pinctrl_pm_select_default_state(&pdev->dev);
2004 2063
2005 if (cpsw_probe_dt(&priv->data, pdev)) { 2064 if (cpsw_probe_dt(&priv->data, pdev)) {
2006 pr_err("cpsw: platform data missing\n"); 2065 pr_err("cpsw: platform data missing\n");
2007 ret = -ENODEV; 2066 ret = -ENODEV;
2008 goto clean_runtime_disable_ret; 2067 goto clean_runtime_disable_ret;
2009 } 2068 }
2010 data = &priv->data; 2069 data = &priv->data;
2011 2070
2012 if (is_valid_ether_addr(data->slave_data[0].mac_addr)) { 2071 if (is_valid_ether_addr(data->slave_data[0].mac_addr)) {
2013 memcpy(priv->mac_addr, data->slave_data[0].mac_addr, ETH_ALEN); 2072 memcpy(priv->mac_addr, data->slave_data[0].mac_addr, ETH_ALEN);
2014 pr_info("Detected MACID = %pM\n", priv->mac_addr); 2073 pr_info("Detected MACID = %pM\n", priv->mac_addr);
2015 } else { 2074 } else {
2016 eth_random_addr(priv->mac_addr); 2075 eth_random_addr(priv->mac_addr);
2017 pr_info("Random MACID = %pM\n", priv->mac_addr); 2076 pr_info("Random MACID = %pM\n", priv->mac_addr);
2018 } 2077 }
2019 2078
2020 memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN); 2079 memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN);
2021 2080
2022 priv->slaves = devm_kzalloc(&pdev->dev, 2081 priv->slaves = devm_kzalloc(&pdev->dev,
2023 sizeof(struct cpsw_slave) * data->slaves, 2082 sizeof(struct cpsw_slave) * data->slaves,
2024 GFP_KERNEL); 2083 GFP_KERNEL);
2025 if (!priv->slaves) { 2084 if (!priv->slaves) {
2026 ret = -ENOMEM; 2085 ret = -ENOMEM;
2027 goto clean_runtime_disable_ret; 2086 goto clean_runtime_disable_ret;
2028 } 2087 }
2029 for (i = 0; i < data->slaves; i++) 2088 for (i = 0; i < data->slaves; i++)
2030 priv->slaves[i].slave_num = i; 2089 priv->slaves[i].slave_num = i;
2031 2090
2032 priv->slaves[0].ndev = ndev; 2091 priv->slaves[0].ndev = ndev;
2033 priv->emac_port = 0; 2092 priv->emac_port = 0;
2034 2093
2035 priv->clk = devm_clk_get(&pdev->dev, "fck"); 2094 priv->clk = devm_clk_get(&pdev->dev, "fck");
2036 if (IS_ERR(priv->clk)) { 2095 if (IS_ERR(priv->clk)) {
2037 dev_err(priv->dev, "fck is not found\n"); 2096 dev_err(priv->dev, "fck is not found\n");
2038 ret = -ENODEV; 2097 ret = -ENODEV;
2039 goto clean_runtime_disable_ret; 2098 goto clean_runtime_disable_ret;
2040 } 2099 }
2041 priv->coal_intvl = 0; 2100 priv->coal_intvl = 0;
2042 priv->bus_freq_mhz = clk_get_rate(priv->clk) / 1000000; 2101 priv->bus_freq_mhz = clk_get_rate(priv->clk) / 1000000;
2043 2102
2044 ss_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2103 ss_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2045 ss_regs = devm_ioremap_resource(&pdev->dev, ss_res); 2104 ss_regs = devm_ioremap_resource(&pdev->dev, ss_res);
2046 if (IS_ERR(ss_regs)) { 2105 if (IS_ERR(ss_regs)) {
2047 ret = PTR_ERR(ss_regs); 2106 ret = PTR_ERR(ss_regs);
2048 goto clean_runtime_disable_ret; 2107 goto clean_runtime_disable_ret;
2049 } 2108 }
2050 priv->regs = ss_regs; 2109 priv->regs = ss_regs;
2051 priv->host_port = HOST_PORT_NUM; 2110 priv->host_port = HOST_PORT_NUM;
2052 2111
2053 /* Need to enable clocks with runtime PM api to access module 2112 /* Need to enable clocks with runtime PM api to access module
2054 * registers 2113 * registers
2055 */ 2114 */
2056 pm_runtime_get_sync(&pdev->dev); 2115 pm_runtime_get_sync(&pdev->dev);
2057 priv->version = readl(&priv->regs->id_ver); 2116 priv->version = readl(&priv->regs->id_ver);
2058 pm_runtime_put_sync(&pdev->dev); 2117 pm_runtime_put_sync(&pdev->dev);
2059 2118
2060 res = platform_get_resource(pdev, IORESOURCE_MEM, 1); 2119 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
2061 priv->wr_regs = devm_ioremap_resource(&pdev->dev, res); 2120 priv->wr_regs = devm_ioremap_resource(&pdev->dev, res);
2062 if (IS_ERR(priv->wr_regs)) { 2121 if (IS_ERR(priv->wr_regs)) {
2063 ret = PTR_ERR(priv->wr_regs); 2122 ret = PTR_ERR(priv->wr_regs);
2064 goto clean_runtime_disable_ret; 2123 goto clean_runtime_disable_ret;
2065 } 2124 }
2066 2125
2067 memset(&dma_params, 0, sizeof(dma_params)); 2126 memset(&dma_params, 0, sizeof(dma_params));
2068 memset(&ale_params, 0, sizeof(ale_params)); 2127 memset(&ale_params, 0, sizeof(ale_params));
2069 2128
2070 switch (priv->version) { 2129 switch (priv->version) {
2071 case CPSW_VERSION_1: 2130 case CPSW_VERSION_1:
2072 priv->host_port_regs = ss_regs + CPSW1_HOST_PORT_OFFSET; 2131 priv->host_port_regs = ss_regs + CPSW1_HOST_PORT_OFFSET;
2073 priv->cpts->reg = ss_regs + CPSW1_CPTS_OFFSET; 2132 priv->cpts->reg = ss_regs + CPSW1_CPTS_OFFSET;
2074 priv->hw_stats = ss_regs + CPSW1_HW_STATS; 2133 priv->hw_stats = ss_regs + CPSW1_HW_STATS;
2075 dma_params.dmaregs = ss_regs + CPSW1_CPDMA_OFFSET; 2134 dma_params.dmaregs = ss_regs + CPSW1_CPDMA_OFFSET;
2076 dma_params.txhdp = ss_regs + CPSW1_STATERAM_OFFSET; 2135 dma_params.txhdp = ss_regs + CPSW1_STATERAM_OFFSET;
2077 ale_params.ale_regs = ss_regs + CPSW1_ALE_OFFSET; 2136 ale_params.ale_regs = ss_regs + CPSW1_ALE_OFFSET;
2078 slave_offset = CPSW1_SLAVE_OFFSET; 2137 slave_offset = CPSW1_SLAVE_OFFSET;
2079 slave_size = CPSW1_SLAVE_SIZE; 2138 slave_size = CPSW1_SLAVE_SIZE;
2080 sliver_offset = CPSW1_SLIVER_OFFSET; 2139 sliver_offset = CPSW1_SLIVER_OFFSET;
2081 dma_params.desc_mem_phys = 0; 2140 dma_params.desc_mem_phys = 0;
2082 break; 2141 break;
2083 case CPSW_VERSION_2: 2142 case CPSW_VERSION_2:
2084 case CPSW_VERSION_3: 2143 case CPSW_VERSION_3:
2085 case CPSW_VERSION_4: 2144 case CPSW_VERSION_4:
2086 priv->host_port_regs = ss_regs + CPSW2_HOST_PORT_OFFSET; 2145 priv->host_port_regs = ss_regs + CPSW2_HOST_PORT_OFFSET;
2087 priv->cpts->reg = ss_regs + CPSW2_CPTS_OFFSET; 2146 priv->cpts->reg = ss_regs + CPSW2_CPTS_OFFSET;
2088 priv->hw_stats = ss_regs + CPSW2_HW_STATS; 2147 priv->hw_stats = ss_regs + CPSW2_HW_STATS;
2089 dma_params.dmaregs = ss_regs + CPSW2_CPDMA_OFFSET; 2148 dma_params.dmaregs = ss_regs + CPSW2_CPDMA_OFFSET;
2090 dma_params.txhdp = ss_regs + CPSW2_STATERAM_OFFSET; 2149 dma_params.txhdp = ss_regs + CPSW2_STATERAM_OFFSET;
2091 ale_params.ale_regs = ss_regs + CPSW2_ALE_OFFSET; 2150 ale_params.ale_regs = ss_regs + CPSW2_ALE_OFFSET;
2092 slave_offset = CPSW2_SLAVE_OFFSET; 2151 slave_offset = CPSW2_SLAVE_OFFSET;
2093 slave_size = CPSW2_SLAVE_SIZE; 2152 slave_size = CPSW2_SLAVE_SIZE;
2094 sliver_offset = CPSW2_SLIVER_OFFSET; 2153 sliver_offset = CPSW2_SLIVER_OFFSET;
2095 dma_params.desc_mem_phys = 2154 dma_params.desc_mem_phys =
2096 (u32 __force) ss_res->start + CPSW2_BD_OFFSET; 2155 (u32 __force) ss_res->start + CPSW2_BD_OFFSET;
2097 break; 2156 break;
2098 default: 2157 default:
2099 dev_err(priv->dev, "unknown version 0x%08x\n", priv->version); 2158 dev_err(priv->dev, "unknown version 0x%08x\n", priv->version);
2100 ret = -ENODEV; 2159 ret = -ENODEV;
2101 goto clean_runtime_disable_ret; 2160 goto clean_runtime_disable_ret;
2102 } 2161 }
2103 for (i = 0; i < priv->data.slaves; i++) { 2162 for (i = 0; i < priv->data.slaves; i++) {
2104 struct cpsw_slave *slave = &priv->slaves[i]; 2163 struct cpsw_slave *slave = &priv->slaves[i];
2105 cpsw_slave_init(slave, priv, slave_offset, sliver_offset); 2164 cpsw_slave_init(slave, priv, slave_offset, sliver_offset);
2106 slave_offset += slave_size; 2165 slave_offset += slave_size;
2107 sliver_offset += SLIVER_SIZE; 2166 sliver_offset += SLIVER_SIZE;
2108 } 2167 }
2109 2168
2110 dma_params.dev = &pdev->dev; 2169 dma_params.dev = &pdev->dev;
2111 dma_params.rxthresh = dma_params.dmaregs + CPDMA_RXTHRESH; 2170 dma_params.rxthresh = dma_params.dmaregs + CPDMA_RXTHRESH;
2112 dma_params.rxfree = dma_params.dmaregs + CPDMA_RXFREE; 2171 dma_params.rxfree = dma_params.dmaregs + CPDMA_RXFREE;
2113 dma_params.rxhdp = dma_params.txhdp + CPDMA_RXHDP; 2172 dma_params.rxhdp = dma_params.txhdp + CPDMA_RXHDP;
2114 dma_params.txcp = dma_params.txhdp + CPDMA_TXCP; 2173 dma_params.txcp = dma_params.txhdp + CPDMA_TXCP;
2115 dma_params.rxcp = dma_params.txhdp + CPDMA_RXCP; 2174 dma_params.rxcp = dma_params.txhdp + CPDMA_RXCP;
2116 2175
2117 dma_params.num_chan = data->channels; 2176 dma_params.num_chan = data->channels;
2118 dma_params.has_soft_reset = true; 2177 dma_params.has_soft_reset = true;
2119 dma_params.min_packet_size = CPSW_MIN_PACKET_SIZE; 2178 dma_params.min_packet_size = CPSW_MIN_PACKET_SIZE;
2120 dma_params.desc_mem_size = data->bd_ram_size; 2179 dma_params.desc_mem_size = data->bd_ram_size;
2121 dma_params.desc_align = 16; 2180 dma_params.desc_align = 16;
2122 dma_params.has_ext_regs = true; 2181 dma_params.has_ext_regs = true;
2123 dma_params.desc_hw_addr = dma_params.desc_mem_phys; 2182 dma_params.desc_hw_addr = dma_params.desc_mem_phys;
2124 2183
2125 priv->dma = cpdma_ctlr_create(&dma_params); 2184 priv->dma = cpdma_ctlr_create(&dma_params);
2126 if (!priv->dma) { 2185 if (!priv->dma) {
2127 dev_err(priv->dev, "error initializing dma\n"); 2186 dev_err(priv->dev, "error initializing dma\n");
2128 ret = -ENOMEM; 2187 ret = -ENOMEM;
2129 goto clean_runtime_disable_ret; 2188 goto clean_runtime_disable_ret;
2130 } 2189 }
2131 2190
2132 priv->txch = cpdma_chan_create(priv->dma, tx_chan_num(0), 2191 priv->txch = cpdma_chan_create(priv->dma, tx_chan_num(0),
2133 cpsw_tx_handler); 2192 cpsw_tx_handler);
2134 priv->rxch = cpdma_chan_create(priv->dma, rx_chan_num(0), 2193 priv->rxch = cpdma_chan_create(priv->dma, rx_chan_num(0),
2135 cpsw_rx_handler); 2194 cpsw_rx_handler);
2136 2195
2137 if (WARN_ON(!priv->txch || !priv->rxch)) { 2196 if (WARN_ON(!priv->txch || !priv->rxch)) {
2138 dev_err(priv->dev, "error initializing dma channels\n"); 2197 dev_err(priv->dev, "error initializing dma channels\n");
2139 ret = -ENOMEM; 2198 ret = -ENOMEM;
2140 goto clean_dma_ret; 2199 goto clean_dma_ret;
2141 } 2200 }
2142 2201
2143 ale_params.dev = &ndev->dev; 2202 ale_params.dev = &ndev->dev;
2144 ale_params.ale_ageout = ale_ageout; 2203 ale_params.ale_ageout = ale_ageout;
2145 ale_params.ale_entries = data->ale_entries; 2204 ale_params.ale_entries = data->ale_entries;
2146 ale_params.ale_ports = data->slaves; 2205 ale_params.ale_ports = data->slaves;
2147 2206
2148 priv->ale = cpsw_ale_create(&ale_params); 2207 priv->ale = cpsw_ale_create(&ale_params);
2149 if (!priv->ale) { 2208 if (!priv->ale) {
2150 dev_err(priv->dev, "error initializing ale engine\n"); 2209 dev_err(priv->dev, "error initializing ale engine\n");
2151 ret = -ENODEV; 2210 ret = -ENODEV;
2152 goto clean_dma_ret; 2211 goto clean_dma_ret;
2153 } 2212 }
2154 2213
2155 ndev->irq = platform_get_irq(pdev, 0); 2214 ndev->irq = platform_get_irq(pdev, 0);
2156 if (ndev->irq < 0) { 2215 if (ndev->irq < 0) {
2157 dev_err(priv->dev, "error getting irq resource\n"); 2216 dev_err(priv->dev, "error getting irq resource\n");
2158 ret = -ENOENT; 2217 ret = -ENOENT;
2159 goto clean_ale_ret; 2218 goto clean_ale_ret;
2160 } 2219 }
2161 2220
2162 ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; 2221 ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
2163 2222
2164 ndev->netdev_ops = &cpsw_netdev_ops; 2223 ndev->netdev_ops = &cpsw_netdev_ops;
2165 SET_ETHTOOL_OPS(ndev, &cpsw_ethtool_ops); 2224 SET_ETHTOOL_OPS(ndev, &cpsw_ethtool_ops);
2166 netif_napi_add(ndev, &priv->napi, cpsw_poll, CPSW_POLL_WEIGHT); 2225 netif_napi_add(ndev, &priv->napi, cpsw_poll, CPSW_POLL_WEIGHT);
2226 netif_napi_add(ndev, &priv->napi_tx, cpsw_tx_poll, CPSW_POLL_WEIGHT);
2167 2227
2168 /* register the network device */ 2228 /* register the network device */
2169 SET_NETDEV_DEV(ndev, &pdev->dev); 2229 SET_NETDEV_DEV(ndev, &pdev->dev);
2170 ret = register_netdev(ndev); 2230 ret = register_netdev(ndev);
2171 if (ret) { 2231 if (ret) {
2172 dev_err(priv->dev, "error registering net device\n"); 2232 dev_err(priv->dev, "error registering net device\n");
2173 ret = -ENODEV; 2233 ret = -ENODEV;
2174 goto clean_ale_ret; 2234 goto clean_ale_ret;
2175 } 2235 }
2176 2236
2177 while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k))) { 2237 while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k))) {
2178 for (i = res->start; i <= res->end; i++) { 2238 for (i = res->start; i <= res->end; i++, j++) {
2179 if (devm_request_irq(&pdev->dev, i, cpsw_interrupt, 0, 2239 if (j == 2)
2180 dev_name(priv->dev), priv)) { 2240 ret = devm_request_irq(&pdev->dev, i,
2241 cpsw_tx_interrupt, 0,
2242 "eth-tx", priv);
2243 else
2244 ret = devm_request_irq(&pdev->dev, i,
2245 cpsw_interrupt, 0,
2246 dev_name(priv->dev), priv);
2247 if (ret) {
2181 dev_err(priv->dev, "error attaching irq\n"); 2248 dev_err(priv->dev, "error attaching irq\n");
2182 goto clean_ale_ret; 2249 goto clean_ale_ret;
2183 } 2250 }
2184 priv->irqs_table[k] = i; 2251 priv->irqs_table[k] = i;
2185 priv->num_irqs = k + 1; 2252 priv->num_irqs = k + 1;
2186 } 2253 }
2187 k++; 2254 k++;
2188 } 2255 }
2189 2256
2190 cpsw_notice(priv, probe, "initialized device (regs %x, irq %d)\n", 2257 cpsw_notice(priv, probe, "initialized device (regs %x, irq %d)\n",
2191 ss_res->start, ndev->irq); 2258 ss_res->start, ndev->irq);
2192 2259
2193 if (priv->data.dual_emac) { 2260 if (priv->data.dual_emac) {
2194 ret = cpsw_probe_dual_emac(pdev, priv); 2261 ret = cpsw_probe_dual_emac(pdev, priv);
2195 if (ret) { 2262 if (ret) {
2196 cpsw_err(priv, probe, "error probe slave 2 emac interface\n"); 2263 cpsw_err(priv, probe, "error probe slave 2 emac interface\n");
2197 goto clean_ale_ret; 2264 goto clean_ale_ret;
2198 } 2265 }
2199 } 2266 }
2200 2267
2201 return 0; 2268 return 0;
2202 2269
2203 clean_ale_ret: 2270 clean_ale_ret:
2204 cpsw_ale_destroy(priv->ale); 2271 cpsw_ale_destroy(priv->ale);
2205 clean_dma_ret: 2272 clean_dma_ret:
2206 cpdma_chan_destroy(priv->txch); 2273 cpdma_chan_destroy(priv->txch);
2207 cpdma_chan_destroy(priv->rxch); 2274 cpdma_chan_destroy(priv->rxch);
2208 cpdma_ctlr_destroy(priv->dma); 2275 cpdma_ctlr_destroy(priv->dma);
2209 clean_runtime_disable_ret: 2276 clean_runtime_disable_ret:
2210 pm_runtime_disable(&pdev->dev); 2277 pm_runtime_disable(&pdev->dev);
2211 clean_ndev_ret: 2278 clean_ndev_ret:
2212 free_netdev(priv->ndev); 2279 free_netdev(priv->ndev);
2213 return ret; 2280 return ret;
2214 } 2281 }
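
For context: in the IRQ request loop above, the third interrupt resource (j == 2) is now wired to cpsw_tx_interrupt() instead of the shared cpsw_interrupt() handler; on CPSW the interrupt lines are ordered rx-thresh, rx, tx, misc, so index 2 is the TX completion line. The actual cpsw_tx_interrupt() added by this patch appears in an earlier hunk; a hedged sketch of such a TX hard-IRQ handler, assuming a cpsw_intr_disable_tx() masking helper, could be:

/* Sketch only: cpsw_intr_disable_tx() is an assumed helper name; the real
 * handler from this patch is defined earlier in the file.
 */
static irqreturn_t cpsw_tx_interrupt(int irq, void *dev_id)
{
	struct cpsw_priv *priv = dev_id;

	/* Acknowledge the TX DMA interrupt and mask it until the poll runs */
	cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
	cpsw_intr_disable_tx(priv);		/* assumed helper */

	/* Defer descriptor reaping to NAPI context */
	napi_schedule(&priv->napi_tx);
	return IRQ_HANDLED;
}
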
2215 2282
2216 static int cpsw_remove(struct platform_device *pdev) 2283 static int cpsw_remove(struct platform_device *pdev)
2217 { 2284 {
2218 struct net_device *ndev = platform_get_drvdata(pdev); 2285 struct net_device *ndev = platform_get_drvdata(pdev);
2219 struct cpsw_priv *priv = netdev_priv(ndev); 2286 struct cpsw_priv *priv = netdev_priv(ndev);
2220 2287
2221 if (priv->data.dual_emac) 2288 if (priv->data.dual_emac)
2222 unregister_netdev(cpsw_get_slave_ndev(priv, 1)); 2289 unregister_netdev(cpsw_get_slave_ndev(priv, 1));
2223 unregister_netdev(ndev); 2290 unregister_netdev(ndev);
2224 2291
2225 cpsw_ale_destroy(priv->ale); 2292 cpsw_ale_destroy(priv->ale);
2226 cpdma_chan_destroy(priv->txch); 2293 cpdma_chan_destroy(priv->txch);
2227 cpdma_chan_destroy(priv->rxch); 2294 cpdma_chan_destroy(priv->rxch);
2228 cpdma_ctlr_destroy(priv->dma); 2295 cpdma_ctlr_destroy(priv->dma);
2229 pm_runtime_disable(&pdev->dev); 2296 pm_runtime_disable(&pdev->dev);
2230 if (priv->data.dual_emac) 2297 if (priv->data.dual_emac)
2231 free_netdev(cpsw_get_slave_ndev(priv, 1)); 2298 free_netdev(cpsw_get_slave_ndev(priv, 1));
2232 free_netdev(ndev); 2299 free_netdev(ndev);
2233 return 0; 2300 return 0;
2234 } 2301 }
2235 2302
2236 static int cpsw_suspend(struct device *dev) 2303 static int cpsw_suspend(struct device *dev)
2237 { 2304 {
2238 struct platform_device *pdev = to_platform_device(dev); 2305 struct platform_device *pdev = to_platform_device(dev);
2239 struct net_device *ndev = platform_get_drvdata(pdev); 2306 struct net_device *ndev = platform_get_drvdata(pdev);
2240 struct cpsw_priv *priv = netdev_priv(ndev); 2307 struct cpsw_priv *priv = netdev_priv(ndev);
2241 2308
2242 if (netif_running(ndev)) 2309 if (netif_running(ndev))
2243 cpsw_ndo_stop(ndev); 2310 cpsw_ndo_stop(ndev);
2244 2311
2245 for_each_slave(priv, soft_reset_slave); 2312 for_each_slave(priv, soft_reset_slave);
2246 2313
2247 pm_runtime_put_sync(&pdev->dev); 2314 pm_runtime_put_sync(&pdev->dev);
2248 2315
2249 /* Select sleep pin state */ 2316 /* Select sleep pin state */
2250 pinctrl_pm_select_sleep_state(&pdev->dev); 2317 pinctrl_pm_select_sleep_state(&pdev->dev);
2251 2318
2252 return 0; 2319 return 0;
2253 } 2320 }
2254 2321
2255 static int cpsw_resume(struct device *dev) 2322 static int cpsw_resume(struct device *dev)
2256 { 2323 {
2257 struct platform_device *pdev = to_platform_device(dev); 2324 struct platform_device *pdev = to_platform_device(dev);
2258 struct net_device *ndev = platform_get_drvdata(pdev); 2325 struct net_device *ndev = platform_get_drvdata(pdev);
2259 2326
2260 pm_runtime_get_sync(&pdev->dev); 2327 pm_runtime_get_sync(&pdev->dev);
2261 2328
2262 /* Select default pin state */ 2329 /* Select default pin state */
2263 pinctrl_pm_select_default_state(&pdev->dev); 2330 pinctrl_pm_select_default_state(&pdev->dev);
2264 2331
2265 if (netif_running(ndev)) 2332 if (netif_running(ndev))
2266 cpsw_ndo_open(ndev); 2333 cpsw_ndo_open(ndev);
2267 return 0; 2334 return 0;
2268 } 2335 }
2269 2336
2270 static const struct dev_pm_ops cpsw_pm_ops = { 2337 static const struct dev_pm_ops cpsw_pm_ops = {
2271 .suspend = cpsw_suspend, 2338 .suspend = cpsw_suspend,
2272 .resume = cpsw_resume, 2339 .resume = cpsw_resume,
2273 }; 2340 };
2274 2341
2275 static const struct of_device_id cpsw_of_mtable[] = { 2342 static const struct of_device_id cpsw_of_mtable[] = {
2276 { .compatible = "ti,cpsw", }, 2343 { .compatible = "ti,cpsw", },
2277 { /* sentinel */ }, 2344 { /* sentinel */ },
2278 }; 2345 };
2279 MODULE_DEVICE_TABLE(of, cpsw_of_mtable); 2346 MODULE_DEVICE_TABLE(of, cpsw_of_mtable);
2280 2347
2281 static struct platform_driver cpsw_driver = { 2348 static struct platform_driver cpsw_driver = {
2282 .driver = { 2349 .driver = {
2283 .name = "cpsw", 2350 .name = "cpsw",
2284 .owner = THIS_MODULE, 2351 .owner = THIS_MODULE,
2285 .pm = &cpsw_pm_ops, 2352 .pm = &cpsw_pm_ops,
2286 .of_match_table = of_match_ptr(cpsw_of_mtable), 2353 .of_match_table = of_match_ptr(cpsw_of_mtable),
2287 }, 2354 },
2288 .probe = cpsw_probe, 2355 .probe = cpsw_probe,
2289 .remove = cpsw_remove, 2356 .remove = cpsw_remove,
2290 }; 2357 };
2291 2358
2292 static int __init cpsw_init(void) 2359 static int __init cpsw_init(void)
2293 { 2360 {
2294 return platform_driver_register(&cpsw_driver); 2361 return platform_driver_register(&cpsw_driver);
2295 } 2362 }
2296 late_initcall(cpsw_init); 2363 late_initcall(cpsw_init);
2297 2364
2298 static void __exit cpsw_exit(void) 2365 static void __exit cpsw_exit(void)
2299 { 2366 {
2300 platform_driver_unregister(&cpsw_driver); 2367 platform_driver_unregister(&cpsw_driver);
2301 } 2368 }
2302 module_exit(cpsw_exit); 2369 module_exit(cpsw_exit);