Commit 565a55c81567b5478d8d7a0f4fe23171ef2e9827

Authored by Chandan Nath
1 parent 12ad41c225
Exists in master

ARM:omap:am33xx: CPSW: remove dmtimer re-enabling

This patch removes the unwanted dmtimer re-enabling code that ran after
resetting the timer status register. Some additional cleanup is also
done in this patch.

Signed-off-by: Chandan Nath <chandan.nath@ti.com>
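
For context, a minimal sketch of the interrupt-acknowledge path once this patch is applied (it mirrors set_cpsw_dmtimer_clear() in the diff below; the assumption, per the patch, is that the RX/TX dmtimers are configured once up front, so nothing needs reprogramming per interrupt):

	/* Sketch: after this patch, acking the capture interrupt is just a
	 * status write-back; the old disable/set_int_enable/set_capture/
	 * enable sequence re-programmed state that never changes between
	 * interrupts, so it was dropped.
	 */
	static void set_cpsw_dmtimer_clear(void)
	{
		omap_dm_timer_write_status(dmtimer_rx, OMAP_TIMER_INT_CAPTURE);
		omap_dm_timer_write_status(dmtimer_tx, OMAP_TIMER_INT_CAPTURE);
	}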

Showing 2 changed files with 17 additions and 35 deletions

/*
 * Texas Instruments Ethernet Switch Driver
 *
 * Copyright (C) 2010 Texas Instruments
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/timer.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/if_ether.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/interrupt.h>

#include <linux/cpsw.h>
#include <plat/dmtimer.h>
#include "cpsw_ale.h"
#include "davinci_cpdma.h"


#define CPSW_DEBUG (NETIF_MSG_HW | NETIF_MSG_WOL | \
		NETIF_MSG_DRV | NETIF_MSG_LINK | \
		NETIF_MSG_IFUP | NETIF_MSG_INTR | \
		NETIF_MSG_PROBE | NETIF_MSG_TIMER | \
		NETIF_MSG_IFDOWN | NETIF_MSG_RX_ERR | \
		NETIF_MSG_TX_ERR | NETIF_MSG_TX_DONE | \
		NETIF_MSG_PKTDATA | NETIF_MSG_TX_QUEUED | \
		NETIF_MSG_RX_STATUS)

#define msg(level, type, format, ...) \
do { \
	if (netif_msg_##type(priv) && net_ratelimit()) \
		dev_##level(priv->dev, format, ## __VA_ARGS__); \
} while (0)

#define CPDMA_RXTHRESH 0x0c0
#define CPDMA_RXFREE 0x0e0
#define CPDMA_TXHDP_VER1 0x100
#define CPDMA_TXHDP_VER2 0x200
#define CPDMA_RXHDP_VER1 0x120
#define CPDMA_RXHDP_VER2 0x220
#define CPDMA_TXCP_VER1 0x140
#define CPDMA_TXCP_VER2 0x240
#define CPDMA_RXCP_VER1 0x160
#define CPDMA_RXCP_VER2 0x260

#define CPSW_POLL_WEIGHT 64
#define CPSW_MIN_PACKET_SIZE 60
#define CPSW_MAX_PACKET_SIZE (1500 + 14 + 4 + 4)
#define CPSW_PHY_SPEED 1000

/* CPSW control module masks */
#define CPSW_INTPACEEN (0x3 << 16)
#define CPSW_INTPRESCALE_MASK (0x7FF << 0)
#define CPSW_CMINTMAX_CNT 63
#define CPSW_CMINTMIN_CNT 2
#define CPSW_CMINTMAX_INTVL (1000 / CPSW_CMINTMIN_CNT)
#define CPSW_CMINTMIN_INTVL ((1000 / CPSW_CMINTMAX_CNT) + 1)

#define CPSW_IRQ_QUIRK
#ifdef CPSW_IRQ_QUIRK
#define cpsw_enable_irq(priv) \
	do { \
		u32 i; \
		for (i = 0; i < priv->num_irqs; i++) \
			enable_irq(priv->irqs_table[i]); \
	} while (0);
#define cpsw_disable_irq(priv) \
	do { \
		u32 i; \
		for (i = 0; i < priv->num_irqs; i++) \
			disable_irq_nosync(priv->irqs_table[i]); \
	} while (0);
#else
#define cpsw_enable_irq(priv) do { } while (0);
#define cpsw_disable_irq(priv) do { } while (0);
#endif

#define CPSW_CPDMA_EOI_REG 0x894
#define CPSW_TIMER_MASK 0xA0908
#define CPSW_TIMER_CAP_REG 0xFD0
#define CPSW_RX_TIMER_REQ 5
#define CPSW_TX_TIMER_REQ 6

-struct omap_dm_timer *stTimerRx;
-struct omap_dm_timer *stTimerTx;
+struct omap_dm_timer *dmtimer_rx;
+struct omap_dm_timer *dmtimer_tx;

extern u32 omap_ctrl_readl(u16 offset);
extern void omap_ctrl_writel(u32 val, u16 offset);

static int debug_level;
module_param(debug_level, int, 0);
MODULE_PARM_DESC(debug_level, "cpsw debug level (NETIF_MSG bits)");

static int ale_ageout = 10;
module_param(ale_ageout, int, 0);
MODULE_PARM_DESC(ale_ageout, "cpsw ale ageout interval (seconds)");

static int rx_packet_max = CPSW_MAX_PACKET_SIZE;
module_param(rx_packet_max, int, 0);
MODULE_PARM_DESC(rx_packet_max, "maximum receive packet size (bytes)");

struct cpsw_ss_regs {
	u32 id_ver;
	u32 soft_reset;
	u32 control;
	u32 int_control;
	u32 rx_thresh_en;
	u32 rx_en;
	u32 tx_en;
	u32 misc_en;
	u32 mem_allign1[8];
	u32 rx_thresh_stat;
	u32 rx_stat;
	u32 tx_stat;
	u32 misc_stat;
	u32 mem_allign2[8];
	u32 rx_imax;
	u32 tx_imax;
};

struct cpsw_regs {
	u32 id_ver;
	u32 control;
	u32 soft_reset;
	u32 stat_port_en;
	u32 ptype;
};

struct cpsw_slave_regs {
	u32 max_blks;
	u32 blk_cnt;
	u32 flow_thresh;
	u32 port_vlan;
	u32 tx_pri_map;
	u32 ts_seq_mtype;
#ifdef CONFIG_ARCH_TI814X
	u32 ts_ctl;
	u32 ts_seq_ltype;
	u32 ts_vlan;
#endif
	u32 sa_lo;
	u32 sa_hi;
};

struct cpsw_host_regs {
	u32 max_blks;
	u32 blk_cnt;
	u32 flow_thresh;
	u32 port_vlan;
	u32 tx_pri_map;
	u32 cpdma_tx_pri_map;
	u32 cpdma_rx_chan_map;
};

struct cpsw_sliver_regs {
	u32 id_ver;
	u32 mac_control;
	u32 mac_status;
	u32 soft_reset;
	u32 rx_maxlen;
	u32 __reserved_0;
	u32 rx_pause;
	u32 tx_pause;
	u32 __reserved_1;
	u32 rx_pri_map;
};

struct cpsw_hw_stats {
	u32 rxgoodframes;
	u32 rxbroadcastframes;
	u32 rxmulticastframes;
	u32 rxpauseframes;
	u32 rxcrcerrors;
	u32 rxaligncodeerrors;
	u32 rxoversizedframes;
	u32 rxjabberframes;
	u32 rxundersizedframes;
	u32 rxfragments;
	u32 __pad_0[2];
	u32 rxoctets;
	u32 txgoodframes;
	u32 txbroadcastframes;
	u32 txmulticastframes;
	u32 txpauseframes;
	u32 txdeferredframes;
	u32 txcollisionframes;
	u32 txsinglecollframes;
	u32 txmultcollframes;
	u32 txexcessivecollisions;
	u32 txlatecollisions;
	u32 txunderrun;
	u32 txcarriersenseerrors;
	u32 txoctets;
	u32 octetframes64;
	u32 octetframes65t127;
	u32 octetframes128t255;
	u32 octetframes256t511;
	u32 octetframes512t1023;
	u32 octetframes1024tup;
	u32 netoctets;
	u32 rxsofoverruns;
	u32 rxmofoverruns;
	u32 rxdmaoverruns;
};

struct cpsw_slave {
	struct cpsw_slave_regs __iomem *regs;
	struct cpsw_sliver_regs __iomem *sliver;
	int slave_num;
	u32 mac_control;
	struct cpsw_slave_data *data;
	struct phy_device *phy;
};

struct cpsw_priv {
	spinlock_t lock;
	struct platform_device *pdev;
	struct net_device *ndev;
	struct resource *cpsw_res;
	struct resource *cpsw_ss_res;
	struct napi_struct napi;
#define napi_to_priv(napi) container_of(napi, struct cpsw_priv, napi)
	struct device *dev;
	struct cpsw_platform_data data;
	struct cpsw_regs __iomem *regs;
	struct cpsw_ss_regs __iomem *ss_regs;
	struct cpsw_hw_stats __iomem *hw_stats;
	struct cpsw_host_regs __iomem *host_port_regs;
	u32 msg_enable;
	u32 coal_intvl;
	u32 bus_freq_mhz;
	struct net_device_stats stats;
	int rx_packet_max;
	int host_port;
	struct clk *clk;
	u8 mac_addr[ETH_ALEN];
	struct cpsw_slave *slaves;
#define for_each_slave(priv, func, arg...) \
	do { \
		int idx; \
		for (idx = 0; idx < (priv)->data.slaves; idx++) \
			(func)((priv)->slaves + idx, ##arg); \
	} while (0)

	struct cpdma_ctlr *dma;
	struct cpdma_chan *txch, *rxch;
	struct cpsw_ale *ale;

#ifdef CPSW_IRQ_QUIRK
	/* snapshot of IRQ numbers */
	u32 irqs_table[4];
	u32 num_irqs;
#endif

};

static int cpsw_set_coalesce(struct net_device *ndev,
			struct ethtool_coalesce *coal);

-static void __iomem *cpdma_base;
-
static void cpsw_intr_enable(struct cpsw_priv *priv)
{
	__raw_writel(0xFF, &priv->ss_regs->tx_en);
	__raw_writel(0xFF, &priv->ss_regs->rx_en);

	cpdma_ctlr_int_ctrl(priv->dma, true);
	return;
}

static void cpsw_intr_disable(struct cpsw_priv *priv)
{
	__raw_writel(0, &priv->ss_regs->tx_en);
	__raw_writel(0, &priv->ss_regs->rx_en);

	cpdma_ctlr_int_ctrl(priv->dma, false);
	return;
}

void cpsw_tx_handler(void *token, int len, int status)
{
	struct sk_buff *skb = token;
	struct net_device *ndev = skb->dev;
	struct cpsw_priv *priv = netdev_priv(ndev);

	if (unlikely(netif_queue_stopped(ndev)))
		netif_start_queue(ndev);
	priv->stats.tx_packets++;
	priv->stats.tx_bytes += len;
	dev_kfree_skb_any(skb);
}

void cpsw_rx_handler(void *token, int len, int status)
{
	struct sk_buff *skb = token;
	struct net_device *ndev = skb->dev;
	struct cpsw_priv *priv = netdev_priv(ndev);
	int ret = 0;

	if (likely(status >= 0)) {
		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, ndev);
		netif_receive_skb(skb);
		priv->stats.rx_bytes += len;
		priv->stats.rx_packets++;
		skb = NULL;
	}


	if (unlikely(!netif_running(ndev))) {
		if (skb)
			dev_kfree_skb_any(skb);
		return;
	}

	if (likely(!skb)) {
		skb = netdev_alloc_skb_ip_align(ndev, priv->rx_packet_max);
		if (WARN_ON(!skb))
			return;

		ret = cpdma_chan_submit(priv->rxch, skb, skb->data,
				skb_tailroom(skb), GFP_KERNEL);
	}

	WARN_ON(ret < 0);

}

static void set_cpsw_dmtimer_clear(void)
{
-	omap_dm_timer_write_status(stTimerRx, OMAP_TIMER_INT_CAPTURE);
-	omap_dm_timer_write_status(stTimerTx, OMAP_TIMER_INT_CAPTURE);
+	omap_dm_timer_write_status(dmtimer_rx, OMAP_TIMER_INT_CAPTURE);
+	omap_dm_timer_write_status(dmtimer_tx, OMAP_TIMER_INT_CAPTURE);

-	omap_dm_timer_disable(stTimerRx);
-	omap_dm_timer_set_int_enable(stTimerRx, OMAP_TIMER_INT_CAPTURE);
-	omap_dm_timer_set_capture(stTimerRx, 1, 0, 0);
-	omap_dm_timer_enable(stTimerRx);
-
-	omap_dm_timer_disable(stTimerTx);
-	omap_dm_timer_set_int_enable(stTimerTx, OMAP_TIMER_INT_CAPTURE);
-	omap_dm_timer_set_capture(stTimerTx, 1, 0, 0);
-	omap_dm_timer_enable(stTimerTx);
-
	return;
}

static irqreturn_t cpsw_interrupt(int irq, void *dev_id)
{
	struct cpsw_priv *priv = dev_id;

	if (likely(netif_running(priv->ndev))) {
		cpsw_intr_disable(priv);
		cpsw_disable_irq(priv);
		napi_schedule(&priv->napi);
	}

	return IRQ_HANDLED;
}

static int cpsw_poll(struct napi_struct *napi, int budget)
{
	struct cpsw_priv *priv = napi_to_priv(napi);
	int num_tx, num_rx;

	num_tx = cpdma_chan_process(priv->txch, 128);
	num_rx = cpdma_chan_process(priv->rxch, budget);

	if (num_rx || num_tx)
		msg(dbg, intr, "poll %d rx, %d tx pkts\n", num_rx, num_tx);

	if (num_rx < budget) {
		napi_complete(napi);
		cpdma_ctlr_eoi(priv->dma);
-		__raw_writel(0x1, cpdma_base + CPSW_CPDMA_EOI_REG);
-		__raw_writel(0x2, cpdma_base + CPSW_CPDMA_EOI_REG);
		set_cpsw_dmtimer_clear();
		cpsw_intr_enable(priv);
		cpsw_enable_irq(priv);
	}

	return num_rx;
}

static inline void soft_reset(const char *module, void __iomem *reg)
{
	unsigned long timeout = jiffies + HZ;

	__raw_writel(1, reg);
	do {
		cpu_relax();
	} while ((__raw_readl(reg) & 1) && time_after(timeout, jiffies));

	WARN(__raw_readl(reg) & 1, "failed to soft-reset %s\n", module);
}

#define mac_hi(mac) (((mac)[0] << 0) | ((mac)[1] << 8) | \
		((mac)[2] << 16) | ((mac)[3] << 24))
#define mac_lo(mac) (((mac)[4] << 0) | ((mac)[5] << 8))

static void cpsw_set_slave_mac(struct cpsw_slave *slave,
			struct cpsw_priv *priv)
{
	__raw_writel(mac_hi(priv->mac_addr), &slave->regs->sa_hi);
	__raw_writel(mac_lo(priv->mac_addr), &slave->regs->sa_lo);
}

static inline u32 cpsw_get_slave_port(struct cpsw_priv *priv, u32 slave_num)
{
	if (priv->host_port == 0)
		return slave_num + 1;
	else
		return slave_num;
}

static void _cpsw_adjust_link(struct cpsw_slave *slave,
			struct cpsw_priv *priv, bool *link)
{
	struct phy_device *phy = slave->phy;
	u32 mac_control = 0;
	u32 slave_port;

	if (!phy)
		return;

	slave_port = cpsw_get_slave_port(priv, slave->slave_num);

	if (phy->link) {
		/* enable forwarding */
		cpsw_ale_control_set(priv->ale, slave_port,
			ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);

		mac_control = priv->data.mac_control;
		if (phy->speed == 10)
			mac_control |= BIT(18); /* In Band mode */
		if (phy->speed == 1000) {
			mac_control |= BIT(7); /* Enable gigabit mode */
		}
		if (phy->speed == 100)
			mac_control |= BIT(15);
		if (phy->duplex)
			mac_control |= BIT(0); /* FULLDUPLEXEN */
		if (phy->interface == PHY_INTERFACE_MODE_RGMII) /* RGMII */
			mac_control |= (BIT(15)|BIT(16));
		*link = true;
	} else {
		cpsw_ale_control_set(priv->ale, slave_port,
			ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
		mac_control = 0;
	}

	if (mac_control != slave->mac_control) {
		phy_print_status(phy);
		__raw_writel(mac_control, &slave->sliver->mac_control);
	}

	slave->mac_control = mac_control;
}

static void cpsw_adjust_link(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	bool link = false;

	for_each_slave(priv, _cpsw_adjust_link, priv, &link);

	if (link) {
		netif_carrier_on(ndev);
		if (netif_running(ndev))
			netif_wake_queue(ndev);
	} else {
		netif_carrier_off(ndev);
		netif_stop_queue(ndev);
	}
}

static inline int __show_stat(char *buf, int maxlen, const char* name, u32 val)
{
	static char *leader = "........................................";

	if (!val)
		return 0;
	else
		return snprintf(buf, maxlen, "%s %s %10d\n", name,
				leader + strlen(name), val);
}

static ssize_t cpsw_hw_stats_show(struct device *dev,
			struct device_attribute *attr,
			char *buf)
{
	struct net_device *ndev = to_net_dev(dev);
	struct cpsw_priv *priv = netdev_priv(ndev);
	int len = 0;
	struct cpdma_chan_stats dma_stats;

#define show_stat(x) do { \
	len += __show_stat(buf + len, SZ_4K - len, #x, \
			__raw_readl(&priv->hw_stats->x)); \
} while (0)

#define show_dma_stat(x) do { \
	len += __show_stat(buf + len, SZ_4K - len, #x, dma_stats.x); \
} while (0)

	len += snprintf(buf + len, SZ_4K - len, "CPSW Statistics:\n");
	show_stat(rxgoodframes); show_stat(rxbroadcastframes);
	show_stat(rxmulticastframes); show_stat(rxpauseframes);
	show_stat(rxcrcerrors); show_stat(rxaligncodeerrors);
	show_stat(rxoversizedframes); show_stat(rxjabberframes);
	show_stat(rxundersizedframes); show_stat(rxfragments);
	show_stat(rxoctets); show_stat(txgoodframes);
	show_stat(txbroadcastframes); show_stat(txmulticastframes);
	show_stat(txpauseframes); show_stat(txdeferredframes);
	show_stat(txcollisionframes); show_stat(txsinglecollframes);
	show_stat(txmultcollframes); show_stat(txexcessivecollisions);
	show_stat(txlatecollisions); show_stat(txunderrun);
	show_stat(txcarriersenseerrors); show_stat(txoctets);
	show_stat(octetframes64); show_stat(octetframes65t127);
	show_stat(octetframes128t255); show_stat(octetframes256t511);
	show_stat(octetframes512t1023); show_stat(octetframes1024tup);
	show_stat(netoctets); show_stat(rxsofoverruns);
	show_stat(rxmofoverruns); show_stat(rxdmaoverruns);

	cpdma_chan_get_stats(priv->rxch, &dma_stats);
	len += snprintf(buf + len, SZ_4K - len, "\nRX DMA Statistics:\n");
	show_dma_stat(head_enqueue); show_dma_stat(tail_enqueue);
	show_dma_stat(pad_enqueue); show_dma_stat(misqueued);
	show_dma_stat(desc_alloc_fail); show_dma_stat(pad_alloc_fail);
	show_dma_stat(runt_receive_buff); show_dma_stat(runt_transmit_buff);
	show_dma_stat(empty_dequeue); show_dma_stat(busy_dequeue);
	show_dma_stat(good_dequeue); show_dma_stat(teardown_dequeue);

	cpdma_chan_get_stats(priv->txch, &dma_stats);
	len += snprintf(buf + len, SZ_4K - len, "\nTX DMA Statistics:\n");
	show_dma_stat(head_enqueue); show_dma_stat(tail_enqueue);
	show_dma_stat(pad_enqueue); show_dma_stat(misqueued);
	show_dma_stat(desc_alloc_fail); show_dma_stat(pad_alloc_fail);
	show_dma_stat(runt_receive_buff); show_dma_stat(runt_transmit_buff);
	show_dma_stat(empty_dequeue); show_dma_stat(busy_dequeue);
	show_dma_stat(good_dequeue); show_dma_stat(teardown_dequeue);

	return len;
}

DEVICE_ATTR(hw_stats, S_IRUGO, cpsw_hw_stats_show, NULL);

#define PHY_CONFIG_REG 22
static void cpsw_set_phy_config(struct cpsw_priv *priv, struct phy_device *phy)
{
	struct cpsw_platform_data *pdata = priv->pdev->dev.platform_data;
	struct mii_bus *miibus;
	int phy_addr = 0;
	u16 val = 0;
	u16 tmp = 0;

	if (!phy)
		return;

	miibus = phy->bus;

	if (!miibus)
		return;

	phy_addr = phy->addr;

	/* Disable 1 Gig mode support if it is not supported */
	if (!pdata->gigabit_en)
		phy->supported &= ~(SUPPORTED_1000baseT_Half |
				SUPPORTED_1000baseT_Full);

	/* Following lines enable gigbit advertisement capability even in case
	 * the advertisement is not enabled by default
	 */
	val = miibus->read(miibus, phy_addr, MII_BMCR);
	val |= (BMCR_SPEED100 | BMCR_ANENABLE | BMCR_FULLDPLX);
	miibus->write(miibus, phy_addr, MII_BMCR, val);
	tmp = miibus->read(miibus, phy_addr, MII_BMCR);

	/* Enable gigabit support only if the speed is 1000Mbps */
	if (phy->speed == CPSW_PHY_SPEED) {
		tmp = miibus->read(miibus, phy_addr, MII_BMSR);
		if (tmp & 0x1) {
			val = miibus->read(miibus, phy_addr, MII_CTRL1000);
			val |= BIT(9);
			miibus->write(miibus, phy_addr, MII_CTRL1000, val);
			tmp = miibus->read(miibus, phy_addr, MII_CTRL1000);
		}
	}

	val = miibus->read(miibus, phy_addr, MII_ADVERTISE);
	val |= (ADVERTISE_10HALF | ADVERTISE_10FULL | \
		ADVERTISE_100HALF | ADVERTISE_100FULL);
	miibus->write(miibus, phy_addr, MII_ADVERTISE, val);
	tmp = miibus->read(miibus, phy_addr, MII_ADVERTISE);

	/* TODO : This check is required. This should be
	 * moved to a board init section as its specific
	 * to a phy.*/
	if (phy->phy_id == 0x0282F014) {
		/* This enables TX_CLK-ing in case of 10/100MBps operation */
		val = miibus->read(miibus, phy_addr, PHY_CONFIG_REG);
		val |= BIT(5);
		miibus->write(miibus, phy_addr, PHY_CONFIG_REG, val);
		tmp = miibus->read(miibus, phy_addr, PHY_CONFIG_REG);
	}

	return;
}

static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
{
	char name[32];
	u32 slave_port;

	sprintf(name, "slave-%d", slave->slave_num);

	soft_reset(name, &slave->sliver->soft_reset);

	/* setup priority mapping */
	__raw_writel(0x76543210, &slave->sliver->rx_pri_map);
	__raw_writel(0x33221100, &slave->regs->tx_pri_map);

	/* setup max packet size, and mac address */
	__raw_writel(priv->rx_packet_max, &slave->sliver->rx_maxlen);
	cpsw_set_slave_mac(slave, priv);

	slave->mac_control = 0; /* no link yet */

	slave_port = cpsw_get_slave_port(priv, slave->slave_num);
	cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
			1 << slave_port);

	slave->phy = phy_connect(priv->ndev, slave->data->phy_id,
			&cpsw_adjust_link, 0, slave->data->phy_if);
	if (IS_ERR(slave->phy)) {
		msg(err, ifup, "phy %s not found on slave %d\n",
			slave->data->phy_id, slave->slave_num);
		slave->phy = NULL;
	} else {
		printk(KERN_ERR"\nCPSW phy found : id is : 0x%x\n",
			slave->phy->phy_id);
		cpsw_set_phy_config(priv, slave->phy);
		phy_start(slave->phy);
	}
}

static void cpsw_init_host_port(struct cpsw_priv *priv)
{
	/* soft reset the controller and initialize ale */
	soft_reset("cpsw", &priv->regs->soft_reset);
	cpsw_ale_start(priv->ale);

	/* switch to vlan unaware mode */
	cpsw_ale_control_set(priv->ale, 0, ALE_VLAN_AWARE, 0);

	/* setup host port priority mapping */
	__raw_writel(0x76543210, &priv->host_port_regs->cpdma_tx_pri_map);
	__raw_writel(0, &priv->host_port_regs->cpdma_rx_chan_map);

	cpsw_ale_control_set(priv->ale, priv->host_port,
			ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);

	cpsw_ale_add_ucast(priv->ale, priv->mac_addr, priv->host_port,
			0);
			/* ALE_SECURE); */
	cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
			1 << priv->host_port);
}

static int cpsw_ndo_open(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	int i, ret;
	u32 reg;

	cpsw_intr_disable(priv);
	netif_carrier_off(ndev);

	ret = clk_enable(priv->clk);
	if (ret < 0) {
		dev_err(priv->dev, "unable to turn on device clock\n");
		return ret;
	}

	ret = device_create_file(&ndev->dev, &dev_attr_hw_stats);
	if (ret < 0) {
		dev_err(priv->dev, "unable to add device attr\n");
		return ret;
	}

	if (priv->data.phy_control)
		(*priv->data.phy_control)(true);

	reg = __raw_readl(&priv->regs->id_ver);

	msg(info, ifup, "initializing cpsw version %d.%d (%d)\n",
		(reg >> 8 & 0x7), reg & 0xff, (reg >> 11) & 0x1f);

	/* initialize host and slave ports */
	cpsw_init_host_port(priv);
	for_each_slave(priv, cpsw_slave_open, priv);

	/* setup tx dma to fixed prio and zero offset */
	cpdma_control_set(priv->dma, CPDMA_TX_PRIO_FIXED, 1);
	cpdma_control_set(priv->dma, CPDMA_RX_BUFFER_OFFSET, 0);

	/* disable priority elevation and enable statistics on all ports */
	__raw_writel(0, &priv->regs->ptype);

	/* enable statistics collection only on the host port */
	/* __raw_writel(BIT(priv->host_port), &priv->regs->stat_port_en); */
	__raw_writel(0x7, &priv->regs->stat_port_en);

	if (WARN_ON(!priv->data.rx_descs))
		priv->data.rx_descs = 128;

	for (i = 0; i < priv->data.rx_descs; i++) {
		struct sk_buff *skb;

		ret = -ENOMEM;
		skb = netdev_alloc_skb_ip_align(priv->ndev,
				priv->rx_packet_max);
		if (!skb)
			break;
		ret = cpdma_chan_submit(priv->rxch, skb, skb->data,
				skb_tailroom(skb), GFP_KERNEL);
		if (WARN_ON(ret < 0))
			break;
	}
	/* continue even if we didn't manage to submit all receive descs */
	msg(info, ifup, "submitted %d rx descriptors\n", i);

	/* Enable Interrupt pacing if configured */
	if (priv->coal_intvl != 0) {
		struct ethtool_coalesce coal;

		coal.rx_coalesce_usecs = (priv->coal_intvl << 4);
		cpsw_set_coalesce(ndev, &coal);
	}

	cpdma_ctlr_start(priv->dma);
	cpsw_intr_enable(priv);
	napi_enable(&priv->napi);
	cpdma_ctlr_eoi(priv->dma);

	return 0;
}

static void cpsw_slave_stop(struct cpsw_slave *slave, struct cpsw_priv *priv)
{
	if (!slave->phy)
		return;
	phy_stop(slave->phy);
	phy_disconnect(slave->phy);
	slave->phy = NULL;
}

static int cpsw_ndo_stop(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);

	msg(info, ifdown, "shutting down cpsw device\n");
	cpsw_intr_disable(priv);
	cpdma_ctlr_int_ctrl(priv->dma, false);
	cpdma_ctlr_stop(priv->dma);
	netif_stop_queue(priv->ndev);
	napi_disable(&priv->napi);
	netif_carrier_off(priv->ndev);
	cpsw_ale_stop(priv->ale);
	device_remove_file(&ndev->dev, &dev_attr_hw_stats);
	for_each_slave(priv, cpsw_slave_stop, priv);
	if (priv->data.phy_control)
		(*priv->data.phy_control)(false);
	clk_disable(priv->clk);
	return 0;
}

static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
			struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	int ret;

	ndev->trans_start = jiffies;

	ret = skb_padto(skb, CPSW_MIN_PACKET_SIZE);
	if (unlikely(ret < 0)) {
		msg(err, tx_err, "packet pad failed");
		goto fail;
	}

	ret = cpdma_chan_submit(priv->txch, skb, skb->data,
			skb->len, GFP_KERNEL);
	if (unlikely(ret != 0)) {
		msg(err, tx_err, "desc submit failed");
		goto fail;
	}

	return NETDEV_TX_OK;
fail:
	priv->stats.tx_dropped++;
	netif_stop_queue(ndev);
	return NETDEV_TX_BUSY;
}

static void cpsw_ndo_change_rx_flags(struct net_device *ndev, int flags)
{
	/*
	 * The switch cannot operate in promiscuous mode without substantial
	 * headache. For promiscuous mode to work, we would need to put the
	 * ALE in bypass mode and route all traffic to the host port.
	 * Subsequently, the host will need to operate as a "bridge", learn,
	 * and flood as needed. For now, we simply complain here and
	 * do nothing about it :-)
	 */
	if ((flags & IFF_PROMISC) && (ndev->flags & IFF_PROMISC))
		dev_err(&ndev->dev, "promiscuity ignored!\n");

	/*
	 * The switch cannot filter multicast traffic unless it is configured
	 * in "VLAN Aware" mode. Unfortunately, VLAN awareness requires a
	 * whole bunch of additional logic that this driver does not implement
	 * at present.
	 */
	if ((flags & IFF_ALLMULTI) && !(ndev->flags & IFF_ALLMULTI))
		dev_err(&ndev->dev, "multicast traffic cannot be filtered!\n");
}

static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *addr)
{
	struct cpsw_priv *priv = netdev_priv(ndev);

	cpsw_ale_del_ucast(priv->ale, priv->mac_addr, priv->host_port);
	memcpy(priv->mac_addr, ndev->dev_addr, ETH_ALEN);
	cpsw_ale_add_ucast(priv->ale, priv->mac_addr, priv->host_port,
			0);
			/* ALE_SECURE); */
	for_each_slave(priv, cpsw_set_slave_mac, priv);
	return 0;
}

static void cpsw_ndo_tx_timeout(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);

	msg(err, tx_err, "transmit timeout, restarting dma");
	priv->stats.tx_errors++;
	cpsw_intr_disable(priv);
	cpdma_ctlr_int_ctrl(priv->dma, false);
	cpdma_chan_stop(priv->txch);
	cpdma_chan_start(priv->txch);
	cpdma_ctlr_int_ctrl(priv->dma, true);
	cpsw_intr_enable(priv);
	cpdma_ctlr_eoi(priv->dma);
}

static struct net_device_stats *cpsw_ndo_get_stats(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	return &priv->stats;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void cpsw_ndo_poll_controller(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);

	cpsw_intr_disable(priv);
	cpdma_ctlr_int_ctrl(priv->dma, false);
	cpsw_interrupt(ndev->irq, priv);
	cpdma_ctlr_int_ctrl(priv->dma, true);
	cpsw_intr_enable(priv);
	cpdma_ctlr_eoi(priv->dma);
}
#endif

/**
 * cpsw_get_coalesce : Get interrupt coalesce settings for this device
 * @ndev : CPSW network adapter
 * @coal : ethtool coalesce settings structure
 *
 * Fetch the current interrupt coalesce settings
 *
 */
static int cpsw_get_coalesce(struct net_device *ndev,
			struct ethtool_coalesce *coal)
{
	struct cpsw_priv *priv = netdev_priv(ndev);

	coal->rx_coalesce_usecs = priv->coal_intvl;
	return 0;
}

/**
 * cpsw_set_coalesce : Set interrupt coalesce settings for this device
 * @ndev : CPSW network adapter
 * @coal : ethtool coalesce settings structure
 *
 * Set interrupt coalesce parameters
 *
 */
static int cpsw_set_coalesce(struct net_device *ndev,
			struct ethtool_coalesce *coal)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	u32 int_ctrl;
	u32 num_interrupts = 0;
	u32 prescale = 0;
	u32 addnl_dvdr = 1;
	u32 coal_intvl = 0;

	if (!coal->rx_coalesce_usecs)
		return -EINVAL;

	coal_intvl = coal->rx_coalesce_usecs;

	int_ctrl = __raw_readl(&priv->ss_regs->int_control);
	prescale = priv->bus_freq_mhz * 4;

	if (coal_intvl < CPSW_CMINTMIN_INTVL)
		coal_intvl = CPSW_CMINTMIN_INTVL;

	if (coal_intvl > CPSW_CMINTMAX_INTVL) {
		/*
		 * Interrupt pacer works with 4us Pulse, we can
		 * throttle further by dilating the 4us pulse.
		 */
		addnl_dvdr = CPSW_INTPRESCALE_MASK / prescale;

		if (addnl_dvdr > 1) {
			prescale *= addnl_dvdr;
			if (coal_intvl > (CPSW_CMINTMAX_INTVL * addnl_dvdr))
				coal_intvl = (CPSW_CMINTMAX_INTVL
						* addnl_dvdr);
		} else {
			addnl_dvdr = 1;
			coal_intvl = CPSW_CMINTMAX_INTVL;
		}
	}

	num_interrupts = (1000 * addnl_dvdr) / coal_intvl;

	int_ctrl |= CPSW_INTPACEEN;
	int_ctrl &= (~CPSW_INTPRESCALE_MASK);
	int_ctrl |= (prescale & CPSW_INTPRESCALE_MASK);
	__raw_writel(int_ctrl, &priv->ss_regs->int_control);

	__raw_writel(num_interrupts, &priv->ss_regs->rx_imax);
	__raw_writel(num_interrupts, &priv->ss_regs->tx_imax);

	printk(KERN_INFO"Set coalesce to %d usecs.\n", coal_intvl);
	priv->coal_intvl = coal_intvl;

	return 0;
}

static const struct net_device_ops cpsw_netdev_ops = {
	.ndo_open = cpsw_ndo_open,
	.ndo_stop = cpsw_ndo_stop,
	.ndo_start_xmit = cpsw_ndo_start_xmit,
	.ndo_change_rx_flags = cpsw_ndo_change_rx_flags,
	.ndo_set_mac_address = cpsw_ndo_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_tx_timeout = cpsw_ndo_tx_timeout,
	.ndo_get_stats = cpsw_ndo_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = cpsw_ndo_poll_controller,
#endif
};

static void cpsw_get_drvinfo(struct net_device *ndev,
			struct ethtool_drvinfo *info)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	strcpy(info->driver, "TI CPSW Driver v1.0");
	strcpy(info->version, "1.0");
	strcpy(info->bus_info, priv->pdev->name);
}

static u32 cpsw_get_msglevel(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	return priv->msg_enable;
}

static void cpsw_set_msglevel(struct net_device *ndev, u32 value)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	priv->msg_enable = value;
}

static const struct ethtool_ops cpsw_ethtool_ops = {
	.get_drvinfo = cpsw_get_drvinfo,
	.get_msglevel = cpsw_get_msglevel,
	.set_msglevel = cpsw_set_msglevel,
	.get_link = ethtool_op_get_link,
	.get_coalesce = cpsw_get_coalesce,
	.set_coalesce = cpsw_set_coalesce,
};

static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv)
{
	void __iomem *regs = priv->regs;
	int slave_num = slave->slave_num;
	struct cpsw_slave_data *data = priv->data.slave_data + slave_num;

	slave->data = data;
	slave->regs = regs + data->slave_reg_ofs;
	slave->sliver = regs + data->sliver_reg_ofs;
}

static int __devinit cpsw_probe(struct platform_device *pdev)
{
	struct cpsw_platform_data *data = pdev->dev.platform_data;
1031 struct net_device *ndev; 1017 struct net_device *ndev;
1032 struct cpsw_priv *priv; 1018 struct cpsw_priv *priv;
1033 struct cpdma_params dma_params; 1019 struct cpdma_params dma_params;
1034 struct cpsw_ale_params ale_params; 1020 struct cpsw_ale_params ale_params;
1035 void __iomem *regs; 1021 void __iomem *regs;
1036 struct resource *res; 1022 struct resource *res;
1037 int ret = 0, i, k = 0; 1023 int ret = 0, i, k = 0;
1038 1024
1039 cpdma_base = ioremap(AM33XX_CPSW_BASE, SZ_4K);
1040 if (WARN_ON(!cpdma_base)) {
1041 printk(KERN_ERR"errror: %s: ioremap", __func__);
1042 return -ENODEV;
1043 }
1044
1045 if (!data) { 1025 if (!data) {
1046 pr_err("cpsw: platform data missing\n"); 1026 pr_err("cpsw: platform data missing\n");
1047 return -ENODEV; 1027 return -ENODEV;
1048 } 1028 }
1049 1029
1050 ndev = alloc_etherdev(sizeof(struct cpsw_priv)); 1030 ndev = alloc_etherdev(sizeof(struct cpsw_priv));
1051 if (!ndev) { 1031 if (!ndev) {
1052 pr_err("cpsw: error allocating net_device\n"); 1032 pr_err("cpsw: error allocating net_device\n");
1053 return -ENOMEM; 1033 return -ENOMEM;
1054 } 1034 }
1055 1035
1056 platform_set_drvdata(pdev, ndev); 1036 platform_set_drvdata(pdev, ndev);
1057 priv = netdev_priv(ndev); 1037 priv = netdev_priv(ndev);
1058 spin_lock_init(&priv->lock); 1038 spin_lock_init(&priv->lock);
1059 priv->data = *data; 1039 priv->data = *data;
1060 priv->pdev = pdev; 1040 priv->pdev = pdev;
1061 priv->ndev = ndev; 1041 priv->ndev = ndev;
1062 priv->dev = &ndev->dev; 1042 priv->dev = &ndev->dev;
1063 priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG); 1043 priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG);
1064 priv->rx_packet_max = max(rx_packet_max, 128); 1044 priv->rx_packet_max = max(rx_packet_max, 128);
1065 1045
1066 if (is_valid_ether_addr(data->mac_addr)) { 1046 if (is_valid_ether_addr(data->mac_addr)) {
1067 memcpy(priv->mac_addr, data->mac_addr, ETH_ALEN); 1047 memcpy(priv->mac_addr, data->mac_addr, ETH_ALEN);
1068 printk(KERN_INFO"Detected MACID=%x:%x:%x:%x:%x:%x\n", 1048 printk(KERN_INFO"Detected MACID=%x:%x:%x:%x:%x:%x\n",
1069 priv->mac_addr[0], priv->mac_addr[1], 1049 priv->mac_addr[0], priv->mac_addr[1],
1070 priv->mac_addr[2], priv->mac_addr[3], 1050 priv->mac_addr[2], priv->mac_addr[3],
1071 priv->mac_addr[4], priv->mac_addr[5]); 1051 priv->mac_addr[4], priv->mac_addr[5]);
1072 } else { 1052 } else {
1073 random_ether_addr(priv->mac_addr); 1053 random_ether_addr(priv->mac_addr);
1074 printk(KERN_INFO"Random MACID=%x:%x:%x:%x:%x:%x\n", 1054 printk(KERN_INFO"Random MACID=%x:%x:%x:%x:%x:%x\n",
1075 priv->mac_addr[0], priv->mac_addr[1], 1055 priv->mac_addr[0], priv->mac_addr[1],
1076 priv->mac_addr[2], priv->mac_addr[3], 1056 priv->mac_addr[2], priv->mac_addr[3],
1077 priv->mac_addr[4], priv->mac_addr[5]); 1057 priv->mac_addr[4], priv->mac_addr[5]);
1078 } 1058 }
1079 1059
1080 memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN); 1060 memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN);
1081 1061
1082 priv->slaves = kzalloc(sizeof(struct cpsw_slave) * data->slaves, 1062 priv->slaves = kzalloc(sizeof(struct cpsw_slave) * data->slaves,
1083 GFP_KERNEL); 1063 GFP_KERNEL);
1084 if (!priv->slaves) { 1064 if (!priv->slaves) {
1085 dev_err(priv->dev, "failed to allocate slave ports\n"); 1065 dev_err(priv->dev, "failed to allocate slave ports\n");
1086 ret = -ENOMEM; 1066 ret = -ENOMEM;
1087 goto clean_ndev_ret; 1067 goto clean_ndev_ret;
1088 } 1068 }
1089 for (i = 0; i < data->slaves; i++) 1069 for (i = 0; i < data->slaves; i++)
1090 priv->slaves[i].slave_num = i; 1070 priv->slaves[i].slave_num = i;
1091 1071
1092 priv->clk = clk_get(&pdev->dev, NULL); 1072 priv->clk = clk_get(&pdev->dev, NULL);
1093 if (IS_ERR(priv->clk)) 1073 if (IS_ERR(priv->clk))
1094 dev_err(priv->dev, "failed to get device clock\n"); 1074 dev_err(priv->dev, "failed to get device clock\n");
1095 1075
1096 priv->coal_intvl = 0; 1076 priv->coal_intvl = 0;
1097 priv->bus_freq_mhz = clk_get_rate(priv->clk) / 1000000; 1077 priv->bus_freq_mhz = clk_get_rate(priv->clk) / 1000000;
1098 1078
1099 priv->cpsw_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1079 priv->cpsw_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1100 if (!priv->cpsw_res) { 1080 if (!priv->cpsw_res) {
1101 dev_err(priv->dev, "error getting i/o resource\n"); 1081 dev_err(priv->dev, "error getting i/o resource\n");
1102 ret = -ENOENT; 1082 ret = -ENOENT;
1103 goto clean_clk_ret; 1083 goto clean_clk_ret;
1104 } 1084 }
1105 1085
1106 if (!request_mem_region(priv->cpsw_res->start, 1086 if (!request_mem_region(priv->cpsw_res->start,
1107 resource_size(priv->cpsw_res), ndev->name)) { 1087 resource_size(priv->cpsw_res), ndev->name)) {
1108 dev_err(priv->dev, "failed request i/o region\n"); 1088 dev_err(priv->dev, "failed request i/o region\n");
1109 ret = -ENXIO; 1089 ret = -ENXIO;
1110 goto clean_clk_ret; 1090 goto clean_clk_ret;
1111 } 1091 }
1112 1092
1113 regs = ioremap(priv->cpsw_res->start, resource_size(priv->cpsw_res)); 1093 regs = ioremap(priv->cpsw_res->start, resource_size(priv->cpsw_res));
1114 if (!regs) { 1094 if (!regs) {
1115 dev_err(priv->dev, "unable to map i/o region\n"); 1095 dev_err(priv->dev, "unable to map i/o region\n");
1116 ret = -ENOMEM; goto clean_cpsw_iores_ret; 1096 ret = -ENOMEM; goto clean_cpsw_iores_ret;
1117 } 1097 }
1118 priv->regs = regs; 1098 priv->regs = regs;
1119 priv->host_port = data->host_port_num; 1099 priv->host_port = data->host_port_num;
1120 priv->host_port_regs = regs + data->host_port_reg_ofs; 1100 priv->host_port_regs = regs + data->host_port_reg_ofs;
1121 priv->hw_stats = regs + data->hw_stats_reg_ofs; 1101 priv->hw_stats = regs + data->hw_stats_reg_ofs;
1122 1102
1123 priv->cpsw_ss_res = platform_get_resource(pdev, IORESOURCE_MEM, 1); 1103 priv->cpsw_ss_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1124 if (!priv->cpsw_ss_res) { 1104 if (!priv->cpsw_ss_res) {
1125 dev_err(priv->dev, "error getting i/o resource\n"); 1105 dev_err(priv->dev, "error getting i/o resource\n");
1126 ret = -ENOENT; 1106 ret = -ENOENT;
1127 goto clean_clk_ret; 1107 goto clean_clk_ret;
1128 } 1108 }
1129 1109
1130 if (!request_mem_region(priv->cpsw_ss_res->start, 1110 if (!request_mem_region(priv->cpsw_ss_res->start,
1131 resource_size(priv->cpsw_ss_res), ndev->name)) { 1111 resource_size(priv->cpsw_ss_res), ndev->name)) {
1132 dev_err(priv->dev, "failed request i/o region\n"); 1112 dev_err(priv->dev, "failed request i/o region\n");
1133 ret = -ENXIO; 1113 ret = -ENXIO;
1134 goto clean_clk_ret; 1114 goto clean_clk_ret;
1135 } 1115 }
1136 1116
1137 regs = ioremap(priv->cpsw_ss_res->start, 1117 regs = ioremap(priv->cpsw_ss_res->start,
1138 resource_size(priv->cpsw_ss_res)); 1118 resource_size(priv->cpsw_ss_res));
1139 if (!regs) { 1119 if (!regs) {
1140 dev_err(priv->dev, "unable to map i/o region\n"); 1120 dev_err(priv->dev, "unable to map i/o region\n");
1141 ret = -ENOMEM; goto clean_cpsw_ss_iores_ret; 1121 ret = -ENOMEM; goto clean_cpsw_ss_iores_ret;
1142 } 1122 }
1143 priv->ss_regs = regs; 1123 priv->ss_regs = regs;
1144 1124
1145 1125
1146 for_each_slave(priv, cpsw_slave_init, priv); 1126 for_each_slave(priv, cpsw_slave_init, priv);
1147 1127
1148 omap_ctrl_writel(CPSW_TIMER_MASK, CPSW_TIMER_CAP_REG); 1128 omap_ctrl_writel(CPSW_TIMER_MASK, CPSW_TIMER_CAP_REG);
1149 1129
1150 /* Enable Timer */ 1130 /* Enable Timer for capturing cpsw rx interrupts */
1151 stTimerRx = omap_dm_timer_request_specific(CPSW_RX_TIMER_REQ); 1131 dmtimer_rx = omap_dm_timer_request_specific(CPSW_RX_TIMER_REQ);
1152 omap_dm_timer_set_int_enable(stTimerRx, OMAP_TIMER_INT_CAPTURE); 1132 omap_dm_timer_set_int_enable(dmtimer_rx, OMAP_TIMER_INT_CAPTURE);
1153 omap_dm_timer_set_capture(stTimerRx, 1, 0, 0); 1133 omap_dm_timer_set_capture(dmtimer_rx, 1, 0, 0);
1154 omap_dm_timer_enable(stTimerRx); 1134 omap_dm_timer_enable(dmtimer_rx);
1155 1135
1156 /* Enable Timer */ 1136 /* Enable Timer for capturing cpsw tx interrupts */
1157 stTimerTx = omap_dm_timer_request_specific(CPSW_TX_TIMER_REQ); 1137 dmtimer_tx = omap_dm_timer_request_specific(CPSW_TX_TIMER_REQ);
1158 omap_dm_timer_set_int_enable(stTimerTx, OMAP_TIMER_INT_CAPTURE); 1138 omap_dm_timer_set_int_enable(dmtimer_tx, OMAP_TIMER_INT_CAPTURE);
1159 omap_dm_timer_set_capture(stTimerTx, 1, 0, 0); 1139 omap_dm_timer_set_capture(dmtimer_tx, 1, 0, 0);
1160 omap_dm_timer_enable(stTimerTx); 1140 omap_dm_timer_enable(dmtimer_tx);
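
The omap_ctrl_writel(CPSW_TIMER_MASK, ...) above routes the CPSW rx/tx interrupt events to these two timers' capture inputs. Per the commit message, the matching change in the interrupt path drops the dmtimer re-enable that used to follow the status-register reset; a hedged sketch of what the simplified acknowledge then looks like, assuming the standard plat/dmtimer helpers (the real handler lives elsewhere in cpsw.c and is not part of this hunk):

	/* Illustrative only: clearing the capture status is sufficient; the
	 * omap_dm_timer_enable() call that used to follow is what this
	 * patch removes. */
	static void cpsw_timer_ack(struct omap_dm_timer *timer)
	{
		unsigned int status = omap_dm_timer_read_status(timer);

		if (status & OMAP_TIMER_INT_CAPTURE)
			omap_dm_timer_write_status(timer, OMAP_TIMER_INT_CAPTURE);
	}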
1161 1141
1162 memset(&dma_params, 0, sizeof(dma_params)); 1142 memset(&dma_params, 0, sizeof(dma_params));
1163 dma_params.dev = &pdev->dev; 1143 dma_params.dev = &pdev->dev;
1164 dma_params.dmaregs = (void __iomem *)(((u32)priv->regs) + 1144 dma_params.dmaregs = (void __iomem *)(((u32)priv->regs) +
1165 data->cpdma_reg_ofs); 1145 data->cpdma_reg_ofs);
1166 dma_params.rxthresh = (void __iomem *)(((u32)priv->regs) + 1146 dma_params.rxthresh = (void __iomem *)(((u32)priv->regs) +
1167 data->cpdma_reg_ofs + CPDMA_RXTHRESH); 1147 data->cpdma_reg_ofs + CPDMA_RXTHRESH);
1168 dma_params.rxfree = (void __iomem *)(((u32)priv->regs) + 1148 dma_params.rxfree = (void __iomem *)(((u32)priv->regs) +
1169 data->cpdma_reg_ofs + CPDMA_RXFREE); 1149 data->cpdma_reg_ofs + CPDMA_RXFREE);
1170 1150
1171 if (data->version == CPSW_VERSION_2) { 1151 if (data->version == CPSW_VERSION_2) {
1172 dma_params.txhdp = (void __iomem *)(((u32)priv->regs) + 1152 dma_params.txhdp = (void __iomem *)(((u32)priv->regs) +
1173 data->cpdma_reg_ofs + CPDMA_TXHDP_VER2); 1153 data->cpdma_reg_ofs + CPDMA_TXHDP_VER2);
1174 dma_params.rxhdp = (void __iomem *)(((u32)priv->regs) + 1154 dma_params.rxhdp = (void __iomem *)(((u32)priv->regs) +
1175 data->cpdma_reg_ofs + CPDMA_RXHDP_VER2); 1155 data->cpdma_reg_ofs + CPDMA_RXHDP_VER2);
1176 dma_params.txcp = (void __iomem *)(((u32)priv->regs) + 1156 dma_params.txcp = (void __iomem *)(((u32)priv->regs) +
1177 data->cpdma_reg_ofs + CPDMA_TXCP_VER2); 1157 data->cpdma_reg_ofs + CPDMA_TXCP_VER2);
1178 dma_params.rxcp = (void __iomem *)(((u32)priv->regs) + 1158 dma_params.rxcp = (void __iomem *)(((u32)priv->regs) +
1179 data->cpdma_reg_ofs + CPDMA_RXCP_VER2); 1159 data->cpdma_reg_ofs + CPDMA_RXCP_VER2);
1180 } else { 1160 } else {
1181 dma_params.txhdp = (void __iomem *)(((u32)priv->regs) + 1161 dma_params.txhdp = (void __iomem *)(((u32)priv->regs) +
1182 data->cpdma_reg_ofs + CPDMA_TXHDP_VER1); 1162 data->cpdma_reg_ofs + CPDMA_TXHDP_VER1);
1183 dma_params.rxhdp = (void __iomem *)(((u32)priv->regs) + 1163 dma_params.rxhdp = (void __iomem *)(((u32)priv->regs) +
1184 data->cpdma_reg_ofs + CPDMA_RXHDP_VER1); 1164 data->cpdma_reg_ofs + CPDMA_RXHDP_VER1);
1185 dma_params.txcp = (void __iomem *)(((u32)priv->regs) + 1165 dma_params.txcp = (void __iomem *)(((u32)priv->regs) +
1186 data->cpdma_reg_ofs + CPDMA_TXCP_VER1); 1166 data->cpdma_reg_ofs + CPDMA_TXCP_VER1);
1187 dma_params.rxcp = (void __iomem *)(((u32)priv->regs) + 1167 dma_params.rxcp = (void __iomem *)(((u32)priv->regs) +
1188 data->cpdma_reg_ofs + CPDMA_RXCP_VER1); 1168 data->cpdma_reg_ofs + CPDMA_RXCP_VER1);
1189 } 1169 }
1190 1170
1191 dma_params.num_chan = data->channels; 1171 dma_params.num_chan = data->channels;
1192 dma_params.has_soft_reset = true; 1172 dma_params.has_soft_reset = true;
1193 dma_params.min_packet_size = CPSW_MIN_PACKET_SIZE; 1173 dma_params.min_packet_size = CPSW_MIN_PACKET_SIZE;
1194 dma_params.desc_mem_size = data->bd_ram_size; 1174 dma_params.desc_mem_size = data->bd_ram_size;
1195 dma_params.desc_align = 16; 1175 dma_params.desc_align = 16;
1196 dma_params.has_ext_regs = true; 1176 dma_params.has_ext_regs = true;
1197 dma_params.desc_mem_phys = data->no_bd_ram ? 0 : 1177 dma_params.desc_mem_phys = data->no_bd_ram ? 0 :
1198 (u32 __force)priv->cpsw_res->start + data->bd_ram_ofs; 1178 (u32 __force)priv->cpsw_res->start + data->bd_ram_ofs;
1199 dma_params.desc_hw_addr = data->hw_ram_addr ? 1179 dma_params.desc_hw_addr = data->hw_ram_addr ?
1200 data->hw_ram_addr : dma_params.desc_mem_phys; 1180 data->hw_ram_addr : dma_params.desc_mem_phys;
1201 1181
1202 priv->dma = cpdma_ctlr_create(&dma_params); 1182 priv->dma = cpdma_ctlr_create(&dma_params);
1203 if (!priv->dma) { 1183 if (!priv->dma) {
1204 dev_err(priv->dev, "error initializing dma\n"); 1184 dev_err(priv->dev, "error initializing dma\n");
1205 ret = -ENOMEM; 1185 ret = -ENOMEM;
1206 goto clean_iomap_ret; 1186 goto clean_iomap_ret;
1207 } 1187 }
1208 1188
1209 priv->txch = cpdma_chan_create(priv->dma, tx_chan_num(0), 1189 priv->txch = cpdma_chan_create(priv->dma, tx_chan_num(0),
1210 cpsw_tx_handler); 1190 cpsw_tx_handler);
1211 priv->rxch = cpdma_chan_create(priv->dma, rx_chan_num(0), 1191 priv->rxch = cpdma_chan_create(priv->dma, rx_chan_num(0),
1212 cpsw_rx_handler); 1192 cpsw_rx_handler);
1213 1193
1214 if (WARN_ON(!priv->txch || !priv->rxch)) { 1194 if (WARN_ON(!priv->txch || !priv->rxch)) {
1215 dev_err(priv->dev, "error initializing dma channels\n"); 1195 dev_err(priv->dev, "error initializing dma channels\n");
1216 ret = -ENOMEM; 1196 ret = -ENOMEM;
1217 goto clean_dma_ret; 1197 goto clean_dma_ret;
1218 } 1198 }
1219 1199
1220 memset(&ale_params, 0, sizeof(ale_params)); 1200 memset(&ale_params, 0, sizeof(ale_params));
1221 ale_params.dev = &ndev->dev; 1201 ale_params.dev = &ndev->dev;
1222 ale_params.ale_regs = (void *)((u32)priv->regs) + 1202 ale_params.ale_regs = (void *)((u32)priv->regs) +
1223 ((u32)data->ale_reg_ofs); 1203 ((u32)data->ale_reg_ofs);
1224 ale_params.ale_ageout = ale_ageout; 1204 ale_params.ale_ageout = ale_ageout;
1225 ale_params.ale_entries = data->ale_entries; 1205 ale_params.ale_entries = data->ale_entries;
1226 ale_params.ale_ports = data->slaves; 1206 ale_params.ale_ports = data->slaves;
1227 1207
1228 priv->ale = cpsw_ale_create(&ale_params); 1208 priv->ale = cpsw_ale_create(&ale_params);
1229 if (!priv->ale) { 1209 if (!priv->ale) {
1230 dev_err(priv->dev, "error initializing ale engine\n"); 1210 dev_err(priv->dev, "error initializing ale engine\n");
1231 ret = -ENODEV; 1211 ret = -ENODEV;
1232 goto clean_dma_ret; 1212 goto clean_dma_ret;
1233 } 1213 }
1234 1214
1235 ndev->irq = platform_get_irq(pdev, 0); 1215 ndev->irq = platform_get_irq(pdev, 0);
1236 if (ndev->irq < 0) { 1216 if (ndev->irq < 0) {
1237 dev_err(priv->dev, "error getting irq resource\n"); 1217 dev_err(priv->dev, "error getting irq resource\n");
1238 ret = -ENOENT; 1218 ret = -ENOENT;
1239 goto clean_ale_ret; 1219 goto clean_ale_ret;
1240 } 1220 }
1241 1221
1242 while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k))) { 1222 while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k))) {
1243 for (i = res->start; i <= res->end; i++) { 1223 for (i = res->start; i <= res->end; i++) {
1244 if (request_irq(i, cpsw_interrupt, IRQF_DISABLED, 1224 if (request_irq(i, cpsw_interrupt, IRQF_DISABLED,
1245 dev_name(&pdev->dev), priv)) { 1225 dev_name(&pdev->dev), priv)) {
1246 dev_err(priv->dev, "error attaching irq\n"); 1226 dev_err(priv->dev, "error attaching irq\n");
1247 goto clean_ale_ret; 1227 goto clean_ale_ret;
1248 } 1228 }
1249 #ifdef CPSW_IRQ_QUIRK 1229 #ifdef CPSW_IRQ_QUIRK
1250 priv->irqs_table[k] = i; 1230 priv->irqs_table[k] = i;
1251 priv->num_irqs = k; 1231 priv->num_irqs = k;
1252 #endif 1232 #endif
1253 } 1233 }
1254 k++; 1234 k++;
1255 } 1235 }
1256 1236
1257 ndev->flags |= IFF_ALLMULTI; /* see cpsw_ndo_change_rx_flags() */ 1237 ndev->flags |= IFF_ALLMULTI; /* see cpsw_ndo_change_rx_flags() */
1258 1238
1259 ndev->netdev_ops = &cpsw_netdev_ops; 1239 ndev->netdev_ops = &cpsw_netdev_ops;
1260 SET_ETHTOOL_OPS(ndev, &cpsw_ethtool_ops); 1240 SET_ETHTOOL_OPS(ndev, &cpsw_ethtool_ops);
1261 netif_napi_add(ndev, &priv->napi, cpsw_poll, CPSW_POLL_WEIGHT); 1241 netif_napi_add(ndev, &priv->napi, cpsw_poll, CPSW_POLL_WEIGHT);
1262 1242
1263 /* register the network device */ 1243 /* register the network device */
1264 SET_NETDEV_DEV(ndev, &pdev->dev); 1244 SET_NETDEV_DEV(ndev, &pdev->dev);
1265 ret = register_netdev(ndev); 1245 ret = register_netdev(ndev);
1266 if (ret) { 1246 if (ret) {
1267 dev_err(priv->dev, "error registering net device\n"); 1247 dev_err(priv->dev, "error registering net device\n");
1268 ret = -ENODEV; 1248 ret = -ENODEV;
1269 goto clean_irq_ret; 1249 goto clean_irq_ret;
1270 } 1250 }
1271 1251
1272 msg(notice, probe, "initialized device (regs %x, irq %d)\n", 1252 msg(notice, probe, "initialized device (regs %x, irq %d)\n",
1273 priv->cpsw_res->start, ndev->irq); 1253 priv->cpsw_res->start, ndev->irq);
1274 1254
1275 return 0; 1255 return 0;
1276 1256
1277 clean_irq_ret: 1257 clean_irq_ret:
1278 free_irq(ndev->irq, priv); 1258 free_irq(ndev->irq, priv);
1279 clean_ale_ret: 1259 clean_ale_ret:
1280 cpsw_ale_destroy(priv->ale); 1260 cpsw_ale_destroy(priv->ale);
1281 clean_dma_ret: 1261 clean_dma_ret:
1282 cpdma_chan_destroy(priv->txch); 1262 cpdma_chan_destroy(priv->txch);
1283 cpdma_chan_destroy(priv->rxch); 1263 cpdma_chan_destroy(priv->rxch);
1284 cpdma_ctlr_destroy(priv->dma); 1264 cpdma_ctlr_destroy(priv->dma);
1285 clean_iomap_ret: 1265 clean_iomap_ret:
1286 iounmap(priv->regs); 1266 iounmap(priv->regs);
1287 clean_cpsw_ss_iores_ret: 1267 clean_cpsw_ss_iores_ret:
1288 release_mem_region(priv->cpsw_ss_res->start, 1268 release_mem_region(priv->cpsw_ss_res->start,
1289 resource_size(priv->cpsw_ss_res)); 1269 resource_size(priv->cpsw_ss_res));
1290 clean_cpsw_iores_ret: 1270 clean_cpsw_iores_ret:
1291 release_mem_region(priv->cpsw_res->start, 1271 release_mem_region(priv->cpsw_res->start,
1292 resource_size(priv->cpsw_res)); 1272 resource_size(priv->cpsw_res));
1293 clean_clk_ret: 1273 clean_clk_ret:
1294 clk_put(priv->clk); 1274 clk_put(priv->clk);
1295 kfree(priv->slaves); 1275 kfree(priv->slaves);
1296 clean_ndev_ret: 1276 clean_ndev_ret:
1297 free_netdev(ndev); 1277 free_netdev(ndev);
1298 return ret; 1278 return ret;
1299 } 1279 }
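
cpsw_probe() takes everything from pdev->dev.platform_data, so a board file must register the device with a populated struct cpsw_platform_data. A hypothetical fragment using only fields the probe above actually reads; every offset, count and size here is a placeholder, not a real AM33xx value:

	static struct cpsw_slave_data board_slave_data[] = {
		{ .slave_reg_ofs = 0x50, .sliver_reg_ofs = 0x700 },	/* port 1 */
		{ .slave_reg_ofs = 0x90, .sliver_reg_ofs = 0x740 },	/* port 2 */
	};

	static struct cpsw_platform_data board_cpsw_data = {
		.slaves			= 2,
		.slave_data		= board_slave_data,
		.channels		= 8,
		.host_port_num		= 0,
		.version		= CPSW_VERSION_2,
		.host_port_reg_ofs	= 0x108,	/* placeholder offsets */
		.cpdma_reg_ofs		= 0x800,
		.ale_reg_ofs		= 0xd00,
		.ale_entries		= 1024,
		.hw_stats_reg_ofs	= 0x900,
		.bd_ram_ofs		= 0x2000,
		.bd_ram_size		= SZ_8K,
	};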
1300 1280
1301 static int __devexit cpsw_remove(struct platform_device *pdev) 1281 static int __devexit cpsw_remove(struct platform_device *pdev)
1302 { 1282 {
1303 struct net_device *ndev = platform_get_drvdata(pdev); 1283 struct net_device *ndev = platform_get_drvdata(pdev);
1304 struct cpsw_priv *priv = netdev_priv(ndev); 1284 struct cpsw_priv *priv = netdev_priv(ndev);
1305 1285
1306 msg(notice, probe, "removing device\n"); 1286 msg(notice, probe, "removing device\n");
1307 platform_set_drvdata(pdev, NULL); 1287 platform_set_drvdata(pdev, NULL);
1308 1288
1309 free_irq(ndev->irq, priv); 1289 free_irq(ndev->irq, priv);
1310 cpsw_ale_destroy(priv->ale); 1290 cpsw_ale_destroy(priv->ale);
1311 cpdma_chan_destroy(priv->txch); 1291 cpdma_chan_destroy(priv->txch);
1312 cpdma_chan_destroy(priv->rxch); 1292 cpdma_chan_destroy(priv->rxch);
1313 cpdma_ctlr_destroy(priv->dma); 1293 cpdma_ctlr_destroy(priv->dma);
1314 iounmap(priv->regs); 1294 iounmap(priv->regs);
1315 release_mem_region(priv->cpsw_res->start, 1295 release_mem_region(priv->cpsw_res->start,
1316 resource_size(priv->cpsw_res)); 1296 resource_size(priv->cpsw_res));
1317 release_mem_region(priv->cpsw_ss_res->start, 1297 release_mem_region(priv->cpsw_ss_res->start,
1318 resource_size(priv->cpsw_ss_res)); 1298 resource_size(priv->cpsw_ss_res));
1319 clk_put(priv->clk); 1299 clk_put(priv->clk);
1320 kfree(priv->slaves); 1300 kfree(priv->slaves);
1321 free_netdev(ndev); 1301 free_netdev(ndev);
1322 1302
1323 return 0; 1303 return 0;
1324 } 1304 }
1325 1305
1326 static int cpsw_suspend(struct device *dev) 1306 static int cpsw_suspend(struct device *dev)
1327 { 1307 {
1328 struct platform_device *pdev = to_platform_device(dev); 1308 struct platform_device *pdev = to_platform_device(dev);
1329 struct net_device *ndev = platform_get_drvdata(pdev); 1309 struct net_device *ndev = platform_get_drvdata(pdev);
1330 1310
1331 if (netif_running(ndev)) 1311 if (netif_running(ndev))
1332 cpsw_ndo_stop(ndev); 1312 cpsw_ndo_stop(ndev);
1333 return 0; 1313 return 0;
1334 } 1314 }
1335 1315
1336 static int cpsw_resume(struct device *dev) 1316 static int cpsw_resume(struct device *dev)
1337 { 1317 {
1338 struct platform_device *pdev = to_platform_device(dev); 1318 struct platform_device *pdev = to_platform_device(dev);
1339 struct net_device *ndev = platform_get_drvdata(pdev); 1319 struct net_device *ndev = platform_get_drvdata(pdev);
1340 1320
1341 if (netif_running(ndev)) 1321 if (netif_running(ndev))
1342 cpsw_ndo_open(ndev); 1322 cpsw_ndo_open(ndev);
1343 return 0; 1323 return 0;
1344 } 1324 }
1345 1325
1346 static const struct dev_pm_ops cpsw_pm_ops = { 1326 static const struct dev_pm_ops cpsw_pm_ops = {
1347 .suspend = cpsw_suspend, 1327 .suspend = cpsw_suspend,
1348 .resume = cpsw_resume, 1328 .resume = cpsw_resume,
1349 }; 1329 };
1350 1330
1351 static struct platform_driver cpsw_driver = { 1331 static struct platform_driver cpsw_driver = {
1352 .driver = { 1332 .driver = {
1353 .name = "cpsw", 1333 .name = "cpsw",
1354 .owner = THIS_MODULE, 1334 .owner = THIS_MODULE,
1355 .pm = &cpsw_pm_ops, 1335 .pm = &cpsw_pm_ops,
1356 }, 1336 },
1357 .probe = cpsw_probe, 1337 .probe = cpsw_probe,
1358 .remove = __devexit_p(cpsw_remove), 1338 .remove = __devexit_p(cpsw_remove),
1359 }; 1339 };
1360 1340
1361 static int __init cpsw_init(void) 1341 static int __init cpsw_init(void)
1362 { 1342 {
1363 return platform_driver_register(&cpsw_driver); 1343 return platform_driver_register(&cpsw_driver);
1364 } 1344 }
1365 late_initcall(cpsw_init); 1345 late_initcall(cpsw_init);
1366 1346
1367 static void __exit cpsw_exit(void) 1347 static void __exit cpsw_exit(void)
1368 { 1348 {
1369 platform_driver_unregister(&cpsw_driver); 1349 platform_driver_unregister(&cpsw_driver);
1370 } 1350 }
1371 module_exit(cpsw_exit); 1351 module_exit(cpsw_exit);
1372 1352
1373 MODULE_LICENSE("GPL"); 1353 MODULE_LICENSE("GPL");
1374 MODULE_DESCRIPTION("TI CPSW Ethernet driver"); 1354 MODULE_DESCRIPTION("TI CPSW Ethernet driver");
1375 1355
drivers/net/davinci_cpdma.c
1 /* 1 /*
2 * Texas Instruments CPDMA Driver 2 * Texas Instruments CPDMA Driver
3 * 3 *
4 * Copyright (C) 2010 Texas Instruments 4 * Copyright (C) 2010 Texas Instruments
5 * 5 *
6 * This program is free software; you can redistribute it and/or 6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License as 7 * modify it under the terms of the GNU General Public License as
8 * published by the Free Software Foundation version 2. 8 * published by the Free Software Foundation version 2.
9 * 9 *
10 * This program is distributed "as is" WITHOUT ANY WARRANTY of any 10 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
11 * kind, whether express or implied; without even the implied warranty 11 * kind, whether express or implied; without even the implied warranty
12 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details. 13 * GNU General Public License for more details.
14 */ 14 */
15 #include <linux/kernel.h> 15 #include <linux/kernel.h>
16 #include <linux/spinlock.h> 16 #include <linux/spinlock.h>
17 #include <linux/device.h> 17 #include <linux/device.h>
18 #include <linux/slab.h> 18 #include <linux/slab.h>
19 #include <linux/err.h> 19 #include <linux/err.h>
20 #include <linux/dma-mapping.h> 20 #include <linux/dma-mapping.h>
21 #include <linux/io.h> 21 #include <linux/io.h>
22 22
23 #include "davinci_cpdma.h" 23 #include "davinci_cpdma.h"
24 24
25 /* DMA Registers */ 25 /* DMA Registers */
26 #define CPDMA_TXIDVER 0x00 26 #define CPDMA_TXIDVER 0x00
27 #define CPDMA_TXCONTROL 0x04 27 #define CPDMA_TXCONTROL 0x04
28 #define CPDMA_TXTEARDOWN 0x08 28 #define CPDMA_TXTEARDOWN 0x08
29 #define CPDMA_RXIDVER 0x10 29 #define CPDMA_RXIDVER 0x10
30 #define CPDMA_RXCONTROL 0x14 30 #define CPDMA_RXCONTROL 0x14
31 #define CPDMA_SOFTRESET 0x1c 31 #define CPDMA_SOFTRESET 0x1c
32 #define CPDMA_RXTEARDOWN 0x18 32 #define CPDMA_RXTEARDOWN 0x18
33 #define CPDMA_TXINTSTATRAW 0x80 33 #define CPDMA_TXINTSTATRAW 0x80
34 #define CPDMA_TXINTSTATMASKED 0x84 34 #define CPDMA_TXINTSTATMASKED 0x84
35 #define CPDMA_TXINTMASKSET 0x88 35 #define CPDMA_TXINTMASKSET 0x88
36 #define CPDMA_TXINTMASKCLEAR 0x8c 36 #define CPDMA_TXINTMASKCLEAR 0x8c
37 #define CPDMA_MACINVECTOR 0x90 37 #define CPDMA_MACINVECTOR 0x90
38 #define CPDMA_MACEOIVECTOR 0x94 38 #define CPDMA_MACEOIVECTOR 0x94
39 #define CPDMA_RXINTSTATRAW 0xa0 39 #define CPDMA_RXINTSTATRAW 0xa0
40 #define CPDMA_RXINTSTATMASKED 0xa4 40 #define CPDMA_RXINTSTATMASKED 0xa4
41 #define CPDMA_RXINTMASKSET 0xa8 41 #define CPDMA_RXINTMASKSET 0xa8
42 #define CPDMA_RXINTMASKCLEAR 0xac 42 #define CPDMA_RXINTMASKCLEAR 0xac
43 #define CPDMA_DMAINTSTATRAW 0xb0 43 #define CPDMA_DMAINTSTATRAW 0xb0
44 #define CPDMA_DMAINTSTATMASKED 0xb4 44 #define CPDMA_DMAINTSTATMASKED 0xb4
45 #define CPDMA_DMAINTMASKSET 0xb8 45 #define CPDMA_DMAINTMASKSET 0xb8
46 #define CPDMA_DMAINTMASKCLEAR 0xbc 46 #define CPDMA_DMAINTMASKCLEAR 0xbc
47 #define CPDMA_DMAINT_HOSTERR BIT(1) 47 #define CPDMA_DMAINT_HOSTERR BIT(1)
48 48
49 /* the following exist only if has_ext_regs is set */ 49 /* the following exist only if has_ext_regs is set */
50 #define CPDMA_DMACONTROL 0x20 50 #define CPDMA_DMACONTROL 0x20
51 #define CPDMA_DMASTATUS 0x24 51 #define CPDMA_DMASTATUS 0x24
52 #define CPDMA_RXBUFFOFS 0x28 52 #define CPDMA_RXBUFFOFS 0x28
53 #define CPDMA_EM_CONTROL 0x2c 53 #define CPDMA_EM_CONTROL 0x2c
54 54
55 /* Descriptor mode bits */ 55 /* Descriptor mode bits */
56 #define CPDMA_DESC_SOP BIT(31) 56 #define CPDMA_DESC_SOP BIT(31)
57 #define CPDMA_DESC_EOP BIT(30) 57 #define CPDMA_DESC_EOP BIT(30)
58 #define CPDMA_DESC_OWNER BIT(29) 58 #define CPDMA_DESC_OWNER BIT(29)
59 #define CPDMA_DESC_EOQ BIT(28) 59 #define CPDMA_DESC_EOQ BIT(28)
60 #define CPDMA_DESC_TD_COMPLETE BIT(27) 60 #define CPDMA_DESC_TD_COMPLETE BIT(27)
61 #define CPDMA_DESC_PASS_CRC BIT(26) 61 #define CPDMA_DESC_PASS_CRC BIT(26)
62 62
63 #define CPDMA_TEARDOWN_VALUE 0xfffffffc 63 #define CPDMA_TEARDOWN_VALUE 0xfffffffc
64 64
65 struct cpdma_desc { 65 struct cpdma_desc {
66 /* hardware fields */ 66 /* hardware fields */
67 u32 hw_next; 67 u32 hw_next;
68 u32 hw_buffer; 68 u32 hw_buffer;
69 u32 hw_len; 69 u32 hw_len;
70 u32 hw_mode; 70 u32 hw_mode;
71 /* software fields */ 71 /* software fields */
72 void *sw_token; 72 void *sw_token;
73 u32 sw_buffer; 73 u32 sw_buffer;
74 u32 sw_len; 74 u32 sw_len;
75 }; 75 };
76 76
77 struct cpdma_desc_pool { 77 struct cpdma_desc_pool {
78 u32 phys; 78 u32 phys;
79 u32 hw_addr; 79 u32 hw_addr;
80 void __iomem *iomap; /* ioremap map */ 80 void __iomem *iomap; /* ioremap map */
81 void *cpumap; /* dma_alloc map */ 81 void *cpumap; /* dma_alloc map */
82 int desc_size, mem_size; 82 int desc_size, mem_size;
83 int num_desc, used_desc; 83 int num_desc, used_desc;
84 unsigned long *bitmap; 84 unsigned long *bitmap;
85 struct device *dev; 85 struct device *dev;
86 spinlock_t lock; 86 spinlock_t lock;
87 }; 87 };
88 88
89 enum cpdma_state { 89 enum cpdma_state {
90 CPDMA_STATE_IDLE, 90 CPDMA_STATE_IDLE,
91 CPDMA_STATE_ACTIVE, 91 CPDMA_STATE_ACTIVE,
92 CPDMA_STATE_TEARDOWN, 92 CPDMA_STATE_TEARDOWN,
93 }; 93 };
94 94
95 const char *cpdma_state_str[] = { "idle", "active", "teardown" }; 95 const char *cpdma_state_str[] = { "idle", "active", "teardown" };
96 96
97 struct cpdma_ctlr { 97 struct cpdma_ctlr {
98 enum cpdma_state state; 98 enum cpdma_state state;
99 struct cpdma_params params; 99 struct cpdma_params params;
100 struct device *dev; 100 struct device *dev;
101 struct cpdma_desc_pool *pool; 101 struct cpdma_desc_pool *pool;
102 spinlock_t lock; 102 spinlock_t lock;
103 struct cpdma_chan *channels[2 * CPDMA_MAX_CHANNELS]; 103 struct cpdma_chan *channels[2 * CPDMA_MAX_CHANNELS];
104 }; 104 };
105 105
106 struct cpdma_chan { 106 struct cpdma_chan {
107 enum cpdma_state state; 107 enum cpdma_state state;
108 struct cpdma_ctlr *ctlr; 108 struct cpdma_ctlr *ctlr;
109 int chan_num; 109 int chan_num;
110 spinlock_t lock; 110 spinlock_t lock;
111 struct cpdma_desc __iomem *head, *tail; 111 struct cpdma_desc __iomem *head, *tail;
112 int count; 112 int count;
113 void __iomem *hdp, *cp, *rxfree; 113 void __iomem *hdp, *cp, *rxfree;
114 u32 mask; 114 u32 mask;
115 cpdma_handler_fn handler; 115 cpdma_handler_fn handler;
116 enum dma_data_direction dir; 116 enum dma_data_direction dir;
117 struct cpdma_chan_stats stats; 117 struct cpdma_chan_stats stats;
118 /* offsets into dmaregs */ 118 /* offsets into dmaregs */
119 int int_set, int_clear, td; 119 int int_set, int_clear, td;
120 }; 120 };
121 121
122 /* The following make access to common cpdma_ctlr params more readable */ 122 /* The following make access to common cpdma_ctlr params more readable */
123 #define dmaregs params.dmaregs 123 #define dmaregs params.dmaregs
124 #define num_chan params.num_chan 124 #define num_chan params.num_chan
125 125
126 /* various accessors */ 126 /* various accessors */
127 #define dma_reg_read(ctlr, ofs) __raw_readl((ctlr)->dmaregs + (ofs)) 127 #define dma_reg_read(ctlr, ofs) __raw_readl((ctlr)->dmaregs + (ofs))
128 #define chan_read(chan, fld) __raw_readl((chan)->fld) 128 #define chan_read(chan, fld) __raw_readl((chan)->fld)
129 #define desc_read(desc, fld) __raw_readl(&(desc)->fld) 129 #define desc_read(desc, fld) __raw_readl(&(desc)->fld)
130 #define dma_reg_write(ctlr, ofs, v) __raw_writel(v, (ctlr)->dmaregs + (ofs)) 130 #define dma_reg_write(ctlr, ofs, v) __raw_writel(v, (ctlr)->dmaregs + (ofs))
131 #define chan_write(chan, fld, v) __raw_writel(v, (chan)->fld) 131 #define chan_write(chan, fld, v) __raw_writel(v, (chan)->fld)
132 #define desc_write(desc, fld, v) __raw_writel((u32)(v), &(desc)->fld) 132 #define desc_write(desc, fld, v) __raw_writel((u32)(v), &(desc)->fld)
133 133
134 /* 134 /*
135 * Utility constructs for a cpdma descriptor pool. Some devices (e.g. davinci 135 * Utility constructs for a cpdma descriptor pool. Some devices (e.g. davinci
136 * emac) have dedicated on-chip memory for these descriptors. Some other 136 * emac) have dedicated on-chip memory for these descriptors. Some other
137 * devices (e.g. cpsw switches) use plain old memory. Descriptor pools 137 * devices (e.g. cpsw switches) use plain old memory. Descriptor pools
138 * abstract out these details 138 * abstract out these details
139 */ 139 */
140 static struct cpdma_desc_pool * 140 static struct cpdma_desc_pool *
141 cpdma_desc_pool_create(struct device *dev, u32 phys, u32 hw_addr, 141 cpdma_desc_pool_create(struct device *dev, u32 phys, u32 hw_addr,
142 int size, int align) 142 int size, int align)
143 { 143 {
144 int bitmap_size; 144 int bitmap_size;
145 struct cpdma_desc_pool *pool; 145 struct cpdma_desc_pool *pool;
146 146
147 pool = kzalloc(sizeof(*pool), GFP_KERNEL); 147 pool = kzalloc(sizeof(*pool), GFP_KERNEL);
148 if (!pool) 148 if (!pool)
149 return NULL; 149 return NULL;
150 150
151 spin_lock_init(&pool->lock); 151 spin_lock_init(&pool->lock);
152 152
153 pool->dev = dev; 153 pool->dev = dev;
154 pool->mem_size = size; 154 pool->mem_size = size;
155 pool->desc_size = ALIGN(sizeof(struct cpdma_desc), align); 155 pool->desc_size = ALIGN(sizeof(struct cpdma_desc), align);
156 pool->num_desc = size / pool->desc_size; 156 pool->num_desc = size / pool->desc_size;
157 157
158 bitmap_size = (pool->num_desc / BITS_PER_LONG) * sizeof(long); 158 bitmap_size = (pool->num_desc / BITS_PER_LONG) * sizeof(long);
159 pool->bitmap = kzalloc(bitmap_size, GFP_KERNEL); 159 pool->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
160 if (!pool->bitmap) 160 if (!pool->bitmap)
161 goto fail; 161 goto fail;
162 162
163 if (phys) { 163 if (phys) {
164 pool->phys = phys; 164 pool->phys = phys;
165 pool->iomap = ioremap(phys, size); 165 pool->iomap = ioremap(phys, size);
166 pool->hw_addr = hw_addr; 166 pool->hw_addr = hw_addr;
167 } else { 167 } else {
168 pool->cpumap = dma_alloc_coherent(dev, size, &pool->phys, 168 pool->cpumap = dma_alloc_coherent(dev, size, &pool->phys,
169 GFP_KERNEL); 169 GFP_KERNEL);
170 pool->iomap = pool->cpumap; 170 pool->iomap = pool->cpumap;
171 pool->hw_addr = pool->phys; 171 pool->hw_addr = pool->phys;
172 } 172 }
173 173
174 if (pool->iomap) 174 if (pool->iomap)
175 return pool; 175 return pool;
176 176
177 fail: 177 fail:
178 kfree(pool->bitmap); 178 kfree(pool->bitmap);
179 kfree(pool); 179 kfree(pool);
180 return NULL; 180 return NULL;
181 } 181 }
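
Both branches leave the caller with the same __iomem view of the pool, so the choice of backing store is made entirely through the phys argument. A hedged sketch of the two call shapes (names and sizes illustrative):

	/* descriptors in dedicated BD RAM -- the bd_ram case cpsw_probe()
	 * sets up via desc_mem_phys above */
	pool = cpdma_desc_pool_create(dev, bd_ram_phys, bd_ram_phys, SZ_8K, 16);

	/* descriptors in plain memory via dma_alloc_coherent() -- the
	 * no_bd_ram case */
	pool = cpdma_desc_pool_create(dev, 0, 0, SZ_8K, 16);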
182 182
183 static void cpdma_desc_pool_destroy(struct cpdma_desc_pool *pool) 183 static void cpdma_desc_pool_destroy(struct cpdma_desc_pool *pool)
184 { 184 {
185 unsigned long flags; 185 unsigned long flags;
186 186
187 if (!pool) 187 if (!pool)
188 return; 188 return;
189 189
190 spin_lock_irqsave(&pool->lock, flags); 190 spin_lock_irqsave(&pool->lock, flags);
191 WARN_ON(pool->used_desc); 191 WARN_ON(pool->used_desc);
192 kfree(pool->bitmap); 192 kfree(pool->bitmap);
193 if (pool->cpumap) { 193 if (pool->cpumap) {
194 dma_free_coherent(pool->dev, pool->mem_size, pool->cpumap, 194 dma_free_coherent(pool->dev, pool->mem_size, pool->cpumap,
195 pool->phys); 195 pool->phys);
196 } else { 196 } else {
197 iounmap(pool->iomap); 197 iounmap(pool->iomap);
198 } 198 }
199 spin_unlock_irqrestore(&pool->lock, flags); 199 spin_unlock_irqrestore(&pool->lock, flags);
200 kfree(pool); 200 kfree(pool);
201 } 201 }
202 202
203 static inline dma_addr_t desc_phys(struct cpdma_desc_pool *pool, 203 static inline dma_addr_t desc_phys(struct cpdma_desc_pool *pool,
204 struct cpdma_desc __iomem *desc) 204 struct cpdma_desc __iomem *desc)
205 { 205 {
206 if (!desc) 206 if (!desc)
207 return 0; 207 return 0;
208 return pool->hw_addr + (__force dma_addr_t)desc - 208 return pool->hw_addr + (__force dma_addr_t)desc -
209 (__force dma_addr_t)pool->iomap; 209 (__force dma_addr_t)pool->iomap;
210 } 210 }
211 211
212 static inline struct cpdma_desc __iomem * 212 static inline struct cpdma_desc __iomem *
213 desc_from_phys(struct cpdma_desc_pool *pool, dma_addr_t dma) 213 desc_from_phys(struct cpdma_desc_pool *pool, dma_addr_t dma)
214 { 214 {
215 return dma ? pool->iomap + dma - pool->hw_addr : NULL; 215 return dma ? pool->iomap + dma - pool->hw_addr : NULL;
216 } 216 }
217 217
218 static struct cpdma_desc __iomem * 218 static struct cpdma_desc __iomem *
219 cpdma_desc_alloc(struct cpdma_desc_pool *pool, int num_desc) 219 cpdma_desc_alloc(struct cpdma_desc_pool *pool, int num_desc)
220 { 220 {
221 unsigned long flags; 221 unsigned long flags;
222 int index; 222 int index;
223 struct cpdma_desc __iomem *desc = NULL; 223 struct cpdma_desc __iomem *desc = NULL;
224 224
225 spin_lock_irqsave(&pool->lock, flags); 225 spin_lock_irqsave(&pool->lock, flags);
226 226
227 index = bitmap_find_next_zero_area(pool->bitmap, pool->num_desc, 0, 227 index = bitmap_find_next_zero_area(pool->bitmap, pool->num_desc, 0,
228 num_desc, 0); 228 num_desc, 0);
229 if (index < pool->num_desc) { 229 if (index < pool->num_desc) {
230 bitmap_set(pool->bitmap, index, num_desc); 230 bitmap_set(pool->bitmap, index, num_desc);
231 desc = pool->iomap + pool->desc_size * index; 231 desc = pool->iomap + pool->desc_size * index;
232 pool->used_desc++; 232 pool->used_desc++;
233 } 233 }
234 234
235 spin_unlock_irqrestore(&pool->lock, flags); 235 spin_unlock_irqrestore(&pool->lock, flags);
236 return desc; 236 return desc;
237 } 237 }
238 238
239 static void cpdma_desc_free(struct cpdma_desc_pool *pool, 239 static void cpdma_desc_free(struct cpdma_desc_pool *pool,
240 struct cpdma_desc __iomem *desc, int num_desc) 240 struct cpdma_desc __iomem *desc, int num_desc)
241 { 241 {
242 unsigned long flags, index; 242 unsigned long flags, index;
243 243
244 index = ((unsigned long)desc - (unsigned long)pool->iomap) / 244 index = ((unsigned long)desc - (unsigned long)pool->iomap) /
245 pool->desc_size; 245 pool->desc_size;
246 spin_lock_irqsave(&pool->lock, flags); 246 spin_lock_irqsave(&pool->lock, flags);
247 bitmap_clear(pool->bitmap, index, num_desc); 247 bitmap_clear(pool->bitmap, index, num_desc);
248 pool->used_desc--; 248 pool->used_desc--;
249 spin_unlock_irqrestore(&pool->lock, flags); 249 spin_unlock_irqrestore(&pool->lock, flags);
250 } 250 }
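
Allocation is a first-fit bitmap scan over fixed-size slots, and desc_phys()/desc_from_phys() convert between the CPU's iomap view and the DMA address the hardware follows. A minimal round trip through these helpers (illustrative):

	struct cpdma_desc __iomem *desc = cpdma_desc_alloc(pool, 1);
	if (desc) {
		dma_addr_t dma = desc_phys(pool, desc);		/* address for HW */
		WARN_ON(desc_from_phys(pool, dma) != desc);	/* inverse mapping */
		cpdma_desc_free(pool, desc, 1);
	}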
251 251
252 struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params) 252 struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params)
253 { 253 {
254 struct cpdma_ctlr *ctlr; 254 struct cpdma_ctlr *ctlr;
255 255
256 ctlr = kzalloc(sizeof(*ctlr), GFP_KERNEL); 256 ctlr = kzalloc(sizeof(*ctlr), GFP_KERNEL);
257 if (!ctlr) 257 if (!ctlr)
258 return NULL; 258 return NULL;
259 259
260 ctlr->state = CPDMA_STATE_IDLE; 260 ctlr->state = CPDMA_STATE_IDLE;
261 ctlr->params = *params; 261 ctlr->params = *params;
262 ctlr->dev = params->dev; 262 ctlr->dev = params->dev;
263 spin_lock_init(&ctlr->lock); 263 spin_lock_init(&ctlr->lock);
264 264
265 ctlr->pool = cpdma_desc_pool_create(ctlr->dev, 265 ctlr->pool = cpdma_desc_pool_create(ctlr->dev,
266 ctlr->params.desc_mem_phys, 266 ctlr->params.desc_mem_phys,
267 ctlr->params.desc_hw_addr, 267 ctlr->params.desc_hw_addr,
268 ctlr->params.desc_mem_size, 268 ctlr->params.desc_mem_size,
269 ctlr->params.desc_align); 269 ctlr->params.desc_align);
270 if (!ctlr->pool) { 270 if (!ctlr->pool) {
271 kfree(ctlr); 271 kfree(ctlr);
272 return NULL; 272 return NULL;
273 } 273 }
274 274
275 if (WARN_ON(ctlr->num_chan > CPDMA_MAX_CHANNELS)) 275 if (WARN_ON(ctlr->num_chan > CPDMA_MAX_CHANNELS))
276 ctlr->num_chan = CPDMA_MAX_CHANNELS; 276 ctlr->num_chan = CPDMA_MAX_CHANNELS;
277 return ctlr; 277 return ctlr;
278 } 278 }
279 279
280 int cpdma_ctlr_start(struct cpdma_ctlr *ctlr) 280 int cpdma_ctlr_start(struct cpdma_ctlr *ctlr)
281 { 281 {
282 unsigned long flags; 282 unsigned long flags;
283 int i; 283 int i;
284 284
285 spin_lock_irqsave(&ctlr->lock, flags); 285 spin_lock_irqsave(&ctlr->lock, flags);
286 if (ctlr->state != CPDMA_STATE_IDLE) { 286 if (ctlr->state != CPDMA_STATE_IDLE) {
287 spin_unlock_irqrestore(&ctlr->lock, flags); 287 spin_unlock_irqrestore(&ctlr->lock, flags);
288 return -EBUSY; 288 return -EBUSY;
289 } 289 }
290 290
291 if (ctlr->params.has_soft_reset) { 291 if (ctlr->params.has_soft_reset) {
292 unsigned long timeout = jiffies + HZ/10; 292 unsigned long timeout = jiffies + HZ/10;
293 293
294 dma_reg_write(ctlr, CPDMA_SOFTRESET, 1); 294 dma_reg_write(ctlr, CPDMA_SOFTRESET, 1);
295 while (time_before(jiffies, timeout)) { 295 while (time_before(jiffies, timeout)) {
296 if (dma_reg_read(ctlr, CPDMA_SOFTRESET) == 0) 296 if (dma_reg_read(ctlr, CPDMA_SOFTRESET) == 0)
297 break; 297 break;
298 } 298 }
299 WARN_ON(!time_before(jiffies, timeout)); 299 WARN_ON(!time_before(jiffies, timeout));
300 } 300 }
301 301
302 for (i = 0; i < ctlr->num_chan; i++) { 302 for (i = 0; i < ctlr->num_chan; i++) {
303 __raw_writel(0, ctlr->params.txhdp + 4 * i); 303 __raw_writel(0, ctlr->params.txhdp + 4 * i);
304 __raw_writel(0, ctlr->params.rxhdp + 4 * i); 304 __raw_writel(0, ctlr->params.rxhdp + 4 * i);
305 __raw_writel(0, ctlr->params.txcp + 4 * i); 305 __raw_writel(0, ctlr->params.txcp + 4 * i);
306 __raw_writel(0, ctlr->params.rxcp + 4 * i); 306 __raw_writel(0, ctlr->params.rxcp + 4 * i);
307 } 307 }
308 308
309 dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff); 309 dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
310 dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff); 310 dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);
311 311
312 dma_reg_write(ctlr, CPDMA_TXCONTROL, 1); 312 dma_reg_write(ctlr, CPDMA_TXCONTROL, 1);
313 dma_reg_write(ctlr, CPDMA_RXCONTROL, 1); 313 dma_reg_write(ctlr, CPDMA_RXCONTROL, 1);
314 314
315 ctlr->state = CPDMA_STATE_ACTIVE; 315 ctlr->state = CPDMA_STATE_ACTIVE;
316 316
317 for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) { 317 for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
318 if (ctlr->channels[i]) 318 if (ctlr->channels[i])
319 cpdma_chan_start(ctlr->channels[i]); 319 cpdma_chan_start(ctlr->channels[i]);
320 } 320 }
321 spin_unlock_irqrestore(&ctlr->lock, flags); 321 spin_unlock_irqrestore(&ctlr->lock, flags);
322 return 0; 322 return 0;
323 } 323 }
324 324
325 int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr) 325 int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr)
326 { 326 {
327 unsigned long flags; 327 unsigned long flags;
328 int i; 328 int i;
329 329
330 spin_lock_irqsave(&ctlr->lock, flags); 330 spin_lock_irqsave(&ctlr->lock, flags);
331 if (ctlr->state != CPDMA_STATE_ACTIVE) { 331 if (ctlr->state != CPDMA_STATE_ACTIVE) {
332 spin_unlock_irqrestore(&ctlr->lock, flags); 332 spin_unlock_irqrestore(&ctlr->lock, flags);
333 return -EINVAL; 333 return -EINVAL;
334 } 334 }
335 335
336 ctlr->state = CPDMA_STATE_TEARDOWN; 336 ctlr->state = CPDMA_STATE_TEARDOWN;
337 337
338 for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) { 338 for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
339 if (ctlr->channels[i]) 339 if (ctlr->channels[i])
340 cpdma_chan_stop(ctlr->channels[i]); 340 cpdma_chan_stop(ctlr->channels[i]);
341 } 341 }
342 342
343 dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff); 343 dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
344 dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff); 344 dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);
345 345
346 dma_reg_write(ctlr, CPDMA_TXCONTROL, 0); 346 dma_reg_write(ctlr, CPDMA_TXCONTROL, 0);
347 dma_reg_write(ctlr, CPDMA_RXCONTROL, 0); 347 dma_reg_write(ctlr, CPDMA_RXCONTROL, 0);
348 348
349 ctlr->state = CPDMA_STATE_IDLE; 349 ctlr->state = CPDMA_STATE_IDLE;
350 350
351 spin_unlock_irqrestore(&ctlr->lock, flags); 351 spin_unlock_irqrestore(&ctlr->lock, flags);
352 return 0; 352 return 0;
353 } 353 }
354 354
355 int cpdma_ctlr_dump(struct cpdma_ctlr *ctlr) 355 int cpdma_ctlr_dump(struct cpdma_ctlr *ctlr)
356 { 356 {
357 struct device *dev = ctlr->dev; 357 struct device *dev = ctlr->dev;
358 unsigned long flags; 358 unsigned long flags;
359 int i; 359 int i;
360 360
361 spin_lock_irqsave(&ctlr->lock, flags); 361 spin_lock_irqsave(&ctlr->lock, flags);
362 362
363 dev_info(dev, "CPDMA: state: %s", cpdma_state_str[ctlr->state]); 363 dev_info(dev, "CPDMA: state: %s", cpdma_state_str[ctlr->state]);
364 364
365 dev_info(dev, "CPDMA: txidver: %x", 365 dev_info(dev, "CPDMA: txidver: %x",
366 dma_reg_read(ctlr, CPDMA_TXIDVER)); 366 dma_reg_read(ctlr, CPDMA_TXIDVER));
367 dev_info(dev, "CPDMA: txcontrol: %x", 367 dev_info(dev, "CPDMA: txcontrol: %x",
368 dma_reg_read(ctlr, CPDMA_TXCONTROL)); 368 dma_reg_read(ctlr, CPDMA_TXCONTROL));
369 dev_info(dev, "CPDMA: txteardown: %x", 369 dev_info(dev, "CPDMA: txteardown: %x",
370 dma_reg_read(ctlr, CPDMA_TXTEARDOWN)); 370 dma_reg_read(ctlr, CPDMA_TXTEARDOWN));
371 dev_info(dev, "CPDMA: rxidver: %x", 371 dev_info(dev, "CPDMA: rxidver: %x",
372 dma_reg_read(ctlr, CPDMA_RXIDVER)); 372 dma_reg_read(ctlr, CPDMA_RXIDVER));
373 dev_info(dev, "CPDMA: rxcontrol: %x", 373 dev_info(dev, "CPDMA: rxcontrol: %x",
374 dma_reg_read(ctlr, CPDMA_RXCONTROL)); 374 dma_reg_read(ctlr, CPDMA_RXCONTROL));
375 dev_info(dev, "CPDMA: softreset: %x", 375 dev_info(dev, "CPDMA: softreset: %x",
376 dma_reg_read(ctlr, CPDMA_SOFTRESET)); 376 dma_reg_read(ctlr, CPDMA_SOFTRESET));
377 dev_info(dev, "CPDMA: rxteardown: %x", 377 dev_info(dev, "CPDMA: rxteardown: %x",
378 dma_reg_read(ctlr, CPDMA_RXTEARDOWN)); 378 dma_reg_read(ctlr, CPDMA_RXTEARDOWN));
379 dev_info(dev, "CPDMA: txintstatraw: %x", 379 dev_info(dev, "CPDMA: txintstatraw: %x",
380 dma_reg_read(ctlr, CPDMA_TXINTSTATRAW)); 380 dma_reg_read(ctlr, CPDMA_TXINTSTATRAW));
381 dev_info(dev, "CPDMA: txintstatmasked: %x", 381 dev_info(dev, "CPDMA: txintstatmasked: %x",
382 dma_reg_read(ctlr, CPDMA_TXINTSTATMASKED)); 382 dma_reg_read(ctlr, CPDMA_TXINTSTATMASKED));
383 dev_info(dev, "CPDMA: txintmaskset: %x", 383 dev_info(dev, "CPDMA: txintmaskset: %x",
384 dma_reg_read(ctlr, CPDMA_TXINTMASKSET)); 384 dma_reg_read(ctlr, CPDMA_TXINTMASKSET));
385 dev_info(dev, "CPDMA: txintmaskclear: %x", 385 dev_info(dev, "CPDMA: txintmaskclear: %x",
386 dma_reg_read(ctlr, CPDMA_TXINTMASKCLEAR)); 386 dma_reg_read(ctlr, CPDMA_TXINTMASKCLEAR));
387 dev_info(dev, "CPDMA: macinvector: %x", 387 dev_info(dev, "CPDMA: macinvector: %x",
388 dma_reg_read(ctlr, CPDMA_MACINVECTOR)); 388 dma_reg_read(ctlr, CPDMA_MACINVECTOR));
389 dev_info(dev, "CPDMA: maceoivector: %x", 389 dev_info(dev, "CPDMA: maceoivector: %x",
390 dma_reg_read(ctlr, CPDMA_MACEOIVECTOR)); 390 dma_reg_read(ctlr, CPDMA_MACEOIVECTOR));
391 dev_info(dev, "CPDMA: rxintstatraw: %x", 391 dev_info(dev, "CPDMA: rxintstatraw: %x",
392 dma_reg_read(ctlr, CPDMA_RXINTSTATRAW)); 392 dma_reg_read(ctlr, CPDMA_RXINTSTATRAW));
393 dev_info(dev, "CPDMA: rxintstatmasked: %x", 393 dev_info(dev, "CPDMA: rxintstatmasked: %x",
394 dma_reg_read(ctlr, CPDMA_RXINTSTATMASKED)); 394 dma_reg_read(ctlr, CPDMA_RXINTSTATMASKED));
395 dev_info(dev, "CPDMA: rxintmaskset: %x", 395 dev_info(dev, "CPDMA: rxintmaskset: %x",
396 dma_reg_read(ctlr, CPDMA_RXINTMASKSET)); 396 dma_reg_read(ctlr, CPDMA_RXINTMASKSET));
397 dev_info(dev, "CPDMA: rxintmaskclear: %x", 397 dev_info(dev, "CPDMA: rxintmaskclear: %x",
398 dma_reg_read(ctlr, CPDMA_RXINTMASKCLEAR)); 398 dma_reg_read(ctlr, CPDMA_RXINTMASKCLEAR));
399 dev_info(dev, "CPDMA: dmaintstatraw: %x", 399 dev_info(dev, "CPDMA: dmaintstatraw: %x",
400 dma_reg_read(ctlr, CPDMA_DMAINTSTATRAW)); 400 dma_reg_read(ctlr, CPDMA_DMAINTSTATRAW));
401 dev_info(dev, "CPDMA: dmaintstatmasked: %x", 401 dev_info(dev, "CPDMA: dmaintstatmasked: %x",
402 dma_reg_read(ctlr, CPDMA_DMAINTSTATMASKED)); 402 dma_reg_read(ctlr, CPDMA_DMAINTSTATMASKED));
403 dev_info(dev, "CPDMA: dmaintmaskset: %x", 403 dev_info(dev, "CPDMA: dmaintmaskset: %x",
404 dma_reg_read(ctlr, CPDMA_DMAINTMASKSET)); 404 dma_reg_read(ctlr, CPDMA_DMAINTMASKSET));
405 dev_info(dev, "CPDMA: dmaintmaskclear: %x", 405 dev_info(dev, "CPDMA: dmaintmaskclear: %x",
406 dma_reg_read(ctlr, CPDMA_DMAINTMASKCLEAR)); 406 dma_reg_read(ctlr, CPDMA_DMAINTMASKCLEAR));
407 407
408 if (ctlr->params.has_ext_regs) { 408 if (ctlr->params.has_ext_regs) {
409 dev_info(dev, "CPDMA: dmacontrol: %x", 409 dev_info(dev, "CPDMA: dmacontrol: %x",
410 dma_reg_read(ctlr, CPDMA_DMACONTROL)); 410 dma_reg_read(ctlr, CPDMA_DMACONTROL));
411 dev_info(dev, "CPDMA: dmastatus: %x", 411 dev_info(dev, "CPDMA: dmastatus: %x",
412 dma_reg_read(ctlr, CPDMA_DMASTATUS)); 412 dma_reg_read(ctlr, CPDMA_DMASTATUS));
413 dev_info(dev, "CPDMA: rxbuffofs: %x", 413 dev_info(dev, "CPDMA: rxbuffofs: %x",
414 dma_reg_read(ctlr, CPDMA_RXBUFFOFS)); 414 dma_reg_read(ctlr, CPDMA_RXBUFFOFS));
415 } 415 }
416 416
417 for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) 417 for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++)
418 if (ctlr->channels[i]) 418 if (ctlr->channels[i])
419 cpdma_chan_dump(ctlr->channels[i]); 419 cpdma_chan_dump(ctlr->channels[i]);
420 420
421 spin_unlock_irqrestore(&ctlr->lock, flags); 421 spin_unlock_irqrestore(&ctlr->lock, flags);
422 return 0; 422 return 0;
423 } 423 }
424 424
425 int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr) 425 int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr)
426 { 426 {
427 unsigned long flags; 427 unsigned long flags;
428 int ret = 0, i; 428 int ret = 0, i;
429 429
430 if (!ctlr) 430 if (!ctlr)
431 return -EINVAL; 431 return -EINVAL;
432 432
433 spin_lock_irqsave(&ctlr->lock, flags); 433 spin_lock_irqsave(&ctlr->lock, flags);
434 if (ctlr->state != CPDMA_STATE_IDLE) 434 if (ctlr->state != CPDMA_STATE_IDLE)
435 cpdma_ctlr_stop(ctlr); 435 cpdma_ctlr_stop(ctlr);
436 436
437 for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) { 437 for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
438 if (ctlr->channels[i]) 438 if (ctlr->channels[i])
439 cpdma_chan_destroy(ctlr->channels[i]); 439 cpdma_chan_destroy(ctlr->channels[i]);
440 } 440 }
441 441
442 cpdma_desc_pool_destroy(ctlr->pool); 442 cpdma_desc_pool_destroy(ctlr->pool);
443 spin_unlock_irqrestore(&ctlr->lock, flags); 443 spin_unlock_irqrestore(&ctlr->lock, flags);
444 kfree(ctlr); 444 kfree(ctlr);
445 return ret; 445 return ret;
446 } 446 }
447 447
448 int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable) 448 int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable)
449 { 449 {
450 unsigned long flags; 450 unsigned long flags;
451 int i, reg; 451 int i, reg;
452 452
453 spin_lock_irqsave(&ctlr->lock, flags); 453 spin_lock_irqsave(&ctlr->lock, flags);
454 if (ctlr->state != CPDMA_STATE_ACTIVE) { 454 if (ctlr->state != CPDMA_STATE_ACTIVE) {
455 spin_unlock_irqrestore(&ctlr->lock, flags); 455 spin_unlock_irqrestore(&ctlr->lock, flags);
456 return -EINVAL; 456 return -EINVAL;
457 } 457 }
458 458
459 reg = enable ? CPDMA_DMAINTMASKSET : CPDMA_DMAINTMASKCLEAR; 459 reg = enable ? CPDMA_DMAINTMASKSET : CPDMA_DMAINTMASKCLEAR;
460 dma_reg_write(ctlr, reg, CPDMA_DMAINT_HOSTERR); 460 dma_reg_write(ctlr, reg, CPDMA_DMAINT_HOSTERR);
461 461
462 for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) { 462 for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
463 if (ctlr->channels[i]) 463 if (ctlr->channels[i])
464 cpdma_chan_int_ctrl(ctlr->channels[i], enable); 464 cpdma_chan_int_ctrl(ctlr->channels[i], enable);
465 } 465 }
466 466
467 spin_unlock_irqrestore(&ctlr->lock, flags); 467 spin_unlock_irqrestore(&ctlr->lock, flags);
468 return 0; 468 return 0;
469 } 469 }
470 470
471 void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr) 471 void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr)
472 { 472 {
473 dma_reg_write(ctlr, CPDMA_MACEOIVECTOR, 0); 473 dma_reg_write(ctlr, CPDMA_MACEOIVECTOR, 0);
474 dma_reg_write(ctlr, CPDMA_MACEOIVECTOR, 1);
475 dma_reg_write(ctlr, CPDMA_MACEOIVECTOR, 2);
474 } 476 }
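
This is the davinci_cpdma half of the patch: cpdma_ctlr_eoi() now writes three values to the EOI vector register instead of one, presumably acknowledging the rx-threshold, rx and tx interrupt lines that AM33xx brings out of the CPDMA core separately. A hedged sketch of where the call sits in the interrupt flow (the real handler is cpsw_interrupt() in cpsw.c, outside this hunk, and may differ):

	static irqreturn_t cpsw_interrupt(int irq, void *dev_id)
	{
		struct cpsw_priv *priv = dev_id;

		if (likely(netif_running(priv->ndev))) {
			cpdma_ctlr_eoi(priv->dma);	/* ack pending EOI vectors */
			napi_schedule(&priv->napi);	/* defer work to cpsw_poll */
		}
		return IRQ_HANDLED;
	}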
475 477
476 struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num, 478 struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
477 cpdma_handler_fn handler) 479 cpdma_handler_fn handler)
478 { 480 {
479 struct cpdma_chan *chan; 481 struct cpdma_chan *chan;
480 int ret, offset = (chan_num % CPDMA_MAX_CHANNELS) * 4; 482 int ret, offset = (chan_num % CPDMA_MAX_CHANNELS) * 4;
481 unsigned long flags; 483 unsigned long flags;
482 484
483 if (__chan_linear(chan_num) >= ctlr->num_chan) 485 if (__chan_linear(chan_num) >= ctlr->num_chan)
484 return NULL; 486 return NULL;
485 487
486 ret = -ENOMEM; 488 ret = -ENOMEM;
487 chan = kzalloc(sizeof(*chan), GFP_KERNEL); 489 chan = kzalloc(sizeof(*chan), GFP_KERNEL);
488 if (!chan) 490 if (!chan)
489 goto err_chan_alloc; 491 goto err_chan_alloc;
490 492
491 spin_lock_irqsave(&ctlr->lock, flags); 493 spin_lock_irqsave(&ctlr->lock, flags);
492 ret = -EBUSY; 494 ret = -EBUSY;
493 if (ctlr->channels[chan_num]) 495 if (ctlr->channels[chan_num])
494 goto err_chan_busy; 496 goto err_chan_busy;
495 497
496 chan->ctlr = ctlr; 498 chan->ctlr = ctlr;
497 chan->state = CPDMA_STATE_IDLE; 499 chan->state = CPDMA_STATE_IDLE;
498 chan->chan_num = chan_num; 500 chan->chan_num = chan_num;
499 chan->handler = handler; 501 chan->handler = handler;
500 502
501 if (is_rx_chan(chan)) { 503 if (is_rx_chan(chan)) {
502 chan->hdp = ctlr->params.rxhdp + offset; 504 chan->hdp = ctlr->params.rxhdp + offset;
503 chan->cp = ctlr->params.rxcp + offset; 505 chan->cp = ctlr->params.rxcp + offset;
504 chan->rxfree = ctlr->params.rxfree + offset; 506 chan->rxfree = ctlr->params.rxfree + offset;
505 chan->int_set = CPDMA_RXINTMASKSET; 507 chan->int_set = CPDMA_RXINTMASKSET;
506 chan->int_clear = CPDMA_RXINTMASKCLEAR; 508 chan->int_clear = CPDMA_RXINTMASKCLEAR;
507 chan->td = CPDMA_RXTEARDOWN; 509 chan->td = CPDMA_RXTEARDOWN;
508 chan->dir = DMA_FROM_DEVICE; 510 chan->dir = DMA_FROM_DEVICE;
509 } else { 511 } else {
510 chan->hdp = ctlr->params.txhdp + offset; 512 chan->hdp = ctlr->params.txhdp + offset;
511 chan->cp = ctlr->params.txcp + offset; 513 chan->cp = ctlr->params.txcp + offset;
512 chan->int_set = CPDMA_TXINTMASKSET; 514 chan->int_set = CPDMA_TXINTMASKSET;
513 chan->int_clear = CPDMA_TXINTMASKCLEAR; 515 chan->int_clear = CPDMA_TXINTMASKCLEAR;
514 chan->td = CPDMA_TXTEARDOWN; 516 chan->td = CPDMA_TXTEARDOWN;
515 chan->dir = DMA_TO_DEVICE; 517 chan->dir = DMA_TO_DEVICE;
516 } 518 }
517 chan->mask = BIT(chan_linear(chan)); 519 chan->mask = BIT(chan_linear(chan));
518 520
519 spin_lock_init(&chan->lock); 521 spin_lock_init(&chan->lock);
520 522
521 ctlr->channels[chan_num] = chan; 523 ctlr->channels[chan_num] = chan;
522 spin_unlock_irqrestore(&ctlr->lock, flags); 524 spin_unlock_irqrestore(&ctlr->lock, flags);
523 return chan; 525 return chan;
524 526
525 err_chan_busy: 527 err_chan_busy:
526 spin_unlock_irqrestore(&ctlr->lock, flags); 528 spin_unlock_irqrestore(&ctlr->lock, flags);
527 kfree(chan); 529 kfree(chan);
528 err_chan_alloc: 530 err_chan_alloc:
529 return ERR_PTR(ret); 531 return ERR_PTR(ret);
530 } 532 }
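
The rx/tx split above relies on a channel-numbering convention shared with cpsw.c (see tx_chan_num()/rx_chan_num() in cpsw_probe). davinci_cpdma.h is not part of this diff, but from the usage in this file the macros presumably look like:

	/* assumed definitions, inferred from how they are used here */
	#define tx_chan_num(chan)	(chan)
	#define rx_chan_num(chan)	((chan) + CPDMA_MAX_CHANNELS)
	#define is_rx_chan(chan)	((chan)->chan_num >= CPDMA_MAX_CHANNELS)
	#define is_tx_chan(chan)	(!is_rx_chan(chan))
	#define __chan_linear(chan_num)	((chan_num) & (CPDMA_MAX_CHANNELS - 1))
	#define chan_linear(chan)	__chan_linear((chan)->chan_num)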
531 533
532 int cpdma_chan_destroy(struct cpdma_chan *chan) 534 int cpdma_chan_destroy(struct cpdma_chan *chan)
533 { 535 {
534 struct cpdma_ctlr *ctlr = chan ? chan->ctlr : NULL; 536 struct cpdma_ctlr *ctlr = chan ? chan->ctlr : NULL;
535 unsigned long flags; 537 unsigned long flags;
536 538
537 if (!chan) 539 if (!chan)
538 return -EINVAL; 540 return -EINVAL;
539 541
540 spin_lock_irqsave(&ctlr->lock, flags); 542 spin_lock_irqsave(&ctlr->lock, flags);
541 if (chan->state != CPDMA_STATE_IDLE) 543 if (chan->state != CPDMA_STATE_IDLE)
542 cpdma_chan_stop(chan); 544 cpdma_chan_stop(chan);
543 ctlr->channels[chan->chan_num] = NULL; 545 ctlr->channels[chan->chan_num] = NULL;
544 spin_unlock_irqrestore(&ctlr->lock, flags); 546 spin_unlock_irqrestore(&ctlr->lock, flags);
545 kfree(chan); 547 kfree(chan);
546 return 0; 548 return 0;
547 } 549 }
548 550
int cpdma_chan_get_stats(struct cpdma_chan *chan,
			 struct cpdma_chan_stats *stats)
{
	unsigned long flags;

	if (!chan)
		return -EINVAL;
	spin_lock_irqsave(&chan->lock, flags);
	memcpy(stats, &chan->stats, sizeof(*stats));
	spin_unlock_irqrestore(&chan->lock, flags);
	return 0;
}

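/*
 * Sketch of a caller (assumed names, not part of this file):
 *
 *	struct cpdma_chan_stats stats;
 *
 *	if (cpdma_chan_get_stats(priv->txch, &stats) == 0)
 *		dev_dbg(priv->dev, "tx good_dequeue: %d\n",
 *			stats.good_dequeue);
 */
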
int cpdma_chan_dump(struct cpdma_chan *chan)
{
	unsigned long flags;
	struct device *dev = chan->ctlr->dev;

	spin_lock_irqsave(&chan->lock, flags);

	dev_info(dev, "channel %d (%s %d) state %s\n",
		 chan->chan_num, is_rx_chan(chan) ? "rx" : "tx",
		 chan_linear(chan), cpdma_state_str[chan->state]);
	dev_info(dev, "\thdp: %x\n", chan_read(chan, hdp));
	dev_info(dev, "\tcp: %x\n", chan_read(chan, cp));
	if (chan->rxfree) {
		dev_info(dev, "\trxfree: %x\n",
			 chan_read(chan, rxfree));
	}

	dev_info(dev, "\tstats head_enqueue: %d\n",
		 chan->stats.head_enqueue);
	dev_info(dev, "\tstats tail_enqueue: %d\n",
		 chan->stats.tail_enqueue);
	dev_info(dev, "\tstats pad_enqueue: %d\n",
		 chan->stats.pad_enqueue);
	dev_info(dev, "\tstats misqueued: %d\n",
		 chan->stats.misqueued);
	dev_info(dev, "\tstats desc_alloc_fail: %d\n",
		 chan->stats.desc_alloc_fail);
	dev_info(dev, "\tstats pad_alloc_fail: %d\n",
		 chan->stats.pad_alloc_fail);
	dev_info(dev, "\tstats runt_receive_buff: %d\n",
		 chan->stats.runt_receive_buff);
	dev_info(dev, "\tstats runt_transmit_buff: %d\n",
		 chan->stats.runt_transmit_buff);
	dev_info(dev, "\tstats empty_dequeue: %d\n",
		 chan->stats.empty_dequeue);
	dev_info(dev, "\tstats busy_dequeue: %d\n",
		 chan->stats.busy_dequeue);
	dev_info(dev, "\tstats good_dequeue: %d\n",
		 chan->stats.good_dequeue);
	dev_info(dev, "\tstats requeue: %d\n",
		 chan->stats.requeue);
	dev_info(dev, "\tstats teardown_dequeue: %d\n",
		 chan->stats.teardown_dequeue);

	spin_unlock_irqrestore(&chan->lock, flags);
	return 0;
}

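/*
 * cpdma_chan_dump() above is purely a debugging aid: it prints the
 * raw HDP/CP (and RXFREE for rx channels) register state and every
 * software counter while holding chan->lock, so the values in one
 * dump are mutually consistent.
 */
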
static void __cpdma_chan_submit(struct cpdma_chan *chan,
				struct cpdma_desc __iomem *desc)
{
	struct cpdma_ctlr *ctlr = chan->ctlr;
	struct cpdma_desc __iomem *prev = chan->tail;
	struct cpdma_desc_pool *pool = ctlr->pool;
	dma_addr_t desc_dma;
	u32 mode;

	desc_dma = desc_phys(pool, desc);

	/* simple case - idle channel */
	if (!chan->head) {
		chan->stats.head_enqueue++;
		chan->head = desc;
		chan->tail = desc;
		if (chan->state == CPDMA_STATE_ACTIVE)
			chan_write(chan, hdp, desc_dma);
		return;
	}

	/* first chain the descriptor at the tail of the list */
	desc_write(prev, hw_next, desc_dma);
	chan->tail = desc;
	chan->stats.tail_enqueue++;

	/* next check if EOQ has been triggered already */
	mode = desc_read(prev, hw_mode);
	if (((mode & (CPDMA_DESC_EOQ | CPDMA_DESC_OWNER)) == CPDMA_DESC_EOQ) &&
	    (chan->state == CPDMA_STATE_ACTIVE)) {
		desc_write(prev, hw_mode, mode & ~CPDMA_DESC_EOQ);
		chan_write(chan, hdp, desc_dma);
		chan->stats.misqueued++;
	}
}

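/*
 * Note on the EOQ test above: between chaining hw_next on the old
 * tail and the hardware fetching it, the DMA engine may already have
 * completed that descriptor, set EOQ, and gone idle ("misqueue").
 * Re-reading the mode word after chaining, and rewriting HDP when EOQ
 * is set while OWNER is clear, restarts the queue without losing the
 * newly queued descriptor.
 */
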
int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
		      int len, gfp_t gfp_mask)
{
	struct cpdma_ctlr *ctlr = chan->ctlr;
	struct cpdma_desc __iomem *desc;
	dma_addr_t buffer;
	unsigned long flags;
	u32 mode;
	int ret = 0;

	spin_lock_irqsave(&chan->lock, flags);

	if (chan->state == CPDMA_STATE_TEARDOWN) {
		ret = -EINVAL;
		goto unlock_ret;
	}

	desc = cpdma_desc_alloc(ctlr->pool, 1);
	if (!desc) {
		chan->stats.desc_alloc_fail++;
		ret = -ENOMEM;
		goto unlock_ret;
	}

	if (len < ctlr->params.min_packet_size) {
		len = ctlr->params.min_packet_size;
		chan->stats.runt_transmit_buff++;
	}

	buffer = dma_map_single(ctlr->dev, data, len, chan->dir);
	mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP;

	desc_write(desc, hw_next,   0);
	desc_write(desc, hw_buffer, buffer);
	desc_write(desc, hw_len,    len);
	desc_write(desc, hw_mode,   mode | len);
	desc_write(desc, sw_token,  token);
	desc_write(desc, sw_buffer, buffer);
	desc_write(desc, sw_len,    len);

	__cpdma_chan_submit(chan, desc);

	if (chan->state == CPDMA_STATE_ACTIVE && chan->rxfree)
		chan_write(chan, rxfree, 1);

	chan->count++;

unlock_ret:
	spin_unlock_irqrestore(&chan->lock, flags);
	return ret;
}

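/*
 * Illustrative transmit path (assumed names, not part of this file):
 *
 *	ret = cpdma_chan_submit(priv->txch, skb, skb->data,
 *				skb->len, GFP_KERNEL);
 *	if (unlikely(ret != 0))
 *		dev_kfree_skb_any(skb);
 *
 * The token (here the skb) is handed back to the channel handler once
 * the descriptor completes or the channel is torn down.
 */
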
static void __cpdma_chan_free(struct cpdma_chan *chan,
			      struct cpdma_desc __iomem *desc,
			      int outlen, int status)
{
	struct cpdma_ctlr *ctlr = chan->ctlr;
	struct cpdma_desc_pool *pool = ctlr->pool;
	dma_addr_t buff_dma;
	int origlen;
	void *token;

	token	 = (void *)desc_read(desc, sw_token);
	buff_dma = desc_read(desc, sw_buffer);
	origlen	 = desc_read(desc, sw_len);

	dma_unmap_single(ctlr->dev, buff_dma, origlen, chan->dir);
	cpdma_desc_free(pool, desc, 1);
	(*chan->handler)(token, outlen, status);
}

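/*
 * __cpdma_chan_free() unmaps the buffer at its original (sw_len)
 * length, frees the descriptor, and only then invokes the completion
 * handler, so the handler may safely free or resubmit the token.
 * Callers are expected not to hold chan->lock across the handler;
 * see the teardown loop in cpdma_chan_stop().
 */
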
static int __cpdma_chan_process(struct cpdma_chan *chan)
{
	struct cpdma_ctlr *ctlr = chan->ctlr;
	struct cpdma_desc __iomem *desc;
	int status, outlen;
	struct cpdma_desc_pool *pool = ctlr->pool;
	dma_addr_t desc_dma;

	desc = chan->head;
	if (!desc) {
		chan->stats.empty_dequeue++;
		status = -ENOENT;
		goto unlock_ret;
	}
	desc_dma = desc_phys(pool, desc);

	status = __raw_readl(&desc->hw_mode);
	outlen = status & 0x7ff;
	if (status & CPDMA_DESC_OWNER) {
		chan->stats.busy_dequeue++;
		status = -EBUSY;
		goto unlock_ret;
	}
	status = status & (CPDMA_DESC_EOQ | CPDMA_DESC_TD_COMPLETE);

	chan->head = desc_from_phys(pool, desc_read(desc, hw_next));
	chan_write(chan, cp, desc_dma);
	chan->count--;
	chan->stats.good_dequeue++;

	if (status & CPDMA_DESC_EOQ) {
		chan->stats.requeue++;
		chan_write(chan, hdp, desc_phys(pool, chan->head));
	}

	__cpdma_chan_free(chan, desc, outlen, status);
	return status;

unlock_ret:
	return status;
}

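/*
 * Dequeue rules encoded above: a head descriptor still owned by the
 * hardware (CPDMA_DESC_OWNER set in hw_mode) stops the scan with
 * -EBUSY; the low 11 bits of hw_mode carry the completed buffer
 * length; and a set EOQ bit means the engine stopped at this
 * descriptor, so HDP is rewritten with the new head to keep the
 * queue draining ("requeue").
 */
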
int cpdma_chan_process(struct cpdma_chan *chan, int quota)
{
	int used = 0, ret = 0;

	if (chan->state != CPDMA_STATE_ACTIVE)
		return -EINVAL;

	while (used < quota) {
		ret = __cpdma_chan_process(chan);
		if (ret < 0)
			break;
		used++;
	}
	return used;
}

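/*
 * Sketch of a NAPI-style poll built on this primitive (assumed
 * names; interrupt re-enabling is driver-specific and omitted):
 *
 *	num_rx = cpdma_chan_process(priv->rxch, budget);
 *	num_tx = cpdma_chan_process(priv->txch, 128);
 *	if (num_rx < budget)
 *		napi_complete(napi);
 *
 * The return value is the number of descriptors serviced, which is
 * exactly what a poll callback reports against its budget.
 */
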
int cpdma_chan_start(struct cpdma_chan *chan)
{
	struct cpdma_ctlr *ctlr = chan->ctlr;
	struct cpdma_desc_pool *pool = ctlr->pool;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state != CPDMA_STATE_IDLE) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EBUSY;
	}
	if (ctlr->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EINVAL;
	}
	dma_reg_write(ctlr, chan->int_set, chan->mask);
	chan->state = CPDMA_STATE_ACTIVE;
	if (chan->head) {
		chan_write(chan, hdp, desc_phys(pool, chan->head));
		if (chan->rxfree)
			chan_write(chan, rxfree, chan->count);
	}

	spin_unlock_irqrestore(&chan->lock, flags);
	return 0;
}

int cpdma_chan_stop(struct cpdma_chan *chan)
{
	struct cpdma_ctlr *ctlr = chan->ctlr;
	struct cpdma_desc_pool *pool = ctlr->pool;
	unsigned long flags;
	int ret;
	unsigned long timeout;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EINVAL;
	}

	chan->state = CPDMA_STATE_TEARDOWN;
	dma_reg_write(ctlr, chan->int_clear, chan->mask);

	/* trigger teardown */
	dma_reg_write(ctlr, chan->td, chan->chan_num);

	/* wait for teardown complete */
	timeout = jiffies + HZ/10;	/* 100 msec */
	while (time_before(jiffies, timeout)) {
		u32 cp = chan_read(chan, cp);
		if ((cp & CPDMA_TEARDOWN_VALUE) == CPDMA_TEARDOWN_VALUE)
			break;
		cpu_relax();
	}
	WARN_ON(!time_before(jiffies, timeout));
	chan_write(chan, cp, CPDMA_TEARDOWN_VALUE);

	/* handle completed packets */
	do {
		ret = __cpdma_chan_process(chan);
		if (ret < 0)
			break;
	} while ((ret & CPDMA_DESC_TD_COMPLETE) == 0);

	/* remaining packets haven't been tx/rx'ed, clean them up */
	while (chan->head) {
		struct cpdma_desc __iomem *desc = chan->head;
		dma_addr_t next_dma;

		next_dma = desc_read(desc, hw_next);
		chan->head = desc_from_phys(pool, next_dma);
		chan->stats.teardown_dequeue++;

		/* issue callback without locks held */
		spin_unlock_irqrestore(&chan->lock, flags);
		__cpdma_chan_free(chan, desc, 0, -ENOSYS);
		spin_lock_irqsave(&chan->lock, flags);
	}

	chan->state = CPDMA_STATE_IDLE;
	spin_unlock_irqrestore(&chan->lock, flags);
	return 0;
}

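/*
 * Teardown protocol used above: mask the channel interrupt, write the
 * channel number to the teardown register, poll CP until the hardware
 * posts the teardown completion cookie (CPDMA_TEARDOWN_VALUE),
 * acknowledge it back through CP, then drain completed descriptors,
 * and finally complete any unprocessed buffers to the handler with
 * -ENOSYS.
 */
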
int cpdma_chan_int_ctrl(struct cpdma_chan *chan, bool enable)
{
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EINVAL;
	}

	dma_reg_write(chan->ctlr, enable ? chan->int_set : chan->int_clear,
		      chan->mask);
	spin_unlock_irqrestore(&chan->lock, flags);

	return 0;
}

struct cpdma_control_info {
	u32	reg;
	u32	shift, mask;
	int	access;
#define ACCESS_RO	BIT(0)
#define ACCESS_WO	BIT(1)
#define ACCESS_RW	(ACCESS_RO | ACCESS_WO)
};

static struct cpdma_control_info controls[] = {
	[CPDMA_CMD_IDLE]	  = {CPDMA_DMACONTROL,	3,  1,	    ACCESS_WO},
	[CPDMA_COPY_ERROR_FRAMES] = {CPDMA_DMACONTROL,	4,  1,	    ACCESS_RW},
	[CPDMA_RX_OFF_LEN_UPDATE] = {CPDMA_DMACONTROL,	2,  1,	    ACCESS_RW},
	[CPDMA_RX_OWNERSHIP_FLIP] = {CPDMA_DMACONTROL,	1,  1,	    ACCESS_RW},
	[CPDMA_TX_PRIO_FIXED]	  = {CPDMA_DMACONTROL,	0,  1,	    ACCESS_RW},
	[CPDMA_STAT_IDLE]	  = {CPDMA_DMASTATUS,	31, 1,	    ACCESS_RO},
	[CPDMA_STAT_TX_ERR_CODE]  = {CPDMA_DMASTATUS,	20, 0xf,    ACCESS_RW},
	[CPDMA_STAT_TX_ERR_CHAN]  = {CPDMA_DMASTATUS,	16, 0x7,    ACCESS_RW},
	[CPDMA_STAT_RX_ERR_CODE]  = {CPDMA_DMASTATUS,	12, 0xf,    ACCESS_RW},
	[CPDMA_STAT_RX_ERR_CHAN]  = {CPDMA_DMASTATUS,	8,  0x7,    ACCESS_RW},
	[CPDMA_RX_BUFFER_OFFSET]  = {CPDMA_RXBUFFOFS,	0,  0xffff, ACCESS_RW},
};

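/*
 * Illustrative use of the control table (assumed call site, not part
 * of this file): forcing fixed-priority tx arbitration read-modify-
 * writes bit 0 of CPDMA_DMACONTROL, per the entry above:
 *
 *	cpdma_control_set(dma, CPDMA_TX_PRIO_FIXED, 1);
 */
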
int cpdma_control_get(struct cpdma_ctlr *ctlr, int control)
{
	unsigned long flags;
	struct cpdma_control_info *info;
	int ret;

	spin_lock_irqsave(&ctlr->lock, flags);

	ret = -ENOTSUPP;
	if (!ctlr->params.has_ext_regs)
		goto unlock_ret;

	ret = -EINVAL;
	if (ctlr->state != CPDMA_STATE_ACTIVE)
		goto unlock_ret;

	ret = -ENOENT;
	if (control < 0 || control >= ARRAY_SIZE(controls))
		goto unlock_ret;

	/* look up the table entry only after the bounds check above */
	info = &controls[control];

	ret = -EPERM;
	if ((info->access & ACCESS_RO) != ACCESS_RO)
		goto unlock_ret;

	ret = (dma_reg_read(ctlr, info->reg) >> info->shift) & info->mask;

unlock_ret:
	spin_unlock_irqrestore(&ctlr->lock, flags);
	return ret;
}

int cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value)
{
	unsigned long flags;
	struct cpdma_control_info *info;
	int ret;
	u32 val;

	spin_lock_irqsave(&ctlr->lock, flags);

	ret = -ENOTSUPP;
	if (!ctlr->params.has_ext_regs)
		goto unlock_ret;

	ret = -EINVAL;
	if (ctlr->state != CPDMA_STATE_ACTIVE)
		goto unlock_ret;

	ret = -ENOENT;
	if (control < 0 || control >= ARRAY_SIZE(controls))
		goto unlock_ret;

	/* as in cpdma_control_get(), index only after the bounds check */
	info = &controls[control];

	ret = -EPERM;
	if ((info->access & ACCESS_WO) != ACCESS_WO)
		goto unlock_ret;

	val = dma_reg_read(ctlr, info->reg);
	val &= ~(info->mask << info->shift);
	val |= (value & info->mask) << info->shift;
	dma_reg_write(ctlr, info->reg, val);
	ret = 0;

unlock_ret:
	spin_unlock_irqrestore(&ctlr->lock, flags);
	return ret;
}
