Commit 9ea9021ac466f5ccc8b6238cbce37428bb58f887

Authored by Khoronzhuk, Ivan
Committed by Tom Rini
1 parent ef4547176d

dma: keystone_nav: generalize driver usage

The keystone_nav driver is a general driver intended to be used for
working with the queue manager and pktdma for different IPs like NETCP,
AIF, FFTC, etc. So its API shouldn't be named as if it works only with
one of them; the names should be generic. Names with a netcp_* prefix
are better suited to the drivers/net/keystone_net.c driver. So it's
good to generalize this driver to be used with different IPs and to
remove confusion with the real NETCP driver.

The current netcp_* functions of the keystone navigator can be used
for other pktdma configurations, not only for NETCP. The API of this
driver is used by the keystone_net driver to work with NETCP, so the
net driver should also be corrected. For convenience, the pktdma
configurations are collected in drivers/dma/keystone_nav_cfg.c.

Acked-by: Vitaly Andrianov <vitalya@ti.com>
Signed-off-by: Ivan Khoronzhuk <ivan.khoronzhuk@ti.com>

Showing 6 changed files with 98 additions and 89 deletions Side-by-side Diff

arch/arm/include/asm/ti-common/keystone_nav.h
... ... @@ -169,6 +169,8 @@
169 169 u32 rx_flow; /* flow that is used for RX */
170 170 };
171 171  
  172 +extern struct pktdma_cfg netcp_pktdma;
  173 +
172 174 /*
173 175 * packet dma user allocates memory for rx buffers
174 176 * and describe it in the following structure
... ... @@ -180,11 +182,11 @@
180 182 u32 rx_flow;
181 183 };
182 184  
183   -int netcp_close(void);
184   -int netcp_init(struct rx_buff_desc *rx_buffers);
185   -int netcp_send(u32 *pkt, int num_bytes, u32 swinfo2);
186   -void *netcp_recv(u32 **pkt, int *num_bytes);
187   -void netcp_release_rxhd(void *hd);
  185 +int ksnav_close(struct pktdma_cfg *pktdma);
  186 +int ksnav_init(struct pktdma_cfg *pktdma, struct rx_buff_desc *rx_buffers);
  187 +int ksnav_send(struct pktdma_cfg *pktdma, u32 *pkt, int num_bytes, u32 swinfo2);
  188 +void *ksnav_recv(struct pktdma_cfg *pktdma, u32 **pkt, int *num_bytes);
  189 +void ksnav_release_rxhd(struct pktdma_cfg *pktdma, void *hd);
188 190  
189 191 #endif /* _KEYSTONE_NAV_H_ */
drivers/dma/Makefile
... ... @@ -8,5 +8,5 @@
8 8 obj-$(CONFIG_FSLDMAFEC) += MCD_tasksInit.o MCD_dmaApi.o MCD_tasks.o
9 9 obj-$(CONFIG_APBH_DMA) += apbh_dma.o
10 10 obj-$(CONFIG_FSL_DMA) += fsl_dma.o
11   -obj-$(CONFIG_TI_KSNAV) += keystone_nav.o
  11 +obj-$(CONFIG_TI_KSNAV) += keystone_nav.o keystone_nav_cfg.o
drivers/dma/keystone_nav.c
... ... @@ -156,35 +156,20 @@
156 156 /**
157 157 * DMA API
158 158 */
159   -struct pktdma_cfg netcp_pktdma = {
160   - .global = (void *)CONFIG_KSNAV_NETCP_PDMA_CTRL_BASE,
161   - .tx_ch = (void *)CONFIG_KSNAV_NETCP_PDMA_TX_BASE,
162   - .tx_ch_num = CONFIG_KSNAV_NETCP_PDMA_TX_CH_NUM,
163   - .rx_ch = (void *)CONFIG_KSNAV_NETCP_PDMA_RX_BASE,
164   - .rx_ch_num = CONFIG_KSNAV_NETCP_PDMA_RX_CH_NUM,
165   - .tx_sched = (u32 *)CONFIG_KSNAV_NETCP_PDMA_SCHED_BASE,
166   - .rx_flows = (void *)CONFIG_KSNAV_NETCP_PDMA_RX_FLOW_BASE,
167   - .rx_flow_num = CONFIG_KSNAV_NETCP_PDMA_RX_FLOW_NUM,
168   - .rx_free_q = CONFIG_KSNAV_NETCP_PDMA_RX_FREE_QUEUE,
169   - .rx_rcv_q = CONFIG_KSNAV_NETCP_PDMA_RX_RCV_QUEUE,
170   - .tx_snd_q = CONFIG_KSNAV_NETCP_PDMA_TX_SND_QUEUE,
171   -};
172 159  
173   -struct pktdma_cfg *netcp;
174   -
175   -static int netcp_rx_disable(void)
  160 +static int ksnav_rx_disable(struct pktdma_cfg *pktdma)
176 161 {
177 162 u32 j, v, k;
178 163  
179   - for (j = 0; j < netcp->rx_ch_num; j++) {
180   - v = readl(&netcp->rx_ch[j].cfg_a);
  164 + for (j = 0; j < pktdma->rx_ch_num; j++) {
  165 + v = readl(&pktdma->rx_ch[j].cfg_a);
181 166 if (!(v & CPDMA_CHAN_A_ENABLE))
182 167 continue;
183 168  
184   - writel(v | CPDMA_CHAN_A_TDOWN, &netcp->rx_ch[j].cfg_a);
  169 + writel(v | CPDMA_CHAN_A_TDOWN, &pktdma->rx_ch[j].cfg_a);
185 170 for (k = 0; k < TDOWN_TIMEOUT_COUNT; k++) {
186 171 udelay(100);
187   - v = readl(&netcp->rx_ch[j].cfg_a);
  172 + v = readl(&pktdma->rx_ch[j].cfg_a);
188 173 if (!(v & CPDMA_CHAN_A_ENABLE))
189 174 continue;
190 175 }
191 176  
192 177  
193 178  
194 179  
... ... @@ -192,33 +177,33 @@
192 177 }
193 178  
194 179 /* Clear all of the flow registers */
195   - for (j = 0; j < netcp->rx_flow_num; j++) {
196   - writel(0, &netcp->rx_flows[j].control);
197   - writel(0, &netcp->rx_flows[j].tags);
198   - writel(0, &netcp->rx_flows[j].tag_sel);
199   - writel(0, &netcp->rx_flows[j].fdq_sel[0]);
200   - writel(0, &netcp->rx_flows[j].fdq_sel[1]);
201   - writel(0, &netcp->rx_flows[j].thresh[0]);
202   - writel(0, &netcp->rx_flows[j].thresh[1]);
203   - writel(0, &netcp->rx_flows[j].thresh[2]);
  180 + for (j = 0; j < pktdma->rx_flow_num; j++) {
  181 + writel(0, &pktdma->rx_flows[j].control);
  182 + writel(0, &pktdma->rx_flows[j].tags);
  183 + writel(0, &pktdma->rx_flows[j].tag_sel);
  184 + writel(0, &pktdma->rx_flows[j].fdq_sel[0]);
  185 + writel(0, &pktdma->rx_flows[j].fdq_sel[1]);
  186 + writel(0, &pktdma->rx_flows[j].thresh[0]);
  187 + writel(0, &pktdma->rx_flows[j].thresh[1]);
  188 + writel(0, &pktdma->rx_flows[j].thresh[2]);
204 189 }
205 190  
206 191 return QM_OK;
207 192 }
208 193  
209   -static int netcp_tx_disable(void)
  194 +static int ksnav_tx_disable(struct pktdma_cfg *pktdma)
210 195 {
211 196 u32 j, v, k;
212 197  
213   - for (j = 0; j < netcp->tx_ch_num; j++) {
214   - v = readl(&netcp->tx_ch[j].cfg_a);
  198 + for (j = 0; j < pktdma->tx_ch_num; j++) {
  199 + v = readl(&pktdma->tx_ch[j].cfg_a);
215 200 if (!(v & CPDMA_CHAN_A_ENABLE))
216 201 continue;
217 202  
218   - writel(v | CPDMA_CHAN_A_TDOWN, &netcp->tx_ch[j].cfg_a);
  203 + writel(v | CPDMA_CHAN_A_TDOWN, &pktdma->tx_ch[j].cfg_a);
219 204 for (k = 0; k < TDOWN_TIMEOUT_COUNT; k++) {
220 205 udelay(100);
221   - v = readl(&netcp->tx_ch[j].cfg_a);
  206 + v = readl(&pktdma->tx_ch[j].cfg_a);
222 207 if (!(v & CPDMA_CHAN_A_ENABLE))
223 208 continue;
224 209 }
225 210  
226 211  
... ... @@ -228,19 +213,17 @@
228 213 return QM_OK;
229 214 }
230 215  
231   -static int _netcp_init(struct pktdma_cfg *netcp_cfg,
232   - struct rx_buff_desc *rx_buffers)
  216 +int ksnav_init(struct pktdma_cfg *pktdma, struct rx_buff_desc *rx_buffers)
233 217 {
234 218 u32 j, v;
235 219 struct qm_host_desc *hd;
236 220 u8 *rx_ptr;
237 221  
238   - if (netcp_cfg == NULL || rx_buffers == NULL ||
  222 + if (pktdma == NULL || rx_buffers == NULL ||
239 223 rx_buffers->buff_ptr == NULL || qm_cfg == NULL)
240 224 return QM_ERR;
241 225  
242   - netcp = netcp_cfg;
243   - netcp->rx_flow = rx_buffers->rx_flow;
  226 + pktdma->rx_flow = rx_buffers->rx_flow;
244 227  
245 228 /* init rx queue */
246 229 rx_ptr = rx_buffers->buff_ptr;
247 230  
248 231  
249 232  
250 233  
251 234  
252 235  
253 236  
254 237  
255 238  
256 239  
257 240  
258 241  
259 242  
... ... @@ -250,69 +233,64 @@
250 233 if (hd == NULL)
251 234 return QM_ERR;
252 235  
253   - qm_buff_push(hd, netcp->rx_free_q,
  236 + qm_buff_push(hd, pktdma->rx_free_q,
254 237 rx_ptr, rx_buffers->buff_len);
255 238  
256 239 rx_ptr += rx_buffers->buff_len;
257 240 }
258 241  
259   - netcp_rx_disable();
  242 + ksnav_rx_disable(pktdma);
260 243  
261 244 /* configure rx channels */
262   - v = CPDMA_REG_VAL_MAKE_RX_FLOW_A(1, 1, 0, 0, 0, 0, 0, netcp->rx_rcv_q);
263   - writel(v, &netcp->rx_flows[netcp->rx_flow].control);
264   - writel(0, &netcp->rx_flows[netcp->rx_flow].tags);
265   - writel(0, &netcp->rx_flows[netcp->rx_flow].tag_sel);
  245 + v = CPDMA_REG_VAL_MAKE_RX_FLOW_A(1, 1, 0, 0, 0, 0, 0, pktdma->rx_rcv_q);
  246 + writel(v, &pktdma->rx_flows[pktdma->rx_flow].control);
  247 + writel(0, &pktdma->rx_flows[pktdma->rx_flow].tags);
  248 + writel(0, &pktdma->rx_flows[pktdma->rx_flow].tag_sel);
266 249  
267   - v = CPDMA_REG_VAL_MAKE_RX_FLOW_D(0, netcp->rx_free_q, 0,
268   - netcp->rx_free_q);
  250 + v = CPDMA_REG_VAL_MAKE_RX_FLOW_D(0, pktdma->rx_free_q, 0,
  251 + pktdma->rx_free_q);
269 252  
270   - writel(v, &netcp->rx_flows[netcp->rx_flow].fdq_sel[0]);
271   - writel(v, &netcp->rx_flows[netcp->rx_flow].fdq_sel[1]);
272   - writel(0, &netcp->rx_flows[netcp->rx_flow].thresh[0]);
273   - writel(0, &netcp->rx_flows[netcp->rx_flow].thresh[1]);
274   - writel(0, &netcp->rx_flows[netcp->rx_flow].thresh[2]);
  253 + writel(v, &pktdma->rx_flows[pktdma->rx_flow].fdq_sel[0]);
  254 + writel(v, &pktdma->rx_flows[pktdma->rx_flow].fdq_sel[1]);
  255 + writel(0, &pktdma->rx_flows[pktdma->rx_flow].thresh[0]);
  256 + writel(0, &pktdma->rx_flows[pktdma->rx_flow].thresh[1]);
  257 + writel(0, &pktdma->rx_flows[pktdma->rx_flow].thresh[2]);
275 258  
276   - for (j = 0; j < netcp->rx_ch_num; j++)
277   - writel(CPDMA_CHAN_A_ENABLE, &netcp->rx_ch[j].cfg_a);
  259 + for (j = 0; j < pktdma->rx_ch_num; j++)
  260 + writel(CPDMA_CHAN_A_ENABLE, &pktdma->rx_ch[j].cfg_a);
278 261  
279 262 /* configure tx channels */
280 263 /* Disable loopback in the tx direction */
281   - writel(0, &netcp->global->emulation_control);
  264 + writel(0, &pktdma->global->emulation_control);
282 265  
283 266 /* Set QM base address, only for K2x devices */
284   - writel(CONFIG_KSNAV_QM_BASE_ADDRESS, &netcp->global->qm_base_addr[0]);
  267 + writel(CONFIG_KSNAV_QM_BASE_ADDRESS, &pktdma->global->qm_base_addr[0]);
285 268  
286 269 /* Enable all channels. The current state isn't important */
287   - for (j = 0; j < netcp->tx_ch_num; j++) {
288   - writel(0, &netcp->tx_ch[j].cfg_b);
289   - writel(CPDMA_CHAN_A_ENABLE, &netcp->tx_ch[j].cfg_a);
  270 + for (j = 0; j < pktdma->tx_ch_num; j++) {
  271 + writel(0, &pktdma->tx_ch[j].cfg_b);
  272 + writel(CPDMA_CHAN_A_ENABLE, &pktdma->tx_ch[j].cfg_a);
290 273 }
291 274  
292 275 return QM_OK;
293 276 }
294 277  
295   -int netcp_init(struct rx_buff_desc *rx_buffers)
  278 +int ksnav_close(struct pktdma_cfg *pktdma)
296 279 {
297   - return _netcp_init(&netcp_pktdma, rx_buffers);
298   -}
299   -
300   -int netcp_close(void)
301   -{
302   - if (!netcp)
  280 + if (!pktdma)
303 281 return QM_ERR;
304 282  
305   - netcp_tx_disable();
306   - netcp_rx_disable();
  283 + ksnav_tx_disable(pktdma);
  284 + ksnav_rx_disable(pktdma);
307 285  
308   - queue_close(netcp->rx_free_q);
309   - queue_close(netcp->rx_rcv_q);
310   - queue_close(netcp->tx_snd_q);
  286 + queue_close(pktdma->rx_free_q);
  287 + queue_close(pktdma->rx_rcv_q);
  288 + queue_close(pktdma->tx_snd_q);
311 289  
312 290 return QM_OK;
313 291 }
314 292  
315   -int netcp_send(u32 *pkt, int num_bytes, u32 swinfo2)
  293 +int ksnav_send(struct pktdma_cfg *pktdma, u32 *pkt, int num_bytes, u32 swinfo2)
316 294 {
317 295 struct qm_host_desc *hd;
318 296  
319 297  
320 298  
... ... @@ -324,16 +302,16 @@
324 302 hd->swinfo[2] = swinfo2;
325 303 hd->packet_info = qm_cfg->qpool_num;
326 304  
327   - qm_buff_push(hd, netcp->tx_snd_q, pkt, num_bytes);
  305 + qm_buff_push(hd, pktdma->tx_snd_q, pkt, num_bytes);
328 306  
329 307 return QM_OK;
330 308 }
331 309  
332   -void *netcp_recv(u32 **pkt, int *num_bytes)
  310 +void *ksnav_recv(struct pktdma_cfg *pktdma, u32 **pkt, int *num_bytes)
333 311 {
334 312 struct qm_host_desc *hd;
335 313  
336   - hd = qm_pop(netcp->rx_rcv_q);
  314 + hd = qm_pop(pktdma->rx_rcv_q);
337 315 if (!hd)
338 316 return NULL;
339 317  
340 318  
... ... @@ -343,13 +321,13 @@
343 321 return hd;
344 322 }
345 323  
346   -void netcp_release_rxhd(void *hd)
  324 +void ksnav_release_rxhd(struct pktdma_cfg *pktdma, void *hd)
347 325 {
348 326 struct qm_host_desc *_hd = (struct qm_host_desc *)hd;
349 327  
350 328 _hd->buff_len = _hd->orig_buff_len;
351 329 _hd->buff_ptr = _hd->orig_buff_ptr;
352 330  
353   - qm_push(_hd, netcp->rx_free_q);
  331 + qm_push(_hd, pktdma->rx_free_q);
354 332 }
drivers/dma/keystone_nav_cfg.c
  1 +/*
  2 + * Multicore Navigator driver for TI Keystone 2 devices.
  3 + *
  4 + * (C) Copyright 2012-2014
  5 + * Texas Instruments Incorporated, <www.ti.com>
  6 + *
  7 + * SPDX-License-Identifier: GPL-2.0+
  8 + */
  9 +
  10 +#include <asm/ti-common/keystone_nav.h>
  11 +
  12 +#ifdef CONFIG_KSNAV_PKTDMA_NETCP
  13 +/* NETCP Pktdma */
  14 +struct pktdma_cfg netcp_pktdma = {
  15 + .global = (void *)CONFIG_KSNAV_NETCP_PDMA_CTRL_BASE,
  16 + .tx_ch = (void *)CONFIG_KSNAV_NETCP_PDMA_TX_BASE,
  17 + .tx_ch_num = CONFIG_KSNAV_NETCP_PDMA_TX_CH_NUM,
  18 + .rx_ch = (void *)CONFIG_KSNAV_NETCP_PDMA_RX_BASE,
  19 + .rx_ch_num = CONFIG_KSNAV_NETCP_PDMA_RX_CH_NUM,
  20 + .tx_sched = (u32 *)CONFIG_KSNAV_NETCP_PDMA_SCHED_BASE,
  21 + .rx_flows = (void *)CONFIG_KSNAV_NETCP_PDMA_RX_FLOW_BASE,
  22 + .rx_flow_num = CONFIG_KSNAV_NETCP_PDMA_RX_FLOW_NUM,
  23 + .rx_free_q = CONFIG_KSNAV_NETCP_PDMA_RX_FREE_QUEUE,
  24 + .rx_rcv_q = CONFIG_KSNAV_NETCP_PDMA_RX_RCV_QUEUE,
  25 + .tx_snd_q = CONFIG_KSNAV_NETCP_PDMA_TX_SND_QUEUE,
  26 +};
  27 +#endif
drivers/net/keystone_net.c
... ... @@ -393,7 +393,8 @@
393 393 if (num_bytes < EMAC_MIN_ETHERNET_PKT_SIZE)
394 394 num_bytes = EMAC_MIN_ETHERNET_PKT_SIZE;
395 395  
396   - return netcp_send(buffer, num_bytes, (slave_port_num) << 16);
  396 + return ksnav_send(&netcp_pktdma, buffer,
  397 + num_bytes, (slave_port_num) << 16);
397 398 }
398 399  
399 400 /* Eth device open */
... ... @@ -431,7 +432,7 @@
431 432 printf("ERROR: qm_init()\n");
432 433 return -1;
433 434 }
434   - if (netcp_init(&net_rx_buffs)) {
  435 + if (ksnav_init(&netcp_pktdma, &net_rx_buffs)) {
435 436 qm_close();
436 437 printf("ERROR: netcp_init()\n");
437 438 return -1;
... ... @@ -456,7 +457,7 @@
456 457  
457 458 link = keystone_get_link_status(dev);
458 459 if (link == 0) {
459   - netcp_close();
  460 + ksnav_close(&netcp_pktdma);
460 461 qm_close();
461 462 return -1;
462 463 }
... ... @@ -483,7 +484,7 @@
483 484  
484 485 ethss_stop();
485 486  
486   - netcp_close();
  487 + ksnav_close(&netcp_pktdma);
487 488 qm_close();
488 489  
489 490 emac_open = 0;
490 491  
... ... @@ -530,13 +531,13 @@
530 531 int pkt_size;
531 532 u32 *pkt;
532 533  
533   - hd = netcp_recv(&pkt, &pkt_size);
  534 + hd = ksnav_recv(&netcp_pktdma, &pkt, &pkt_size);
534 535 if (hd == NULL)
535 536 return 0;
536 537  
537 538 NetReceive((uchar *)pkt, pkt_size);
538 539  
539   - netcp_release_rxhd(hd);
  540 + ksnav_release_rxhd(&netcp_pktdma, hd);
540 541  
541 542 return pkt_size;
542 543 }
include/configs/k2hk_evm.h
... ... @@ -37,6 +37,7 @@
37 37 /* Network */
38 38 #define CONFIG_DRIVER_TI_KEYSTONE_NET
39 39 #define CONFIG_TI_KSNAV
  40 +#define CONFIG_KSNAV_PKTDMA_NETCP
40 41  
41 42 #endif /* __CONFIG_K2HK_EVM_H */