Commit b4841c348b91ca5faea72c91c48491fae64cafae

Authored by Dan Murphy

Merge branch 'connectivity-ti-linux-3.12.y' of git://git.ti.com/connectivity-integration-tree/connectivity-ti-linux-kernel into ti-linux-3.12.y

TI-Feature: connectivity
TI-Tree: git://git.ti.com/connectivity-integration-tree/connectivity-ti-linux-kernel.git
TI-Branch: connectivity-ti-linux-3.12.y

* 'connectivity-ti-linux-3.12.y' of git://git.ti.com/connectivity-integration-tree/connectivity-ti-linux-kernel:
  input: touchscreen: pixcir: also trigger IRQ on rising edge
  drivers: net: cpsw: convert tx completion to NAPI
  Revert "usb: musb: musb_cppi41: Revert the Advisory 1.0.13 workaround"

Signed-off-by: Dan Murphy <DMurphy@ti.com>

Showing 3 changed files

drivers/input/touchscreen/pixcir_i2c_ts.c
... ... @@ -480,7 +480,8 @@
480 480 }
481 481  
482 482 error = devm_request_threaded_irq(dev, client->irq, NULL, pixcir_ts_isr,
483   - IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
  483 + IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING
  484 + | IRQF_ONESHOT,
484 485 client->name, tsdata);
485 486 if (error) {
486 487 dev_err(dev, "failed to request irq %d\n", client->irq);
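
The pixcir change above widens the trigger mask so the threaded handler fires on both edges of the attention line, not just the falling one; with IRQF_ONESHOT the line stays masked until the thread finishes, so the extra edge presumably keeps a release (rising edge) from being missed, per the commit subject. A minimal sketch of the same request pattern, assuming a hypothetical handler and context (my_isr and my_data are illustrative, not the driver's):

    /* Hedged sketch: threaded IRQ requested on both edges, as above. */
    static irqreturn_t my_isr(int irq, void *dev_id)
    {
            struct my_ts_data *my_data = dev_id;

            /* read and report touch points, then return to unmask the line */
            return IRQ_HANDLED;
    }

    error = devm_request_threaded_irq(dev, client->irq, NULL, my_isr,
                                      IRQF_TRIGGER_RISING |
                                      IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
                                      client->name, my_data);
    if (error)
            dev_err(dev, "failed to request irq %d\n", client->irq);
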
drivers/net/ethernet/ti/cpsw.c
... ... @@ -368,6 +368,9 @@
368 368 struct platform_device *pdev;
369 369 struct net_device *ndev;
370 370 struct napi_struct napi;
  371 + struct napi_struct napi_tx;
  372 + bool irq_enabled;
  373 + bool irq_tx_enabled;
371 374 struct device *dev;
372 375 struct cpsw_platform_data data;
373 376 struct cpsw_ss_regs __iomem *regs;
... ... @@ -390,7 +393,6 @@
390 393 /* snapshot of IRQ numbers */
391 394 u32 irqs_table[4];
392 395 u32 num_irqs;
393   - bool irq_enabled;
394 396 struct cpts *cpts;
395 397 u32 emac_port;
396 398 };
... ... @@ -484,6 +486,7 @@
484 486 #define CPSW_STATS_LEN ARRAY_SIZE(cpsw_gstrings_stats)
485 487  
486 488 #define napi_to_priv(napi) container_of(napi, struct cpsw_priv, napi)
  489 +#define napi_tx_to_priv(napi) container_of(napi, struct cpsw_priv, napi_tx)
487 490 #define for_each_slave(priv, func, arg...) \
488 491 do { \
489 492 struct cpsw_slave *slave; \
... ... @@ -713,9 +716,11 @@
713 716 {
714 717 struct cpsw_priv *priv = dev_id;
715 718  
716   - cpsw_intr_disable(priv);
717   - if (priv->irq_enabled == true) {
718   - cpsw_disable_irq(priv);
  719 + __raw_writel(0, &priv->wr_regs->rx_en);
  720 + if (priv->irq_enabled) {
  721 + disable_irq_nosync(priv->irqs_table[0]);
  722 + disable_irq_nosync(priv->irqs_table[1]);
  723 + disable_irq_nosync(priv->irqs_table[3]);
719 724 priv->irq_enabled = false;
720 725 }
721 726  
... ... @@ -735,36 +740,84 @@
735 740 return IRQ_NONE;
736 741 }
737 742  
  743 +static irqreturn_t cpsw_tx_interrupt(int irq, void *dev_id)
  744 +{
  745 + struct cpsw_priv *priv = dev_id;
  746 +
  747 + __raw_writel(0, &priv->wr_regs->tx_en);
  748 + if (priv->irq_tx_enabled) {
  749 + disable_irq_nosync(priv->irqs_table[2]);
  750 + priv->irq_tx_enabled = false;
  751 + }
  752 +
  753 + if (netif_running(priv->ndev)) {
  754 + napi_schedule(&priv->napi_tx);
  755 + return IRQ_HANDLED;
  756 + }
  757 +
  758 + priv = cpsw_get_slave_priv(priv, 1);
  759 + if (!priv)
  760 + return IRQ_NONE;
  761 +
  762 + if (netif_running(priv->ndev)) {
  763 + napi_schedule(&priv->napi_tx);
  764 + return IRQ_HANDLED;
  765 + }
  766 + return IRQ_NONE;
  767 +}
  768 +
738 769 static int cpsw_poll(struct napi_struct *napi, int budget)
739 770 {
740 771 struct cpsw_priv *priv = napi_to_priv(napi);
741   - int num_tx, num_rx;
  772 + int num_rx;
742 773  
743   - num_tx = cpdma_chan_process(priv->txch, 128);
744   - if (num_tx)
745   - cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
746   -
747 774 num_rx = cpdma_chan_process(priv->rxch, budget);
748 775 if (num_rx < budget) {
749 776 struct cpsw_priv *prim_cpsw;
750 777  
751 778 napi_complete(napi);
752   - cpsw_intr_enable(priv);
  779 + __raw_writel(0xFF, &priv->wr_regs->rx_en);
753 780 cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
754 781 prim_cpsw = cpsw_get_slave_priv(priv, 0);
755   - if (prim_cpsw->irq_enabled == false) {
  782 + if (!prim_cpsw->irq_enabled) {
756 783 prim_cpsw->irq_enabled = true;
757   - cpsw_enable_irq(priv);
  784 + enable_irq(priv->irqs_table[0]);
  785 + enable_irq(priv->irqs_table[1]);
  786 + enable_irq(priv->irqs_table[3]);
758 787 }
759 788 }
760 789  
761   - if (num_rx || num_tx)
762   - cpsw_dbg(priv, intr, "poll %d rx, %d tx pkts\n",
763   - num_rx, num_tx);
  790 + if (num_rx)
  791 + cpsw_dbg(priv, intr, "poll %d rx pkts\n", num_rx);
764 792  
765 793 return num_rx;
766 794 }
767 795  
  796 +static int cpsw_tx_poll(struct napi_struct *napi, int budget)
  797 +{
  798 + struct cpsw_priv *priv = napi_tx_to_priv(napi);
  799 + int num_tx;
  800 +
  801 + num_tx = cpdma_chan_process(priv->txch, budget);
  802 + if (num_tx < budget) {
  803 + struct cpsw_priv *prim_cpsw;
  804 +
  805 + napi_complete(napi);
  806 + __raw_writel(0xFF, &priv->wr_regs->tx_en);
  807 + cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
  808 + prim_cpsw = cpsw_get_slave_priv(priv, 0);
  809 + if (!prim_cpsw->irq_tx_enabled) {
  810 + prim_cpsw->irq_tx_enabled = true;
  811 + enable_irq(priv->irqs_table[2]);
  812 + }
  813 + }
  814 +
  815 + if (num_tx)
  816 + cpsw_dbg(priv, intr, "poll %d tx pkts\n", num_tx);
  817 +
  818 + return num_tx;
  819 +}
  820 +
768 821 static inline void soft_reset(const char *module, void __iomem *reg)
769 822 {
770 823 unsigned long timeout = jiffies + HZ;
... ... @@ -1245,15 +1298,17 @@
1245 1298 }
1246 1299  
1247 1300 napi_enable(&priv->napi);
  1301 + napi_enable(&priv->napi_tx);
1248 1302 cpdma_ctlr_start(priv->dma);
1249 1303 cpsw_intr_enable(priv);
1250 1304 cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
1251 1305 cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
1252 1306  
1253 1307 prim_cpsw = cpsw_get_slave_priv(priv, 0);
1254   - if (prim_cpsw->irq_enabled == false) {
  1308 + if (!prim_cpsw->irq_enabled && !prim_cpsw->irq_tx_enabled) {
1255 1309 if ((priv == prim_cpsw) || !netif_running(prim_cpsw->ndev)) {
1256 1310 prim_cpsw->irq_enabled = true;
  1311 + prim_cpsw->irq_tx_enabled = true;
1257 1312 cpsw_enable_irq(prim_cpsw);
1258 1313 }
1259 1314 }
... ... @@ -1277,6 +1332,7 @@
1277 1332 cpsw_info(priv, ifdown, "shutting down cpsw device\n");
1278 1333 netif_stop_queue(priv->ndev);
1279 1334 napi_disable(&priv->napi);
  1335 + napi_disable(&priv->napi_tx);
1280 1336 netif_carrier_off(priv->ndev);
1281 1337  
1282 1338 if (cpsw_common_res_usage_state(priv) <= 1) {
... ... @@ -1948,6 +2004,8 @@
1948 2004 ndev->netdev_ops = &cpsw_netdev_ops;
1949 2005 SET_ETHTOOL_OPS(ndev, &cpsw_ethtool_ops);
1950 2006 netif_napi_add(ndev, &priv_sl2->napi, cpsw_poll, CPSW_POLL_WEIGHT);
  2007 + netif_napi_add(ndev, &priv_sl2->napi_tx, cpsw_tx_poll,
  2008 + CPSW_POLL_WEIGHT);
1951 2009  
1952 2010 /* register the network device */
1953 2011 SET_NETDEV_DEV(ndev, &pdev->dev);
... ... @@ -1971,7 +2029,7 @@
1971 2029 void __iomem *ss_regs;
1972 2030 struct resource *res, *ss_res;
1973 2031 u32 slave_offset, sliver_offset, slave_size;
1974   - int ret = 0, i, k = 0;
  2032 + int ret = 0, i, j = 0, k = 0;
1975 2033  
1976 2034 ndev = alloc_etherdev(sizeof(struct cpsw_priv));
1977 2035 if (!ndev) {
... ... @@ -1989,6 +2047,7 @@
1989 2047 priv->rx_packet_max = max(rx_packet_max, 128);
1990 2048 priv->cpts = devm_kzalloc(&pdev->dev, sizeof(struct cpts), GFP_KERNEL);
1991 2049 priv->irq_enabled = true;
  2050 + priv->irq_tx_enabled = true;
1992 2051 if (!priv->cpts) {
1993 2052 pr_err("error allocating cpts\n");
1994 2053 goto clean_ndev_ret;
... ... @@ -2164,6 +2223,7 @@
2164 2223 ndev->netdev_ops = &cpsw_netdev_ops;
2165 2224 SET_ETHTOOL_OPS(ndev, &cpsw_ethtool_ops);
2166 2225 netif_napi_add(ndev, &priv->napi, cpsw_poll, CPSW_POLL_WEIGHT);
  2226 + netif_napi_add(ndev, &priv->napi_tx, cpsw_tx_poll, CPSW_POLL_WEIGHT);
2167 2227  
2168 2228 /* register the network device */
2169 2229 SET_NETDEV_DEV(ndev, &pdev->dev);
... ... @@ -2175,9 +2235,16 @@
2175 2235 }
2176 2236  
2177 2237 while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k))) {
2178   - for (i = res->start; i <= res->end; i++) {
2179   - if (devm_request_irq(&pdev->dev, i, cpsw_interrupt, 0,
2180   - dev_name(priv->dev), priv)) {
  2238 + for (i = res->start; i <= res->end; i++, j++) {
  2239 + if (j == 2)
  2240 + ret = devm_request_irq(&pdev->dev, i,
  2241 + cpsw_tx_interrupt, 0,
  2242 + "eth-tx", priv);
  2243 + else
  2244 + ret = devm_request_irq(&pdev->dev, i,
  2245 + cpsw_interrupt, 0,
  2246 + dev_name(priv->dev), priv);
  2247 + if (ret) {
2181 2248 dev_err(priv->dev, "error attaching irq\n");
2182 2249 goto clean_ale_ret;
2183 2250 }
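
The cpsw hunks above split TX completion out of the shared RX poller into its own NAPI context: a second napi_struct is registered per net_device, the TX interrupt handler masks its IRQ and schedules it, and the poll callback re-enables the interrupt only once it completes under budget. A hedged sketch of that pattern with illustrative names (my_priv and the my_* helpers are not the driver's):

    static int my_tx_poll(struct napi_struct *napi, int budget)
    {
            struct my_priv *priv = container_of(napi, struct my_priv, napi_tx);
            int done = my_process_tx_completions(priv, budget);

            if (done < budget) {
                    /* TX ring drained: stop polling, unmask the TX interrupt */
                    napi_complete(napi);
                    my_unmask_tx_irq(priv);
            }
            return done;
    }

    static irqreturn_t my_tx_interrupt(int irq, void *dev_id)
    {
            struct my_priv *priv = dev_id;

            /* mask further TX interrupts and defer completion work to NAPI */
            my_mask_tx_irq(priv);
            napi_schedule(&priv->napi_tx);
            return IRQ_HANDLED;
    }

As in the hunks above, the instance is registered once with netif_napi_add(ndev, &priv->napi_tx, my_tx_poll, weight) and enabled/disabled alongside the RX instance in the open and stop paths.
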
drivers/usb/musb/musb_cppi41.c
... ... @@ -31,6 +31,7 @@
31 31 u8 port_num;
32 32 u8 is_tx;
33 33 u8 is_allocated;
  34 + u8 usb_toggle;
34 35  
35 36 dma_addr_t buf_addr;
36 37 u32 total_len;
... ... @@ -55,6 +56,50 @@
55 56 u32 auto_req;
56 57 };
57 58  
  59 +static void save_rx_toggle(struct cppi41_dma_channel *cppi41_channel)
  60 +{
  61 + u16 csr;
  62 + u8 toggle;
  63 +
  64 + if (cppi41_channel->is_tx)
  65 + return;
  66 + if (!is_host_active(cppi41_channel->controller->musb))
  67 + return;
  68 +
  69 + csr = musb_readw(cppi41_channel->hw_ep->regs, MUSB_RXCSR);
  70 + toggle = csr & MUSB_RXCSR_H_DATATOGGLE ? 1 : 0;
  71 +
  72 + cppi41_channel->usb_toggle = toggle;
  73 +}
  74 +
  75 +static void update_rx_toggle(struct cppi41_dma_channel *cppi41_channel)
  76 +{
  77 + u16 csr;
  78 + u8 toggle;
  79 +
  80 + if (cppi41_channel->is_tx)
  81 + return;
  82 + if (!is_host_active(cppi41_channel->controller->musb))
  83 + return;
  84 +
  85 + csr = musb_readw(cppi41_channel->hw_ep->regs, MUSB_RXCSR);
  86 + toggle = csr & MUSB_RXCSR_H_DATATOGGLE ? 1 : 0;
  87 +
  88 + /*
  89 + * AM335x Advisory 1.0.13: Due to internal synchronisation error the
  90 + * data toggle may reset from DATA1 to DATA0 during receiving data from
  91 + * more than one endpoint.
  92 + */
  93 + if (!toggle && toggle == cppi41_channel->usb_toggle) {
  94 + csr |= MUSB_RXCSR_H_DATATOGGLE | MUSB_RXCSR_H_WR_DATATOGGLE;
  95 + musb_writew(cppi41_channel->hw_ep->regs, MUSB_RXCSR, csr);
  96 + dev_dbg(cppi41_channel->controller->musb->controller,
  97 + "Restoring DATA1 toggle.\n");
  98 + }
  99 +
  100 + cppi41_channel->usb_toggle = toggle;
  101 +}
  102 +
58 103 static bool musb_is_tx_fifo_empty(struct musb_hw_ep *hw_ep)
59 104 {
60 105 u8 epnum = hw_ep->epnum;
... ... @@ -217,6 +262,8 @@
217 262 hw_ep->epnum, cppi41_channel->transferred,
218 263 cppi41_channel->total_len);
219 264  
  265 + update_rx_toggle(cppi41_channel);
  266 +
220 267 if (cppi41_channel->transferred == cppi41_channel->total_len ||
221 268 transferred < cppi41_channel->packet_sz)
222 269 cppi41_channel->prog_len = 0;
... ... @@ -347,6 +394,7 @@
347 394 struct dma_async_tx_descriptor *dma_desc;
348 395 enum dma_transfer_direction direction;
349 396 struct musb *musb = cppi41_channel->controller->musb;
  397 + unsigned use_gen_rndis = 0;
350 398  
351 399 dev_dbg(musb->controller,
352 400 "configure ep%d/%x packet_sz=%d, mode=%d, dma_addr=0x%llx, len=%d is_tx=%d\n",
... ... @@ -359,26 +407,39 @@
359 407 cppi41_channel->transferred = 0;
360 408 cppi41_channel->packet_sz = packet_sz;
361 409  
362   - /* RNDIS mode */
363   - if (len > packet_sz) {
364   - musb_writel(musb->ctrl_base,
365   - RNDIS_REG(cppi41_channel->port_num), len);
366   - /* gen rndis */
367   - cppi41_set_dma_mode(cppi41_channel,
368   - EP_MODE_DMA_GEN_RNDIS);
  410 + /*
  411 + * Due to AM335x' Advisory 1.0.13 we are not allowed to transfer more
  412 + * than max packet size at a time.
  413 + */
  414 + if (cppi41_channel->is_tx)
  415 + use_gen_rndis = 1;
369 416  
370   - /* auto req */
371   - cppi41_set_autoreq_mode(cppi41_channel,
  417 + if (use_gen_rndis) {
  418 + /* RNDIS mode */
  419 + if (len > packet_sz) {
  420 + musb_writel(musb->ctrl_base,
  421 + RNDIS_REG(cppi41_channel->port_num), len);
  422 + /* gen rndis */
  423 + cppi41_set_dma_mode(cppi41_channel,
  424 + EP_MODE_DMA_GEN_RNDIS);
  425 +
  426 + /* auto req */
  427 + cppi41_set_autoreq_mode(cppi41_channel,
372 428 EP_MODE_AUTOREG_ALL_NEOP);
373   - } else {
374   - musb_writel(musb->ctrl_base,
375   - RNDIS_REG(cppi41_channel->port_num), 0);
376   - cppi41_set_dma_mode(cppi41_channel,
377   - EP_MODE_DMA_TRANSPARENT);
378   - cppi41_set_autoreq_mode(cppi41_channel,
  429 + } else {
  430 + musb_writel(musb->ctrl_base,
  431 + RNDIS_REG(cppi41_channel->port_num), 0);
  432 + cppi41_set_dma_mode(cppi41_channel,
  433 + EP_MODE_DMA_TRANSPARENT);
  434 + cppi41_set_autoreq_mode(cppi41_channel,
379 435 EP_MODE_AUTOREG_NONE);
  436 + }
  437 + } else {
  438 + /* fallback mode */
  439 + cppi41_set_dma_mode(cppi41_channel, EP_MODE_DMA_TRANSPARENT);
  440 + cppi41_set_autoreq_mode(cppi41_channel, EP_MODE_AUTOREG_NONE);
  441 + len = min_t(u32, packet_sz, len);
380 442 }
381   -
382 443 cppi41_channel->prog_len = len;
383 444 direction = cppi41_channel->is_tx ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
384 445 dma_desc = dmaengine_prep_slave_single(dc, dma_addr, len, direction,
... ... @@ -390,6 +451,7 @@
390 451 dma_desc->callback_param = channel;
391 452 cppi41_channel->cookie = dma_desc->tx_submit(dma_desc);
392 453  
  454 + save_rx_toggle(cppi41_channel);
393 455 dma_async_issue_pending(dc);
394 456 return true;
395 457 }
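
The musb portion reinstates the Advisory 1.0.13 workaround: save_rx_toggle() records the RX data toggle before a transfer is issued and update_rx_toggle() compares it afterwards. Since every successfully received packet must flip the toggle, observing DATA0 both before and after a completed RX can only mean the erratum reset it, so DATA1 is written back; capping RX DMA programs at one packet (the min_t() in the fallback path) is what keeps that per-packet check possible. A hedged restatement of the detection logic (toggle_was_reset is illustrative, not in the driver):

    /*
     * A completed RX must flip the data toggle; DATA0 -> DATA0 is only
     * possible if Advisory 1.0.13 spuriously reset it from DATA1.
     */
    static bool toggle_was_reset(u8 before, u8 after)
    {
            return before == 0 && after == 0;
    }
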