Commit 048177ce3b3962852fd34a7e04938959271c7e70

Authored by Matt Porter
Committed by Vinod Koul
1 parent b5daabbd3d

spi: spi-davinci: convert to DMA engine API

Removes use of the DaVinci EDMA private DMA API and replaces
it with use of the DMA engine API.

Signed-off-by: Matt Porter <mporter@ti.com>
Tested-by: Tom Rini <trini@ti.com>
Tested-by: Sekhar Nori <nsekhar@ti.com>
Signed-off-by: Vinod Koul <vinod.koul@linux.intel.com>

Showing 1 changed file with 130 additions and 162 deletions
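
For context on the diff that follows: the conversion drops hand-programmed EDMA PaRAM slots in favour of the generic slave-DMA sequence, i.e. configure the channel with dmaengine_slave_config(), build a descriptor with dmaengine_prep_slave_sg(), attach a completion callback, then dmaengine_submit() and dma_async_issue_pending(). The sketch below shows that sequence for the receive direction only; it is not part of the patch, the helper and callback names are illustrative, and the channel, FIFO address and DMA-mapped buffer are assumed to have been set up already.

#include <linux/completion.h>
#include <linux/dmaengine.h>
#include <linux/scatterlist.h>

/* Illustrative callback: signal the completion handed in as callback_param. */
static void example_rx_callback(void *data)
{
        complete(data);
}

/*
 * Illustrative helper (not from the patch): receive 'len' bytes from a slave
 * FIFO at 'fifo_addr' into a buffer already mapped to 'buf_dma'. The channel
 * is assumed to have been obtained earlier with dma_request_channel().
 */
static int example_slave_rx(struct dma_chan *chan, dma_addr_t fifo_addr,
                            dma_addr_t buf_dma, size_t len,
                            struct completion *done)
{
        struct dma_slave_config cfg = {
                .direction      = DMA_DEV_TO_MEM,
                .src_addr       = fifo_addr,
                .src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
                .src_maxburst   = 1,
        };
        struct dma_async_tx_descriptor *desc;
        struct scatterlist sg;

        dmaengine_slave_config(chan, &cfg);

        /* One contiguous buffer becomes a single-entry scatterlist. */
        sg_init_table(&sg, 1);
        sg_dma_address(&sg) = buf_dma;
        sg_dma_len(&sg) = len;

        desc = dmaengine_prep_slave_sg(chan, &sg, 1, DMA_DEV_TO_MEM,
                                       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!desc)
                return -EIO;

        desc->callback = example_rx_callback;
        desc->callback_param = done;

        dmaengine_submit(desc);
        dma_async_issue_pending(chan);

        return 0;
}

The caller would then block on the completion; in the driver below, dspi->done is signalled by whichever of the RX and TX callbacks finds both rcount and wcount already at zero.
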

drivers/spi/spi-davinci.c
... ... @@ -25,13 +25,14 @@
25 25 #include <linux/platform_device.h>
26 26 #include <linux/err.h>
27 27 #include <linux/clk.h>
  28 +#include <linux/dmaengine.h>
28 29 #include <linux/dma-mapping.h>
  30 +#include <linux/edma.h>
29 31 #include <linux/spi/spi.h>
30 32 #include <linux/spi/spi_bitbang.h>
31 33 #include <linux/slab.h>
32 34  
33 35 #include <mach/spi.h>
34   -#include <mach/edma.h>
35 36  
36 37 #define SPI_NO_RESOURCE ((resource_size_t)-1)
37 38  
... ... @@ -113,14 +114,6 @@
113 114 #define SPIDEF 0x4c
114 115 #define SPIFMT0 0x50
115 116  
116   -/* We have 2 DMA channels per CS, one for RX and one for TX */
117   -struct davinci_spi_dma {
118   - int tx_channel;
119   - int rx_channel;
120   - int dummy_param_slot;
121   - enum dma_event_q eventq;
122   -};
123   -
124 117 /* SPI Controller driver's private data. */
125 118 struct davinci_spi {
126 119 struct spi_bitbang bitbang;
127 120  
... ... @@ -134,11 +127,14 @@
134 127  
135 128 const void *tx;
136 129 void *rx;
137   -#define SPI_TMP_BUFSZ (SMP_CACHE_BYTES + 1)
138   - u8 rx_tmp_buf[SPI_TMP_BUFSZ];
139 130 int rcount;
140 131 int wcount;
141   - struct davinci_spi_dma dma;
  132 +
  133 + struct dma_chan *dma_rx;
  134 + struct dma_chan *dma_tx;
  135 + int dma_rx_chnum;
  136 + int dma_tx_chnum;
  137 +
142 138 struct davinci_spi_platform_data *pdata;
143 139  
144 140 void (*get_rx)(u32 rx_data, struct davinci_spi *);
... ... @@ -496,21 +492,23 @@
496 492 return errors;
497 493 }
498 494  
499   -static void davinci_spi_dma_callback(unsigned lch, u16 status, void *data)
  495 +static void davinci_spi_dma_rx_callback(void *data)
500 496 {
501   - struct davinci_spi *dspi = data;
502   - struct davinci_spi_dma *dma = &dspi->dma;
  497 + struct davinci_spi *dspi = (struct davinci_spi *)data;
503 498  
504   - edma_stop(lch);
  499 + dspi->rcount = 0;
505 500  
506   - if (status == DMA_COMPLETE) {
507   - if (lch == dma->rx_channel)
508   - dspi->rcount = 0;
509   - if (lch == dma->tx_channel)
510   - dspi->wcount = 0;
511   - }
  501 + if (!dspi->wcount && !dspi->rcount)
  502 + complete(&dspi->done);
  503 +}
512 504  
513   - if ((!dspi->wcount && !dspi->rcount) || (status != DMA_COMPLETE))
  505 +static void davinci_spi_dma_tx_callback(void *data)
  506 +{
  507 + struct davinci_spi *dspi = (struct davinci_spi *)data;
  508 +
  509 + dspi->wcount = 0;
  510 +
  511 + if (!dspi->wcount && !dspi->rcount)
514 512 complete(&dspi->done);
515 513 }
516 514  
... ... @@ -526,20 +524,20 @@
526 524 static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t)
527 525 {
528 526 struct davinci_spi *dspi;
529   - int data_type, ret;
  527 + int data_type, ret = -ENOMEM;
530 528 u32 tx_data, spidat1;
531 529 u32 errors = 0;
532 530 struct davinci_spi_config *spicfg;
533 531 struct davinci_spi_platform_data *pdata;
534 532 unsigned uninitialized_var(rx_buf_count);
535   - struct device *sdev;
  533 + void *dummy_buf = NULL;
  534 + struct scatterlist sg_rx, sg_tx;
536 535  
537 536 dspi = spi_master_get_devdata(spi->master);
538 537 pdata = dspi->pdata;
539 538 spicfg = (struct davinci_spi_config *)spi->controller_data;
540 539 if (!spicfg)
541 540 spicfg = &davinci_spi_default_cfg;
542   - sdev = dspi->bitbang.master->dev.parent;
543 541  
544 542 /* convert len to words based on bits_per_word */
545 543 data_type = dspi->bytes_per_word[spi->chip_select];
... ... @@ -567,112 +565,83 @@
567 565 spidat1 |= tx_data & 0xFFFF;
568 566 iowrite32(spidat1, dspi->base + SPIDAT1);
569 567 } else {
570   - struct davinci_spi_dma *dma;
571   - unsigned long tx_reg, rx_reg;
572   - struct edmacc_param param;
573   - void *rx_buf;
574   - int b, c;
  568 + struct dma_slave_config dma_rx_conf = {
  569 + .direction = DMA_DEV_TO_MEM,
  570 + .src_addr = (unsigned long)dspi->pbase + SPIBUF,
  571 + .src_addr_width = data_type,
  572 + .src_maxburst = 1,
  573 + };
  574 + struct dma_slave_config dma_tx_conf = {
  575 + .direction = DMA_MEM_TO_DEV,
  576 + .dst_addr = (unsigned long)dspi->pbase + SPIDAT1,
  577 + .dst_addr_width = data_type,
  578 + .dst_maxburst = 1,
  579 + };
  580 + struct dma_async_tx_descriptor *rxdesc;
  581 + struct dma_async_tx_descriptor *txdesc;
  582 + void *buf;
575 583  
576   - dma = &dspi->dma;
  584 + dummy_buf = kzalloc(t->len, GFP_KERNEL);
  585 + if (!dummy_buf)
  586 + goto err_alloc_dummy_buf;
577 587  
578   - tx_reg = (unsigned long)dspi->pbase + SPIDAT1;
579   - rx_reg = (unsigned long)dspi->pbase + SPIBUF;
  588 + dmaengine_slave_config(dspi->dma_rx, &dma_rx_conf);
  589 + dmaengine_slave_config(dspi->dma_tx, &dma_tx_conf);
580 590  
581   - /*
582   - * Transmit DMA setup
583   - *
584   - * If there is transmit data, map the transmit buffer, set it
585   - * as the source of data and set the source B index to data
586   - * size. If there is no transmit data, set the transmit register
587   - * as the source of data, and set the source B index to zero.
588   - *
589   - * The destination is always the transmit register itself. And
590   - * the destination never increments.
591   - */
592   -
593   - if (t->tx_buf) {
594   - t->tx_dma = dma_map_single(&spi->dev, (void *)t->tx_buf,
595   - t->len, DMA_TO_DEVICE);
596   - if (dma_mapping_error(&spi->dev, t->tx_dma)) {
597   - dev_dbg(sdev, "Unable to DMA map %d bytes"
598   - "TX buffer\n", t->len);
599   - return -ENOMEM;
600   - }
  591 + sg_init_table(&sg_rx, 1);
  592 + if (!t->rx_buf)
  593 + buf = dummy_buf;
  594 + else
  595 + buf = t->rx_buf;
  596 + t->rx_dma = dma_map_single(&spi->dev, buf,
  597 + t->len, DMA_FROM_DEVICE);
  598 + if (!t->rx_dma) {
  599 + ret = -EFAULT;
  600 + goto err_rx_map;
601 601 }
  602 + sg_dma_address(&sg_rx) = t->rx_dma;
  603 + sg_dma_len(&sg_rx) = t->len;
602 604  
603   - /*
604   - * If number of words is greater than 65535, then we need
605   - * to configure a 3 dimension transfer. Use the BCNTRLD
606   - * feature to allow for transfers that aren't even multiples
607   - * of 65535 (or any other possible b size) by first transferring
608   - * the remainder amount then grabbing the next N blocks of
609   - * 65535 words.
610   - */
611   -
612   - c = dspi->wcount / (SZ_64K - 1); /* N 65535 Blocks */
613   - b = dspi->wcount - c * (SZ_64K - 1); /* Remainder */
614   - if (b)
615   - c++;
  605 + sg_init_table(&sg_tx, 1);
  606 + if (!t->tx_buf)
  607 + buf = dummy_buf;
616 608 else
617   - b = SZ_64K - 1;
618   -
619   - param.opt = TCINTEN | EDMA_TCC(dma->tx_channel);
620   - param.src = t->tx_buf ? t->tx_dma : tx_reg;
621   - param.a_b_cnt = b << 16 | data_type;
622   - param.dst = tx_reg;
623   - param.src_dst_bidx = t->tx_buf ? data_type : 0;
624   - param.link_bcntrld = 0xffffffff;
625   - param.src_dst_cidx = t->tx_buf ? data_type : 0;
626   - param.ccnt = c;
627   - edma_write_slot(dma->tx_channel, &param);
628   - edma_link(dma->tx_channel, dma->dummy_param_slot);
629   -
630   - /*
631   - * Receive DMA setup
632   - *
633   - * If there is receive buffer, use it to receive data. If there
634   - * is none provided, use a temporary receive buffer. Set the
635   - * destination B index to 0 so effectively only one byte is used
636   - * in the temporary buffer (address does not increment).
637   - *
638   - * The source of receive data is the receive data register. The
639   - * source address never increments.
640   - */
641   -
642   - if (t->rx_buf) {
643   - rx_buf = t->rx_buf;
644   - rx_buf_count = t->len;
645   - } else {
646   - rx_buf = dspi->rx_tmp_buf;
647   - rx_buf_count = sizeof(dspi->rx_tmp_buf);
  609 + buf = (void *)t->tx_buf;
  610 + t->tx_dma = dma_map_single(&spi->dev, buf,
  611 + t->len, DMA_FROM_DEVICE);
  612 + if (!t->tx_dma) {
  613 + ret = -EFAULT;
  614 + goto err_tx_map;
648 615 }
  616 + sg_dma_address(&sg_tx) = t->tx_dma;
  617 + sg_dma_len(&sg_tx) = t->len;
649 618  
650   - t->rx_dma = dma_map_single(&spi->dev, rx_buf, rx_buf_count,
651   - DMA_FROM_DEVICE);
652   - if (dma_mapping_error(&spi->dev, t->rx_dma)) {
653   - dev_dbg(sdev, "Couldn't DMA map a %d bytes RX buffer\n",
654   - rx_buf_count);
655   - if (t->tx_buf)
656   - dma_unmap_single(&spi->dev, t->tx_dma, t->len,
657   - DMA_TO_DEVICE);
658   - return -ENOMEM;
659   - }
  619 + rxdesc = dmaengine_prep_slave_sg(dspi->dma_rx,
  620 + &sg_rx, 1, DMA_DEV_TO_MEM,
  621 + DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
  622 + if (!rxdesc)
  623 + goto err_desc;
660 624  
661   - param.opt = TCINTEN | EDMA_TCC(dma->rx_channel);
662   - param.src = rx_reg;
663   - param.a_b_cnt = b << 16 | data_type;
664   - param.dst = t->rx_dma;
665   - param.src_dst_bidx = (t->rx_buf ? data_type : 0) << 16;
666   - param.link_bcntrld = 0xffffffff;
667   - param.src_dst_cidx = (t->rx_buf ? data_type : 0) << 16;
668   - param.ccnt = c;
669   - edma_write_slot(dma->rx_channel, &param);
  625 + txdesc = dmaengine_prep_slave_sg(dspi->dma_tx,
  626 + &sg_tx, 1, DMA_MEM_TO_DEV,
  627 + DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
  628 + if (!txdesc)
  629 + goto err_desc;
670 630  
  631 + rxdesc->callback = davinci_spi_dma_rx_callback;
  632 + rxdesc->callback_param = (void *)dspi;
  633 + txdesc->callback = davinci_spi_dma_tx_callback;
  634 + txdesc->callback_param = (void *)dspi;
  635 +
671 636 if (pdata->cshold_bug)
672 637 iowrite16(spidat1 >> 16, dspi->base + SPIDAT1 + 2);
673 638  
674   - edma_start(dma->rx_channel);
675   - edma_start(dma->tx_channel);
  639 + dmaengine_submit(rxdesc);
  640 + dmaengine_submit(txdesc);
  641 +
  642 + dma_async_issue_pending(dspi->dma_rx);
  643 + dma_async_issue_pending(dspi->dma_tx);
  644 +
676 645 set_io_bits(dspi->base + SPIINT, SPIINT_DMA_REQ_EN);
677 646 }
678 647  
679 648  
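
A note on the hunk above: each direction transfers one contiguous buffer, so the driver maps it with dma_map_single() and describes it with a single-entry scatterlist for dmaengine_prep_slave_sg(). The standalone sketch below shows that mapping step with the conventional dma_mapping_error() check; it is not part of the patch and the helper name is illustrative.

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/*
 * Illustrative helper (not from the patch): map one contiguous buffer and
 * wrap it in a single-entry scatterlist for dmaengine_prep_slave_sg().
 */
static int example_map_one(struct device *dev, void *buf, size_t len,
                           enum dma_data_direction dir,
                           struct scatterlist *sg)
{
        dma_addr_t addr = dma_map_single(dev, buf, len, dir);

        if (dma_mapping_error(dev, addr))
                return -ENOMEM;

        sg_init_table(sg, 1);
        sg_dma_address(sg) = addr;
        sg_dma_len(sg) = len;

        return 0;
}

The matching teardown is dma_unmap_single(dev, sg_dma_address(sg), len, dir) once the transfer has finished, which corresponds to the unmap calls added in the cleanup hunk below.
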
... ... @@ -690,15 +659,13 @@
690 659  
691 660 clear_io_bits(dspi->base + SPIINT, SPIINT_MASKALL);
692 661 if (spicfg->io_type == SPI_IO_TYPE_DMA) {
693   -
694   - if (t->tx_buf)
695   - dma_unmap_single(&spi->dev, t->tx_dma, t->len,
696   - DMA_TO_DEVICE);
697   -
698   - dma_unmap_single(&spi->dev, t->rx_dma, rx_buf_count,
699   - DMA_FROM_DEVICE);
700   -
701 662 clear_io_bits(dspi->base + SPIINT, SPIINT_DMA_REQ_EN);
  663 +
  664 + dma_unmap_single(&spi->dev, t->rx_dma,
  665 + t->len, DMA_FROM_DEVICE);
  666 + dma_unmap_single(&spi->dev, t->tx_dma,
  667 + t->len, DMA_TO_DEVICE);
  668 + kfree(dummy_buf);
702 669 }
703 670  
704 671 clear_io_bits(dspi->base + SPIGCR1, SPIGCR1_SPIENA_MASK);
705 672  
... ... @@ -716,11 +683,20 @@
716 683 }
717 684  
718 685 if (dspi->rcount != 0 || dspi->wcount != 0) {
719   - dev_err(sdev, "SPI data transfer error\n");
  686 + dev_err(&spi->dev, "SPI data transfer error\n");
720 687 return -EIO;
721 688 }
722 689  
723 690 return t->len;
  691 +
  692 +err_desc:
  693 + dma_unmap_single(&spi->dev, t->tx_dma, t->len, DMA_TO_DEVICE);
  694 +err_tx_map:
  695 + dma_unmap_single(&spi->dev, t->rx_dma, t->len, DMA_FROM_DEVICE);
  696 +err_rx_map:
  697 + kfree(dummy_buf);
  698 +err_alloc_dummy_buf:
  699 + return ret;
724 700 }
725 701  
726 702 /**
... ... @@ -751,39 +727,33 @@
751 727  
752 728 static int davinci_spi_request_dma(struct davinci_spi *dspi)
753 729 {
  730 + dma_cap_mask_t mask;
  731 + struct device *sdev = dspi->bitbang.master->dev.parent;
754 732 int r;
755   - struct davinci_spi_dma *dma = &dspi->dma;
756 733  
757   - r = edma_alloc_channel(dma->rx_channel, davinci_spi_dma_callback, dspi,
758   - dma->eventq);
759   - if (r < 0) {
760   - pr_err("Unable to request DMA channel for SPI RX\n");
761   - r = -EAGAIN;
  734 + dma_cap_zero(mask);
  735 + dma_cap_set(DMA_SLAVE, mask);
  736 +
  737 + dspi->dma_rx = dma_request_channel(mask, edma_filter_fn,
  738 + &dspi->dma_rx_chnum);
  739 + if (!dspi->dma_rx) {
  740 + dev_err(sdev, "request RX DMA channel failed\n");
  741 + r = -ENODEV;
762 742 goto rx_dma_failed;
763 743 }
764 744  
765   - r = edma_alloc_channel(dma->tx_channel, davinci_spi_dma_callback, dspi,
766   - dma->eventq);
767   - if (r < 0) {
768   - pr_err("Unable to request DMA channel for SPI TX\n");
769   - r = -EAGAIN;
  745 + dspi->dma_tx = dma_request_channel(mask, edma_filter_fn,
  746 + &dspi->dma_tx_chnum);
  747 + if (!dspi->dma_tx) {
  748 + dev_err(sdev, "request TX DMA channel failed\n");
  749 + r = -ENODEV;
770 750 goto tx_dma_failed;
771 751 }
772 752  
773   - r = edma_alloc_slot(EDMA_CTLR(dma->tx_channel), EDMA_SLOT_ANY);
774   - if (r < 0) {
775   - pr_err("Unable to request SPI TX DMA param slot\n");
776   - r = -EAGAIN;
777   - goto param_failed;
778   - }
779   - dma->dummy_param_slot = r;
780   - edma_link(dma->dummy_param_slot, dma->dummy_param_slot);
781   -
782 753 return 0;
783   -param_failed:
784   - edma_free_channel(dma->tx_channel);
  754 +
785 755 tx_dma_failed:
786   - edma_free_channel(dma->rx_channel);
  756 + dma_release_channel(dspi->dma_rx);
787 757 rx_dma_failed:
788 758 return r;
789 759 }
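
The hunk above replaces edma_alloc_channel() with dma_request_channel(), using a capability mask plus the EDMA filter callback to pick the channel whose number is passed through the filter parameter. Below is a minimal sketch of that request pattern; it is not part of the patch and the helper name is illustrative.

#include <linux/dmaengine.h>
#include <linux/edma.h>

/*
 * Illustrative helper (not from the patch): request a DMA_SLAVE capable
 * channel and let edma_filter_fn() pick the one identified by *chnum.
 */
static struct dma_chan *example_request_channel(int *chnum)
{
        dma_cap_mask_t mask;

        dma_cap_zero(mask);
        dma_cap_set(DMA_SLAVE, mask);

        /* The filter parameter is a pointer to the desired channel number. */
        return dma_request_channel(mask, edma_filter_fn, chnum);
}

A NULL return means no matching channel could be obtained; when a channel is no longer needed it is handed back with dma_release_channel(), as the tx_dma_failed path above and the probe's free_dma label below now do.
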
... ... @@ -898,9 +868,8 @@
898 868 dspi->bitbang.txrx_bufs = davinci_spi_bufs;
899 869 if (dma_rx_chan != SPI_NO_RESOURCE &&
900 870 dma_tx_chan != SPI_NO_RESOURCE) {
901   - dspi->dma.rx_channel = dma_rx_chan;
902   - dspi->dma.tx_channel = dma_tx_chan;
903   - dspi->dma.eventq = pdata->dma_event_q;
  871 + dspi->dma_rx_chnum = dma_rx_chan;
  872 + dspi->dma_tx_chnum = dma_tx_chan;
904 873  
905 874 ret = davinci_spi_request_dma(dspi);
906 875 if (ret)
... ... @@ -955,9 +924,8 @@
955 924 return ret;
956 925  
957 926 free_dma:
958   - edma_free_channel(dspi->dma.tx_channel);
959   - edma_free_channel(dspi->dma.rx_channel);
960   - edma_free_slot(dspi->dma.dummy_param_slot);
  927 + dma_release_channel(dspi->dma_rx);
  928 + dma_release_channel(dspi->dma_tx);
961 929 free_clk:
962 930 clk_disable(dspi->clk);
963 931 clk_put(dspi->clk);