Commit 4774fb0a48aacfec206e6d54ecf58706f6a5320a

Authored by Vipin Kumar
Committed by David Woodhouse
1 parent 604e75444f

mtd: nand/fsmc: Add DMA support

The fsmc_nand driver uses the CPU to read from and write to the device. This
is inefficient for two reasons:
- the CPU is locked on the AHB bus while reading from the NAND device
- the CPU is occupied unnecessarily when DMA can do the job

This patch adds support for accessing the device through DMA.

Signed-off-by: Vipin Kumar <vipin.kumar@st.com>
Reviewed-by: Viresh Kumar <viresh.kumar@st.com>
Signed-off-by: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>

Showing 2 changed files with 167 additions and 5 deletions

drivers/mtd/nand/fsmc_nand.c
... ... @@ -17,6 +17,10 @@
17 17 */
18 18  
19 19 #include <linux/clk.h>
  20 +#include <linux/completion.h>
  21 +#include <linux/dmaengine.h>
  22 +#include <linux/dma-direction.h>
  23 +#include <linux/dma-mapping.h>
20 24 #include <linux/err.h>
21 25 #include <linux/init.h>
22 26 #include <linux/module.h>
... ... @@ -282,6 +286,11 @@
282 286 * @bank: Bank number for probed device.
283 287 * @clk: Clock structure for FSMC.
284 288 *
  289 + * @read_dma_chan: DMA channel for read access
  290 + * @write_dma_chan: DMA channel for write access to NAND
  291 + * @dma_access_complete: Completion structure
  292 + *
  293 + * @data_pa: NAND Physical port for Data.
285 294 * @data_va: NAND port for Data.
286 295 * @cmd_va: NAND port for Command.
287 296 * @addr_va: NAND port for Address.
288 297  
289 298  
... ... @@ -297,10 +306,17 @@
297 306 struct fsmc_eccplace *ecc_place;
298 307 unsigned int bank;
299 308 struct device *dev;
  309 + enum access_mode mode;
300 310 struct clk *clk;
301 311  
  312 + /* DMA related objects */
  313 + struct dma_chan *read_dma_chan;
  314 + struct dma_chan *write_dma_chan;
  315 + struct completion dma_access_complete;
  316 +
302 317 struct fsmc_nand_timings *dev_timings;
303 318  
  319 + dma_addr_t data_pa;
304 320 void __iomem *data_va;
305 321 void __iomem *cmd_va;
306 322 void __iomem *addr_va;
... ... @@ -523,6 +539,77 @@
523 539 return written_bits;
524 540 }
525 541  
  542 +static void dma_complete(void *param)
  543 +{
  544 + struct fsmc_nand_data *host = param;
  545 +
  546 + complete(&host->dma_access_complete);
  547 +}
  548 +
  549 +static int dma_xfer(struct fsmc_nand_data *host, void *buffer, int len,
  550 + enum dma_data_direction direction)
  551 +{
  552 + struct dma_chan *chan;
  553 + struct dma_device *dma_dev;
  554 + struct dma_async_tx_descriptor *tx;
  555 + dma_addr_t dma_dst, dma_src, dma_addr;
  556 + dma_cookie_t cookie;
  557 + unsigned long flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
  558 + int ret;
  559 +
  560 + if (direction == DMA_TO_DEVICE)
  561 + chan = host->write_dma_chan;
  562 + else if (direction == DMA_FROM_DEVICE)
  563 + chan = host->read_dma_chan;
  564 + else
  565 + return -EINVAL;
  566 +
  567 + dma_dev = chan->device;
  568 + dma_addr = dma_map_single(dma_dev->dev, buffer, len, direction);
  569 +
  570 + if (direction == DMA_TO_DEVICE) {
  571 + dma_src = dma_addr;
  572 + dma_dst = host->data_pa;
  573 + flags |= DMA_COMPL_SRC_UNMAP_SINGLE | DMA_COMPL_SKIP_DEST_UNMAP;
  574 + } else {
  575 + dma_src = host->data_pa;
  576 + dma_dst = dma_addr;
  577 + flags |= DMA_COMPL_DEST_UNMAP_SINGLE | DMA_COMPL_SKIP_SRC_UNMAP;
  578 + }
  579 +
  580 + tx = dma_dev->device_prep_dma_memcpy(chan, dma_dst, dma_src,
  581 + len, flags);
  582 +
  583 + if (!tx) {
  584 + dev_err(host->dev, "device_prep_dma_memcpy error\n");
  585 + dma_unmap_single(dma_dev->dev, dma_addr, len, direction);
  586 + return -EIO;
  587 + }
  588 +
  589 + tx->callback = dma_complete;
  590 + tx->callback_param = host;
  591 + cookie = tx->tx_submit(tx);
  592 +
  593 + ret = dma_submit_error(cookie);
  594 + if (ret) {
  595 + dev_err(host->dev, "dma_submit_error %d\n", cookie);
  596 + return ret;
  597 + }
  598 +
  599 + dma_async_issue_pending(chan);
  600 +
  601 + ret =
  602 + wait_for_completion_interruptible_timeout(&host->dma_access_complete,
  603 + msecs_to_jiffies(3000));
  604 + if (ret <= 0) {
  605 + chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
  606 + dev_err(host->dev, "wait_for_completion_timeout\n");
  607 + return ret ? ret : -ETIMEDOUT;
  608 + }
  609 +
  610 + return 0;
  611 +}
  612 +
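Editorial note: dma_xfer() above is a textbook instance of the dmaengine memcpy flow: map the CPU-side buffer, prepare a descriptor, submit it, kick the engine, then sleep until the completion callback fires. A condensed sketch of that flow with generic names (illustration only, not the driver's code; error handling omitted):

        /* Generic dmaengine memcpy skeleton; names are illustrative. */
        dma_addr_t buf_dma;
        struct dma_async_tx_descriptor *tx;

        buf_dma = dma_map_single(dma_dev->dev, buf, len, dir);
        tx = dma_dev->device_prep_dma_memcpy(chan, dst, src, len, flags);
        tx->callback = done_fn;                 /* runs in IRQ context */
        tx->callback_param = ctx;
        tx->tx_submit(tx);                      /* queue the descriptor */
        dma_async_issue_pending(chan);          /* start the transfer */
        wait_for_completion(&done);             /* done_fn calls complete() */

On timeout or interruption, dma_xfer() additionally terminates the channel with DMA_TERMINATE_ALL, as seen above.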
526 613 /*
527 614 * fsmc_write_buf - write buffer to chip
528 615 * @mtd: MTD device structure
... ... @@ -570,6 +657,35 @@
570 657 }
571 658  
572 659 /*
  660 + * fsmc_read_buf_dma - read chip data into buffer
  661 + * @mtd: MTD device structure
  662 + * @buf: buffer to store data
  663 + * @len: number of bytes to read
  664 + */
  665 +static void fsmc_read_buf_dma(struct mtd_info *mtd, uint8_t *buf, int len)
  666 +{
  667 + struct fsmc_nand_data *host;
  668 +
  669 + host = container_of(mtd, struct fsmc_nand_data, mtd);
  670 + dma_xfer(host, buf, len, DMA_FROM_DEVICE);
  671 +}
  672 +
  673 +/*
  674 + * fsmc_write_buf_dma - write buffer to chip
  675 + * @mtd: MTD device structure
  676 + * @buf: data buffer
  677 + * @len: number of bytes to write
  678 + */
  679 +static void fsmc_write_buf_dma(struct mtd_info *mtd, const uint8_t *buf,
  680 + int len)
  681 +{
  682 + struct fsmc_nand_data *host;
  683 +
  684 + host = container_of(mtd, struct fsmc_nand_data, mtd);
  685 + dma_xfer(host, (void *)buf, len, DMA_TO_DEVICE);
  686 +}
  687 +
  688 +/*
573 689 * fsmc_read_page_hwecc
574 690 * @mtd: mtd info structure
575 691 * @chip: nand chip info structure
... ... @@ -731,6 +847,12 @@
731 847 return i;
732 848 }
733 849  
  850 +static bool filter(struct dma_chan *chan, void *slave)
  851 +{
  852 + chan->private = slave;
  853 + return true;
  854 +}
  855 +
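The filter() above implements the dmaengine channel-filter idiom: dma_request_channel() offers each DMA_MEMCPY-capable channel to the filter, and the first channel for which it returns true is reserved; stashing the platform-supplied slave data in chan->private lets the DMAC driver configure that channel. This filter accepts any candidate. A platform that must bind to one particular controller would typically compare against the slave data first; a hypothetical, more selective variant (the my_dma_slave structure and its dmac_dev field are assumptions, not part of this commit):

        /* Hypothetical selective filter: reject channels that do not
         * belong to the DMA controller named in the slave data. */
        static bool fsmc_dma_filter(struct dma_chan *chan, void *slave)
        {
                struct my_dma_slave *s = slave;         /* assumed type */

                if (s->dmac_dev && chan->device->dev != s->dmac_dev)
                        return false;   /* wrong DMA controller */

                chan->private = slave;
                return true;
        }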
734 856 /*
735 857 * fsmc_nand_probe - Probe function
736 858 * @pdev: platform device structure
... ... @@ -743,6 +865,7 @@
743 865 struct nand_chip *nand;
744 866 struct fsmc_regs *regs;
745 867 struct resource *res;
  868 + dma_cap_mask_t mask;
746 869 int ret = 0;
747 870 u32 pid;
748 871 int i;
... ... @@ -769,6 +892,7 @@
769 892 return -ENOENT;
770 893 }
771 894  
  895 + host->data_pa = (dma_addr_t)res->start;
772 896 host->data_va = devm_ioremap(&pdev->dev, res->start,
773 897 resource_size(res));
774 898 if (!host->data_va) {
... ... @@ -847,6 +971,11 @@
847 971 host->nr_partitions = pdata->nr_partitions;
848 972 host->dev = &pdev->dev;
849 973 host->dev_timings = pdata->nand_timings;
  974 + host->mode = pdata->mode;
  975 +
  976 + if (host->mode == USE_DMA_ACCESS)
  977 + init_completion(&host->dma_access_complete);
  978 +
850 979 regs = host->regs_va;
851 980  
852 981 /* Link all private pointers */
853 982  
... ... @@ -871,13 +1000,31 @@
871 1000 if (pdata->width == FSMC_NAND_BW16)
872 1001 nand->options |= NAND_BUSWIDTH_16;
873 1002  
874   - /*
875   - * use customized (word by word) version of read_buf, write_buf if
876   - * access_with_dev_width is reset supported
877   - */
878   - if (pdata->mode == USE_WORD_ACCESS) {
  1003 + switch (host->mode) {
  1004 + case USE_DMA_ACCESS:
  1005 + dma_cap_zero(mask);
  1006 + dma_cap_set(DMA_MEMCPY, mask);
  1007 + host->read_dma_chan = dma_request_channel(mask, filter,
  1008 + pdata->read_dma_priv);
  1009 + if (!host->read_dma_chan) {
  1010 + dev_err(&pdev->dev, "Unable to get read dma channel\n");
  1011 + goto err_req_read_chnl;
  1012 + }
  1013 + host->write_dma_chan = dma_request_channel(mask, filter,
  1014 + pdata->write_dma_priv);
  1015 + if (!host->write_dma_chan) {
  1016 + dev_err(&pdev->dev, "Unable to get write dma channel\n");
  1017 + goto err_req_write_chnl;
  1018 + }
  1019 + nand->read_buf = fsmc_read_buf_dma;
  1020 + nand->write_buf = fsmc_write_buf_dma;
  1021 + break;
  1022 +
  1023 + default:
  1024 + case USE_WORD_ACCESS:
879 1025 nand->read_buf = fsmc_read_buf;
880 1026 nand->write_buf = fsmc_write_buf;
  1027 + break;
881 1028 }
882 1029  
883 1030 fsmc_nand_setup(regs, host->bank, nand->options & NAND_BUSWIDTH_16,
... ... @@ -978,6 +1125,12 @@
978 1125  
979 1126 err_probe:
980 1127 err_scan_ident:
  1128 + if (host->mode == USE_DMA_ACCESS)
  1129 + dma_release_channel(host->write_dma_chan);
  1130 +err_req_write_chnl:
  1131 + if (host->mode == USE_DMA_ACCESS)
  1132 + dma_release_channel(host->read_dma_chan);
  1133 +err_req_read_chnl:
981 1134 clk_disable(host->clk);
982 1135 err_clk_enable:
983 1136 clk_put(host->clk);
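The new labels extend probe()'s goto-unwind ladder: control jumps to the label matching the first failed acquisition and then falls through the labels below it, releasing resources in the reverse order of acquisition. Schematically (illustration only, not driver code):

        a = acquire_a();
        if (!a)
                goto err_a;
        b = acquire_b();
        if (!b)
                goto err_b;
        return 0;

err_b:
        release_a(a);           /* each label undoes one earlier step */
err_a:
        return ret;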
... ... @@ -995,6 +1148,11 @@
995 1148  
996 1149 if (host) {
997 1150 nand_release(&host->mtd);
  1151 +
  1152 + if (host->mode == USE_DMA_ACCESS) {
  1153 + dma_release_channel(host->write_dma_chan);
  1154 + dma_release_channel(host->read_dma_chan);
  1155 + }
998 1156 clk_disable(host->clk);
999 1157 clk_put(host->clk);
1000 1158 }
include/linux/mtd/fsmc.h
... ... @@ -172,6 +172,10 @@
172 172 enum access_mode mode;
173 173  
174 174 void (*select_bank)(uint32_t bank, uint32_t busw);
  175 +
  176 + /* priv structures for dma accesses */
  177 + void *read_dma_priv;
  178 + void *write_dma_priv;
175 179 };
176 180  
177 181 extern int __init fsmc_nor_init(struct platform_device *pdev,
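For context, the two new platform-data fields are filled in by board code alongside the access mode. A hedged board-file sketch (the dma-slave objects and their names are assumptions, not part of this commit):

        static struct fsmc_nand_platform_data nand_pdata = {
                .mode           = USE_DMA_ACCESS,
                /* Platform-specific DMAC slave data, consumed only by
                 * the driver's filter function via chan->private. */
                .read_dma_priv  = &nand_read_dma_slave,         /* assumed */
                .write_dma_priv = &nand_write_dma_slave,        /* assumed */
        };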