Commit d9de451989a88a2003ca06e524aca4665c0c7f06

Author: Hans-Christian Egtvedt
Committer: Dan Williams
Parent: 0f571515c3

dw_dmac: add cyclic API to DW DMA driver

This patch adds a cyclic DMA interface to the DW DMA driver. This is
useful when the DMA controller is combined with a sound device that
uses cyclic buffers.

Using a DMA channel for cyclic DMA makes it unavailable as a normal
DMA engine until the user calls the cyclic free function on the
channel. A cyclic DMA list also cannot be prepared if the channel is
already active.
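
As a rough illustration, a client driver would drive the new API along
the following lines (a minimal sketch; my_period_done(), my_start_cyclic(),
my_stop_cyclic() and the buffer parameters are hypothetical, and the
channel is assumed to have been requested with chan->private pointing at
a struct dw_dma_slave):

	#include <linux/dmaengine.h>
	#include <linux/dw_dmac.h>
	#include <linux/err.h>

	/* Called once per completed period, from the DMAC tasklet with
	 * the channel lock dropped. */
	static void my_period_done(void *param)
	{
		/* e.g. advance a ring-buffer position, notify ALSA */
	}

	static int my_start_cyclic(struct dma_chan *chan, dma_addr_t buf,
				   size_t buf_len, size_t period_len)
	{
		struct dw_cyclic_desc *cdesc;

		/* fails with -EBUSY if the channel is already active
		 * or already prepared for cyclic DMA */
		cdesc = dw_dma_cyclic_prep(chan, buf, buf_len,
					   period_len, DMA_TO_DEVICE);
		if (IS_ERR(cdesc))
			return PTR_ERR(cdesc);

		cdesc->period_callback = my_period_done;
		cdesc->period_callback_param = NULL;

		/* must be called with soft interrupts disabled */
		return dw_dma_cyclic_start(chan);
	}

	static void my_stop_cyclic(struct dma_chan *chan)
	{
		dw_dma_cyclic_stop(chan);
		/* channel can be used as a normal DMA engine again */
		dw_dma_cyclic_free(chan);
	}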

Signed-off-by: Hans-Christian Egtvedt <hans-christian.egtvedt@atmel.com>
Acked-by: Haavard Skinnemoen <haavard.skinnemoen@atmel.com>
Acked-by: Maciej Sosnowski <maciej.sosnowski@intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>

Showing 3 changed files with 356 additions and 2 deletions

drivers/dma/dw_dmac.c
@@ -363,6 +363,82 @@
 		dwc_descriptor_complete(dwc, bad_desc);
 }
 
+/* --------------------- Cyclic DMA API extensions -------------------- */
+
+inline dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan)
+{
+	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+	return channel_readl(dwc, SAR);
+}
+EXPORT_SYMBOL(dw_dma_get_src_addr);
+
+inline dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan)
+{
+	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+	return channel_readl(dwc, DAR);
+}
+EXPORT_SYMBOL(dw_dma_get_dst_addr);
+
+/* called with dwc->lock held and all DMAC interrupts disabled */
+static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
+		u32 status_block, u32 status_err, u32 status_xfer)
+{
+	if (status_block & dwc->mask) {
+		void (*callback)(void *param);
+		void *callback_param;
+
+		dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n",
+				channel_readl(dwc, LLP));
+		dma_writel(dw, CLEAR.BLOCK, dwc->mask);
+
+		callback = dwc->cdesc->period_callback;
+		callback_param = dwc->cdesc->period_callback_param;
+		if (callback) {
+			spin_unlock(&dwc->lock);
+			callback(callback_param);
+			spin_lock(&dwc->lock);
+		}
+	}
+
+	/*
+	 * Error and transfer complete are highly unlikely, and will most
+	 * likely be due to a configuration error by the user.
+	 */
+	if (unlikely(status_err & dwc->mask) ||
+			unlikely(status_xfer & dwc->mask)) {
+		int i;
+
+		dev_err(chan2dev(&dwc->chan), "cyclic DMA unexpected %s "
+				"interrupt, stopping DMA transfer\n",
+				status_xfer ? "xfer" : "error");
+		dev_err(chan2dev(&dwc->chan),
+			"  SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
+			channel_readl(dwc, SAR),
+			channel_readl(dwc, DAR),
+			channel_readl(dwc, LLP),
+			channel_readl(dwc, CTL_HI),
+			channel_readl(dwc, CTL_LO));
+
+		channel_clear_bit(dw, CH_EN, dwc->mask);
+		while (dma_readl(dw, CH_EN) & dwc->mask)
+			cpu_relax();
+
+		/* make sure DMA does not restart by loading a new list */
+		channel_writel(dwc, LLP, 0);
+		channel_writel(dwc, CTL_LO, 0);
+		channel_writel(dwc, CTL_HI, 0);
+
+		dma_writel(dw, CLEAR.BLOCK, dwc->mask);
+		dma_writel(dw, CLEAR.ERROR, dwc->mask);
+		dma_writel(dw, CLEAR.XFER, dwc->mask);
+
+		for (i = 0; i < dwc->cdesc->periods; i++)
+			dwc_dump_lli(dwc, &dwc->cdesc->desc[i]->lli);
+	}
+}
+
+/* ------------------------------------------------------------------------- */
+
 static void dw_dma_tasklet(unsigned long data)
 {
 	struct dw_dma *dw = (struct dw_dma *)data;
@@ -382,7 +458,10 @@
 	for (i = 0; i < dw->dma.chancnt; i++) {
 		dwc = &dw->chan[i];
 		spin_lock(&dwc->lock);
-		if (status_err & (1 << i))
+		if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags))
+			dwc_handle_cyclic(dw, dwc, status_block, status_err,
+					status_xfer);
+		else if (status_err & (1 << i))
 			dwc_handle_error(dw, dwc);
 		else if ((status_block | status_xfer) & (1 << i))
 			dwc_scan_descriptors(dw, dwc);
@@ -882,6 +961,257 @@
 
 	dev_vdbg(chan2dev(chan), "free_chan_resources done\n");
 }
+
+/* --------------------- Cyclic DMA API extensions -------------------- */
+
+/**
+ * dw_dma_cyclic_start - start the cyclic DMA transfer
+ * @chan: the DMA channel to start
+ *
+ * Must be called with soft interrupts disabled. Returns zero on success or
+ * -errno on failure.
+ */
+int dw_dma_cyclic_start(struct dma_chan *chan)
+{
+	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
+	struct dw_dma		*dw = to_dw_dma(dwc->chan.device);
+
+	if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) {
+		dev_err(chan2dev(&dwc->chan), "missing prep for cyclic DMA\n");
+		return -ENODEV;
+	}
+
+	spin_lock(&dwc->lock);
+
+	/* assert channel is idle */
+	if (dma_readl(dw, CH_EN) & dwc->mask) {
+		dev_err(chan2dev(&dwc->chan),
+			"BUG: Attempted to start non-idle channel\n");
+		dev_err(chan2dev(&dwc->chan),
+			"  SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
+			channel_readl(dwc, SAR),
+			channel_readl(dwc, DAR),
+			channel_readl(dwc, LLP),
+			channel_readl(dwc, CTL_HI),
+			channel_readl(dwc, CTL_LO));
+		spin_unlock(&dwc->lock);
+		return -EBUSY;
+	}
+
+	dma_writel(dw, CLEAR.BLOCK, dwc->mask);
+	dma_writel(dw, CLEAR.ERROR, dwc->mask);
+	dma_writel(dw, CLEAR.XFER, dwc->mask);
+
+	/* setup DMAC channel registers */
+	channel_writel(dwc, LLP, dwc->cdesc->desc[0]->txd.phys);
+	channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
+	channel_writel(dwc, CTL_HI, 0);
+
+	channel_set_bit(dw, CH_EN, dwc->mask);
+
+	spin_unlock(&dwc->lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(dw_dma_cyclic_start);
+
+/**
+ * dw_dma_cyclic_stop - stop the cyclic DMA transfer
+ * @chan: the DMA channel to stop
+ *
+ * Must be called with soft interrupts disabled.
+ */
+void dw_dma_cyclic_stop(struct dma_chan *chan)
+{
+	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
+	struct dw_dma		*dw = to_dw_dma(dwc->chan.device);
+
+	spin_lock(&dwc->lock);
+
+	channel_clear_bit(dw, CH_EN, dwc->mask);
+	while (dma_readl(dw, CH_EN) & dwc->mask)
+		cpu_relax();
+
+	spin_unlock(&dwc->lock);
+}
+EXPORT_SYMBOL(dw_dma_cyclic_stop);
+
+/**
+ * dw_dma_cyclic_prep - prepare the cyclic DMA transfer
+ * @chan: the DMA channel to prepare
+ * @buf_addr: physical DMA address where the buffer starts
+ * @buf_len: total number of bytes for the entire buffer
+ * @period_len: number of bytes for each period
+ * @direction: transfer direction, to or from device
+ *
+ * Must be called before trying to start the transfer. Returns a valid struct
+ * dw_cyclic_desc if successful or an ERR_PTR(-errno) if not successful.
+ */
+struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
+		dma_addr_t buf_addr, size_t buf_len, size_t period_len,
+		enum dma_data_direction direction)
+{
+	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
+	struct dw_cyclic_desc	*cdesc;
+	struct dw_cyclic_desc	*retval = NULL;
+	struct dw_desc		*desc;
+	struct dw_desc		*last = NULL;
+	struct dw_dma_slave	*dws = chan->private;
+	unsigned long		was_cyclic;
+	unsigned int		reg_width;
+	unsigned int		periods;
+	unsigned int		i;
+
+	spin_lock_bh(&dwc->lock);
+	if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) {
+		spin_unlock_bh(&dwc->lock);
+		dev_dbg(chan2dev(&dwc->chan),
+				"queue and/or active list are not empty\n");
+		return ERR_PTR(-EBUSY);
+	}
+
+	was_cyclic = test_and_set_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
+	spin_unlock_bh(&dwc->lock);
+	if (was_cyclic) {
+		dev_dbg(chan2dev(&dwc->chan),
+				"channel already prepared for cyclic DMA\n");
+		return ERR_PTR(-EBUSY);
+	}
+
+	retval = ERR_PTR(-EINVAL);
+	reg_width = dws->reg_width;
+	periods = buf_len / period_len;
+
+	/* Check for too big/unaligned periods and unaligned DMA buffer. */
+	if (period_len > (DWC_MAX_COUNT << reg_width))
+		goto out_err;
+	if (unlikely(period_len & ((1 << reg_width) - 1)))
+		goto out_err;
+	if (unlikely(buf_addr & ((1 << reg_width) - 1)))
+		goto out_err;
+	if (unlikely(!(direction & (DMA_TO_DEVICE | DMA_FROM_DEVICE))))
+		goto out_err;
+
+	retval = ERR_PTR(-ENOMEM);
+
+	if (periods > NR_DESCS_PER_CHANNEL)
+		goto out_err;
+
+	cdesc = kzalloc(sizeof(struct dw_cyclic_desc), GFP_KERNEL);
+	if (!cdesc)
+		goto out_err;
+
+	cdesc->desc = kzalloc(sizeof(struct dw_desc *) * periods, GFP_KERNEL);
+	if (!cdesc->desc)
+		goto out_err_alloc;
+
+	for (i = 0; i < periods; i++) {
+		desc = dwc_desc_get(dwc);
+		if (!desc)
+			goto out_err_desc_get;
+
+		switch (direction) {
+		case DMA_TO_DEVICE:
+			desc->lli.dar = dws->tx_reg;
+			desc->lli.sar = buf_addr + (period_len * i);
+			desc->lli.ctllo = (DWC_DEFAULT_CTLLO
+					| DWC_CTLL_DST_WIDTH(reg_width)
+					| DWC_CTLL_SRC_WIDTH(reg_width)
+					| DWC_CTLL_DST_FIX
+					| DWC_CTLL_SRC_INC
+					| DWC_CTLL_FC_M2P
+					| DWC_CTLL_INT_EN);
+			break;
+		case DMA_FROM_DEVICE:
+			desc->lli.dar = buf_addr + (period_len * i);
+			desc->lli.sar = dws->rx_reg;
+			desc->lli.ctllo = (DWC_DEFAULT_CTLLO
+					| DWC_CTLL_SRC_WIDTH(reg_width)
+					| DWC_CTLL_DST_WIDTH(reg_width)
+					| DWC_CTLL_DST_INC
+					| DWC_CTLL_SRC_FIX
+					| DWC_CTLL_FC_P2M
+					| DWC_CTLL_INT_EN);
+			break;
+		default:
+			break;
+		}
+
+		desc->lli.ctlhi = (period_len >> reg_width);
+		cdesc->desc[i] = desc;
+
+		if (last) {
+			last->lli.llp = desc->txd.phys;
+			dma_sync_single_for_device(chan2parent(chan),
+					last->txd.phys, sizeof(last->lli),
+					DMA_TO_DEVICE);
+		}
+
+		last = desc;
+	}
+
+	/* lets make a cyclic list */
+	last->lli.llp = cdesc->desc[0]->txd.phys;
+	dma_sync_single_for_device(chan2parent(chan), last->txd.phys,
+			sizeof(last->lli), DMA_TO_DEVICE);
+
+	dev_dbg(chan2dev(&dwc->chan), "cyclic prepared buf 0x%08x len %zu "
+			"period %zu periods %d\n", buf_addr, buf_len,
+			period_len, periods);
+
+	cdesc->periods = periods;
+	dwc->cdesc = cdesc;
+
+	return cdesc;
+
+out_err_desc_get:
+	while (i--)
+		dwc_desc_put(dwc, cdesc->desc[i]);
+out_err_alloc:
+	kfree(cdesc);
+out_err:
+	clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
+	return (struct dw_cyclic_desc *)retval;
+}
+EXPORT_SYMBOL(dw_dma_cyclic_prep);
+
+/**
+ * dw_dma_cyclic_free - free a prepared cyclic DMA transfer
+ * @chan: the DMA channel to free
+ */
+void dw_dma_cyclic_free(struct dma_chan *chan)
+{
+	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
+	struct dw_dma		*dw = to_dw_dma(dwc->chan.device);
+	struct dw_cyclic_desc	*cdesc = dwc->cdesc;
+	int			i;
+
+	dev_dbg(chan2dev(&dwc->chan), "cyclic free\n");
+
+	if (!cdesc)
+		return;
+
+	spin_lock_bh(&dwc->lock);
+
+	channel_clear_bit(dw, CH_EN, dwc->mask);
+	while (dma_readl(dw, CH_EN) & dwc->mask)
+		cpu_relax();
+
+	dma_writel(dw, CLEAR.BLOCK, dwc->mask);
+	dma_writel(dw, CLEAR.ERROR, dwc->mask);
+	dma_writel(dw, CLEAR.XFER, dwc->mask);
+
+	spin_unlock_bh(&dwc->lock);
+
+	for (i = 0; i < cdesc->periods; i++)
+		dwc_desc_put(dwc, cdesc->desc[i]);
+
+	kfree(cdesc->desc);
+	kfree(cdesc);
+
+	clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
+}
+EXPORT_SYMBOL(dw_dma_cyclic_free);
 
 /*----------------------------------------------------------------------*/
 
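For reference, the parameter checks in dw_dma_cyclic_prep() above reduce
to simple arithmetic on the slave register width: with dws->reg_width == 2
(a 32-bit peripheral register), both buf_addr and period_len must be
multiples of 1 << 2 = 4 bytes, period_len may be at most
DWC_MAX_COUNT << 2 bytes (so one period fits a single block transfer),
and buf_len / period_len must not exceed NR_DESCS_PER_CHANNEL, since each
period consumes one descriptor.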
drivers/dma/dw_dmac_regs.h
@@ -126,6 +126,10 @@
 
 #define DW_REGLEN		0x400
 
+enum dw_dmac_flags {
+	DW_DMA_IS_CYCLIC = 0,
+};
+
 struct dw_dma_chan {
 	struct dma_chan		chan;
 	void __iomem		*ch_regs;
@@ -134,10 +138,12 @@
 	spinlock_t		lock;
 
 	/* these other elements are all protected by lock */
+	unsigned long		flags;
 	dma_cookie_t		completed;
 	struct list_head	active_list;
 	struct list_head	queue;
 	struct list_head	free_list;
+	struct dw_cyclic_desc	*cdesc;
 
 	unsigned int		descs_allocated;
 };
@@ -157,7 +163,6 @@
 {
 	return container_of(chan, struct dw_dma_chan, chan);
 }
-
 
 struct dw_dma {
 	struct dma_device	dma;
include/linux/dw_dmac.h
@@ -74,5 +74,24 @@
 #define DWC_CFGL_HS_DST_POL	(1 << 18)	/* dst handshake active low */
 #define DWC_CFGL_HS_SRC_POL	(1 << 19)	/* src handshake active low */
 
+/* DMA API extensions */
+struct dw_cyclic_desc {
+	struct dw_desc	**desc;
+	unsigned long	periods;
+	void		(*period_callback)(void *param);
+	void		*period_callback_param;
+};
+
+struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
+		dma_addr_t buf_addr, size_t buf_len, size_t period_len,
+		enum dma_data_direction direction);
+void dw_dma_cyclic_free(struct dma_chan *chan);
+int dw_dma_cyclic_start(struct dma_chan *chan);
+void dw_dma_cyclic_stop(struct dma_chan *chan);
+
+dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan);
+
+dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan);
+
 #endif /* DW_DMAC_H */
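
The two address accessors let a client poll the current hardware position
inside the cyclic buffer. A sketch for the capture direction
(my_cur_period() and its parameters are hypothetical; for DMA_FROM_DEVICE
the destination address register walks through the buffer, so the analogous
playback helper would read dw_dma_get_src_addr() instead):

	static unsigned int my_cur_period(struct dma_chan *chan,
					  dma_addr_t buf, size_t period_len)
	{
		/* DAR points into [buf, buf + buf_len) while the
		 * transfer is running */
		dma_addr_t pos = dw_dma_get_dst_addr(chan);

		return (pos - buf) / period_len;
	}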