Commit 4d4e58de32a192fea65ab84509d17d199bd291c8

Authored by Russell King - ARM Linux
Committed by Vinod Koul
1 parent 08714f60b0

dmaengine: move last completed cookie into generic dma_chan structure

Every DMA engine implementation declares a last completed dma cookie
in their private dma channel structures.  This is pointless, and
forces driver-specific code.  Move this out into the common dma_chan
structure.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Reviewed-by: Linus Walleij <linus.walleij@linaro.org>
Acked-by: Jassi Brar <jassisinghbrar@gmail.com>
[imx-sdma.c & mxs-dma.c]
Tested-by: Shawn Guo <shawn.guo@linaro.org>
Signed-off-by: Vinod Koul <vinod.koul@linux.intel.com>

Showing 37 changed files with 83 additions and 119 deletions (side-by-side diff)

arch/arm/include/asm/hardware/iop_adma.h
... ... @@ -49,7 +49,6 @@
49 49 /**
50 50 * struct iop_adma_chan - internal representation of an ADMA device
51 51 * @pending: allows batching of hardware operations
52   - * @completed_cookie: identifier for the most recently completed operation
53 52 * @lock: serializes enqueue/dequeue operations to the slot pool
54 53 * @mmr_base: memory mapped register base
55 54 * @chain: device chain view of the descriptors
... ... @@ -62,7 +61,6 @@
62 61 */
63 62 struct iop_adma_chan {
64 63 int pending;
65   - dma_cookie_t completed_cookie;
66 64 spinlock_t lock; /* protects the descriptor slot pool */
67 65 void __iomem *mmr_base;
68 66 struct list_head chain;
drivers/dma/amba-pl08x.c
... ... @@ -971,7 +971,7 @@
971 971 u32 bytesleft = 0;
972 972  
973 973 last_used = plchan->chan.cookie;
974   - last_complete = plchan->lc;
  974 + last_complete = plchan->chan.completed_cookie;
975 975  
976 976 ret = dma_async_is_complete(cookie, last_complete, last_used);
977 977 if (ret == DMA_SUCCESS) {
... ... @@ -983,7 +983,7 @@
983 983 * This cookie not complete yet
984 984 */
985 985 last_used = plchan->chan.cookie;
986   - last_complete = plchan->lc;
  986 + last_complete = plchan->chan.completed_cookie;
987 987  
988 988 /* Get number of bytes left in the active transactions and queue */
989 989 bytesleft = pl08x_getbytes_chan(plchan);
... ... @@ -1543,7 +1543,7 @@
1543 1543  
1544 1544 if (txd) {
1545 1545 /* Update last completed */
1546   - plchan->lc = txd->tx.cookie;
  1546 + plchan->chan.completed_cookie = txd->tx.cookie;
1547 1547 }
1548 1548  
1549 1549 /* If a new descriptor is queued, set it up plchan->at is NULL here */
... ... @@ -1725,7 +1725,7 @@
1725 1725  
1726 1726 chan->chan.device = dmadev;
1727 1727 chan->chan.cookie = 0;
1728   - chan->lc = 0;
  1728 + chan->chan.completed_cookie = 0;
1729 1729  
1730 1730 spin_lock_init(&chan->lock);
1731 1731 INIT_LIST_HEAD(&chan->pend_list);
drivers/dma/at_hdmac.c
... ... @@ -269,7 +269,7 @@
269 269 dev_vdbg(chan2dev(&atchan->chan_common),
270 270 "descriptor %u complete\n", txd->cookie);
271 271  
272   - atchan->completed_cookie = txd->cookie;
  272 + atchan->chan_common.completed_cookie = txd->cookie;
273 273  
274 274 /* move children to free_list */
275 275 list_splice_init(&desc->tx_list, &atchan->free_list);
276 276  
... ... @@ -1016,14 +1016,14 @@
1016 1016  
1017 1017 spin_lock_irqsave(&atchan->lock, flags);
1018 1018  
1019   - last_complete = atchan->completed_cookie;
  1019 + last_complete = chan->completed_cookie;
1020 1020 last_used = chan->cookie;
1021 1021  
1022 1022 ret = dma_async_is_complete(cookie, last_complete, last_used);
1023 1023 if (ret != DMA_SUCCESS) {
1024 1024 atc_cleanup_descriptors(atchan);
1025 1025  
1026   - last_complete = atchan->completed_cookie;
  1026 + last_complete = chan->completed_cookie;
1027 1027 last_used = chan->cookie;
1028 1028  
1029 1029 ret = dma_async_is_complete(cookie, last_complete, last_used);
... ... @@ -1129,7 +1129,7 @@
1129 1129 spin_lock_irqsave(&atchan->lock, flags);
1130 1130 atchan->descs_allocated = i;
1131 1131 list_splice(&tmp_list, &atchan->free_list);
1132   - atchan->completed_cookie = chan->cookie = 1;
  1132 + chan->completed_cookie = chan->cookie = 1;
1133 1133 spin_unlock_irqrestore(&atchan->lock, flags);
1134 1134  
1135 1135 /* channel parameters */
... ... @@ -1329,7 +1329,7 @@
1329 1329 struct at_dma_chan *atchan = &atdma->chan[i];
1330 1330  
1331 1331 atchan->chan_common.device = &atdma->dma_common;
1332   - atchan->chan_common.cookie = atchan->completed_cookie = 1;
  1332 + atchan->chan_common.cookie = atchan->chan_common.completed_cookie = 1;
1333 1333 list_add_tail(&atchan->chan_common.device_node,
1334 1334 &atdma->dma_common.channels);
1335 1335  
drivers/dma/at_hdmac_regs.h
... ... @@ -208,7 +208,6 @@
208 208 * @save_dscr: for cyclic operations, preserve next descriptor address in
209 209 * the cyclic list on suspend/resume cycle
210 210 * @lock: serializes enqueue/dequeue operations to descriptors lists
211   - * @completed_cookie: identifier for the most recently completed operation
212 211 * @active_list: list of descriptors dmaengine is being running on
213 212 * @queue: list of descriptors ready to be submitted to engine
214 213 * @free_list: list of descriptors usable by the channel
... ... @@ -227,7 +226,6 @@
227 226 spinlock_t lock;
228 227  
229 228 /* these other elements are all protected by lock */
230   - dma_cookie_t completed_cookie;
231 229 struct list_head active_list;
232 230 struct list_head queue;
233 231 struct list_head free_list;
drivers/dma/coh901318.c
... ... @@ -59,7 +59,6 @@
59 59 struct coh901318_chan {
60 60 spinlock_t lock;
61 61 int allocated;
62   - int completed;
63 62 int id;
64 63 int stopped;
65 64  
... ... @@ -705,7 +704,7 @@
705 704 callback_param = cohd_fin->desc.callback_param;
706 705  
707 706 /* sign this job as completed on the channel */
708   - cohc->completed = cohd_fin->desc.cookie;
  707 + cohc->chan.completed_cookie = cohd_fin->desc.cookie;
709 708  
710 709 /* release the lli allocation and remove the descriptor */
711 710 coh901318_lli_free(&cohc->base->pool, &cohd_fin->lli);
... ... @@ -929,7 +928,7 @@
929 928 coh901318_config(cohc, NULL);
930 929  
931 930 cohc->allocated = 1;
932   - cohc->completed = chan->cookie = 1;
  931 + chan->completed_cookie = chan->cookie = 1;
933 932  
934 933 spin_unlock_irqrestore(&cohc->lock, flags);
935 934  
... ... @@ -1169,7 +1168,7 @@
1169 1168 dma_cookie_t last_complete;
1170 1169 int ret;
1171 1170  
1172   - last_complete = cohc->completed;
  1171 + last_complete = chan->completed_cookie;
1173 1172 last_used = chan->cookie;
1174 1173  
1175 1174 ret = dma_async_is_complete(cookie, last_complete, last_used);
drivers/dma/dw_dmac.c
... ... @@ -249,7 +249,7 @@
249 249 dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie);
250 250  
251 251 spin_lock_irqsave(&dwc->lock, flags);
252   - dwc->completed = txd->cookie;
  252 + dwc->chan.completed_cookie = txd->cookie;
253 253 if (callback_required) {
254 254 callback = txd->callback;
255 255 param = txd->callback_param;
256 256  
... ... @@ -997,14 +997,14 @@
997 997 dma_cookie_t last_complete;
998 998 int ret;
999 999  
1000   - last_complete = dwc->completed;
  1000 + last_complete = chan->completed_cookie;
1001 1001 last_used = chan->cookie;
1002 1002  
1003 1003 ret = dma_async_is_complete(cookie, last_complete, last_used);
1004 1004 if (ret != DMA_SUCCESS) {
1005 1005 dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
1006 1006  
1007   - last_complete = dwc->completed;
  1007 + last_complete = chan->completed_cookie;
1008 1008 last_used = chan->cookie;
1009 1009  
1010 1010 ret = dma_async_is_complete(cookie, last_complete, last_used);
... ... @@ -1046,7 +1046,7 @@
1046 1046 return -EIO;
1047 1047 }
1048 1048  
1049   - dwc->completed = chan->cookie = 1;
  1049 + chan->completed_cookie = chan->cookie = 1;
1050 1050  
1051 1051 /*
1052 1052 * NOTE: some controllers may have additional features that we
... ... @@ -1474,7 +1474,7 @@
1474 1474 struct dw_dma_chan *dwc = &dw->chan[i];
1475 1475  
1476 1476 dwc->chan.device = &dw->dma;
1477   - dwc->chan.cookie = dwc->completed = 1;
  1477 + dwc->chan.cookie = dwc->chan.completed_cookie = 1;
1478 1478 if (pdata->chan_allocation_order == CHAN_ALLOCATION_ASCENDING)
1479 1479 list_add_tail(&dwc->chan.device_node,
1480 1480 &dw->dma.channels);
drivers/dma/dw_dmac_regs.h
... ... @@ -158,7 +158,6 @@
158 158  
159 159 /* these other elements are all protected by lock */
160 160 unsigned long flags;
161   - dma_cookie_t completed;
162 161 struct list_head active_list;
163 162 struct list_head queue;
164 163 struct list_head free_list;
drivers/dma/ep93xx_dma.c
... ... @@ -122,7 +122,6 @@
122 122 * @lock: lock protecting the fields following
123 123 * @flags: flags for the channel
124 124 * @buffer: which buffer to use next (0/1)
125   - * @last_completed: last completed cookie value
126 125 * @active: flattened chain of descriptors currently being processed
127 126 * @queue: pending descriptors which are handled next
128 127 * @free_list: list of free descriptors which can be used
... ... @@ -157,7 +156,6 @@
157 156 #define EP93XX_DMA_IS_CYCLIC 0
158 157  
159 158 int buffer;
160   - dma_cookie_t last_completed;
161 159 struct list_head active;
162 160 struct list_head queue;
163 161 struct list_head free_list;
... ... @@ -703,7 +701,7 @@
703 701 desc = ep93xx_dma_get_active(edmac);
704 702 if (desc) {
705 703 if (desc->complete) {
706   - edmac->last_completed = desc->txd.cookie;
  704 + edmac->chan.completed_cookie = desc->txd.cookie;
707 705 list_splice_init(&edmac->active, &list);
708 706 }
709 707 callback = desc->txd.callback;
... ... @@ -861,7 +859,7 @@
861 859 goto fail_clk_disable;
862 860  
863 861 spin_lock_irq(&edmac->lock);
864   - edmac->last_completed = 1;
  862 + edmac->chan.completed_cookie = 1;
865 863 edmac->chan.cookie = 1;
866 864 ret = edmac->edma->hw_setup(edmac);
867 865 spin_unlock_irq(&edmac->lock);
... ... @@ -1254,7 +1252,7 @@
1254 1252  
1255 1253 spin_lock_irqsave(&edmac->lock, flags);
1256 1254 last_used = chan->cookie;
1257   - last_completed = edmac->last_completed;
  1255 + last_completed = chan->completed_cookie;
1258 1256 spin_unlock_irqrestore(&edmac->lock, flags);
1259 1257  
1260 1258 ret = dma_async_is_complete(cookie, last_completed, last_used);
drivers/dma/fsldma.c
... ... @@ -990,7 +990,7 @@
990 990  
991 991 spin_lock_irqsave(&chan->desc_lock, flags);
992 992  
993   - last_complete = chan->completed_cookie;
  993 + last_complete = dchan->completed_cookie;
994 994 last_used = dchan->cookie;
995 995  
996 996 spin_unlock_irqrestore(&chan->desc_lock, flags);
... ... @@ -1088,7 +1088,7 @@
1088 1088 desc = to_fsl_desc(chan->ld_running.prev);
1089 1089 cookie = desc->async_tx.cookie;
1090 1090  
1091   - chan->completed_cookie = cookie;
  1091 + chan->common.completed_cookie = cookie;
1092 1092 chan_dbg(chan, "completed_cookie=%d\n", cookie);
1093 1093 }
1094 1094  
drivers/dma/fsldma.h
... ... @@ -137,7 +137,6 @@
137 137 struct fsldma_chan {
138 138 char name[8]; /* Channel name */
139 139 struct fsldma_chan_regs __iomem *regs;
140   - dma_cookie_t completed_cookie; /* The maximum cookie completed */
141 140 spinlock_t desc_lock; /* Descriptor operation lock */
142 141 struct list_head ld_pending; /* Link descriptors queue */
143 142 struct list_head ld_running; /* Link descriptors queue */
drivers/dma/imx-dma.c
... ... @@ -41,7 +41,6 @@
41 41 struct dma_chan chan;
42 42 spinlock_t lock;
43 43 struct dma_async_tx_descriptor desc;
44   - dma_cookie_t last_completed;
45 44 enum dma_status status;
46 45 int dma_request;
47 46 struct scatterlist *sg_list;
... ... @@ -65,7 +64,7 @@
65 64 {
66 65 if (imxdmac->desc.callback)
67 66 imxdmac->desc.callback(imxdmac->desc.callback_param);
68   - imxdmac->last_completed = imxdmac->desc.cookie;
  67 + imxdmac->chan.completed_cookie = imxdmac->desc.cookie;
69 68 }
70 69  
71 70 static void imxdma_irq_handler(int channel, void *data)
... ... @@ -158,8 +157,8 @@
158 157  
159 158 last_used = chan->cookie;
160 159  
161   - ret = dma_async_is_complete(cookie, imxdmac->last_completed, last_used);
162   - dma_set_tx_state(txstate, imxdmac->last_completed, last_used, 0);
  160 + ret = dma_async_is_complete(cookie, chan->completed_cookie, last_used);
  161 + dma_set_tx_state(txstate, chan->completed_cookie, last_used, 0);
163 162  
164 163 return ret;
165 164 }
drivers/dma/imx-sdma.c
... ... @@ -267,7 +267,6 @@
267 267 struct dma_chan chan;
268 268 spinlock_t lock;
269 269 struct dma_async_tx_descriptor desc;
270   - dma_cookie_t last_completed;
271 270 enum dma_status status;
272 271 unsigned int chn_count;
273 272 unsigned int chn_real_count;
... ... @@ -529,7 +528,7 @@
529 528 else
530 529 sdmac->status = DMA_SUCCESS;
531 530  
532   - sdmac->last_completed = sdmac->desc.cookie;
  531 + sdmac->chan.completed_cookie = sdmac->desc.cookie;
533 532 if (sdmac->desc.callback)
534 533 sdmac->desc.callback(sdmac->desc.callback_param);
535 534 }
... ... @@ -1127,7 +1126,7 @@
1127 1126  
1128 1127 last_used = chan->cookie;
1129 1128  
1130   - dma_set_tx_state(txstate, sdmac->last_completed, last_used,
  1129 + dma_set_tx_state(txstate, chan->completed_cookie, last_used,
1131 1130 sdmac->chn_count - sdmac->chn_real_count);
1132 1131  
1133 1132 return sdmac->status;
drivers/dma/intel_mid_dma.c
... ... @@ -288,7 +288,7 @@
288 288 struct intel_mid_dma_lli *llitem;
289 289 void *param_txd = NULL;
290 290  
291   - midc->completed = txd->cookie;
  291 + midc->chan.completed_cookie = txd->cookie;
292 292 callback_txd = txd->callback;
293 293 param_txd = txd->callback_param;
294 294  
295 295  
... ... @@ -482,12 +482,11 @@
482 482 dma_cookie_t cookie,
483 483 struct dma_tx_state *txstate)
484 484 {
485   - struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan);
486 485 dma_cookie_t last_used;
487 486 dma_cookie_t last_complete;
488 487 int ret;
489 488  
490   - last_complete = midc->completed;
  489 + last_complete = chan->completed_cookie;
491 490 last_used = chan->cookie;
492 491  
493 492 ret = dma_async_is_complete(cookie, last_complete, last_used);
... ... @@ -496,7 +495,7 @@
496 495 midc_scan_descriptors(to_middma_device(chan->device), midc);
497 496 spin_unlock_bh(&midc->lock);
498 497  
499   - last_complete = midc->completed;
  498 + last_complete = chan->completed_cookie;
500 499 last_used = chan->cookie;
501 500  
502 501 ret = dma_async_is_complete(cookie, last_complete, last_used);
... ... @@ -886,7 +885,7 @@
886 885 pm_runtime_put(&mid->pdev->dev);
887 886 return -EIO;
888 887 }
889   - midc->completed = chan->cookie = 1;
  888 + chan->completed_cookie = chan->cookie = 1;
890 889  
891 890 spin_lock_bh(&midc->lock);
892 891 while (midc->descs_allocated < DESCS_PER_CHANNEL) {
drivers/dma/intel_mid_dma_regs.h
... ... @@ -165,7 +165,6 @@
165 165 * @dma_base: MMIO register space DMA engine base pointer
166 166 * @ch_id: DMA channel id
167 167 * @lock: channel spinlock
168   - * @completed: DMA cookie
169 168 * @active_list: current active descriptors
170 169 * @queue: current queued up descriptors
171 170 * @free_list: current free descriptors
... ... @@ -183,7 +182,6 @@
183 182 void __iomem *dma_base;
184 183 int ch_id;
185 184 spinlock_t lock;
186   - dma_cookie_t completed;
187 185 struct list_head active_list;
188 186 struct list_head queue;
189 187 struct list_head free_list;
drivers/dma/ioat/dma.c
... ... @@ -603,7 +603,7 @@
603 603 */
604 604 dump_desc_dbg(ioat, desc);
605 605 if (tx->cookie) {
606   - chan->completed_cookie = tx->cookie;
  606 + chan->common.completed_cookie = tx->cookie;
607 607 tx->cookie = 0;
608 608 ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw);
609 609 ioat->active -= desc->hw->tx_cnt;
drivers/dma/ioat/dma.h
... ... @@ -90,7 +90,6 @@
90 90 void __iomem *reg_base;
91 91 unsigned long last_completion;
92 92 spinlock_t cleanup_lock;
93   - dma_cookie_t completed_cookie;
94 93 unsigned long state;
95 94 #define IOAT_COMPLETION_PENDING 0
96 95 #define IOAT_COMPLETION_ACK 1
97 96  
... ... @@ -153,12 +152,11 @@
153 152 ioat_tx_status(struct dma_chan *c, dma_cookie_t cookie,
154 153 struct dma_tx_state *txstate)
155 154 {
156   - struct ioat_chan_common *chan = to_chan_common(c);
157 155 dma_cookie_t last_used;
158 156 dma_cookie_t last_complete;
159 157  
160 158 last_used = c->cookie;
161   - last_complete = chan->completed_cookie;
  159 + last_complete = c->completed_cookie;
162 160  
163 161 dma_set_tx_state(txstate, last_complete, last_used, 0);
164 162  
drivers/dma/ioat/dma_v2.c
... ... @@ -147,7 +147,7 @@
147 147 dump_desc_dbg(ioat, desc);
148 148 if (tx->cookie) {
149 149 ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw);
150   - chan->completed_cookie = tx->cookie;
  150 + chan->common.completed_cookie = tx->cookie;
151 151 tx->cookie = 0;
152 152 if (tx->callback) {
153 153 tx->callback(tx->callback_param);
drivers/dma/ioat/dma_v3.c
... ... @@ -277,7 +277,7 @@
277 277 dump_desc_dbg(ioat, desc);
278 278 tx = &desc->txd;
279 279 if (tx->cookie) {
280   - chan->completed_cookie = tx->cookie;
  280 + chan->common.completed_cookie = tx->cookie;
281 281 ioat3_dma_unmap(ioat, desc, idx + i);
282 282 tx->cookie = 0;
283 283 if (tx->callback) {
drivers/dma/iop-adma.c
... ... @@ -317,7 +317,7 @@
317 317 }
318 318  
319 319 if (cookie > 0) {
320   - iop_chan->completed_cookie = cookie;
  320 + iop_chan->common.completed_cookie = cookie;
321 321 pr_debug("\tcompleted cookie %d\n", cookie);
322 322 }
323 323 }
... ... @@ -909,7 +909,7 @@
909 909 enum dma_status ret;
910 910  
911 911 last_used = chan->cookie;
912   - last_complete = iop_chan->completed_cookie;
  912 + last_complete = chan->completed_cookie;
913 913 dma_set_tx_state(txstate, last_complete, last_used, 0);
914 914 ret = dma_async_is_complete(cookie, last_complete, last_used);
915 915 if (ret == DMA_SUCCESS)
... ... @@ -918,7 +918,7 @@
918 918 iop_adma_slot_cleanup(iop_chan);
919 919  
920 920 last_used = chan->cookie;
921   - last_complete = iop_chan->completed_cookie;
  921 + last_complete = chan->completed_cookie;
922 922 dma_set_tx_state(txstate, last_complete, last_used, 0);
923 923  
924 924 return dma_async_is_complete(cookie, last_complete, last_used);
... ... @@ -1650,7 +1650,7 @@
1650 1650 /* initialize the completed cookie to be less than
1651 1651 * the most recently used cookie
1652 1652 */
1653   - iop_chan->completed_cookie = cookie - 1;
  1653 + iop_chan->common.completed_cookie = cookie - 1;
1654 1654 iop_chan->common.cookie = sw_desc->async_tx.cookie = cookie;
1655 1655  
1656 1656 /* channel should not be busy */
... ... @@ -1707,7 +1707,7 @@
1707 1707 /* initialize the completed cookie to be less than
1708 1708 * the most recently used cookie
1709 1709 */
1710   - iop_chan->completed_cookie = cookie - 1;
  1710 + iop_chan->common.completed_cookie = cookie - 1;
1711 1711 iop_chan->common.cookie = sw_desc->async_tx.cookie = cookie;
1712 1712  
1713 1713 /* channel should not be busy */
drivers/dma/ipu/ipu_idmac.c
... ... @@ -1295,7 +1295,7 @@
1295 1295 /* Flip the active buffer - even if update above failed */
1296 1296 ichan->active_buffer = !ichan->active_buffer;
1297 1297 if (done)
1298   - ichan->completed = desc->txd.cookie;
  1298 + ichan->dma_chan.completed_cookie = desc->txd.cookie;
1299 1299  
1300 1300 callback = desc->txd.callback;
1301 1301 callback_param = desc->txd.callback_param;
... ... @@ -1511,7 +1511,7 @@
1511 1511 WARN_ON(ichan->status != IPU_CHANNEL_FREE);
1512 1512  
1513 1513 chan->cookie = 1;
1514   - ichan->completed = -ENXIO;
  1514 + chan->completed_cookie = -ENXIO;
1515 1515  
1516 1516 ret = ipu_irq_map(chan->chan_id);
1517 1517 if (ret < 0)
... ... @@ -1600,9 +1600,7 @@
1600 1600 static enum dma_status idmac_tx_status(struct dma_chan *chan,
1601 1601 dma_cookie_t cookie, struct dma_tx_state *txstate)
1602 1602 {
1603   - struct idmac_channel *ichan = to_idmac_chan(chan);
1604   -
1605   - dma_set_tx_state(txstate, ichan->completed, chan->cookie, 0);
  1603 + dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie, 0);
1606 1604 if (cookie != chan->cookie)
1607 1605 return DMA_ERROR;
1608 1606 return DMA_SUCCESS;
1609 1607  
... ... @@ -1638,11 +1636,11 @@
1638 1636  
1639 1637 ichan->status = IPU_CHANNEL_FREE;
1640 1638 ichan->sec_chan_en = false;
1641   - ichan->completed = -ENXIO;
1642 1639 snprintf(ichan->eof_name, sizeof(ichan->eof_name), "IDMAC EOF %d", i);
1643 1640  
1644 1641 dma_chan->device = &idmac->dma;
1645 1642 dma_chan->cookie = 1;
  1643 + dma_chan->completed_cookie = -ENXIO;
1646 1644 dma_chan->chan_id = i;
1647 1645 list_add_tail(&dma_chan->device_node, &dma->channels);
1648 1646 }
drivers/dma/mpc512x_dma.c
... ... @@ -188,7 +188,6 @@
188 188 struct list_head completed;
189 189 struct mpc_dma_tcd *tcd;
190 190 dma_addr_t tcd_paddr;
191   - dma_cookie_t completed_cookie;
192 191  
193 192 /* Lock for this structure */
194 193 spinlock_t lock;
... ... @@ -365,7 +364,7 @@
365 364 /* Free descriptors */
366 365 spin_lock_irqsave(&mchan->lock, flags);
367 366 list_splice_tail_init(&list, &mchan->free);
368   - mchan->completed_cookie = last_cookie;
  367 + mchan->chan.completed_cookie = last_cookie;
369 368 spin_unlock_irqrestore(&mchan->lock, flags);
370 369 }
371 370 }
... ... @@ -568,7 +567,7 @@
568 567  
569 568 spin_lock_irqsave(&mchan->lock, flags);
570 569 last_used = mchan->chan.cookie;
571   - last_complete = mchan->completed_cookie;
  570 + last_complete = mchan->chan.completed_cookie;
572 571 spin_unlock_irqrestore(&mchan->lock, flags);
573 572  
574 573 dma_set_tx_state(txstate, last_complete, last_used, 0);
... ... @@ -742,7 +741,7 @@
742 741  
743 742 mchan->chan.device = dma;
744 743 mchan->chan.cookie = 1;
745   - mchan->completed_cookie = mchan->chan.cookie;
  744 + mchan->chan.completed_cookie = mchan->chan.cookie;
746 745  
747 746 INIT_LIST_HEAD(&mchan->free);
748 747 INIT_LIST_HEAD(&mchan->prepared);
drivers/dma/mv_xor.c
... ... @@ -435,7 +435,7 @@
435 435 }
436 436  
437 437 if (cookie > 0)
438   - mv_chan->completed_cookie = cookie;
  438 + mv_chan->common.completed_cookie = cookie;
439 439 }
440 440  
441 441 static void
... ... @@ -825,7 +825,7 @@
825 825 enum dma_status ret;
826 826  
827 827 last_used = chan->cookie;
828   - last_complete = mv_chan->completed_cookie;
  828 + last_complete = chan->completed_cookie;
829 829 dma_set_tx_state(txstate, last_complete, last_used, 0);
830 830  
831 831 ret = dma_async_is_complete(cookie, last_complete, last_used);
... ... @@ -836,7 +836,7 @@
836 836 mv_xor_slot_cleanup(mv_chan);
837 837  
838 838 last_used = chan->cookie;
839   - last_complete = mv_chan->completed_cookie;
  839 + last_complete = chan->completed_cookie;
840 840  
841 841 dma_set_tx_state(txstate, last_complete, last_used, 0);
842 842 return dma_async_is_complete(cookie, last_complete, last_used);
drivers/dma/mv_xor.h
... ... @@ -78,7 +78,6 @@
78 78 /**
79 79 * struct mv_xor_chan - internal representation of a XOR channel
80 80 * @pending: allows batching of hardware operations
81   - * @completed_cookie: identifier for the most recently completed operation
82 81 * @lock: serializes enqueue/dequeue operations to the descriptors pool
83 82 * @mmr_base: memory mapped register base
84 83 * @idx: the index of the xor channel
... ... @@ -93,7 +92,6 @@
93 92 */
94 93 struct mv_xor_chan {
95 94 int pending;
96   - dma_cookie_t completed_cookie;
97 95 spinlock_t lock; /* protects the descriptor slot pool */
98 96 void __iomem *mmr_base;
99 97 unsigned int idx;
drivers/dma/mxs-dma.c
... ... @@ -111,7 +111,6 @@
111 111 struct mxs_dma_ccw *ccw;
112 112 dma_addr_t ccw_phys;
113 113 int desc_count;
114   - dma_cookie_t last_completed;
115 114 enum dma_status status;
116 115 unsigned int flags;
117 116 #define MXS_DMA_SG_LOOP (1 << 0)
... ... @@ -274,7 +273,7 @@
274 273 stat1 &= ~(1 << channel);
275 274  
276 275 if (mxs_chan->status == DMA_SUCCESS)
277   - mxs_chan->last_completed = mxs_chan->desc.cookie;
  276 + mxs_chan->chan.completed_cookie = mxs_chan->desc.cookie;
278 277  
279 278 /* schedule tasklet on this channel */
280 279 tasklet_schedule(&mxs_chan->tasklet);
... ... @@ -538,7 +537,7 @@
538 537 dma_cookie_t last_used;
539 538  
540 539 last_used = chan->cookie;
541   - dma_set_tx_state(txstate, mxs_chan->last_completed, last_used, 0);
  540 + dma_set_tx_state(txstate, chan->completed_cookie, last_used, 0);
542 541  
543 542 return mxs_chan->status;
544 543 }
drivers/dma/pch_dma.c
... ... @@ -105,7 +105,6 @@
105 105  
106 106 spinlock_t lock;
107 107  
108   - dma_cookie_t completed_cookie;
109 108 struct list_head active_list;
110 109 struct list_head queue;
111 110 struct list_head free_list;
... ... @@ -544,7 +543,7 @@
544 543 spin_lock_irq(&pd_chan->lock);
545 544 list_splice(&tmp_list, &pd_chan->free_list);
546 545 pd_chan->descs_allocated = i;
547   - pd_chan->completed_cookie = chan->cookie = 1;
  546 + chan->completed_cookie = chan->cookie = 1;
548 547 spin_unlock_irq(&pd_chan->lock);
549 548  
550 549 pdc_enable_irq(chan, 1);
... ... @@ -583,7 +582,7 @@
583 582 int ret;
584 583  
585 584 spin_lock_irq(&pd_chan->lock);
586   - last_completed = pd_chan->completed_cookie;
  585 + last_completed = chan->completed_cookie;
587 586 last_used = chan->cookie;
588 587 spin_unlock_irq(&pd_chan->lock);
589 588  
... ... @@ -51,9 +51,6 @@
51 51 /* DMA-Engine Channel */
52 52 struct dma_chan chan;
53 53  
54   - /* Last completed cookie */
55   - dma_cookie_t completed;
56   -
57 54 /* List of to be xfered descriptors */
58 55 struct list_head work_list;
59 56  
... ... @@ -234,7 +231,7 @@
234 231 /* Pick up ripe tomatoes */
235 232 list_for_each_entry_safe(desc, _dt, &pch->work_list, node)
236 233 if (desc->status == DONE) {
237   - pch->completed = desc->txd.cookie;
  234 + pch->chan.completed_cookie = desc->txd.cookie;
238 235 list_move_tail(&desc->node, &list);
239 236 }
240 237  
... ... @@ -305,7 +302,7 @@
305 302  
306 303 spin_lock_irqsave(&pch->lock, flags);
307 304  
308   - pch->completed = chan->cookie = 1;
  305 + chan->completed_cookie = chan->cookie = 1;
309 306 pch->cyclic = false;
310 307  
311 308 pch->pl330_chid = pl330_request_channel(&pdmac->pif);
... ... @@ -400,7 +397,7 @@
400 397 dma_cookie_t last_done, last_used;
401 398 int ret;
402 399  
403   - last_done = pch->completed;
  400 + last_done = chan->completed_cookie;
404 401 last_used = chan->cookie;
405 402  
406 403 ret = dma_async_is_complete(cookie, last_done, last_used);
drivers/dma/ppc4xx/adma.c
... ... @@ -1930,7 +1930,7 @@
1930 1930 if (end_of_chain && slot_cnt) {
1931 1931 /* Should wait for ZeroSum completion */
1932 1932 if (cookie > 0)
1933   - chan->completed_cookie = cookie;
  1933 + chan->common.completed_cookie = cookie;
1934 1934 return;
1935 1935 }
1936 1936  
... ... @@ -1960,7 +1960,7 @@
1960 1960 BUG_ON(!seen_current);
1961 1961  
1962 1962 if (cookie > 0) {
1963   - chan->completed_cookie = cookie;
  1963 + chan->common.completed_cookie = cookie;
1964 1964 pr_debug("\tcompleted cookie %d\n", cookie);
1965 1965 }
1966 1966  
... ... @@ -3950,7 +3950,7 @@
3950 3950  
3951 3951 ppc440spe_chan = to_ppc440spe_adma_chan(chan);
3952 3952 last_used = chan->cookie;
3953   - last_complete = ppc440spe_chan->completed_cookie;
  3953 + last_complete = chan->completed_cookie;
3954 3954  
3955 3955 dma_set_tx_state(txstate, last_complete, last_used, 0);
3956 3956  
... ... @@ -3961,7 +3961,7 @@
3961 3961 ppc440spe_adma_slot_cleanup(ppc440spe_chan);
3962 3962  
3963 3963 last_used = chan->cookie;
3964   - last_complete = ppc440spe_chan->completed_cookie;
  3964 + last_complete = chan->completed_cookie;
3965 3965  
3966 3966 dma_set_tx_state(txstate, last_complete, last_used, 0);
3967 3967  
... ... @@ -4058,7 +4058,7 @@
4058 4058 /* initialize the completed cookie to be less than
4059 4059 * the most recently used cookie
4060 4060 */
4061   - chan->completed_cookie = cookie - 1;
  4061 + chan->common.completed_cookie = cookie - 1;
4062 4062 chan->common.cookie = sw_desc->async_tx.cookie = cookie;
4063 4063  
4064 4064 /* channel should not be busy */
drivers/dma/ppc4xx/adma.h
... ... @@ -81,7 +81,6 @@
81 81 * @common: common dmaengine channel object members
82 82 * @all_slots: complete domain of slots usable by the channel
83 83 * @pending: allows batching of hardware operations
84   - * @completed_cookie: identifier for the most recently completed operation
85 84 * @slots_allocated: records the actual size of the descriptor slot pool
86 85 * @hw_chain_inited: h/w descriptor chain initialization flag
87 86 * @irq_tasklet: bottom half where ppc440spe_adma_slot_cleanup runs
... ... @@ -99,7 +98,6 @@
99 98 struct list_head all_slots;
100 99 struct ppc440spe_adma_desc_slot *last_used;
101 100 int pending;
102   - dma_cookie_t completed_cookie;
103 101 int slots_allocated;
104 102 int hw_chain_inited;
105 103 struct tasklet_struct irq_tasklet;
... ... @@ -764,12 +764,12 @@
764 764 cookie = tx->cookie;
765 765  
766 766 if (desc->mark == DESC_COMPLETED && desc->chunks == 1) {
767   - if (sh_chan->completed_cookie != desc->cookie - 1)
  767 + if (sh_chan->common.completed_cookie != desc->cookie - 1)
768 768 dev_dbg(sh_chan->dev,
769 769 "Completing cookie %d, expected %d\n",
770 770 desc->cookie,
771   - sh_chan->completed_cookie + 1);
772   - sh_chan->completed_cookie = desc->cookie;
  771 + sh_chan->common.completed_cookie + 1);
  772 + sh_chan->common.completed_cookie = desc->cookie;
773 773 }
774 774  
775 775 /* Call callback on the last chunk */
... ... @@ -823,7 +823,7 @@
823 823 * Terminating and the loop completed normally: forgive
824 824 * uncompleted cookies
825 825 */
826   - sh_chan->completed_cookie = sh_chan->common.cookie;
  826 + sh_chan->common.completed_cookie = sh_chan->common.cookie;
827 827  
828 828 spin_unlock_irqrestore(&sh_chan->desc_lock, flags);
829 829  
... ... @@ -891,7 +891,7 @@
891 891 sh_dmae_chan_ld_cleanup(sh_chan, false);
892 892  
893 893 /* First read completed cookie to avoid a skew */
894   - last_complete = sh_chan->completed_cookie;
  894 + last_complete = chan->completed_cookie;
895 895 rmb();
896 896 last_used = chan->cookie;
897 897 BUG_ON(last_complete < 0);
... ... @@ -30,7 +30,6 @@
30 30 };
31 31  
32 32 struct sh_dmae_chan {
33   - dma_cookie_t completed_cookie; /* The maximum cookie completed */
34 33 spinlock_t desc_lock; /* Descriptor operation lock */
35 34 struct list_head ld_queue; /* Link descriptors queue */
36 35 struct list_head ld_free; /* Link descriptors free */
drivers/dma/sirf-dma.c
... ... @@ -59,7 +59,6 @@
59 59 struct list_head queued;
60 60 struct list_head active;
61 61 struct list_head completed;
62   - dma_cookie_t completed_cookie;
63 62 unsigned long happened_cyclic;
64 63 unsigned long completed_cyclic;
65 64  
... ... @@ -208,7 +207,7 @@
208 207 /* Free descriptors */
209 208 spin_lock_irqsave(&schan->lock, flags);
210 209 list_splice_tail_init(&list, &schan->free);
211   - schan->completed_cookie = last_cookie;
  210 + schan->chan.completed_cookie = last_cookie;
212 211 spin_unlock_irqrestore(&schan->lock, flags);
213 212 } else {
214 213 /* for cyclic channel, desc is always in active list */
... ... @@ -419,7 +418,7 @@
419 418  
420 419 spin_lock_irqsave(&schan->lock, flags);
421 420 last_used = schan->chan.cookie;
422   - last_complete = schan->completed_cookie;
  421 + last_complete = schan->chan.completed_cookie;
423 422 spin_unlock_irqrestore(&schan->lock, flags);
424 423  
425 424 dma_set_tx_state(txstate, last_complete, last_used, 0);
... ... @@ -636,7 +635,7 @@
636 635  
637 636 schan->chan.device = dma;
638 637 schan->chan.cookie = 1;
639   - schan->completed_cookie = schan->chan.cookie;
  638 + schan->chan.completed_cookie = schan->chan.cookie;
640 639  
641 640 INIT_LIST_HEAD(&schan->free);
642 641 INIT_LIST_HEAD(&schan->prepared);
drivers/dma/ste_dma40.c
... ... @@ -220,8 +220,6 @@
220 220 *
221 221 * @lock: A spinlock to protect this struct.
222 222 * @log_num: The logical number, if any of this channel.
223   - * @completed: Starts with 1, after first interrupt it is set to dma engine's
224   - * current cookie.
225 223 * @pending_tx: The number of pending transfers. Used between interrupt handler
226 224 * and tasklet.
227 225 * @busy: Set to true when transfer is ongoing on this channel.
... ... @@ -250,8 +248,6 @@
250 248 struct d40_chan {
251 249 spinlock_t lock;
252 250 int log_num;
253   - /* ID of the most recent completed transfer */
254   - int completed;
255 251 int pending_tx;
256 252 bool busy;
257 253 struct d40_phy_res *phy_chan;
... ... @@ -1357,7 +1353,7 @@
1357 1353 goto err;
1358 1354  
1359 1355 if (!d40d->cyclic)
1360   - d40c->completed = d40d->txd.cookie;
  1356 + d40c->chan.completed_cookie = d40d->txd.cookie;
1361 1357  
1362 1358 /*
1363 1359 * If terminating a channel pending_tx is set to zero.
... ... @@ -2182,7 +2178,7 @@
2182 2178 bool is_free_phy;
2183 2179 spin_lock_irqsave(&d40c->lock, flags);
2184 2180  
2185   - d40c->completed = chan->cookie = 1;
  2181 + chan->completed_cookie = chan->cookie = 1;
2186 2182  
2187 2183 /* If no dma configuration is set use default configuration (memcpy) */
2188 2184 if (!d40c->configured) {
... ... @@ -2351,7 +2347,7 @@
2351 2347 return -EINVAL;
2352 2348 }
2353 2349  
2354   - last_complete = d40c->completed;
  2350 + last_complete = chan->completed_cookie;
2355 2351 last_used = chan->cookie;
2356 2352  
2357 2353 if (d40_is_paused(d40c))
drivers/dma/timb_dma.c
... ... @@ -84,7 +84,6 @@
84 84 especially the lists and descriptors,
85 85 from races between the tasklet and calls
86 86 from above */
87   - dma_cookie_t last_completed_cookie;
88 87 bool ongoing;
89 88 struct list_head active_list;
90 89 struct list_head queue;
... ... @@ -284,7 +283,7 @@
284 283 else
285 284 iowrite32(0, td_chan->membase + TIMBDMA_OFFS_TX_DLAR);
286 285 */
287   - td_chan->last_completed_cookie = txd->cookie;
  286 + td_chan->chan.completed_cookie = txd->cookie;
288 287 td_chan->ongoing = false;
289 288  
290 289 callback = txd->callback;
... ... @@ -481,7 +480,7 @@
481 480 }
482 481  
483 482 spin_lock_bh(&td_chan->lock);
484   - td_chan->last_completed_cookie = 1;
  483 + chan->completed_cookie = 1;
485 484 chan->cookie = 1;
486 485 spin_unlock_bh(&td_chan->lock);
487 486  
... ... @@ -523,7 +522,7 @@
523 522  
524 523 dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);
525 524  
526   - last_complete = td_chan->last_completed_cookie;
  525 + last_complete = chan->completed_cookie;
527 526 last_used = chan->cookie;
528 527  
529 528 ret = dma_async_is_complete(cookie, last_complete, last_used);
drivers/dma/txx9dmac.c
... ... @@ -424,7 +424,7 @@
424 424 dev_vdbg(chan2dev(&dc->chan), "descriptor %u %p complete\n",
425 425 txd->cookie, desc);
426 426  
427   - dc->completed = txd->cookie;
  427 + dc->chan.completed_cookie = txd->cookie;
428 428 callback = txd->callback;
429 429 param = txd->callback_param;
430 430  
... ... @@ -976,7 +976,7 @@
976 976 dma_cookie_t last_complete;
977 977 int ret;
978 978  
979   - last_complete = dc->completed;
  979 + last_complete = chan->completed_cookie;
980 980 last_used = chan->cookie;
981 981  
982 982 ret = dma_async_is_complete(cookie, last_complete, last_used);
... ... @@ -985,7 +985,7 @@
985 985 txx9dmac_scan_descriptors(dc);
986 986 spin_unlock_bh(&dc->lock);
987 987  
988   - last_complete = dc->completed;
  988 + last_complete = chan->completed_cookie;
989 989 last_used = chan->cookie;
990 990  
991 991 ret = dma_async_is_complete(cookie, last_complete, last_used);
... ... @@ -1057,7 +1057,7 @@
1057 1057 return -EIO;
1058 1058 }
1059 1059  
1060   - dc->completed = chan->cookie = 1;
  1060 + chan->completed_cookie = chan->cookie = 1;
1061 1061  
1062 1062 dc->ccr = TXX9_DMA_CCR_IMMCHN | TXX9_DMA_CCR_INTENE | CCR_LE;
1063 1063 txx9dmac_chan_set_SMPCHN(dc);
... ... @@ -1186,7 +1186,7 @@
1186 1186 dc->ddev->chan[ch] = dc;
1187 1187 dc->chan.device = &dc->dma;
1188 1188 list_add_tail(&dc->chan.device_node, &dc->chan.device->channels);
1189   - dc->chan.cookie = dc->completed = 1;
  1189 + dc->chan.cookie = dc->chan.completed_cookie = 1;
1190 1190  
1191 1191 if (is_dmac64(dc))
1192 1192 dc->ch_regs = &__txx9dmac_regs(dc->ddev)->CHAN[ch];
drivers/dma/txx9dmac.h
... ... @@ -172,7 +172,6 @@
172 172 spinlock_t lock;
173 173  
174 174 /* these other elements are all protected by lock */
175   - dma_cookie_t completed;
176 175 struct list_head active_list;
177 176 struct list_head queue;
178 177 struct list_head free_list;
include/linux/amba/pl08x.h
... ... @@ -172,7 +172,6 @@
172 172 * @runtime_addr: address for RX/TX according to the runtime config
173 173 * @runtime_direction: current direction of this channel according to
174 174 * runtime config
175   - * @lc: last completed transaction on this channel
176 175 * @pend_list: queued transactions pending on this channel
177 176 * @at: active transaction on this channel
178 177 * @lock: a lock for this channel data
... ... @@ -197,7 +196,6 @@
197 196 u32 src_cctl;
198 197 u32 dst_cctl;
199 198 enum dma_transfer_direction runtime_direction;
200   - dma_cookie_t lc;
201 199 struct list_head pend_list;
202 200 struct pl08x_txd *at;
203 201 spinlock_t lock;
include/linux/dmaengine.h
... ... @@ -258,6 +258,7 @@
258 258 * struct dma_chan - devices supply DMA channels, clients use them
259 259 * @device: ptr to the dma device who supplies this channel, always !%NULL
260 260 * @cookie: last cookie value returned to client
  261 + * @completed_cookie: last completed cookie for this channel
261 262 * @chan_id: channel ID for sysfs
262 263 * @dev: class device for sysfs
263 264 * @device_node: used to add this to the device chan list
... ... @@ -269,6 +270,7 @@
269 270 struct dma_chan {
270 271 struct dma_device *device;
271 272 dma_cookie_t cookie;
  273 + dma_cookie_t completed_cookie;
272 274  
273 275 /* sysfs */
274 276 int chan_id;