Commit 884485e1f12dcd39390f042e772cdbefc9ebb750

Authored by Russell King - ARM Linux
Committed by Vinod Koul
1 parent d2ebfb335b

dmaengine: consolidate assignment of DMA cookies

Everyone deals with assigning DMA cookies in the same way (it's part of
the API, so they should), so let's consolidate the common code into a
helper function to avoid this duplication.
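
Concretely, the duplicated pattern removed from each driver's tx_submit()
path had this representative shape (variable names varied per driver; see
the hunks below):

	cookie = chan->cookie + 1;
	if (cookie < 0)
		cookie = 1;
	chan->cookie = cookie;
	tx->cookie = cookie;

With the helper in place, each of those sites collapses to:

	cookie = dma_cookie_assign(tx);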

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Reviewed-by: Linus Walleij <linus.walleij@linaro.org>
Acked-by: Jassi Brar <jassisinghbrar@gmail.com>
[imx-sdma.c & mxs-dma.c]
Tested-by: Shawn Guo <shawn.guo@linaro.org>
Signed-off-by: Vinod Koul <vinod.koul@linux.intel.com>

Showing 25 changed files with 52 additions and 272 deletions

drivers/dma/amba-pl08x.c
... ... @@ -921,14 +921,11 @@
921 921 struct pl08x_dma_chan *plchan = to_pl08x_chan(tx->chan);
922 922 struct pl08x_txd *txd = to_pl08x_txd(tx);
923 923 unsigned long flags;
  924 + dma_cookie_t cookie;
924 925  
925 926 spin_lock_irqsave(&plchan->lock, flags);
  927 + cookie = dma_cookie_assign(tx);
926 928  
927   - plchan->chan.cookie += 1;
928   - if (plchan->chan.cookie < 0)
929   - plchan->chan.cookie = 1;
930   - tx->cookie = plchan->chan.cookie;
931   -
932 929 /* Put this onto the pending list */
933 930 list_add_tail(&txd->node, &plchan->pend_list);
934 931  
... ... @@ -947,7 +944,7 @@
947 944  
948 945 spin_unlock_irqrestore(&plchan->lock, flags);
949 946  
950   - return tx->cookie;
  947 + return cookie;
951 948 }
952 949  
953 950 static struct dma_async_tx_descriptor *pl08x_prep_dma_interrupt(
drivers/dma/at_hdmac.c
... ... @@ -193,27 +193,6 @@
193 193 }
194 194  
195 195 /**
196   - * atc_assign_cookie - compute and assign new cookie
197   - * @atchan: channel we work on
198   - * @desc: descriptor to assign cookie for
199   - *
200   - * Called with atchan->lock held and bh disabled
201   - */
202   -static dma_cookie_t
203   -atc_assign_cookie(struct at_dma_chan *atchan, struct at_desc *desc)
204   -{
205   - dma_cookie_t cookie = atchan->chan_common.cookie;
206   -
207   - if (++cookie < 0)
208   - cookie = 1;
209   -
210   - atchan->chan_common.cookie = cookie;
211   - desc->txd.cookie = cookie;
212   -
213   - return cookie;
214   -}
215   -
216   -/**
217 196 * atc_dostart - starts the DMA engine for real
218 197 * @atchan: the channel we want to start
219 198 * @first: first descriptor in the list we want to begin with
... ... @@ -548,7 +527,7 @@
548 527 unsigned long flags;
549 528  
550 529 spin_lock_irqsave(&atchan->lock, flags);
551   - cookie = atc_assign_cookie(atchan, desc);
  530 + cookie = dma_cookie_assign(tx);
552 531  
553 532 if (list_empty(&atchan->active_list)) {
554 533 dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n",
drivers/dma/coh901318.c
... ... @@ -318,21 +318,7 @@
318 318  
319 319 return 0;
320 320 }
321   -static dma_cookie_t
322   -coh901318_assign_cookie(struct coh901318_chan *cohc,
323   - struct coh901318_desc *cohd)
324   -{
325   - dma_cookie_t cookie = cohc->chan.cookie;
326 321  
327   - if (++cookie < 0)
328   - cookie = 1;
329   -
330   - cohc->chan.cookie = cookie;
331   - cohd->desc.cookie = cookie;
332   -
333   - return cookie;
334   -}
335   -
336 322 static struct coh901318_desc *
337 323 coh901318_desc_get(struct coh901318_chan *cohc)
338 324 {
... ... @@ -966,16 +952,16 @@
966 952 desc);
967 953 struct coh901318_chan *cohc = to_coh901318_chan(tx->chan);
968 954 unsigned long flags;
  955 + dma_cookie_t cookie;
969 956  
970 957 spin_lock_irqsave(&cohc->lock, flags);
  958 + cookie = dma_cookie_assign(tx);
971 959  
972   - tx->cookie = coh901318_assign_cookie(cohc, cohd);
973   -
974 960 coh901318_desc_queue(cohc, cohd);
975 961  
976 962 spin_unlock_irqrestore(&cohc->lock, flags);
977 963  
978   - return tx->cookie;
  964 + return cookie;
979 965 }
980 966  
981 967 static struct dma_async_tx_descriptor *
drivers/dma/dmaengine.h
... ... @@ -7,5 +7,25 @@
7 7  
8 8 #include <linux/dmaengine.h>
9 9  
  10 +/**
  11 + * dma_cookie_assign - assign a DMA engine cookie to the descriptor
  12 + * @tx: descriptor needing cookie
  13 + *
  14 + * Assign a unique non-zero per-channel cookie to the descriptor.
  15 + * Note: caller is expected to hold a lock to prevent concurrency.
  16 + */
  17 +static inline dma_cookie_t dma_cookie_assign(struct dma_async_tx_descriptor *tx)
  18 +{
  19 + struct dma_chan *chan = tx->chan;
  20 + dma_cookie_t cookie;
  21 +
  22 + cookie = chan->cookie + 1;
  23 + if (cookie < DMA_MIN_COOKIE)
  24 + cookie = DMA_MIN_COOKIE;
  25 + tx->cookie = chan->cookie = cookie;
  26 +
  27 + return cookie;
  28 +}
  29 +
10 30 #endif
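
The helper centralises the cookie contract: dma_cookie_t is a signed 32-bit
value, valid cookies are positive, and when the per-channel counter would
overflow it wraps back to DMA_MIN_COOKIE (1, from <linux/dmaengine.h>)
rather than going negative. A minimal sketch of a converted tx_submit(),
using hypothetical foo_* names in place of a real driver's types, looks
like this:

	static dma_cookie_t foo_tx_submit(struct dma_async_tx_descriptor *tx)
	{
		struct foo_dma_chan *fc = to_foo_dma_chan(tx->chan);
		struct foo_desc *desc = to_foo_desc(tx);
		unsigned long flags;
		dma_cookie_t cookie;

		/* dma_cookie_assign() relies on the caller holding the channel lock */
		spin_lock_irqsave(&fc->lock, flags);
		cookie = dma_cookie_assign(tx);
		list_add_tail(&desc->node, &fc->pending_list);
		spin_unlock_irqrestore(&fc->lock, flags);

		return cookie;
	}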
drivers/dma/dw_dmac.c
... ... @@ -157,21 +157,6 @@
157 157 }
158 158 }
159 159  
160   -/* Called with dwc->lock held and bh disabled */
161   -static dma_cookie_t
162   -dwc_assign_cookie(struct dw_dma_chan *dwc, struct dw_desc *desc)
163   -{
164   - dma_cookie_t cookie = dwc->chan.cookie;
165   -
166   - if (++cookie < 0)
167   - cookie = 1;
168   -
169   - dwc->chan.cookie = cookie;
170   - desc->txd.cookie = cookie;
171   -
172   - return cookie;
173   -}
174   -
175 160 static void dwc_initialize(struct dw_dma_chan *dwc)
176 161 {
177 162 struct dw_dma *dw = to_dw_dma(dwc->chan.device);
... ... @@ -603,7 +588,7 @@
603 588 unsigned long flags;
604 589  
605 590 spin_lock_irqsave(&dwc->lock, flags);
606   - cookie = dwc_assign_cookie(dwc, desc);
  591 + cookie = dma_cookie_assign(tx);
607 592  
608 593 /*
609 594 * REVISIT: We should attempt to chain as many descriptors as
drivers/dma/ep93xx_dma.c
... ... @@ -783,16 +783,9 @@
783 783 unsigned long flags;
784 784  
785 785 spin_lock_irqsave(&edmac->lock, flags);
  786 + cookie = dma_cookie_assign(tx);
786 787  
787   - cookie = edmac->chan.cookie;
788   -
789   - if (++cookie < 0)
790   - cookie = 1;
791   -
792 788 desc = container_of(tx, struct ep93xx_dma_desc, txd);
793   -
794   - edmac->chan.cookie = cookie;
795   - desc->txd.cookie = cookie;
796 789  
797 790 /*
798 791 * If nothing is currently prosessed, we push this descriptor
drivers/dma/fsldma.c
... ... @@ -414,16 +414,9 @@
414 414 * assign cookies to all of the software descriptors
415 415 * that make up this transaction
416 416 */
417   - cookie = chan->common.cookie;
418 417 list_for_each_entry(child, &desc->tx_list, node) {
419   - cookie++;
420   - if (cookie < DMA_MIN_COOKIE)
421   - cookie = DMA_MIN_COOKIE;
422   -
423   - child->async_tx.cookie = cookie;
  418 + cookie = dma_cookie_assign(&child->async_tx);
424 419 }
425   -
426   - chan->common.cookie = cookie;
427 420  
428 421 /* put this transaction onto the tail of the pending queue */
429 422 append_ld_queue(chan, desc);
drivers/dma/imx-dma.c
... ... @@ -165,19 +165,6 @@
165 165 return ret;
166 166 }
167 167  
168   -static dma_cookie_t imxdma_assign_cookie(struct imxdma_channel *imxdma)
169   -{
170   - dma_cookie_t cookie = imxdma->chan.cookie;
171   -
172   - if (++cookie < 0)
173   - cookie = 1;
174   -
175   - imxdma->chan.cookie = cookie;
176   - imxdma->desc.cookie = cookie;
177   -
178   - return cookie;
179   -}
180   -
181 168 static dma_cookie_t imxdma_tx_submit(struct dma_async_tx_descriptor *tx)
182 169 {
183 170 struct imxdma_channel *imxdmac = to_imxdma_chan(tx->chan);
... ... @@ -185,7 +172,7 @@
185 172  
186 173 spin_lock_irq(&imxdmac->lock);
187 174  
188   - cookie = imxdma_assign_cookie(imxdmac);
  175 + cookie = dma_cookie_assign(tx);
189 176  
190 177 spin_unlock_irq(&imxdmac->lock);
191 178  
drivers/dma/imx-sdma.c
... ... @@ -815,19 +815,6 @@
815 815 return ret;
816 816 }
817 817  
818   -static dma_cookie_t sdma_assign_cookie(struct sdma_channel *sdmac)
819   -{
820   - dma_cookie_t cookie = sdmac->chan.cookie;
821   -
822   - if (++cookie < 0)
823   - cookie = 1;
824   -
825   - sdmac->chan.cookie = cookie;
826   - sdmac->desc.cookie = cookie;
827   -
828   - return cookie;
829   -}
830   -
831 818 static struct sdma_channel *to_sdma_chan(struct dma_chan *chan)
832 819 {
833 820 return container_of(chan, struct sdma_channel, chan);
... ... @@ -841,7 +828,7 @@
841 828  
842 829 spin_lock_irqsave(&sdmac->lock, flags);
843 830  
844   - cookie = sdma_assign_cookie(sdmac);
  831 + cookie = dma_cookie_assign(tx);
845 832  
846 833 spin_unlock_irqrestore(&sdmac->lock, flags);
847 834  
... ... @@ -1140,7 +1127,6 @@
1140 1127 struct sdma_engine *sdma = sdmac->sdma;
1141 1128  
1142 1129 if (sdmac->status == DMA_IN_PROGRESS)
1143   - sdma_enable_channel(sdma, sdmac->channel);
1144 1130 }
1145 1131  
1146 1132 #define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1 34
drivers/dma/intel_mid_dma.c
... ... @@ -436,14 +436,7 @@
436 436 dma_cookie_t cookie;
437 437  
438 438 spin_lock_bh(&midc->lock);
439   - cookie = midc->chan.cookie;
440   -
441   - if (++cookie < 0)
442   - cookie = 1;
443   -
444   - midc->chan.cookie = cookie;
445   - desc->txd.cookie = cookie;
446   -
  439 + cookie = dma_cookie_assign(tx);
447 440  
448 441 if (list_empty(&midc->active_list))
449 442 list_add_tail(&desc->desc_node, &midc->active_list);
drivers/dma/ioat/dma.c
... ... @@ -237,12 +237,7 @@
237 237  
238 238 spin_lock_bh(&ioat->desc_lock);
239 239 /* cookie incr and addition to used_list must be atomic */
240   - cookie = c->cookie;
241   - cookie++;
242   - if (cookie < 0)
243   - cookie = 1;
244   - c->cookie = cookie;
245   - tx->cookie = cookie;
  240 + cookie = dma_cookie_assign(tx);
246 241 dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie);
247 242  
248 243 /* write address into NextDescriptor field of last desc in chain */
drivers/dma/ioat/dma_v2.c
... ... @@ -400,13 +400,9 @@
400 400 struct dma_chan *c = tx->chan;
401 401 struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
402 402 struct ioat_chan_common *chan = &ioat->base;
403   - dma_cookie_t cookie = c->cookie;
  403 + dma_cookie_t cookie;
404 404  
405   - cookie++;
406   - if (cookie < 0)
407   - cookie = 1;
408   - tx->cookie = cookie;
409   - c->cookie = cookie;
  405 + cookie = dma_cookie_assign(tx);
410 406 dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie);
411 407  
412 408 if (!test_and_set_bit(IOAT_COMPLETION_PENDING, &chan->state))
drivers/dma/iop-adma.c
... ... @@ -440,18 +440,6 @@
440 440 return NULL;
441 441 }
442 442  
443   -static dma_cookie_t
444   -iop_desc_assign_cookie(struct iop_adma_chan *iop_chan,
445   - struct iop_adma_desc_slot *desc)
446   -{
447   - dma_cookie_t cookie = iop_chan->common.cookie;
448   - cookie++;
449   - if (cookie < 0)
450   - cookie = 1;
451   - iop_chan->common.cookie = desc->async_tx.cookie = cookie;
452   - return cookie;
453   -}
454   -
455 443 static void iop_adma_check_threshold(struct iop_adma_chan *iop_chan)
456 444 {
457 445 dev_dbg(iop_chan->device->common.dev, "pending: %d\n",
... ... @@ -479,7 +467,7 @@
479 467 slots_per_op = grp_start->slots_per_op;
480 468  
481 469 spin_lock_bh(&iop_chan->lock);
482   - cookie = iop_desc_assign_cookie(iop_chan, sw_desc);
  470 + cookie = dma_cookie_assign(tx);
483 471  
484 472 old_chain_tail = list_entry(iop_chan->chain.prev,
485 473 struct iop_adma_desc_slot, chain_node);
drivers/dma/ipu/ipu_idmac.c
... ... @@ -867,14 +867,7 @@
867 867  
868 868 dev_dbg(dev, "Submitting sg %p\n", &desc->sg[0]);
869 869  
870   - cookie = ichan->dma_chan.cookie;
871   -
872   - if (++cookie < 0)
873   - cookie = 1;
874   -
875   - /* from dmaengine.h: "last cookie value returned to client" */
876   - ichan->dma_chan.cookie = cookie;
877   - tx->cookie = cookie;
  870 + cookie = dma_cookie_assign(tx);
878 871  
879 872 /* ipu->lock can be taken under ichan->lock, but not v.v. */
880 873 spin_lock_irqsave(&ichan->lock, flags);
drivers/dma/mpc512x_dma.c
... ... @@ -439,13 +439,7 @@
439 439 mpc_dma_execute(mchan);
440 440  
441 441 /* Update cookie */
442   - cookie = mchan->chan.cookie + 1;
443   - if (cookie <= 0)
444   - cookie = 1;
445   -
446   - mchan->chan.cookie = cookie;
447   - mdesc->desc.cookie = cookie;
448   -
  442 + cookie = dma_cookie_assign(txd);
449 443 spin_unlock_irqrestore(&mchan->lock, flags);
450 444  
451 445 return cookie;
drivers/dma/mv_xor.c
... ... @@ -536,18 +536,6 @@
536 536 return NULL;
537 537 }
538 538  
539   -static dma_cookie_t
540   -mv_desc_assign_cookie(struct mv_xor_chan *mv_chan,
541   - struct mv_xor_desc_slot *desc)
542   -{
543   - dma_cookie_t cookie = mv_chan->common.cookie;
544   -
545   - if (++cookie < 0)
546   - cookie = 1;
547   - mv_chan->common.cookie = desc->async_tx.cookie = cookie;
548   - return cookie;
549   -}
550   -
551 539 /************************ DMA engine API functions ****************************/
552 540 static dma_cookie_t
553 541 mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
... ... @@ -565,7 +553,7 @@
565 553 grp_start = sw_desc->group_head;
566 554  
567 555 spin_lock_bh(&mv_chan->lock);
568   - cookie = mv_desc_assign_cookie(mv_chan, sw_desc);
  556 + cookie = dma_cookie_assign(tx);
569 557  
570 558 if (list_empty(&mv_chan->chain))
571 559 list_splice_init(&sw_desc->tx_list, &mv_chan->chain);
drivers/dma/mxs-dma.c
... ... @@ -194,19 +194,6 @@
194 194 mxs_chan->status = DMA_IN_PROGRESS;
195 195 }
196 196  
197   -static dma_cookie_t mxs_dma_assign_cookie(struct mxs_dma_chan *mxs_chan)
198   -{
199   - dma_cookie_t cookie = mxs_chan->chan.cookie;
200   -
201   - if (++cookie < 0)
202   - cookie = 1;
203   -
204   - mxs_chan->chan.cookie = cookie;
205   - mxs_chan->desc.cookie = cookie;
206   -
207   - return cookie;
208   -}
209   -
210 197 static struct mxs_dma_chan *to_mxs_dma_chan(struct dma_chan *chan)
211 198 {
212 199 return container_of(chan, struct mxs_dma_chan, chan);
... ... @@ -218,7 +205,7 @@
218 205  
219 206 mxs_dma_enable_chan(mxs_chan);
220 207  
221   - return mxs_dma_assign_cookie(mxs_chan);
  208 + return dma_cookie_assign(tx);
222 209 }
223 210  
224 211 static void mxs_dma_tasklet(unsigned long data)
drivers/dma/pch_dma.c
... ... @@ -417,20 +417,6 @@
417 417 }
418 418 }
419 419  
420   -static dma_cookie_t pdc_assign_cookie(struct pch_dma_chan *pd_chan,
421   - struct pch_dma_desc *desc)
422   -{
423   - dma_cookie_t cookie = pd_chan->chan.cookie;
424   -
425   - if (++cookie < 0)
426   - cookie = 1;
427   -
428   - pd_chan->chan.cookie = cookie;
429   - desc->txd.cookie = cookie;
430   -
431   - return cookie;
432   -}
433   -
434 420 static dma_cookie_t pd_tx_submit(struct dma_async_tx_descriptor *txd)
435 421 {
436 422 struct pch_dma_desc *desc = to_pd_desc(txd);
... ... @@ -438,7 +424,7 @@
438 424 dma_cookie_t cookie;
439 425  
440 426 spin_lock(&pd_chan->lock);
441   - cookie = pdc_assign_cookie(pd_chan, desc);
  427 + cookie = dma_cookie_assign(txd);
442 428  
443 429 if (list_empty(&pd_chan->active_list)) {
444 430 list_add_tail(&desc->desc_node, &pd_chan->active_list);
drivers/dma/pl330.c
... ... @@ -429,26 +429,16 @@
429 429 spin_lock_irqsave(&pch->lock, flags);
430 430  
431 431 /* Assign cookies to all nodes */
432   - cookie = tx->chan->cookie;
433   -
434 432 while (!list_empty(&last->node)) {
435 433 desc = list_entry(last->node.next, struct dma_pl330_desc, node);
436 434  
437   - if (++cookie < 0)
438   - cookie = 1;
439   - desc->txd.cookie = cookie;
  435 + dma_cookie_assign(&desc->txd);
440 436  
441 437 list_move_tail(&desc->node, &pch->work_list);
442 438 }
443 439  
444   - if (++cookie < 0)
445   - cookie = 1;
446   - last->txd.cookie = cookie;
447   -
  440 + cookie = dma_cookie_assign(&last->txd);
448 441 list_add_tail(&last->node, &pch->work_list);
449   -
450   - tx->chan->cookie = cookie;
451   -
452 442 spin_unlock_irqrestore(&pch->lock, flags);
453 443  
454 444 return cookie;
drivers/dma/ppc4xx/adma.c
... ... @@ -2151,22 +2151,6 @@
2151 2151 }
2152 2152  
2153 2153 /**
2154   - * ppc440spe_desc_assign_cookie - assign a cookie
2155   - */
2156   -static dma_cookie_t ppc440spe_desc_assign_cookie(
2157   - struct ppc440spe_adma_chan *chan,
2158   - struct ppc440spe_adma_desc_slot *desc)
2159   -{
2160   - dma_cookie_t cookie = chan->common.cookie;
2161   -
2162   - cookie++;
2163   - if (cookie < 0)
2164   - cookie = 1;
2165   - chan->common.cookie = desc->async_tx.cookie = cookie;
2166   - return cookie;
2167   -}
2168   -
2169   -/**
2170 2154 * ppc440spe_rxor_set_region_data -
2171 2155 */
2172 2156 static void ppc440spe_rxor_set_region(struct ppc440spe_adma_desc_slot *desc,
... ... @@ -2236,8 +2220,7 @@
2236 2220 slots_per_op = group_start->slots_per_op;
2237 2221  
2238 2222 spin_lock_bh(&chan->lock);
2239   -
2240   - cookie = ppc440spe_desc_assign_cookie(chan, sw_desc);
  2223 + cookie = dma_cookie_assign(tx);
2241 2224  
2242 2225 if (unlikely(list_empty(&chan->chain))) {
2243 2226 /* first peer */
drivers/dma/shdma.c
... ... @@ -298,13 +298,7 @@
298 298 else
299 299 power_up = false;
300 300  
301   - cookie = sh_chan->common.cookie;
302   - cookie++;
303   - if (cookie < 0)
304   - cookie = 1;
305   -
306   - sh_chan->common.cookie = cookie;
307   - tx->cookie = cookie;
  301 + cookie = dma_cookie_assign(tx);
308 302  
309 303 /* Mark all chunks of this descriptor as submitted, move to the queue */
310 304 list_for_each_entry_safe(chunk, c, desc->node.prev, node) {
drivers/dma/sirf-dma.c
... ... @@ -257,13 +257,7 @@
257 257 /* Move descriptor to queue */
258 258 list_move_tail(&sdesc->node, &schan->queued);
259 259  
260   - /* Update cookie */
261   - cookie = schan->chan.cookie + 1;
262   - if (cookie <= 0)
263   - cookie = 1;
264   -
265   - schan->chan.cookie = cookie;
266   - sdesc->desc.cookie = cookie;
  260 + cookie = dma_cookie_assign(txd);
267 261  
268 262 spin_unlock_irqrestore(&schan->lock, flags);
269 263  
drivers/dma/ste_dma40.c
... ... @@ -1220,21 +1220,14 @@
1220 1220 chan);
1221 1221 struct d40_desc *d40d = container_of(tx, struct d40_desc, txd);
1222 1222 unsigned long flags;
  1223 + dma_cookie_t cookie;
1223 1224  
1224 1225 spin_lock_irqsave(&d40c->lock, flags);
1225   -
1226   - d40c->chan.cookie++;
1227   -
1228   - if (d40c->chan.cookie < 0)
1229   - d40c->chan.cookie = 1;
1230   -
1231   - d40d->txd.cookie = d40c->chan.cookie;
1232   -
  1226 + cookie = dma_cookie_assign(tx);
1233 1227 d40_desc_queue(d40c, d40d);
1234   -
1235 1228 spin_unlock_irqrestore(&d40c->lock, flags);
1236 1229  
1237   - return tx->cookie;
  1230 + return cookie;
1238 1231 }
1239 1232  
1240 1233 static int d40_start(struct d40_chan *d40c)
drivers/dma/timb_dma.c
... ... @@ -350,12 +350,7 @@
350 350 dma_cookie_t cookie;
351 351  
352 352 spin_lock_bh(&td_chan->lock);
353   -
354   - cookie = txd->chan->cookie;
355   - if (++cookie < 0)
356   - cookie = 1;
357   - txd->chan->cookie = cookie;
358   - txd->cookie = cookie;
  353 + cookie = dma_cookie_assign(txd);
359 354  
360 355 if (list_empty(&td_chan->active_list)) {
361 356 dev_dbg(chan2dev(txd->chan), "%s: started %u\n", __func__,
drivers/dma/txx9dmac.c
... ... @@ -281,21 +281,6 @@
281 281 }
282 282 }
283 283  
284   -/* Called with dc->lock held and bh disabled */
285   -static dma_cookie_t
286   -txx9dmac_assign_cookie(struct txx9dmac_chan *dc, struct txx9dmac_desc *desc)
287   -{
288   - dma_cookie_t cookie = dc->chan.cookie;
289   -
290   - if (++cookie < 0)
291   - cookie = 1;
292   -
293   - dc->chan.cookie = cookie;
294   - desc->txd.cookie = cookie;
295   -
296   - return cookie;
297   -}
298   -
299 284 /*----------------------------------------------------------------------*/
300 285  
301 286 static void txx9dmac_dump_regs(struct txx9dmac_chan *dc)
... ... @@ -740,7 +725,7 @@
740 725 dma_cookie_t cookie;
741 726  
742 727 spin_lock_bh(&dc->lock);
743   - cookie = txx9dmac_assign_cookie(dc, desc);
  728 + cookie = dma_cookie_assign(tx);
744 729  
745 730 dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u %p\n",
746 731 desc->txd.cookie, desc);