Commit 9c3a50b7d7ec45da34e73cac66cde12dd6092dd8

Authored by Ira Snyder
Committed by Dan Williams
1 parent a1c0331901

fsldma: major cleanups and fixes

Fix locking. Use two queues in the driver, one for pending transactions, and
one for transactions which are actually running on the hardware. Call
dma_run_dependencies() on descriptor cleanup so that the async_tx API works
correctly.
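
As a rough sketch of this two-queue flow (not the driver's actual code: the sketch_* names and simplified types below are invented for illustration, and the hardware programming is reduced to comments), the pattern looks like this in list_head terms:

#include <linux/list.h>
#include <linux/spinlock.h>

struct sketch_chan {                    /* stand-in for struct fsldma_chan */
        spinlock_t lock;
        struct list_head ld_pending;    /* submitted, not yet on the hardware */
        struct list_head ld_running;    /* currently owned by the hardware */
};

struct sketch_desc {                    /* stand-in for struct fsl_desc_sw */
        struct list_head node;
};

/* tx_submit path: only queue the work; nothing touches the hardware yet */
static void sketch_submit(struct sketch_chan *c, struct sketch_desc *d)
{
        unsigned long flags;

        spin_lock_irqsave(&c->lock, flags);
        list_add_tail(&d->node, &c->ld_pending);
        spin_unlock_irqrestore(&c->lock, flags);
}

/* issue_pending/IRQ path: when idle, hand all pending work to the hardware */
static void sketch_start(struct sketch_chan *c)
{
        unsigned long flags;

        spin_lock_irqsave(&c->lock, flags);
        if (!list_empty(&c->ld_pending) /* && controller is idle */) {
                list_splice_tail_init(&c->ld_pending, &c->ld_running);
                /* program the first descriptor's address, then start DMA */
        }
        spin_unlock_irqrestore(&c->lock, flags);
}

Both lists are assumed to have been set up with INIT_LIST_HEAD() at channel init. The cleanup path then walks ld_running, invoking callbacks and dma_run_dependencies() before freeing each completed descriptor, as the hunk around fsl_chan_ld_cleanup() below shows.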

There are a number of places throughout the code where lists of descriptors
are freed in a loop. Create functions to handle this, and use them instead
of open-coding the loop each time.
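
A hedged sketch of that helper pattern, reusing the stand-in types from the sketch above (sketch_free_desc_list and sketch_terminate_all are invented names; kfree() stands in for dma_pool_free(), so <linux/slab.h> would also be needed):

/* free every descriptor on a list; the caller holds the channel lock */
static void sketch_free_desc_list(struct sketch_chan *c, struct list_head *list)
{
        struct sketch_desc *d, *tmp;

        list_for_each_entry_safe(d, tmp, list, node) {
                list_del(&d->node);
                kfree(d);               /* dma_pool_free() in the real driver */
        }
}

/* callers such as terminate_all collapse to one call per queue */
static void sketch_terminate_all(struct sketch_chan *c)
{
        unsigned long flags;

        spin_lock_irqsave(&c->lock, flags);
        sketch_free_desc_list(c, &c->ld_pending);
        sketch_free_desc_list(c, &c->ld_running);
        spin_unlock_irqrestore(&c->lock, flags);
}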

Signed-off-by: Ira W. Snyder <iws@ovro.caltech.edu>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>

Showing 2 changed files with 207 additions and 182 deletions

drivers/dma/fsldma.c
... ... @@ -61,7 +61,6 @@
61 61 | FSL_DMA_MR_PRC_RM, 32);
62 62 break;
63 63 }
64   -
65 64 }
66 65  
67 66 static void set_sr(struct fsldma_chan *chan, u32 val)
... ... @@ -120,11 +119,6 @@
120 119 return DMA_IN(chan, &chan->regs->cdar, 64) & ~FSL_DMA_SNEN;
121 120 }
122 121  
123   -static void set_ndar(struct fsldma_chan *chan, dma_addr_t addr)
124   -{
125   - DMA_OUT(chan, &chan->regs->ndar, addr, 64);
126   -}
127   -
128 122 static dma_addr_t get_ndar(struct fsldma_chan *chan)
129 123 {
130 124 return DMA_IN(chan, &chan->regs->ndar, 64);
131 125  
... ... @@ -178,11 +172,12 @@
178 172  
179 173 for (i = 0; i < 100; i++) {
180 174 if (dma_is_idle(chan))
181   - break;
  175 + return;
  176 +
182 177 udelay(10);
183 178 }
184 179  
185   - if (i >= 100 && !dma_is_idle(chan))
  180 + if (!dma_is_idle(chan))
186 181 dev_err(chan->dev, "DMA halt timeout!\n");
187 182 }
188 183  
... ... @@ -199,27 +194,6 @@
199 194 | snoop_bits, 64);
200 195 }
201 196  
202   -static void append_ld_queue(struct fsldma_chan *chan,
203   - struct fsl_desc_sw *new_desc)
204   -{
205   - struct fsl_desc_sw *queue_tail = to_fsl_desc(chan->ld_queue.prev);
206   -
207   - if (list_empty(&chan->ld_queue))
208   - return;
209   -
210   - /* Link to the new descriptor physical address and
211   - * Enable End-of-segment interrupt for
212   - * the last link descriptor.
213   - * (the previous node's next link descriptor)
214   - *
215   - * For FSL_DMA_IP_83xx, the snoop enable bit need be set.
216   - */
217   - queue_tail->hw.next_ln_addr = CPU_TO_DMA(chan,
218   - new_desc->async_tx.phys | FSL_DMA_EOSIE |
219   - (((chan->feature & FSL_DMA_IP_MASK)
220   - == FSL_DMA_IP_83XX) ? FSL_DMA_SNEN : 0), 64);
221   -}
222   -
223 197 /**
224 198 * fsl_chan_set_src_loop_size - Set source address hold transfer size
225 199 * @chan : Freescale DMA channel
... ... @@ -343,6 +317,31 @@
343 317 chan->feature &= ~FSL_DMA_CHAN_START_EXT;
344 318 }
345 319  
  320 +static void append_ld_queue(struct fsldma_chan *chan,
  321 + struct fsl_desc_sw *desc)
  322 +{
  323 + struct fsl_desc_sw *tail = to_fsl_desc(chan->ld_pending.prev);
  324 +
  325 + if (list_empty(&chan->ld_pending))
  326 + goto out_splice;
  327 +
  328 + /*
  329 + * Add the hardware descriptor to the chain of hardware descriptors
  330 + * that already exists in memory.
  331 + *
  332 + * This will un-set the EOL bit of the existing transaction, and the
  333 + * last link in this transaction will become the EOL descriptor.
  334 + */
  335 + set_desc_next(chan, &tail->hw, desc->async_tx.phys);
  336 +
  337 + /*
  338 + * Add the software descriptor and all children to the list
  339 + * of pending transactions
  340 + */
  341 +out_splice:
  342 + list_splice_tail_init(&desc->tx_list, &chan->ld_pending);
  343 +}
  344 +
346 345 static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
347 346 {
348 347 struct fsldma_chan *chan = to_fsl_chan(tx->chan);
349 348  
... ... @@ -351,9 +350,12 @@
351 350 unsigned long flags;
352 351 dma_cookie_t cookie;
353 352  
354   - /* cookie increment and adding to ld_queue must be atomic */
355 353 spin_lock_irqsave(&chan->desc_lock, flags);
356 354  
  355 + /*
  356 + * assign cookies to all of the software descriptors
  357 + * that make up this transaction
  358 + */
357 359 cookie = chan->common.cookie;
358 360 list_for_each_entry(child, &desc->tx_list, node) {
359 361 cookie++;
360 362  
... ... @@ -364,8 +366,9 @@
364 366 }
365 367  
366 368 chan->common.cookie = cookie;
  369 +
  370 + /* put this transaction onto the tail of the pending queue */
367 371 append_ld_queue(chan, desc);
368   - list_splice_init(&desc->tx_list, chan->ld_queue.prev);
369 372  
370 373 spin_unlock_irqrestore(&chan->desc_lock, flags);
... ... @@ -381,20 +384,22 @@
381 384 static struct fsl_desc_sw *fsl_dma_alloc_descriptor(
382 385 struct fsldma_chan *chan)
383 386 {
  387 + struct fsl_desc_sw *desc;
384 388 dma_addr_t pdesc;
385   - struct fsl_desc_sw *desc_sw;
386 389  
387   - desc_sw = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &pdesc);
388   - if (desc_sw) {
389   - memset(desc_sw, 0, sizeof(struct fsl_desc_sw));
390   - INIT_LIST_HEAD(&desc_sw->tx_list);
391   - dma_async_tx_descriptor_init(&desc_sw->async_tx,
392   - &chan->common);
393   - desc_sw->async_tx.tx_submit = fsl_dma_tx_submit;
394   - desc_sw->async_tx.phys = pdesc;
  390 + desc = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &pdesc);
  391 + if (!desc) {
  392 + dev_dbg(chan->dev, "out of memory for link desc\n");
  393 + return NULL;
395 394 }
396 395  
397   - return desc_sw;
  396 + memset(desc, 0, sizeof(*desc));
  397 + INIT_LIST_HEAD(&desc->tx_list);
  398 + dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
  399 + desc->async_tx.tx_submit = fsl_dma_tx_submit;
  400 + desc->async_tx.phys = pdesc;
  401 +
  402 + return desc;
398 403 }
399 404  
... ... @@ -414,45 +419,69 @@
414 419 if (chan->desc_pool)
415 420 return 1;
416 421  
417   - /* We need the descriptor to be aligned to 32bytes
  422 + /*
  423 + * We need the descriptor to be aligned to 32bytes
418 424 * for meeting FSL DMA specification requirement.
419 425 */
420 426 chan->desc_pool = dma_pool_create("fsl_dma_engine_desc_pool",
421   - chan->dev, sizeof(struct fsl_desc_sw),
422   - 32, 0);
  427 + chan->dev,
  428 + sizeof(struct fsl_desc_sw),
  429 + __alignof__(struct fsl_desc_sw), 0);
423 430 if (!chan->desc_pool) {
424   - dev_err(chan->dev, "No memory for channel %d "
425   - "descriptor dma pool.\n", chan->id);
426   - return 0;
  431 + dev_err(chan->dev, "unable to allocate channel %d "
  432 + "descriptor pool\n", chan->id);
  433 + return -ENOMEM;
427 434 }
428 435  
  436 + /* there is at least one descriptor free to be allocated */
429 437 return 1;
430 438 }
431 439  
432 440 /**
  441 + * fsldma_free_desc_list - Free all descriptors in a queue
  442 + * @chan: Freescae DMA channel
  443 + * @list: the list to free
  444 + *
  445 + * LOCKING: must hold chan->desc_lock
  446 + */
  447 +static void fsldma_free_desc_list(struct fsldma_chan *chan,
  448 + struct list_head *list)
  449 +{
  450 + struct fsl_desc_sw *desc, *_desc;
  451 +
  452 + list_for_each_entry_safe(desc, _desc, list, node) {
  453 + list_del(&desc->node);
  454 + dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
  455 + }
  456 +}
  457 +
  458 +static void fsldma_free_desc_list_reverse(struct fsldma_chan *chan,
  459 + struct list_head *list)
  460 +{
  461 + struct fsl_desc_sw *desc, *_desc;
  462 +
  463 + list_for_each_entry_safe_reverse(desc, _desc, list, node) {
  464 + list_del(&desc->node);
  465 + dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
  466 + }
  467 +}
  468 +
  469 +/**
433 470 * fsl_dma_free_chan_resources - Free all resources of the channel.
434 471 * @chan : Freescale DMA channel
435 472 */
436 473 static void fsl_dma_free_chan_resources(struct dma_chan *dchan)
437 474 {
438 475 struct fsldma_chan *chan = to_fsl_chan(dchan);
439   - struct fsl_desc_sw *desc, *_desc;
440 476 unsigned long flags;
441 477  
442 478 dev_dbg(chan->dev, "Free all channel resources.\n");
443 479 spin_lock_irqsave(&chan->desc_lock, flags);
444   - list_for_each_entry_safe(desc, _desc, &chan->ld_queue, node) {
445   -#ifdef FSL_DMA_LD_DEBUG
446   - dev_dbg(chan->dev,
447   - "LD %p will be released.\n", desc);
448   -#endif
449   - list_del(&desc->node);
450   - /* free link descriptor */
451   - dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
452   - }
  480 + fsldma_free_desc_list(chan, &chan->ld_pending);
  481 + fsldma_free_desc_list(chan, &chan->ld_running);
453 482 spin_unlock_irqrestore(&chan->desc_lock, flags);
454   - dma_pool_destroy(chan->desc_pool);
455 483  
  484 + dma_pool_destroy(chan->desc_pool);
456 485 chan->desc_pool = NULL;
457 486 }
458 487  
... ... @@ -491,7 +520,6 @@
491 520 {
492 521 struct fsldma_chan *chan;
493 522 struct fsl_desc_sw *first = NULL, *prev = NULL, *new;
494   - struct list_head *list;
495 523 size_t copy;
496 524  
497 525 if (!dchan)
... ... @@ -550,12 +578,7 @@
550 578 if (!first)
551 579 return NULL;
552 580  
553   - list = &first->tx_list;
554   - list_for_each_entry_safe_reverse(new, prev, list, node) {
555   - list_del(&new->node);
556   - dma_pool_free(chan->desc_pool, new, new->async_tx.phys);
557   - }
558   -
  581 + fsldma_free_desc_list_reverse(chan, &first->tx_list);
559 582 return NULL;
560 583 }
561 584  
... ... @@ -578,7 +601,6 @@
578 601 struct fsldma_chan *chan;
579 602 struct fsl_desc_sw *first = NULL, *prev = NULL, *new = NULL;
580 603 struct fsl_dma_slave *slave;
581   - struct list_head *tx_list;
582 604 size_t copy;
583 605  
584 606 int i;
585 607  
... ... @@ -748,19 +770,13 @@
748 770 *
749 771 * We're re-using variables for the loop, oh well
750 772 */
751   - tx_list = &first->tx_list;
752   - list_for_each_entry_safe_reverse(new, prev, tx_list, node) {
753   - list_del_init(&new->node);
754   - dma_pool_free(chan->desc_pool, new, new->async_tx.phys);
755   - }
756   -
  773 + fsldma_free_desc_list_reverse(chan, &first->tx_list);
757 774 return NULL;
758 775 }
759 776  
760 777 static void fsl_dma_device_terminate_all(struct dma_chan *dchan)
761 778 {
762 779 struct fsldma_chan *chan;
763   - struct fsl_desc_sw *desc, *tmp;
764 780 unsigned long flags;
765 781  
766 782 if (!dchan)
... ... @@ -774,10 +790,8 @@
774 790 spin_lock_irqsave(&chan->desc_lock, flags);
775 791  
776 792 /* Remove and free all of the descriptors in the LD queue */
777   - list_for_each_entry_safe(desc, tmp, &chan->ld_queue, node) {
778   - list_del(&desc->node);
779   - dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
780   - }
  793 + fsldma_free_desc_list(chan, &chan->ld_pending);
  794 + fsldma_free_desc_list(chan, &chan->ld_running);
781 795  
782 796 spin_unlock_irqrestore(&chan->desc_lock, flags);
783 797 }
... ... @@ -785,40 +799,55 @@
785 799 /**
786 800 * fsl_dma_update_completed_cookie - Update the completed cookie.
787 801 * @chan : Freescale DMA channel
  802 + *
  803 + * CONTEXT: hardirq
788 804 */
789 805 static void fsl_dma_update_completed_cookie(struct fsldma_chan *chan)
790 806 {
791   - struct fsl_desc_sw *cur_desc, *desc;
792   - dma_addr_t ld_phy;
  807 + struct fsl_desc_sw *desc;
  808 + unsigned long flags;
  809 + dma_cookie_t cookie;
793 810  
794   - ld_phy = get_cdar(chan) & FSL_DMA_NLDA_MASK;
  811 + spin_lock_irqsave(&chan->desc_lock, flags);
795 812  
796   - if (ld_phy) {
797   - cur_desc = NULL;
798   - list_for_each_entry(desc, &chan->ld_queue, node)
799   - if (desc->async_tx.phys == ld_phy) {
800   - cur_desc = desc;
801   - break;
802   - }
803   -
804   - if (cur_desc && cur_desc->async_tx.cookie) {
805   - if (dma_is_idle(chan))
806   - chan->completed_cookie =
807   - cur_desc->async_tx.cookie;
808   - else
809   - chan->completed_cookie =
810   - cur_desc->async_tx.cookie - 1;
811   - }
  813 + if (list_empty(&chan->ld_running)) {
  814 + dev_dbg(chan->dev, "no running descriptors\n");
  815 + goto out_unlock;
812 816 }
  817 +
  818 + /* Get the last descriptor, update the cookie to that */
  819 + desc = to_fsl_desc(chan->ld_running.prev);
  820 + if (dma_is_idle(chan))
  821 + cookie = desc->async_tx.cookie;
  822 + else
  823 + cookie = desc->async_tx.cookie - 1;
  824 +
  825 + chan->completed_cookie = cookie;
  826 +
  827 +out_unlock:
  828 + spin_unlock_irqrestore(&chan->desc_lock, flags);
813 829 }
814 830  
815 831 /**
  832 + * fsldma_desc_status - Check the status of a descriptor
  833 + * @chan: Freescale DMA channel
  834 + * @desc: DMA SW descriptor
  835 + *
  836 + * This function will return the status of the given descriptor
  837 + */
  838 +static enum dma_status fsldma_desc_status(struct fsldma_chan *chan,
  839 + struct fsl_desc_sw *desc)
  840 +{
  841 + return dma_async_is_complete(desc->async_tx.cookie,
  842 + chan->completed_cookie,
  843 + chan->common.cookie);
  844 +}
  845 +
  846 +/**
816 847 * fsl_chan_ld_cleanup - Clean up link descriptors
817 848 * @chan : Freescale DMA channel
818 849 *
819 850 * This function clean up the ld_queue of DMA channel.
820   - * If 'in_intr' is set, the function will move the link descriptor to
821   - * the recycle list. Otherwise, free it directly.
822 851 */
823 852 static void fsl_chan_ld_cleanup(struct fsldma_chan *chan)
824 853 {
... ... @@ -827,80 +856,95 @@
827 856  
828 857 spin_lock_irqsave(&chan->desc_lock, flags);
829 858  
830   - dev_dbg(chan->dev, "chan completed_cookie = %d\n",
831   - chan->completed_cookie);
832   - list_for_each_entry_safe(desc, _desc, &chan->ld_queue, node) {
  859 + dev_dbg(chan->dev, "chan completed_cookie = %d\n", chan->completed_cookie);
  860 + list_for_each_entry_safe(desc, _desc, &chan->ld_running, node) {
833 861 dma_async_tx_callback callback;
834 862 void *callback_param;
835 863  
836   - if (dma_async_is_complete(desc->async_tx.cookie,
837   - chan->completed_cookie, chan->common.cookie)
838   - == DMA_IN_PROGRESS)
  864 + if (fsldma_desc_status(chan, desc) == DMA_IN_PROGRESS)
839 865 break;
840 866  
841   - callback = desc->async_tx.callback;
842   - callback_param = desc->async_tx.callback_param;
843   -
844   - /* Remove from ld_queue list */
  867 + /* Remove from the list of running transactions */
845 868 list_del(&desc->node);
846 869  
847   - dev_dbg(chan->dev, "link descriptor %p will be recycle.\n",
848   - desc);
849   - dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
850   -
851 870 /* Run the link descriptor callback function */
  871 + callback = desc->async_tx.callback;
  872 + callback_param = desc->async_tx.callback_param;
852 873 if (callback) {
853 874 spin_unlock_irqrestore(&chan->desc_lock, flags);
854   - dev_dbg(chan->dev, "link descriptor %p callback\n",
855   - desc);
  875 + dev_dbg(chan->dev, "LD %p callback\n", desc);
856 876 callback(callback_param);
857 877 spin_lock_irqsave(&chan->desc_lock, flags);
858 878 }
  879 +
  880 + /* Run any dependencies, then free the descriptor */
  881 + dma_run_dependencies(&desc->async_tx);
  882 + dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
859 883 }
  884 +
860 885 spin_unlock_irqrestore(&chan->desc_lock, flags);
861 886 }
862 887  
863 888 /**
864   - * fsl_chan_xfer_ld_queue - Transfer link descriptors in channel ld_queue.
  889 + * fsl_chan_xfer_ld_queue - transfer any pending transactions
865 890 * @chan : Freescale DMA channel
  891 + *
  892 + * This will make sure that any pending transactions will be run.
  893 + * If the DMA controller is idle, it will be started. Otherwise,
  894 + * the DMA controller's interrupt handler will start any pending
  895 + * transactions when it becomes idle.
866 896 */
867 897 static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan)
868 898 {
869   - struct list_head *ld_node;
870   - dma_addr_t next_dst_addr;
  899 + struct fsl_desc_sw *desc;
871 900 unsigned long flags;
872 901  
873 902 spin_lock_irqsave(&chan->desc_lock, flags);
874 903  
875   - if (!dma_is_idle(chan))
  904 + /*
  905 + * If the list of pending descriptors is empty, then we
  906 + * don't need to do any work at all
  907 + */
  908 + if (list_empty(&chan->ld_pending)) {
  909 + dev_dbg(chan->dev, "no pending LDs\n");
876 910 goto out_unlock;
  911 + }
877 912  
  913 + /*
  914 + * The DMA controller is not idle, which means the interrupt
  915 + * handler will start any queued transactions when it runs
  916 + * at the end of the current transaction
  917 + */
  918 + if (!dma_is_idle(chan)) {
  919 + dev_dbg(chan->dev, "DMA controller still busy\n");
  920 + goto out_unlock;
  921 + }
  922 +
  923 + /*
  924 + * TODO:
  925 + * make sure the dma_halt() function really un-wedges the
  926 + * controller as much as possible
  927 + */
878 928 dma_halt(chan);
879 929  
880   - /* If there are some link descriptors
881   - * not transfered in queue. We need to start it.
  930 + /*
  931 + * If there are some link descriptors which have not been
  932 + * transferred, we need to start the controller
882 933 */
883 934  
884   - /* Find the first un-transfer desciptor */
885   - for (ld_node = chan->ld_queue.next;
886   - (ld_node != &chan->ld_queue)
887   - && (dma_async_is_complete(
888   - to_fsl_desc(ld_node)->async_tx.cookie,
889   - chan->completed_cookie,
890   - chan->common.cookie) == DMA_SUCCESS);
891   - ld_node = ld_node->next);
  935 + /*
  936 + * Move all elements from the queue of pending transactions
  937 + * onto the list of running transactions
  938 + */
  939 + desc = list_first_entry(&chan->ld_pending, struct fsl_desc_sw, node);
  940 + list_splice_tail_init(&chan->ld_pending, &chan->ld_running);
892 941  
893   - if (ld_node != &chan->ld_queue) {
894   - /* Get the ld start address from ld_queue */
895   - next_dst_addr = to_fsl_desc(ld_node)->async_tx.phys;
896   - dev_dbg(chan->dev, "xfer LDs staring from 0x%llx\n",
897   - (unsigned long long)next_dst_addr);
898   - set_cdar(chan, next_dst_addr);
899   - dma_start(chan);
900   - } else {
901   - set_cdar(chan, 0);
902   - set_ndar(chan, 0);
903   - }
  942 + /*
  943 + * Program the descriptor's address into the DMA controller,
  944 + * then start the DMA transaction
  945 + */
  946 + set_cdar(chan, desc->async_tx.phys);
  947 + dma_start(chan);
904 948  
905 949 out_unlock:
906 950 spin_unlock_irqrestore(&chan->desc_lock, flags);
... ... @@ -913,30 +957,6 @@
913 957 static void fsl_dma_memcpy_issue_pending(struct dma_chan *dchan)
914 958 {
915 959 struct fsldma_chan *chan = to_fsl_chan(dchan);
916   -
917   -#ifdef FSL_DMA_LD_DEBUG
918   - struct fsl_desc_sw *ld;
919   - unsigned long flags;
920   -
921   - spin_lock_irqsave(&chan->desc_lock, flags);
922   - if (list_empty(&chan->ld_queue)) {
923   - spin_unlock_irqrestore(&chan->desc_lock, flags);
924   - return;
925   - }
926   -
927   - dev_dbg(chan->dev, "--memcpy issue--\n");
928   - list_for_each_entry(ld, &chan->ld_queue, node) {
929   - int i;
930   - dev_dbg(chan->dev, "Ch %d, LD %08x\n",
931   - chan->id, ld->async_tx.phys);
932   - for (i = 0; i < 8; i++)
933   - dev_dbg(chan->dev, "LD offset %d: %08x\n",
934   - i, *(((u32 *)&ld->hw) + i));
935   - }
936   - dev_dbg(chan->dev, "----------------\n");
937   - spin_unlock_irqrestore(&chan->desc_lock, flags);
938   -#endif
939   -
940 960 fsl_chan_xfer_ld_queue(chan);
941 961 }
... ... @@ -978,10 +998,10 @@
978 998 int xfer_ld_q = 0;
979 999 u32 stat;
980 1000  
  1001 + /* save and clear the status register */
981 1002 stat = get_sr(chan);
982   - dev_dbg(chan->dev, "event: channel %d, stat = 0x%x\n",
983   - chan->id, stat);
984   - set_sr(chan, stat); /* Clear the event register */
  1003 + set_sr(chan, stat);
  1004 + dev_dbg(chan->dev, "irq: channel %d, stat = 0x%x\n", chan->id, stat);
985 1005  
986 1006 stat &= ~(FSL_DMA_SR_CB | FSL_DMA_SR_CH);
987 1007 if (!stat)
... ... @@ -990,12 +1010,13 @@
990 1010 if (stat & FSL_DMA_SR_TE)
991 1011 dev_err(chan->dev, "Transfer Error!\n");
992 1012  
993   - /* Programming Error
  1013 + /*
  1014 + * Programming Error
994 1015 * The DMA_INTERRUPT async_tx is a NULL transfer, which will
995 1016 * triger a PE interrupt.
996 1017 */
997 1018 if (stat & FSL_DMA_SR_PE) {
998   - dev_dbg(chan->dev, "event: Programming Error INT\n");
  1019 + dev_dbg(chan->dev, "irq: Programming Error INT\n");
999 1020 if (get_bcr(chan) == 0) {
1000 1021 /* BCR register is 0, this is a DMA_INTERRUPT async_tx.
1001 1022 * Now, update the completed cookie, and continue the
... ... @@ -1007,34 +1028,37 @@
1007 1028 stat &= ~FSL_DMA_SR_PE;
1008 1029 }
1009 1030  
1010   - /* If the link descriptor segment transfer finishes,
  1031 + /*
  1032 + * If the link descriptor segment transfer finishes,
1011 1033 * we will recycle the used descriptor.
1012 1034 */
1013 1035 if (stat & FSL_DMA_SR_EOSI) {
1014   - dev_dbg(chan->dev, "event: End-of-segments INT\n");
1015   - dev_dbg(chan->dev, "event: clndar 0x%llx, nlndar 0x%llx\n",
  1036 + dev_dbg(chan->dev, "irq: End-of-segments INT\n");
  1037 + dev_dbg(chan->dev, "irq: clndar 0x%llx, nlndar 0x%llx\n",
1016 1038 (unsigned long long)get_cdar(chan),
1017 1039 (unsigned long long)get_ndar(chan));
1018 1040 stat &= ~FSL_DMA_SR_EOSI;
1019 1041 update_cookie = 1;
1020 1042 }
1021 1043  
1022   - /* For MPC8349, EOCDI event need to update cookie
  1044 + /*
  1045 + * For MPC8349, EOCDI event need to update cookie
1023 1046 * and start the next transfer if it exist.
1024 1047 */
1025 1048 if (stat & FSL_DMA_SR_EOCDI) {
1026   - dev_dbg(chan->dev, "event: End-of-Chain link INT\n");
  1049 + dev_dbg(chan->dev, "irq: End-of-Chain link INT\n");
1027 1050 stat &= ~FSL_DMA_SR_EOCDI;
1028 1051 update_cookie = 1;
1029 1052 xfer_ld_q = 1;
1030 1053 }
1031 1054  
1032   - /* If it current transfer is the end-of-transfer,
  1055 + /*
  1056 + * If it current transfer is the end-of-transfer,
1033 1057 * we should clear the Channel Start bit for
1034 1058 * prepare next transfer.
1035 1059 */
1036 1060 if (stat & FSL_DMA_SR_EOLNI) {
1037   - dev_dbg(chan->dev, "event: End-of-link INT\n");
  1061 + dev_dbg(chan->dev, "irq: End-of-link INT\n");
1038 1062 stat &= ~FSL_DMA_SR_EOLNI;
1039 1063 xfer_ld_q = 1;
1040 1064 }
1041 1065  
... ... @@ -1044,10 +1068,9 @@
1044 1068 if (xfer_ld_q)
1045 1069 fsl_chan_xfer_ld_queue(chan);
1046 1070 if (stat)
1047   - dev_dbg(chan->dev, "event: unhandled sr 0x%02x\n",
1048   - stat);
  1071 + dev_dbg(chan->dev, "irq: unhandled sr 0x%02x\n", stat);
1049 1072  
1050   - dev_dbg(chan->dev, "event: Exit\n");
  1073 + dev_dbg(chan->dev, "irq: Exit\n");
1051 1074 tasklet_schedule(&chan->tasklet);
1052 1075 return IRQ_HANDLED;
1053 1076 }
... ... @@ -1235,7 +1258,8 @@
1235 1258 }
1236 1259  
1237 1260 spin_lock_init(&chan->desc_lock);
1238   - INIT_LIST_HEAD(&chan->ld_queue);
  1261 + INIT_LIST_HEAD(&chan->ld_pending);
  1262 + INIT_LIST_HEAD(&chan->ld_running);
1239 1263  
1240 1264 chan->common.device = &fdev->common;
1241 1265  
drivers/dma/fsldma.h
... ... @@ -131,7 +131,8 @@
131 131 struct fsldma_chan_regs __iomem *regs;
132 132 dma_cookie_t completed_cookie; /* The maximum cookie completed */
133 133 spinlock_t desc_lock; /* Descriptor operation lock */
134   - struct list_head ld_queue; /* Link descriptors queue */
  134 + struct list_head ld_pending; /* Link descriptors queue */
  135 + struct list_head ld_running; /* Link descriptors queue */
135 136 struct dma_chan common; /* DMA common channel */
136 137 struct dma_pool *desc_pool; /* Descriptors pool */
137 138 struct device *dev; /* Channel device */