Commit a4f56d4b103d4e5d1a59a9118db0185a6bd1a83b

Authored by Ira Snyder
Committed by Dan Williams
1 parent 4ce0e953f6

fsldma: rename struct fsl_dma_chan to struct fsldma_chan

This is the beginning of a cleanup which will change all instances of
"fsl_dma" to "fsldma" to match the name of the driver itself.

Signed-off-by: Ira W. Snyder <iws@ovro.caltech.edu>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>

Showing 2 changed files with 81 additions and 73 deletions
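For orientation, the renames in this patch all follow one pattern; the summary below is an editorial sketch drawn from the diff that follows, not part of the commit itself:

/* Summary of the type renames in this patch (names taken from the diff below). */
struct fsldma_chan;       /* was: struct fsl_dma_chan       */
struct fsldma_chan_regs;  /* was: struct fsl_dma_chan_regs  */
struct fsldma_device;     /* was: struct fsl_dma_device     */

/* The OpenFirmware glue picks up the same prefix: of_fsl_dma_probe() becomes
 * fsldma_of_probe(), of_fsl_dma_remove() becomes fsldma_of_remove(), and
 * of_fsl_dma_ids / of_fsl_dma_driver / of_fsl_dma_init / of_fsl_dma_exit
 * become fsldma_of_ids, fsldma_of_driver, fsldma_init and fsldma_exit. */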

drivers/dma/fsldma.c
... ... @@ -37,7 +37,7 @@
37 37 #include <asm/fsldma.h>
38 38 #include "fsldma.h"
39 39  
40   -static void dma_init(struct fsl_dma_chan *fsl_chan)
  40 +static void dma_init(struct fsldma_chan *fsl_chan)
41 41 {
42 42 /* Reset the channel */
43 43 DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, 0, 32);
... ... @@ -64,23 +64,23 @@
64 64  
65 65 }
66 66  
67   -static void set_sr(struct fsl_dma_chan *fsl_chan, u32 val)
  67 +static void set_sr(struct fsldma_chan *fsl_chan, u32 val)
68 68 {
69 69 DMA_OUT(fsl_chan, &fsl_chan->reg_base->sr, val, 32);
70 70 }
71 71  
72   -static u32 get_sr(struct fsl_dma_chan *fsl_chan)
  72 +static u32 get_sr(struct fsldma_chan *fsl_chan)
73 73 {
74 74 return DMA_IN(fsl_chan, &fsl_chan->reg_base->sr, 32);
75 75 }
76 76  
77   -static void set_desc_cnt(struct fsl_dma_chan *fsl_chan,
  77 +static void set_desc_cnt(struct fsldma_chan *fsl_chan,
78 78 struct fsl_dma_ld_hw *hw, u32 count)
79 79 {
80 80 hw->count = CPU_TO_DMA(fsl_chan, count, 32);
81 81 }
82 82  
83   -static void set_desc_src(struct fsl_dma_chan *fsl_chan,
  83 +static void set_desc_src(struct fsldma_chan *fsl_chan,
84 84 struct fsl_dma_ld_hw *hw, dma_addr_t src)
85 85 {
86 86 u64 snoop_bits;
... ... @@ -90,7 +90,7 @@
90 90 hw->src_addr = CPU_TO_DMA(fsl_chan, snoop_bits | src, 64);
91 91 }
92 92  
93   -static void set_desc_dest(struct fsl_dma_chan *fsl_chan,
  93 +static void set_desc_dest(struct fsldma_chan *fsl_chan,
94 94 struct fsl_dma_ld_hw *hw, dma_addr_t dest)
95 95 {
96 96 u64 snoop_bits;
... ... @@ -100,7 +100,7 @@
100 100 hw->dst_addr = CPU_TO_DMA(fsl_chan, snoop_bits | dest, 64);
101 101 }
102 102  
103   -static void set_desc_next(struct fsl_dma_chan *fsl_chan,
  103 +static void set_desc_next(struct fsldma_chan *fsl_chan,
104 104 struct fsl_dma_ld_hw *hw, dma_addr_t next)
105 105 {
106 106 u64 snoop_bits;
... ... @@ -110,38 +110,38 @@
110 110 hw->next_ln_addr = CPU_TO_DMA(fsl_chan, snoop_bits | next, 64);
111 111 }
112 112  
113   -static void set_cdar(struct fsl_dma_chan *fsl_chan, dma_addr_t addr)
  113 +static void set_cdar(struct fsldma_chan *fsl_chan, dma_addr_t addr)
114 114 {
115 115 DMA_OUT(fsl_chan, &fsl_chan->reg_base->cdar, addr | FSL_DMA_SNEN, 64);
116 116 }
117 117  
118   -static dma_addr_t get_cdar(struct fsl_dma_chan *fsl_chan)
  118 +static dma_addr_t get_cdar(struct fsldma_chan *fsl_chan)
119 119 {
120 120 return DMA_IN(fsl_chan, &fsl_chan->reg_base->cdar, 64) & ~FSL_DMA_SNEN;
121 121 }
122 122  
123   -static void set_ndar(struct fsl_dma_chan *fsl_chan, dma_addr_t addr)
  123 +static void set_ndar(struct fsldma_chan *fsl_chan, dma_addr_t addr)
124 124 {
125 125 DMA_OUT(fsl_chan, &fsl_chan->reg_base->ndar, addr, 64);
126 126 }
127 127  
128   -static dma_addr_t get_ndar(struct fsl_dma_chan *fsl_chan)
  128 +static dma_addr_t get_ndar(struct fsldma_chan *fsl_chan)
129 129 {
130 130 return DMA_IN(fsl_chan, &fsl_chan->reg_base->ndar, 64);
131 131 }
132 132  
133   -static u32 get_bcr(struct fsl_dma_chan *fsl_chan)
  133 +static u32 get_bcr(struct fsldma_chan *fsl_chan)
134 134 {
135 135 return DMA_IN(fsl_chan, &fsl_chan->reg_base->bcr, 32);
136 136 }
137 137  
138   -static int dma_is_idle(struct fsl_dma_chan *fsl_chan)
  138 +static int dma_is_idle(struct fsldma_chan *fsl_chan)
139 139 {
140 140 u32 sr = get_sr(fsl_chan);
141 141 return (!(sr & FSL_DMA_SR_CB)) || (sr & FSL_DMA_SR_CH);
142 142 }
143 143  
144   -static void dma_start(struct fsl_dma_chan *fsl_chan)
  144 +static void dma_start(struct fsldma_chan *fsl_chan)
145 145 {
146 146 u32 mode;
147 147  
... ... @@ -164,7 +164,7 @@
164 164 DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, mode, 32);
165 165 }
166 166  
167   -static void dma_halt(struct fsl_dma_chan *fsl_chan)
  167 +static void dma_halt(struct fsldma_chan *fsl_chan)
168 168 {
169 169 u32 mode;
170 170 int i;
... ... @@ -186,7 +186,7 @@
186 186 dev_err(fsl_chan->dev, "DMA halt timeout!\n");
187 187 }
188 188  
189   -static void set_ld_eol(struct fsl_dma_chan *fsl_chan,
  189 +static void set_ld_eol(struct fsldma_chan *fsl_chan,
190 190 struct fsl_desc_sw *desc)
191 191 {
192 192 u64 snoop_bits;
... ... @@ -199,7 +199,7 @@
199 199 | snoop_bits, 64);
200 200 }
201 201  
202   -static void append_ld_queue(struct fsl_dma_chan *fsl_chan,
  202 +static void append_ld_queue(struct fsldma_chan *fsl_chan,
203 203 struct fsl_desc_sw *new_desc)
204 204 {
205 205 struct fsl_desc_sw *queue_tail = to_fsl_desc(fsl_chan->ld_queue.prev);
... ... @@ -231,7 +231,7 @@
231 231 * read data from SA, SA + 1, SA + 2, SA + 3, then loop back to SA,
232 232 * SA + 1 ... and so on.
233 233 */
234   -static void fsl_chan_set_src_loop_size(struct fsl_dma_chan *fsl_chan, int size)
  234 +static void fsl_chan_set_src_loop_size(struct fsldma_chan *fsl_chan, int size)
235 235 {
236 236 u32 mode;
237 237  
... ... @@ -263,7 +263,7 @@
263 263 * write data to TA, TA + 1, TA + 2, TA + 3, then loop back to TA,
264 264 * TA + 1 ... and so on.
265 265 */
266   -static void fsl_chan_set_dest_loop_size(struct fsl_dma_chan *fsl_chan, int size)
  266 +static void fsl_chan_set_dest_loop_size(struct fsldma_chan *fsl_chan, int size)
267 267 {
268 268 u32 mode;
269 269  
... ... @@ -296,7 +296,7 @@
296 296 *
297 297 * A size of 0 disables external pause control. The maximum size is 1024.
298 298 */
299   -static void fsl_chan_set_request_count(struct fsl_dma_chan *fsl_chan, int size)
  299 +static void fsl_chan_set_request_count(struct fsldma_chan *fsl_chan, int size)
300 300 {
301 301 u32 mode;
302 302  
... ... @@ -317,7 +317,7 @@
317 317 * The DMA Request Count feature should be used in addition to this feature
318 318 * to set the number of bytes to transfer before pausing the channel.
319 319 */
320   -static void fsl_chan_toggle_ext_pause(struct fsl_dma_chan *fsl_chan, int enable)
  320 +static void fsl_chan_toggle_ext_pause(struct fsldma_chan *fsl_chan, int enable)
321 321 {
322 322 if (enable)
323 323 fsl_chan->feature |= FSL_DMA_CHAN_PAUSE_EXT;
... ... @@ -335,7 +335,7 @@
335 335 * transfer immediately. The DMA channel will wait for the
336 336 * control pin asserted.
337 337 */
338   -static void fsl_chan_toggle_ext_start(struct fsl_dma_chan *fsl_chan, int enable)
  338 +static void fsl_chan_toggle_ext_start(struct fsldma_chan *fsl_chan, int enable)
339 339 {
340 340 if (enable)
341 341 fsl_chan->feature |= FSL_DMA_CHAN_START_EXT;
... ... @@ -345,7 +345,7 @@
345 345  
346 346 static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
347 347 {
348   - struct fsl_dma_chan *fsl_chan = to_fsl_chan(tx->chan);
  348 + struct fsldma_chan *fsl_chan = to_fsl_chan(tx->chan);
349 349 struct fsl_desc_sw *desc = tx_to_fsl_desc(tx);
350 350 struct fsl_desc_sw *child;
351 351 unsigned long flags;
... ... @@ -379,7 +379,7 @@
379 379 * Return - The descriptor allocated. NULL for failed.
380 380 */
381 381 static struct fsl_desc_sw *fsl_dma_alloc_descriptor(
382   - struct fsl_dma_chan *fsl_chan)
  382 + struct fsldma_chan *fsl_chan)
383 383 {
384 384 dma_addr_t pdesc;
385 385 struct fsl_desc_sw *desc_sw;
... ... @@ -408,7 +408,7 @@
408 408 */
409 409 static int fsl_dma_alloc_chan_resources(struct dma_chan *chan)
410 410 {
411   - struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);
  411 + struct fsldma_chan *fsl_chan = to_fsl_chan(chan);
412 412  
413 413 /* Has this channel already been allocated? */
414 414 if (fsl_chan->desc_pool)
... ... @@ -435,7 +435,7 @@
435 435 */
436 436 static void fsl_dma_free_chan_resources(struct dma_chan *chan)
437 437 {
438   - struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);
  438 + struct fsldma_chan *fsl_chan = to_fsl_chan(chan);
439 439 struct fsl_desc_sw *desc, *_desc;
440 440 unsigned long flags;
441 441  
... ... @@ -459,7 +459,7 @@
459 459 static struct dma_async_tx_descriptor *
460 460 fsl_dma_prep_interrupt(struct dma_chan *chan, unsigned long flags)
461 461 {
462   - struct fsl_dma_chan *fsl_chan;
  462 + struct fsldma_chan *fsl_chan;
463 463 struct fsl_desc_sw *new;
464 464  
465 465 if (!chan)
... ... @@ -489,7 +489,7 @@
489 489 struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src,
490 490 size_t len, unsigned long flags)
491 491 {
492   - struct fsl_dma_chan *fsl_chan;
  492 + struct fsldma_chan *fsl_chan;
493 493 struct fsl_desc_sw *first = NULL, *prev = NULL, *new;
494 494 struct list_head *list;
495 495 size_t copy;
... ... @@ -575,7 +575,7 @@
575 575 struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
576 576 enum dma_data_direction direction, unsigned long flags)
577 577 {
578   - struct fsl_dma_chan *fsl_chan;
  578 + struct fsldma_chan *fsl_chan;
579 579 struct fsl_desc_sw *first = NULL, *prev = NULL, *new = NULL;
580 580 struct fsl_dma_slave *slave;
581 581 struct list_head *tx_list;
... ... @@ -759,7 +759,7 @@
759 759  
760 760 static void fsl_dma_device_terminate_all(struct dma_chan *chan)
761 761 {
762   - struct fsl_dma_chan *fsl_chan;
  762 + struct fsldma_chan *fsl_chan;
763 763 struct fsl_desc_sw *desc, *tmp;
764 764 unsigned long flags;
765 765  
... ... @@ -786,7 +786,7 @@
786 786 * fsl_dma_update_completed_cookie - Update the completed cookie.
787 787 * @fsl_chan : Freescale DMA channel
788 788 */
789   -static void fsl_dma_update_completed_cookie(struct fsl_dma_chan *fsl_chan)
  789 +static void fsl_dma_update_completed_cookie(struct fsldma_chan *fsl_chan)
790 790 {
791 791 struct fsl_desc_sw *cur_desc, *desc;
792 792 dma_addr_t ld_phy;
... ... @@ -820,7 +820,7 @@
820 820 * If 'in_intr' is set, the function will move the link descriptor to
821 821 * the recycle list. Otherwise, free it directly.
822 822 */
823   -static void fsl_chan_ld_cleanup(struct fsl_dma_chan *fsl_chan)
  823 +static void fsl_chan_ld_cleanup(struct fsldma_chan *fsl_chan)
824 824 {
825 825 struct fsl_desc_sw *desc, *_desc;
826 826 unsigned long flags;
... ... @@ -864,7 +864,7 @@
864 864 * fsl_chan_xfer_ld_queue - Transfer link descriptors in channel ld_queue.
865 865 * @fsl_chan : Freescale DMA channel
866 866 */
867   -static void fsl_chan_xfer_ld_queue(struct fsl_dma_chan *fsl_chan)
  867 +static void fsl_chan_xfer_ld_queue(struct fsldma_chan *fsl_chan)
868 868 {
869 869 struct list_head *ld_node;
870 870 dma_addr_t next_dest_addr;
... ... @@ -912,7 +912,7 @@
912 912 */
913 913 static void fsl_dma_memcpy_issue_pending(struct dma_chan *chan)
914 914 {
915   - struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);
  915 + struct fsldma_chan *fsl_chan = to_fsl_chan(chan);
916 916  
917 917 #ifdef FSL_DMA_LD_DEBUG
918 918 struct fsl_desc_sw *ld;
... ... @@ -949,7 +949,7 @@
949 949 dma_cookie_t *done,
950 950 dma_cookie_t *used)
951 951 {
952   - struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);
  952 + struct fsldma_chan *fsl_chan = to_fsl_chan(chan);
953 953 dma_cookie_t last_used;
954 954 dma_cookie_t last_complete;
955 955  
... ... @@ -969,7 +969,7 @@
969 969  
970 970 static irqreturn_t fsl_dma_chan_do_interrupt(int irq, void *data)
971 971 {
972   - struct fsl_dma_chan *fsl_chan = (struct fsl_dma_chan *)data;
  972 + struct fsldma_chan *fsl_chan = data;
973 973 u32 stat;
974 974 int update_cookie = 0;
975 975 int xfer_ld_q = 0;
... ... @@ -1050,9 +1050,9 @@
1050 1050  
1051 1051 static irqreturn_t fsl_dma_do_interrupt(int irq, void *data)
1052 1052 {
1053   - struct fsl_dma_device *fdev = (struct fsl_dma_device *)data;
1054   - u32 gsr;
  1053 + struct fsldma_device *fdev = data;
1055 1054 int ch_nr;
  1055 + u32 gsr;
1056 1056  
1057 1057 gsr = (fdev->feature & FSL_DMA_BIG_ENDIAN) ? in_be32(fdev->reg_base)
1058 1058 : in_le32(fdev->reg_base);
... ... @@ -1064,19 +1064,23 @@
1064 1064  
1065 1065 static void dma_do_tasklet(unsigned long data)
1066 1066 {
1067   - struct fsl_dma_chan *fsl_chan = (struct fsl_dma_chan *)data;
  1067 + struct fsldma_chan *fsl_chan = (struct fsldma_chan *)data;
1068 1068 fsl_chan_ld_cleanup(fsl_chan);
1069 1069 }
1070 1070  
1071   -static int __devinit fsl_dma_chan_probe(struct fsl_dma_device *fdev,
  1071 +/*----------------------------------------------------------------------------*/
  1072 +/* OpenFirmware Subsystem */
  1073 +/*----------------------------------------------------------------------------*/
  1074 +
  1075 +static int __devinit fsl_dma_chan_probe(struct fsldma_device *fdev,
1072 1076 struct device_node *node, u32 feature, const char *compatible)
1073 1077 {
1074   - struct fsl_dma_chan *new_fsl_chan;
  1078 + struct fsldma_chan *new_fsl_chan;
1075 1079 struct resource res;
1076 1080 int err;
1077 1081  
1078 1082 /* alloc channel */
1079   - new_fsl_chan = kzalloc(sizeof(struct fsl_dma_chan), GFP_KERNEL);
  1083 + new_fsl_chan = kzalloc(sizeof(*new_fsl_chan), GFP_KERNEL);
1080 1084 if (!new_fsl_chan) {
1081 1085 dev_err(fdev->dev, "No free memory for allocating "
1082 1086 "dma channels!\n");
... ... @@ -1167,7 +1171,7 @@
1167 1171 return err;
1168 1172 }
1169 1173  
1170   -static void fsl_dma_chan_remove(struct fsl_dma_chan *fchan)
  1174 +static void fsl_dma_chan_remove(struct fsldma_chan *fchan)
1171 1175 {
1172 1176 if (fchan->irq != NO_IRQ)
1173 1177 free_irq(fchan->irq, fchan);
... ... @@ -1176,15 +1180,15 @@
1176 1180 kfree(fchan);
1177 1181 }
1178 1182  
1179   -static int __devinit of_fsl_dma_probe(struct of_device *dev,
  1183 +static int __devinit fsldma_of_probe(struct of_device *dev,
1180 1184 const struct of_device_id *match)
1181 1185 {
1182 1186 int err;
1183   - struct fsl_dma_device *fdev;
  1187 + struct fsldma_device *fdev;
1184 1188 struct device_node *child;
1185 1189 struct resource res;
1186 1190  
1187   - fdev = kzalloc(sizeof(struct fsl_dma_device), GFP_KERNEL);
  1191 + fdev = kzalloc(sizeof(*fdev), GFP_KERNEL);
1188 1192 if (!fdev) {
1189 1193 dev_err(&dev->dev, "No enough memory for 'priv'\n");
1190 1194 return -ENOMEM;
... ... @@ -1256,9 +1260,9 @@
1256 1260 return err;
1257 1261 }
1258 1262  
1259   -static int of_fsl_dma_remove(struct of_device *of_dev)
  1263 +static int fsldma_of_remove(struct of_device *of_dev)
1260 1264 {
1261   - struct fsl_dma_device *fdev;
  1265 + struct fsldma_device *fdev;
1262 1266 unsigned int i;
1263 1267  
1264 1268 fdev = dev_get_drvdata(&of_dev->dev);
... ... @@ -1280,39 +1284,43 @@
1280 1284 return 0;
1281 1285 }
1282 1286  
1283   -static struct of_device_id of_fsl_dma_ids[] = {
  1287 +static struct of_device_id fsldma_of_ids[] = {
1284 1288 { .compatible = "fsl,eloplus-dma", },
1285 1289 { .compatible = "fsl,elo-dma", },
1286 1290 {}
1287 1291 };
1288 1292  
1289   -static struct of_platform_driver of_fsl_dma_driver = {
1290   - .name = "fsl-elo-dma",
1291   - .match_table = of_fsl_dma_ids,
1292   - .probe = of_fsl_dma_probe,
1293   - .remove = of_fsl_dma_remove,
  1293 +static struct of_platform_driver fsldma_of_driver = {
  1294 + .name = "fsl-elo-dma",
  1295 + .match_table = fsldma_of_ids,
  1296 + .probe = fsldma_of_probe,
  1297 + .remove = fsldma_of_remove,
1294 1298 };
1295 1299  
1296   -static __init int of_fsl_dma_init(void)
  1300 +/*----------------------------------------------------------------------------*/
  1301 +/* Module Init / Exit */
  1302 +/*----------------------------------------------------------------------------*/
  1303 +
  1304 +static __init int fsldma_init(void)
1297 1305 {
1298 1306 int ret;
1299 1307  
1300 1308 pr_info("Freescale Elo / Elo Plus DMA driver\n");
1301 1309  
1302   - ret = of_register_platform_driver(&of_fsl_dma_driver);
  1310 + ret = of_register_platform_driver(&fsldma_of_driver);
1303 1311 if (ret)
1304 1312 pr_err("fsldma: failed to register platform driver\n");
1305 1313  
1306 1314 return ret;
1307 1315 }
1308 1316  
1309   -static void __exit of_fsl_dma_exit(void)
  1317 +static void __exit fsldma_exit(void)
1310 1318 {
1311   - of_unregister_platform_driver(&of_fsl_dma_driver);
  1319 + of_unregister_platform_driver(&fsldma_of_driver);
1312 1320 }
1313 1321  
1314   -subsys_initcall(of_fsl_dma_init);
1315   -module_exit(of_fsl_dma_exit);
  1322 +subsys_initcall(fsldma_init);
  1323 +module_exit(fsldma_exit);
1316 1324  
1317 1325 MODULE_DESCRIPTION("Freescale Elo / Elo Plus DMA driver");
1318 1326 MODULE_LICENSE("GPL");
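Besides the renames, the probe paths above switch from sizeof(struct ...) to the sizeof(*ptr) allocation idiom. A minimal sketch of the idea, in kernel style; the variable name and error path here are illustrative, not taken from the driver:

#include <linux/slab.h>

/* sizeof(*chan) is derived from the pointer's own type, so a later rename or
 * change of the struct cannot leave the allocation size out of sync. */
struct fsldma_chan *chan;

chan = kzalloc(sizeof(*chan), GFP_KERNEL);  /* was: kzalloc(sizeof(struct fsl_dma_chan), GFP_KERNEL) */
if (!chan)
	return -ENOMEM;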
drivers/dma/fsldma.h
... ... @@ -94,7 +94,7 @@
94 94 struct dma_async_tx_descriptor async_tx;
95 95 } __attribute__((aligned(32)));
96 96  
97   -struct fsl_dma_chan_regs {
  97 +struct fsldma_chan_regs {
98 98 u32 mr; /* 0x00 - Mode Register */
99 99 u32 sr; /* 0x04 - Status Register */
100 100 u64 cdar; /* 0x08 - Current descriptor address register */
... ... @@ -104,19 +104,19 @@
104 104 u64 ndar; /* 0x24 - Next Descriptor Address Register */
105 105 };
106 106  
107   -struct fsl_dma_chan;
  107 +struct fsldma_chan;
108 108 #define FSL_DMA_MAX_CHANS_PER_DEVICE 4
109 109  
110   -struct fsl_dma_device {
  110 +struct fsldma_device {
111 111 void __iomem *reg_base; /* DGSR register base */
112 112 struct device *dev;
113 113 struct dma_device common;
114   - struct fsl_dma_chan *chan[FSL_DMA_MAX_CHANS_PER_DEVICE];
  114 + struct fsldma_chan *chan[FSL_DMA_MAX_CHANS_PER_DEVICE];
115 115 u32 feature; /* The same as DMA channels */
116 116 int irq; /* Channel IRQ */
117 117 };
118 118  
119   -/* Define macros for fsl_dma_chan->feature property */
  119 +/* Define macros for fsldma_chan->feature property */
120 120 #define FSL_DMA_LITTLE_ENDIAN 0x00000000
121 121 #define FSL_DMA_BIG_ENDIAN 0x00000001
122 122  
... ... @@ -127,8 +127,8 @@
127 127 #define FSL_DMA_CHAN_PAUSE_EXT 0x00001000
128 128 #define FSL_DMA_CHAN_START_EXT 0x00002000
129 129  
130   -struct fsl_dma_chan {
131   - struct fsl_dma_chan_regs __iomem *reg_base;
  130 +struct fsldma_chan {
  131 + struct fsldma_chan_regs __iomem *reg_base;
132 132 dma_cookie_t completed_cookie; /* The maximum cookie completed */
133 133 spinlock_t desc_lock; /* Descriptor operation lock */
134 134 struct list_head ld_queue; /* Link descriptors queue */
... ... @@ -140,14 +140,14 @@
140 140 struct tasklet_struct tasklet;
141 141 u32 feature;
142 142  
143   - void (*toggle_ext_pause)(struct fsl_dma_chan *fsl_chan, int enable);
144   - void (*toggle_ext_start)(struct fsl_dma_chan *fsl_chan, int enable);
145   - void (*set_src_loop_size)(struct fsl_dma_chan *fsl_chan, int size);
146   - void (*set_dest_loop_size)(struct fsl_dma_chan *fsl_chan, int size);
147   - void (*set_request_count)(struct fsl_dma_chan *fsl_chan, int size);
  143 + void (*toggle_ext_pause)(struct fsldma_chan *fsl_chan, int enable);
  144 + void (*toggle_ext_start)(struct fsldma_chan *fsl_chan, int enable);
  145 + void (*set_src_loop_size)(struct fsldma_chan *fsl_chan, int size);
  146 + void (*set_dest_loop_size)(struct fsldma_chan *fsl_chan, int size);
  147 + void (*set_request_count)(struct fsldma_chan *fsl_chan, int size);
148 148 };
149 149  
150   -#define to_fsl_chan(chan) container_of(chan, struct fsl_dma_chan, common)
  150 +#define to_fsl_chan(chan) container_of(chan, struct fsldma_chan, common)
151 151 #define to_fsl_desc(lh) container_of(lh, struct fsl_desc_sw, node)
152 152 #define tx_to_fsl_desc(tx) container_of(tx, struct fsl_desc_sw, async_tx)
153 153