drivers/dma/fsldma.c

/*
 * Freescale MPC85xx, MPC83xx DMA Engine support
 *
 * Copyright (C) 2007-2010 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author:
 *   Zhang Wei <wei.zhang@freescale.com>, Jul 2007
 *   Ebony Zhu <ebony.zhu@freescale.com>, May 2007
 *
 * Description:
 *   DMA engine driver for the Freescale MPC8540 DMA controller, which also
 *   works with the MPC8560, MPC8555, MPC8548, MPC8641, and other parts.
 *   Support for the MPC8349 DMA controller is included as well.
 *
 * This driver instructs the DMA controller to issue the PCI Read Multiple
 * command for PCI read operations, instead of using the default PCI Read Line
 * command. Please be aware that this setting may result in read pre-fetching
 * on some platforms.
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/of_platform.h>

#include "fsldma.h"

#define chan_dbg(chan, fmt, arg...)					\
	dev_dbg(chan->dev, "%s: " fmt, chan->name, ##arg)
#define chan_err(chan, fmt, arg...)					\
	dev_err(chan->dev, "%s: " fmt, chan->name, ##arg)

static const char msg_ld_oom[] = "No free memory for link descriptor";

/*
 * Register Helpers
 */
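/*
 * Note: DMA_IN()/DMA_OUT() (see fsldma.h) hide the big/little endian layout
 * of the controller registers: sr is the channel status register, mr the
 * mode register, cdar the current descriptor address register and bcr the
 * byte count register.
 */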

static void set_sr(struct fsldma_chan *chan, u32 val)
{
	DMA_OUT(chan, &chan->regs->sr, val, 32);
}

static u32 get_sr(struct fsldma_chan *chan)
{
	return DMA_IN(chan, &chan->regs->sr, 32);
}

static void set_cdar(struct fsldma_chan *chan, dma_addr_t addr)
{
	DMA_OUT(chan, &chan->regs->cdar, addr | FSL_DMA_SNEN, 64);
}

static dma_addr_t get_cdar(struct fsldma_chan *chan)
{
	return DMA_IN(chan, &chan->regs->cdar, 64) & ~FSL_DMA_SNEN;
}

static u32 get_bcr(struct fsldma_chan *chan)
{
	return DMA_IN(chan, &chan->regs->bcr, 32);
}

/*
 * Descriptor Helpers
 */

static void set_desc_cnt(struct fsldma_chan *chan,
				struct fsl_dma_ld_hw *hw, u32 count)
{
	hw->count = CPU_TO_DMA(chan, count, 32);
}

static u32 get_desc_cnt(struct fsldma_chan *chan, struct fsl_desc_sw *desc)
{
	return DMA_TO_CPU(chan, desc->hw.count, 32);
}

static void set_desc_src(struct fsldma_chan *chan,
			 struct fsl_dma_ld_hw *hw, dma_addr_t src)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
		? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0;
	hw->src_addr = CPU_TO_DMA(chan, snoop_bits | src, 64);
}

static dma_addr_t get_desc_src(struct fsldma_chan *chan,
			       struct fsl_desc_sw *desc)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
		? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0;
	return DMA_TO_CPU(chan, desc->hw.src_addr, 64) & ~snoop_bits;
}

static void set_desc_dst(struct fsldma_chan *chan,
			 struct fsl_dma_ld_hw *hw, dma_addr_t dst)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
		? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0;
	hw->dst_addr = CPU_TO_DMA(chan, snoop_bits | dst, 64);
}

static dma_addr_t get_desc_dst(struct fsldma_chan *chan,
			       struct fsl_desc_sw *desc)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
		? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0;
	return DMA_TO_CPU(chan, desc->hw.dst_addr, 64) & ~snoop_bits;
}

static void set_desc_next(struct fsldma_chan *chan,
			  struct fsl_dma_ld_hw *hw, dma_addr_t next)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
		? FSL_DMA_SNEN : 0;
	hw->next_ln_addr = CPU_TO_DMA(chan, snoop_bits | next, 64);
}

static void set_ld_eol(struct fsldma_chan *chan, struct fsl_desc_sw *desc)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
		? FSL_DMA_SNEN : 0;

	desc->hw.next_ln_addr = CPU_TO_DMA(chan,
		DMA_TO_CPU(chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL
			| snoop_bits, 64);
}

/*
 * DMA Engine Hardware Control Helpers
 */
  
static void dma_init(struct fsldma_chan *chan)
{
	/* Reset the channel */
	DMA_OUT(chan, &chan->regs->mr, 0, 32);

	switch (chan->feature & FSL_DMA_IP_MASK) {
	case FSL_DMA_IP_85XX:
		/* Set the channel to the following modes:
		 * EIE - Error interrupt enable
		 * EOLNIE - End of links interrupt enable
		 * BWC - Bandwidth sharing among channels
		 */
		DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_BWC
				| FSL_DMA_MR_EIE | FSL_DMA_MR_EOLNIE, 32);
		break;
	case FSL_DMA_IP_83XX:
		/* Set the channel to the following modes:
		 * EOTIE - End-of-transfer interrupt enable
		 * PRC_RM - PCI read multiple
		 */
		DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_EOTIE
				| FSL_DMA_MR_PRC_RM, 32);
		break;
	}
}
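
/*
 * The channel is considered idle when the status register reports that it
 * is no longer busy (CB clear) or that it has halted (CH set).
 */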
static int dma_is_idle(struct fsldma_chan *chan)
{
	u32 sr = get_sr(chan);
	return (!(sr & FSL_DMA_SR_CB)) || (sr & FSL_DMA_SR_CH);
}

/*
 * Start the DMA controller
 *
 * Preconditions:
 * - the CDAR register must point to the start descriptor
 * - the MRn[CS] bit must be cleared
 */
static void dma_start(struct fsldma_chan *chan)
{
	u32 mode;
	mode = DMA_IN(chan, &chan->regs->mr, 32);

	if (chan->feature & FSL_DMA_CHAN_PAUSE_EXT) {
		DMA_OUT(chan, &chan->regs->bcr, 0, 32);
		mode |= FSL_DMA_MR_EMP_EN;
	} else {
		mode &= ~FSL_DMA_MR_EMP_EN;
	}

	if (chan->feature & FSL_DMA_CHAN_START_EXT) {
		mode |= FSL_DMA_MR_EMS_EN;
	} else {
		mode &= ~FSL_DMA_MR_EMS_EN;
		mode |= FSL_DMA_MR_CS;
	}

	DMA_OUT(chan, &chan->regs->mr, mode, 32);
}
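
/*
 * Halt the channel: on 85xx controllers issue a channel abort first, then
 * clear the start/external-start bits and poll (up to roughly 1ms) for the
 * controller to go idle.
 */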
static void dma_halt(struct fsldma_chan *chan)
{
	u32 mode;
	int i;

	/* read the mode register */
	mode = DMA_IN(chan, &chan->regs->mr, 32);

	/*
	 * The 85xx controller supports channel abort, which will stop
	 * the current transfer. On 83xx, this bit is the transfer error
	 * mask bit, which should not be changed.
	 */
	if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) {
		mode |= FSL_DMA_MR_CA;
		DMA_OUT(chan, &chan->regs->mr, mode, 32);

		mode &= ~FSL_DMA_MR_CA;
	}

	/* stop the DMA controller */
	mode &= ~(FSL_DMA_MR_CS | FSL_DMA_MR_EMS_EN);
	DMA_OUT(chan, &chan->regs->mr, mode, 32);

	/* wait for the DMA controller to become idle */
	for (i = 0; i < 100; i++) {
		if (dma_is_idle(chan))
			return;
		udelay(10);
	}

	if (!dma_is_idle(chan))
		chan_err(chan, "DMA halt timeout!\n");
}

/**
 * fsl_chan_set_src_loop_size - Set source address hold transfer size
 * @chan : Freescale DMA channel
 * @size : Address loop size, 0 to disable the loop
 *
 * Set the source address hold (loop) transfer size. When a loop size is
 * set, the DMA reads from the source address (SA) in a loop: with a loop
 * size of 4, the DMA reads from SA, SA + 1, SA + 2, SA + 3, then wraps
 * back to SA, SA + 1, and so on.
 */
static void fsl_chan_set_src_loop_size(struct fsldma_chan *chan, int size)
{
	u32 mode;
	mode = DMA_IN(chan, &chan->regs->mr, 32);

	switch (size) {
	case 0:
		mode &= ~FSL_DMA_MR_SAHE;
		break;
	case 1:
	case 2:
	case 4:
	case 8:
		mode |= FSL_DMA_MR_SAHE | (__ilog2(size) << 14);
		break;
	}

	DMA_OUT(chan, &chan->regs->mr, mode, 32);
}
  
/**
 * fsl_chan_set_dst_loop_size - Set destination address hold transfer size
 * @chan : Freescale DMA channel
 * @size : Address loop size, 0 to disable the loop
 *
 * Set the destination address hold (loop) transfer size. When a loop size
 * is set, the DMA writes to the destination address (TA) in a loop: with a
 * loop size of 4, the DMA writes to TA, TA + 1, TA + 2, TA + 3, then wraps
 * back to TA, TA + 1, and so on.
 */
static void fsl_chan_set_dst_loop_size(struct fsldma_chan *chan, int size)
{
	u32 mode;
	mode = DMA_IN(chan, &chan->regs->mr, 32);

	switch (size) {
	case 0:
		mode &= ~FSL_DMA_MR_DAHE;
		break;
	case 1:
	case 2:
	case 4:
	case 8:
		mode |= FSL_DMA_MR_DAHE | (__ilog2(size) << 16);
		break;
	}

	DMA_OUT(chan, &chan->regs->mr, mode, 32);
}
  
/**
 * fsl_chan_set_request_count - Set DMA Request Count for external control
 * @chan : Freescale DMA channel
 * @size : Number of bytes to transfer in a single request
 *
 * The Freescale DMA channel can be controlled by the external signal DREQ#.
 * The DMA request count is how many bytes are allowed to transfer before
 * pausing the channel, after which a new assertion of DREQ# resumes channel
 * operation.
 *
 * A size of 0 disables external pause control. The maximum size is 1024.
 */
static void fsl_chan_set_request_count(struct fsldma_chan *chan, int size)
{
	u32 mode;
	BUG_ON(size > 1024);

	mode = DMA_IN(chan, &chan->regs->mr, 32);
	mode |= (__ilog2(size) << 24) & 0x0f000000;
	DMA_OUT(chan, &chan->regs->mr, mode, 32);
}

/**
 * fsl_chan_toggle_ext_pause - Toggle channel external pause status
 * @chan : Freescale DMA channel
 * @enable : 0 is disabled, 1 is enabled.
 *
 * The Freescale DMA channel can be controlled by the external signal DREQ#.
 * The DMA Request Count feature should be used in addition to this feature
 * to set the number of bytes to transfer before pausing the channel.
 */
static void fsl_chan_toggle_ext_pause(struct fsldma_chan *chan, int enable)
{
	if (enable)
		chan->feature |= FSL_DMA_CHAN_PAUSE_EXT;
	else
		chan->feature &= ~FSL_DMA_CHAN_PAUSE_EXT;
}

/**
 * fsl_chan_toggle_ext_start - Toggle channel external start status
 * @chan : Freescale DMA channel
 * @enable : 0 is disabled, 1 is enabled.
 *
 * When external start is enabled, the channel is started by an external
 * DMA start pin, so dma_start() does not begin the transfer immediately.
 * The DMA channel waits until the control pin is asserted.
 */
static void fsl_chan_toggle_ext_start(struct fsldma_chan *chan, int enable)
{
	if (enable)
		chan->feature |= FSL_DMA_CHAN_START_EXT;
	else
		chan->feature &= ~FSL_DMA_CHAN_START_EXT;
}
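
/*
 * Append the descriptors on @desc's tx_list to the channel's pending queue,
 * linking them onto the end of the hardware descriptor chain that is
 * already queued. LOCKING: must hold chan->desc_lock.
 */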
  static void append_ld_queue(struct fsldma_chan *chan, struct fsl_desc_sw *desc)
  {
  	struct fsl_desc_sw *tail = to_fsl_desc(chan->ld_pending.prev);
  
  	if (list_empty(&chan->ld_pending))
  		goto out_splice;
  
  	/*
  	 * Add the hardware descriptor to the chain of hardware descriptors
  	 * that already exists in memory.
  	 *
  	 * This will un-set the EOL bit of the existing transaction, and the
  	 * last link in this transaction will become the EOL descriptor.
  	 */
  	set_desc_next(chan, &tail->hw, desc->async_tx.phys);
  
  	/*
  	 * Add the software descriptor and all children to the list
  	 * of pending transactions
  	 */
  out_splice:
  	list_splice_tail_init(&desc->tx_list, &chan->ld_pending);
  }
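
/*
 * tx_submit callback: assign a cookie to every descriptor in the
 * transaction and append it to the pending queue. The hardware is not
 * started here; that happens from fsl_dma_memcpy_issue_pending() or from
 * the completion tasklet.
 */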
static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct fsldma_chan *chan = to_fsl_chan(tx->chan);
	struct fsl_desc_sw *desc = tx_to_fsl_desc(tx);
	struct fsl_desc_sw *child;
	unsigned long flags;
	dma_cookie_t cookie;

	spin_lock_irqsave(&chan->desc_lock, flags);

	/*
	 * assign cookies to all of the software descriptors
	 * that make up this transaction
	 */
	cookie = chan->common.cookie;
	list_for_each_entry(child, &desc->tx_list, node) {
		cookie++;
		if (cookie < DMA_MIN_COOKIE)
			cookie = DMA_MIN_COOKIE;

		child->async_tx.cookie = cookie;
	}
	chan->common.cookie = cookie;

	/* put this transaction onto the tail of the pending queue */
	append_ld_queue(chan, desc);

	spin_unlock_irqrestore(&chan->desc_lock, flags);

	return cookie;
}
  
/**
 * fsl_dma_alloc_descriptor - Allocate descriptor from channel's DMA pool.
 * @chan : Freescale DMA channel
 *
 * Return - The allocated descriptor, or NULL on failure.
 */
static struct fsl_desc_sw *fsl_dma_alloc_descriptor(struct fsldma_chan *chan)
{
	struct fsl_desc_sw *desc;
	dma_addr_t pdesc;

	desc = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &pdesc);
	if (!desc) {
		chan_dbg(chan, "out of memory for link descriptor\n");
		return NULL;
	}

	memset(desc, 0, sizeof(*desc));
	INIT_LIST_HEAD(&desc->tx_list);
	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
	desc->async_tx.tx_submit = fsl_dma_tx_submit;
	desc->async_tx.phys = pdesc;

#ifdef FSL_DMA_LD_DEBUG
	chan_dbg(chan, "LD %p allocated\n", desc);
#endif

	return desc;
}

/**
 * fsl_dma_alloc_chan_resources - Allocate resources for DMA channel.
 * @chan : Freescale DMA channel
 *
 * This function will create a dma pool for descriptor allocation.
 *
 * Return - The number of descriptors allocated.
 */
static int fsl_dma_alloc_chan_resources(struct dma_chan *dchan)
{
	struct fsldma_chan *chan = to_fsl_chan(dchan);

	/* Has this channel already been allocated? */
	if (chan->desc_pool)
		return 1;

	/*
	 * The descriptor must be aligned to 32 bytes to meet the FSL DMA
	 * specification requirement.
	 */
	chan->desc_pool = dma_pool_create(chan->name, chan->dev,
					  sizeof(struct fsl_desc_sw),
					  __alignof__(struct fsl_desc_sw), 0);
	if (!chan->desc_pool) {
		chan_err(chan, "unable to allocate descriptor pool\n");
		return -ENOMEM;
	}

	/* there is at least one descriptor free to be allocated */
	return 1;
}
  
/**
 * fsldma_free_desc_list - Free all descriptors in a queue
 * @chan: Freescale DMA channel
 * @list: the list to free
 *
 * LOCKING: must hold chan->desc_lock
 */
static void fsldma_free_desc_list(struct fsldma_chan *chan,
				  struct list_head *list)
{
	struct fsl_desc_sw *desc, *_desc;

	list_for_each_entry_safe(desc, _desc, list, node) {
		list_del(&desc->node);
#ifdef FSL_DMA_LD_DEBUG
		chan_dbg(chan, "LD %p free\n", desc);
#endif
		dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
	}
}

static void fsldma_free_desc_list_reverse(struct fsldma_chan *chan,
					  struct list_head *list)
{
	struct fsl_desc_sw *desc, *_desc;

	list_for_each_entry_safe_reverse(desc, _desc, list, node) {
		list_del(&desc->node);
#ifdef FSL_DMA_LD_DEBUG
		chan_dbg(chan, "LD %p free\n", desc);
#endif
		dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
	}
}
  
/**
 * fsl_dma_free_chan_resources - Free all resources of the channel.
 * @chan : Freescale DMA channel
 */
static void fsl_dma_free_chan_resources(struct dma_chan *dchan)
{
	struct fsldma_chan *chan = to_fsl_chan(dchan);
	unsigned long flags;

	chan_dbg(chan, "free all channel resources\n");
	spin_lock_irqsave(&chan->desc_lock, flags);
	fsldma_free_desc_list(chan, &chan->ld_pending);
	fsldma_free_desc_list(chan, &chan->ld_running);
	spin_unlock_irqrestore(&chan->desc_lock, flags);

	dma_pool_destroy(chan->desc_pool);
	chan->desc_pool = NULL;
}
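
/*
 * Prepare a DMA_INTERRUPT operation: a single NULL-transfer link descriptor
 * used by the async_tx API to raise an interrupt and run callbacks and
 * dependencies without moving any data.
 */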
static struct dma_async_tx_descriptor *
fsl_dma_prep_interrupt(struct dma_chan *dchan, unsigned long flags)
{
	struct fsldma_chan *chan;
	struct fsl_desc_sw *new;

	if (!dchan)
		return NULL;

	chan = to_fsl_chan(dchan);

	new = fsl_dma_alloc_descriptor(chan);
	if (!new) {
		chan_err(chan, "%s\n", msg_ld_oom);
		return NULL;
	}

	new->async_tx.cookie = -EBUSY;
	new->async_tx.flags = flags;

	/* Insert the link descriptor to the LD ring */
	list_add_tail(&new->node, &new->tx_list);

	/* Set End-of-link to the last link descriptor of new list */
	set_ld_eol(chan, new);

	return &new->async_tx;
}
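
/*
 * Prepare a memory-to-memory copy. The transfer is split into a chain of
 * link descriptors, each moving at most FSL_DMA_BCR_MAX_CNT bytes.
 */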
static struct dma_async_tx_descriptor *
fsl_dma_prep_memcpy(struct dma_chan *dchan,
	dma_addr_t dma_dst, dma_addr_t dma_src,
	size_t len, unsigned long flags)
{
	struct fsldma_chan *chan;
	struct fsl_desc_sw *first = NULL, *prev = NULL, *new;
	size_t copy;

	if (!dchan)
		return NULL;

	if (!len)
		return NULL;

	chan = to_fsl_chan(dchan);

	do {
		/* Allocate the link descriptor from DMA pool */
		new = fsl_dma_alloc_descriptor(chan);
		if (!new) {
			chan_err(chan, "%s\n", msg_ld_oom);
			goto fail;
		}

		copy = min(len, (size_t)FSL_DMA_BCR_MAX_CNT);

		set_desc_cnt(chan, &new->hw, copy);
		set_desc_src(chan, &new->hw, dma_src);
		set_desc_dst(chan, &new->hw, dma_dst);

		if (!first)
			first = new;
		else
			set_desc_next(chan, &prev->hw, new->async_tx.phys);

		new->async_tx.cookie = 0;
		async_tx_ack(&new->async_tx);

		prev = new;
		len -= copy;
		dma_src += copy;
		dma_dst += copy;

		/* Insert the link descriptor to the LD ring */
		list_add_tail(&new->node, &first->tx_list);
	} while (len);

	new->async_tx.flags = flags; /* client is in control of this ack */
	new->async_tx.cookie = -EBUSY;

	/* Set End-of-link to the last link descriptor of new list */
	set_ld_eol(chan, new);

	return &first->async_tx;

fail:
	if (!first)
		return NULL;

	fsldma_free_desc_list_reverse(chan, &first->tx_list);
	return NULL;
}
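
/*
 * Prepare a scatter/gather copy (DMA_SG): walk the source and destination
 * scatterlists in parallel, emitting one link descriptor per contiguous
 * chunk, each limited to FSL_DMA_BCR_MAX_CNT bytes.
 */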
  static struct dma_async_tx_descriptor *fsl_dma_prep_sg(struct dma_chan *dchan,
  	struct scatterlist *dst_sg, unsigned int dst_nents,
  	struct scatterlist *src_sg, unsigned int src_nents,
  	unsigned long flags)
  {
  	struct fsl_desc_sw *first = NULL, *prev = NULL, *new = NULL;
  	struct fsldma_chan *chan = to_fsl_chan(dchan);
  	size_t dst_avail, src_avail;
  	dma_addr_t dst, src;
  	size_t len;
  
  	/* basic sanity checks */
  	if (dst_nents == 0 || src_nents == 0)
  		return NULL;
  
  	if (dst_sg == NULL || src_sg == NULL)
  		return NULL;
  
  	/*
  	 * TODO: should we check that both scatterlists have the same
  	 * TODO: number of bytes in total? Is that really an error?
  	 */
  
  	/* get prepared for the loop */
  	dst_avail = sg_dma_len(dst_sg);
  	src_avail = sg_dma_len(src_sg);
  
  	/* run until we are out of scatterlist entries */
  	while (true) {
  
  		/* create the largest transaction possible */
  		len = min_t(size_t, src_avail, dst_avail);
  		len = min_t(size_t, len, FSL_DMA_BCR_MAX_CNT);
  		if (len == 0)
  			goto fetch;
  
  		dst = sg_dma_address(dst_sg) + sg_dma_len(dst_sg) - dst_avail;
  		src = sg_dma_address(src_sg) + sg_dma_len(src_sg) - src_avail;
  
  		/* allocate and populate the descriptor */
  		new = fsl_dma_alloc_descriptor(chan);
  		if (!new) {
			chan_err(chan, "%s\n", msg_ld_oom);
  			goto fail;
  		}
  
  		set_desc_cnt(chan, &new->hw, len);
  		set_desc_src(chan, &new->hw, src);
  		set_desc_dst(chan, &new->hw, dst);
  
  		if (!first)
  			first = new;
  		else
  			set_desc_next(chan, &prev->hw, new->async_tx.phys);
  
  		new->async_tx.cookie = 0;
  		async_tx_ack(&new->async_tx);
  		prev = new;
  
  		/* Insert the link descriptor to the LD ring */
  		list_add_tail(&new->node, &first->tx_list);
  
  		/* update metadata */
  		dst_avail -= len;
  		src_avail -= len;
  
  fetch:
  		/* fetch the next dst scatterlist entry */
  		if (dst_avail == 0) {
  
  			/* no more entries: we're done */
  			if (dst_nents == 0)
  				break;
  
  			/* fetch the next entry: if there are no more: done */
  			dst_sg = sg_next(dst_sg);
  			if (dst_sg == NULL)
  				break;
  
  			dst_nents--;
  			dst_avail = sg_dma_len(dst_sg);
  		}
  
  		/* fetch the next src scatterlist entry */
  		if (src_avail == 0) {
  
  			/* no more entries: we're done */
  			if (src_nents == 0)
  				break;
  
  			/* fetch the next entry: if there are no more: done */
  			src_sg = sg_next(src_sg);
  			if (src_sg == NULL)
  				break;
  
  			src_nents--;
  			src_avail = sg_dma_len(src_sg);
  		}
  	}
  
  	new->async_tx.flags = flags; /* client is in control of this ack */
  	new->async_tx.cookie = -EBUSY;
  
  	/* Set End-of-link to the last link descriptor of new list */
  	set_ld_eol(chan, new);
  
  	return &first->async_tx;
  
  fail:
  	if (!first)
  		return NULL;
  
  	fsldma_free_desc_list_reverse(chan, &first->tx_list);
  	return NULL;
  }

/**
 * fsl_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
 * @chan: DMA channel
 * @sgl: scatterlist to transfer to/from
 * @sg_len: number of entries in @sgl
 * @direction: DMA direction
 * @flags: DMAEngine flags
 *
 * Prepare a set of descriptors for a DMA_SLAVE transaction. Following the
 * DMA_SLAVE API, this gets the device-specific information from the
 * chan->private variable.
 */
static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg(
	struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_data_direction direction, unsigned long flags)
{
	/*
	 * This operation is not supported on the Freescale DMA controller
	 *
	 * However, we need to provide the function pointer to allow the
	 * device_control() method to work.
	 */
	return NULL;
}
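
/*
 * dmaengine device_control entry point: DMA_TERMINATE_ALL halts the channel
 * and frees every queued descriptor, while DMA_SLAVE_CONFIG and
 * FSLDMA_EXTERNAL_START program the external control (DREQ#) features.
 */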
static int fsl_dma_device_control(struct dma_chan *dchan,
				  enum dma_ctrl_cmd cmd, unsigned long arg)
{
	struct dma_slave_config *config;
	struct fsldma_chan *chan;
	unsigned long flags;
	int size;

	if (!dchan)
		return -EINVAL;

	chan = to_fsl_chan(dchan);

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		spin_lock_irqsave(&chan->desc_lock, flags);

		/* Halt the DMA engine */
		dma_halt(chan);

		/* Remove and free all of the descriptors in the LD queue */
		fsldma_free_desc_list(chan, &chan->ld_pending);
		fsldma_free_desc_list(chan, &chan->ld_running);
		chan->idle = true;

  		spin_unlock_irqrestore(&chan->desc_lock, flags);
  		return 0;
  
  	case DMA_SLAVE_CONFIG:
  		config = (struct dma_slave_config *)arg;
  
  		/* make sure the channel supports setting burst size */
  		if (!chan->set_request_count)
  			return -ENXIO;
  
  		/* we set the controller burst size depending on direction */
  		if (config->direction == DMA_TO_DEVICE)
  			size = config->dst_addr_width * config->dst_maxburst;
  		else
  			size = config->src_addr_width * config->src_maxburst;
  
  		chan->set_request_count(chan, size);
  		return 0;
  
  	case FSLDMA_EXTERNAL_START:
  
  		/* make sure the channel supports external start */
  		if (!chan->toggle_ext_start)
  			return -ENXIO;
  
  		chan->toggle_ext_start(chan, arg);
  		return 0;
  
  	default:
  		return -ENXIO;
  	}
  
  	return 0;
  }
  
/**
 * fsldma_cleanup_descriptor - cleanup and free a single link descriptor
 * @chan: Freescale DMA channel
 * @desc: descriptor to cleanup and free
 *
 * This function is used on a descriptor which has been executed by the DMA
 * controller. It will run any callbacks, submit any dependencies, and then
 * free the descriptor.
 */
static void fsldma_cleanup_descriptor(struct fsldma_chan *chan,
				      struct fsl_desc_sw *desc)
{
	struct dma_async_tx_descriptor *txd = &desc->async_tx;
	struct device *dev = chan->common.device->dev;
	dma_addr_t src = get_desc_src(chan, desc);
	dma_addr_t dst = get_desc_dst(chan, desc);
	u32 len = get_desc_cnt(chan, desc);

	/* Run the link descriptor callback function */
	if (txd->callback) {
#ifdef FSL_DMA_LD_DEBUG
		chan_dbg(chan, "LD %p callback\n", desc);
#endif
		txd->callback(txd->callback_param);
	}

	/* Run any dependencies */
	dma_run_dependencies(txd);

	/* Unmap the dst buffer, if requested */
	if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
		if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
			dma_unmap_single(dev, dst, len, DMA_FROM_DEVICE);
		else
			dma_unmap_page(dev, dst, len, DMA_FROM_DEVICE);
	}

	/* Unmap the src buffer, if requested */
	if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
		if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
			dma_unmap_single(dev, src, len, DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, src, len, DMA_TO_DEVICE);
	}

#ifdef FSL_DMA_LD_DEBUG
	chan_dbg(chan, "LD %p free\n", desc);
#endif
	dma_pool_free(chan->desc_pool, desc, txd->phys);
}
  
/**
 * fsl_chan_xfer_ld_queue - transfer any pending transactions
 * @chan : Freescale DMA channel
 *
 * HARDWARE STATE: idle
 * LOCKING: must hold chan->desc_lock
 */
static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan)
{
	struct fsl_desc_sw *desc;

	/*
	 * If the list of pending descriptors is empty, then we
	 * don't need to do any work at all
	 */
	if (list_empty(&chan->ld_pending)) {
		chan_dbg(chan, "no pending LDs\n");
		return;
	}

	/*
	 * The DMA controller is not idle, which means that the interrupt
	 * handler will start any queued transactions when it runs after
	 * this transaction finishes
	 */
	if (!chan->idle) {
		chan_dbg(chan, "DMA controller still busy\n");
		return;
	}

	/*
	 * If there are some link descriptors which have not been
	 * transferred, we need to start the controller
	 */

	/*
	 * Move all elements from the queue of pending transactions
	 * onto the list of running transactions
	 */
	chan_dbg(chan, "idle, starting controller\n");
	desc = list_first_entry(&chan->ld_pending, struct fsl_desc_sw, node);
	list_splice_tail_init(&chan->ld_pending, &chan->ld_running);

	/*
	 * The 85xx DMA controller doesn't clear the channel start bit
	 * automatically at the end of a transfer. Therefore we must clear
	 * it in software before starting the transfer.
	 */
	if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) {
		u32 mode;

		mode = DMA_IN(chan, &chan->regs->mr, 32);
		mode &= ~FSL_DMA_MR_CS;
		DMA_OUT(chan, &chan->regs->mr, mode, 32);
	}

	/*
	 * Program the descriptor's address into the DMA controller,
	 * then start the DMA transaction
	 */
	set_cdar(chan, desc->async_tx.phys);
	get_cdar(chan);

	dma_start(chan);
	chan->idle = false;
}
  
/**
 * fsl_dma_memcpy_issue_pending - Issue the DMA start command
 * @chan : Freescale DMA channel
 */
static void fsl_dma_memcpy_issue_pending(struct dma_chan *dchan)
{
	struct fsldma_chan *chan = to_fsl_chan(dchan);
	unsigned long flags;

	spin_lock_irqsave(&chan->desc_lock, flags);
	fsl_chan_xfer_ld_queue(chan);
	spin_unlock_irqrestore(&chan->desc_lock, flags);
}

/**
 * fsl_tx_status - Determine the DMA status
 * @chan : Freescale DMA channel
 */
static enum dma_status fsl_tx_status(struct dma_chan *dchan,
					dma_cookie_t cookie,
					struct dma_tx_state *txstate)
{
	struct fsldma_chan *chan = to_fsl_chan(dchan);
	dma_cookie_t last_complete;
	dma_cookie_t last_used;
	unsigned long flags;

	spin_lock_irqsave(&chan->desc_lock, flags);

	last_complete = chan->completed_cookie;
	last_used = dchan->cookie;

	spin_unlock_irqrestore(&chan->desc_lock, flags);

	dma_set_tx_state(txstate, last_complete, last_used, 0);

	return dma_async_is_complete(cookie, last_complete, last_used);
}

/*----------------------------------------------------------------------------*/
/* Interrupt Handling                                                         */
/*----------------------------------------------------------------------------*/

static irqreturn_t fsldma_chan_irq(int irq, void *data)
{
	struct fsldma_chan *chan = data;
	u32 stat;

	/* save and clear the status register */
	stat = get_sr(chan);
	set_sr(chan, stat);
	chan_dbg(chan, "irq: stat = 0x%x\n", stat);

	/* check that this was really our device */
	stat &= ~(FSL_DMA_SR_CB | FSL_DMA_SR_CH);
	if (!stat)
		return IRQ_NONE;

	if (stat & FSL_DMA_SR_TE)
		chan_err(chan, "Transfer Error!\n");

	/*
	 * Programming Error
	 * The DMA_INTERRUPT async_tx is a NULL transfer, which will
	 * trigger a PE interrupt.
	 */
	if (stat & FSL_DMA_SR_PE) {
		chan_dbg(chan, "irq: Programming Error INT\n");
		stat &= ~FSL_DMA_SR_PE;
		if (get_bcr(chan) != 0)
			chan_err(chan, "Programming Error!\n");
	}

	/*
	 * For MPC8349, the EOCDI event needs to update the cookie
	 * and start the next transfer if one exists.
	 */
	if (stat & FSL_DMA_SR_EOCDI) {
		chan_dbg(chan, "irq: End-of-Chain link INT\n");
		stat &= ~FSL_DMA_SR_EOCDI;
	}

	/*
	 * If the current transfer is the end-of-transfer, we should
	 * clear the Channel Start bit to prepare for the next transfer.
	 */
	if (stat & FSL_DMA_SR_EOLNI) {
		chan_dbg(chan, "irq: End-of-link INT\n");
		stat &= ~FSL_DMA_SR_EOLNI;
	}

	/* check that the DMA controller is really idle */
	if (!dma_is_idle(chan))
		chan_err(chan, "irq: controller not idle!\n");

	/* check that we handled all of the bits */
	if (stat)
		chan_err(chan, "irq: unhandled sr 0x%08x\n", stat);

	/*
	 * Schedule the tasklet to handle all cleanup of the current
	 * transaction. It will start a new transaction if there is
	 * one pending.
	 */
	tasklet_schedule(&chan->tasklet);
	chan_dbg(chan, "irq: Exit\n");

	return IRQ_HANDLED;
}
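
/*
 * Deferred work for the channel: update the completed cookie, splice the
 * finished descriptors onto a private list, restart the hardware, and only
 * then run the callbacks and free the descriptors without holding the lock.
 */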
d3f620b2c   Ira Snyder   fsldma: simplify ...
1041
1042
  static void dma_do_tasklet(unsigned long data)
  {
a1c033190   Ira Snyder   fsldma: rename fs...
1043
  	struct fsldma_chan *chan = (struct fsldma_chan *)data;
dc8d40915   Ira Snyder   fsldma: reduce lo...
1044
1045
  	struct fsl_desc_sw *desc, *_desc;
  	LIST_HEAD(ld_cleanup);
f04cd4070   Ira Snyder   fsldma: fix contr...
1046
1047
1048
1049
  	unsigned long flags;
  
  	chan_dbg(chan, "tasklet entry
  ");
f04cd4070   Ira Snyder   fsldma: fix contr...
1050
  	spin_lock_irqsave(&chan->desc_lock, flags);
dc8d40915   Ira Snyder   fsldma: reduce lo...
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
  
  	/* update the cookie if we have some descriptors to cleanup */
  	if (!list_empty(&chan->ld_running)) {
  		dma_cookie_t cookie;
  
  		desc = to_fsl_desc(chan->ld_running.prev);
  		cookie = desc->async_tx.cookie;
  
  		chan->completed_cookie = cookie;
  		chan_dbg(chan, "completed_cookie=%d
  ", cookie);
  	}
  
  	/*
  	 * move the descriptors to a temporary list so we can drop the lock
  	 * during the entire cleanup operation
  	 */
  	list_splice_tail_init(&chan->ld_running, &ld_cleanup);
  
  	/* the hardware is now idle and ready for more */
f04cd4070   Ira Snyder   fsldma: fix contr...
1071
  	chan->idle = true;
f04cd4070   Ira Snyder   fsldma: fix contr...
1072

dc8d40915   Ira Snyder   fsldma: reduce lo...
1073
1074
1075
1076
1077
1078
  	/*
  	 * Start any pending transactions automatically
  	 *
  	 * In the ideal case, we keep the DMA controller busy while we go
  	 * ahead and free the descriptors below.
  	 */
f04cd4070   Ira Snyder   fsldma: fix contr...
1079
  	fsl_chan_xfer_ld_queue(chan);
dc8d40915   Ira Snyder   fsldma: reduce lo...
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
  	spin_unlock_irqrestore(&chan->desc_lock, flags);
  
  	/* Run the callback for each descriptor, in order */
  	list_for_each_entry_safe(desc, _desc, &ld_cleanup, node) {
  
  		/* Remove from the list of transactions */
  		list_del(&desc->node);
  
  		/* Run all cleanup for this descriptor */
  		fsldma_cleanup_descriptor(chan, desc);
  	}
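  	/*
  	 * Illustrative only: the callback run by fsldma_cleanup_descriptor()
  	 * above is whatever the client armed before submission, e.g. (the
  	 * names 'my_done' and 'my_ctx' are assumptions):
  	 *
  	 *	tx->callback = my_done;
  	 *	tx->callback_param = my_ctx;
  	 *	cookie = tx->tx_submit(tx);
  	 */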
f04cd4070   Ira Snyder   fsldma: fix contr...
1091
1092
  	chan_dbg(chan, "tasklet exit
  ");
d3f620b2c   Ira Snyder   fsldma: simplify ...
1093
1094
1095
  }
  
  static irqreturn_t fsldma_ctrl_irq(int irq, void *data)
173acc7ce   Zhang Wei   dmaengine: add dr...
1096
  {
a4f56d4b1   Ira Snyder   fsldma: rename st...
1097
  	struct fsldma_device *fdev = data;
d3f620b2c   Ira Snyder   fsldma: simplify ...
1098
1099
1100
1101
  	struct fsldma_chan *chan;
  	unsigned int handled = 0;
  	u32 gsr, mask;
  	int i;
173acc7ce   Zhang Wei   dmaengine: add dr...
1102

e7a29151d   Ira Snyder   fsldma: clean up ...
1103
  	gsr = (fdev->feature & FSL_DMA_BIG_ENDIAN) ? in_be32(fdev->regs)
d3f620b2c   Ira Snyder   fsldma: simplify ...
1104
1105
1106
1107
  						   : in_le32(fdev->regs);
  	mask = 0xff000000;
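  	/*
  	 * Each channel reports one byte of status in the global status
  	 * register: channel 0 in bits 31:24, channel 1 in bits 23:16, and so
  	 * on, which is why 'mask' starts at the top byte and is shifted right
  	 * by 8 for every channel in the loop below.
  	 */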
  	dev_dbg(fdev->dev, "IRQ: gsr 0x%.8x
  ", gsr);
173acc7ce   Zhang Wei   dmaengine: add dr...
1108

d3f620b2c   Ira Snyder   fsldma: simplify ...
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
  	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
  		chan = fdev->chan[i];
  		if (!chan)
  			continue;
  
  		if (gsr & mask) {
  			dev_dbg(fdev->dev, "IRQ: chan %d
  ", chan->id);
  			fsldma_chan_irq(irq, chan);
  			handled++;
  		}
  
  		gsr &= ~mask;
  		mask >>= 8;
  	}
  
  	return IRQ_RETVAL(handled);
173acc7ce   Zhang Wei   dmaengine: add dr...
1126
  }
d3f620b2c   Ira Snyder   fsldma: simplify ...
1127
  static void fsldma_free_irqs(struct fsldma_device *fdev)
173acc7ce   Zhang Wei   dmaengine: add dr...
1128
  {
d3f620b2c   Ira Snyder   fsldma: simplify ...
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
  	struct fsldma_chan *chan;
  	int i;
  
  	if (fdev->irq != NO_IRQ) {
  		dev_dbg(fdev->dev, "free per-controller IRQ
  ");
  		free_irq(fdev->irq, fdev);
  		return;
  	}
  
  	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
  		chan = fdev->chan[i];
  		if (chan && chan->irq != NO_IRQ) {
b158471ef   Ira Snyder   fsldma: use chann...
1142
1143
  			chan_dbg(chan, "free per-channel IRQ
  ");
d3f620b2c   Ira Snyder   fsldma: simplify ...
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
  			free_irq(chan->irq, chan);
  		}
  	}
  }
  
  static int fsldma_request_irqs(struct fsldma_device *fdev)
  {
  	struct fsldma_chan *chan;
  	int ret;
  	int i;
  
  	/* if we have a per-controller IRQ, use that */
  	if (fdev->irq != NO_IRQ) {
  		dev_dbg(fdev->dev, "request per-controller IRQ
  ");
  		ret = request_irq(fdev->irq, fsldma_ctrl_irq, IRQF_SHARED,
  				  "fsldma-controller", fdev);
  		return ret;
  	}
  
  	/* no per-controller IRQ, use the per-channel IRQs */
  	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
  		chan = fdev->chan[i];
  		if (!chan)
  			continue;
  
  		if (chan->irq == NO_IRQ) {
b158471ef   Ira Snyder   fsldma: use chann...
1171
1172
  			chan_err(chan, "interrupts property missing in device tree
  ");
d3f620b2c   Ira Snyder   fsldma: simplify ...
1173
1174
1175
  			ret = -ENODEV;
  			goto out_unwind;
  		}
b158471ef   Ira Snyder   fsldma: use chann...
1176
1177
  		chan_dbg(chan, "request per-channel IRQ
  ");
d3f620b2c   Ira Snyder   fsldma: simplify ...
1178
1179
1180
  		ret = request_irq(chan->irq, fsldma_chan_irq, IRQF_SHARED,
  				  "fsldma-chan", chan);
  		if (ret) {
b158471ef   Ira Snyder   fsldma: use chann...
1181
1182
  			chan_err(chan, "unable to request per-channel IRQ
  ");
d3f620b2c   Ira Snyder   fsldma: simplify ...
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201
  			goto out_unwind;
  		}
  	}
  
  	return 0;
  
  out_unwind:
  	for (/* none */; i >= 0; i--) {
  		chan = fdev->chan[i];
  		if (!chan)
  			continue;
  
  		if (chan->irq == NO_IRQ)
  			continue;
  
  		free_irq(chan->irq, chan);
  	}
  
  	return ret;
173acc7ce   Zhang Wei   dmaengine: add dr...
1202
  }
a4f56d4b1   Ira Snyder   fsldma: rename st...
1203
1204
1205
1206
1207
  /*----------------------------------------------------------------------------*/
  /* OpenFirmware Subsystem                                                     */
  /*----------------------------------------------------------------------------*/
  
  static int __devinit fsl_dma_chan_probe(struct fsldma_device *fdev,
77cd62e80   Timur Tabi   fsldma: allow Fre...
1208
  	struct device_node *node, u32 feature, const char *compatible)
173acc7ce   Zhang Wei   dmaengine: add dr...
1209
  {
a1c033190   Ira Snyder   fsldma: rename fs...
1210
  	struct fsldma_chan *chan;
4ce0e953f   Ira Snyder   fsldma: remove un...
1211
  	struct resource res;
173acc7ce   Zhang Wei   dmaengine: add dr...
1212
  	int err;
173acc7ce   Zhang Wei   dmaengine: add dr...
1213
  	/* alloc channel */
a1c033190   Ira Snyder   fsldma: rename fs...
1214
1215
  	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
  	if (!chan) {
e7a29151d   Ira Snyder   fsldma: clean up ...
1216
1217
1218
1219
1220
1221
1222
  		dev_err(fdev->dev, "no free memory for DMA channels!
  ");
  		err = -ENOMEM;
  		goto out_return;
  	}
  
  	/* ioremap registers for use */
a1c033190   Ira Snyder   fsldma: rename fs...
1223
1224
  	chan->regs = of_iomap(node, 0);
  	if (!chan->regs) {
e7a29151d   Ira Snyder   fsldma: clean up ...
1225
1226
1227
  		dev_err(fdev->dev, "unable to ioremap registers
  ");
  		err = -ENOMEM;
a1c033190   Ira Snyder   fsldma: rename fs...
1228
  		goto out_free_chan;
173acc7ce   Zhang Wei   dmaengine: add dr...
1229
  	}
4ce0e953f   Ira Snyder   fsldma: remove un...
1230
  	err = of_address_to_resource(node, 0, &res);
173acc7ce   Zhang Wei   dmaengine: add dr...
1231
  	if (err) {
e7a29151d   Ira Snyder   fsldma: clean up ...
1232
1233
1234
  		dev_err(fdev->dev, "unable to find 'reg' property
  ");
  		goto out_iounmap_regs;
173acc7ce   Zhang Wei   dmaengine: add dr...
1235
  	}
a1c033190   Ira Snyder   fsldma: rename fs...
1236
  	chan->feature = feature;
173acc7ce   Zhang Wei   dmaengine: add dr...
1237
  	if (!fdev->feature)
a1c033190   Ira Snyder   fsldma: rename fs...
1238
  		fdev->feature = chan->feature;
173acc7ce   Zhang Wei   dmaengine: add dr...
1239

e7a29151d   Ira Snyder   fsldma: clean up ...
1240
1241
1242
  	/*
  	 * If the DMA device's feature is different from the feature
  	 * of its channels, report the bug.
173acc7ce   Zhang Wei   dmaengine: add dr...
1243
  	 */
a1c033190   Ira Snyder   fsldma: rename fs...
1244
  	WARN_ON(fdev->feature != chan->feature);
e7a29151d   Ira Snyder   fsldma: clean up ...
1245

a1c033190   Ira Snyder   fsldma: rename fs...
1246
1247
1248
  	chan->dev = fdev->dev;
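  	/*
  	 * Each channel owns an 0x80-byte register block; block N starts at
  	 * offset 0x100 + N * 0x80 from the controller base, so the channel
  	 * index is recovered from the low bits of the 'reg' address below
  	 * (for example ...100 -> 0, ...180 -> 1, ...200 -> 2).
  	 */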
  	chan->id = ((res.start - 0x100) & 0xfff) >> 7;
  	if (chan->id >= FSL_DMA_MAX_CHANS_PER_DEVICE) {
e7a29151d   Ira Snyder   fsldma: clean up ...
1249
1250
  		dev_err(fdev->dev, "too many channels for device
  ");
173acc7ce   Zhang Wei   dmaengine: add dr...
1251
  		err = -EINVAL;
e7a29151d   Ira Snyder   fsldma: clean up ...
1252
  		goto out_iounmap_regs;
173acc7ce   Zhang Wei   dmaengine: add dr...
1253
  	}
173acc7ce   Zhang Wei   dmaengine: add dr...
1254

a1c033190   Ira Snyder   fsldma: rename fs...
1255
1256
  	fdev->chan[chan->id] = chan;
  	tasklet_init(&chan->tasklet, dma_do_tasklet, (unsigned long)chan);
b158471ef   Ira Snyder   fsldma: use chann...
1257
  	snprintf(chan->name, sizeof(chan->name), "chan%d", chan->id);
e7a29151d   Ira Snyder   fsldma: clean up ...
1258
1259
  
  	/* Initialize the channel */
a1c033190   Ira Snyder   fsldma: rename fs...
1260
  	dma_init(chan);
173acc7ce   Zhang Wei   dmaengine: add dr...
1261
1262
  
  	/* Clear cdar registers */
a1c033190   Ira Snyder   fsldma: rename fs...
1263
  	set_cdar(chan, 0);
173acc7ce   Zhang Wei   dmaengine: add dr...
1264

a1c033190   Ira Snyder   fsldma: rename fs...
1265
  	switch (chan->feature & FSL_DMA_IP_MASK) {
173acc7ce   Zhang Wei   dmaengine: add dr...
1266
  	case FSL_DMA_IP_85XX:
a1c033190   Ira Snyder   fsldma: rename fs...
1267
  		chan->toggle_ext_pause = fsl_chan_toggle_ext_pause;
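  		/* fall through: 85xx channels also get the 83xx callbacks below */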
173acc7ce   Zhang Wei   dmaengine: add dr...
1268
  	case FSL_DMA_IP_83XX:
a1c033190   Ira Snyder   fsldma: rename fs...
1269
1270
1271
1272
  		chan->toggle_ext_start = fsl_chan_toggle_ext_start;
  		chan->set_src_loop_size = fsl_chan_set_src_loop_size;
  		chan->set_dst_loop_size = fsl_chan_set_dst_loop_size;
  		chan->set_request_count = fsl_chan_set_request_count;
173acc7ce   Zhang Wei   dmaengine: add dr...
1273
  	}
a1c033190   Ira Snyder   fsldma: rename fs...
1274
  	spin_lock_init(&chan->desc_lock);
9c3a50b7d   Ira Snyder   fsldma: major cle...
1275
1276
  	INIT_LIST_HEAD(&chan->ld_pending);
  	INIT_LIST_HEAD(&chan->ld_running);
f04cd4070   Ira Snyder   fsldma: fix contr...
1277
  	chan->idle = true;
173acc7ce   Zhang Wei   dmaengine: add dr...
1278

a1c033190   Ira Snyder   fsldma: rename fs...
1279
  	chan->common.device = &fdev->common;
173acc7ce   Zhang Wei   dmaengine: add dr...
1280

d3f620b2c   Ira Snyder   fsldma: simplify ...
1281
  	/* find the IRQ line, if it exists in the device tree */
a1c033190   Ira Snyder   fsldma: rename fs...
1282
  	chan->irq = irq_of_parse_and_map(node, 0);
d3f620b2c   Ira Snyder   fsldma: simplify ...
1283

173acc7ce   Zhang Wei   dmaengine: add dr...
1284
  	/* Add the channel to DMA device channel list */
a1c033190   Ira Snyder   fsldma: rename fs...
1285
  	list_add_tail(&chan->common.device_node, &fdev->common.channels);
173acc7ce   Zhang Wei   dmaengine: add dr...
1286
  	fdev->common.chancnt++;
a1c033190   Ira Snyder   fsldma: rename fs...
1287
1288
1289
  	dev_info(fdev->dev, "#%d (%s), irq %d
  ", chan->id, compatible,
  		 chan->irq != NO_IRQ ? chan->irq : fdev->irq);
173acc7ce   Zhang Wei   dmaengine: add dr...
1290
1291
  
  	return 0;
51ee87f27   Li Yang   fsldma: fix incor...
1292

e7a29151d   Ira Snyder   fsldma: clean up ...
1293
  out_iounmap_regs:
a1c033190   Ira Snyder   fsldma: rename fs...
1294
1295
1296
  	iounmap(chan->regs);
  out_free_chan:
  	kfree(chan);
e7a29151d   Ira Snyder   fsldma: clean up ...
1297
  out_return:
173acc7ce   Zhang Wei   dmaengine: add dr...
1298
1299
  	return err;
  }
a1c033190   Ira Snyder   fsldma: rename fs...
1300
  static void fsl_dma_chan_remove(struct fsldma_chan *chan)
173acc7ce   Zhang Wei   dmaengine: add dr...
1301
  {
a1c033190   Ira Snyder   fsldma: rename fs...
1302
1303
1304
1305
  	irq_dispose_mapping(chan->irq);
  	list_del(&chan->common.device_node);
  	iounmap(chan->regs);
  	kfree(chan);
173acc7ce   Zhang Wei   dmaengine: add dr...
1306
  }
000061245   Grant Likely   dt/powerpc: Elimi...
1307
  static int __devinit fsldma_of_probe(struct platform_device *op)
173acc7ce   Zhang Wei   dmaengine: add dr...
1308
  {
a4f56d4b1   Ira Snyder   fsldma: rename st...
1309
  	struct fsldma_device *fdev;
77cd62e80   Timur Tabi   fsldma: allow Fre...
1310
  	struct device_node *child;
e7a29151d   Ira Snyder   fsldma: clean up ...
1311
  	int err;
173acc7ce   Zhang Wei   dmaengine: add dr...
1312

a4f56d4b1   Ira Snyder   fsldma: rename st...
1313
  	fdev = kzalloc(sizeof(*fdev), GFP_KERNEL);
173acc7ce   Zhang Wei   dmaengine: add dr...
1314
  	if (!fdev) {
e7a29151d   Ira Snyder   fsldma: clean up ...
1315
1316
1317
1318
  		dev_err(&op->dev, "No enough memory for 'priv'
  ");
  		err = -ENOMEM;
  		goto out_return;
173acc7ce   Zhang Wei   dmaengine: add dr...
1319
  	}
e7a29151d   Ira Snyder   fsldma: clean up ...
1320
1321
  
  	fdev->dev = &op->dev;
173acc7ce   Zhang Wei   dmaengine: add dr...
1322
  	INIT_LIST_HEAD(&fdev->common.channels);
e7a29151d   Ira Snyder   fsldma: clean up ...
1323
  	/* ioremap the registers for use */
61c7a080a   Grant Likely   of: Always use 's...
1324
  	fdev->regs = of_iomap(op->dev.of_node, 0);
e7a29151d   Ira Snyder   fsldma: clean up ...
1325
1326
1327
1328
1329
  	if (!fdev->regs) {
  		dev_err(&op->dev, "unable to ioremap registers
  ");
  		err = -ENOMEM;
  		goto out_free_fdev;
173acc7ce   Zhang Wei   dmaengine: add dr...
1330
  	}
d3f620b2c   Ira Snyder   fsldma: simplify ...
1331
  	/* map the channel IRQ if it exists, but don't hookup the handler yet */
61c7a080a   Grant Likely   of: Always use 's...
1332
  	fdev->irq = irq_of_parse_and_map(op->dev.of_node, 0);
d3f620b2c   Ira Snyder   fsldma: simplify ...
1333

173acc7ce   Zhang Wei   dmaengine: add dr...
1334
1335
  	dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask);
  	dma_cap_set(DMA_INTERRUPT, fdev->common.cap_mask);
c14330417   Ira Snyder   fsldma: implement...
1336
  	dma_cap_set(DMA_SG, fdev->common.cap_mask);
bbea0b6e0   Ira Snyder   fsldma: Add DMA_S...
1337
  	dma_cap_set(DMA_SLAVE, fdev->common.cap_mask);
173acc7ce   Zhang Wei   dmaengine: add dr...
1338
1339
  	fdev->common.device_alloc_chan_resources = fsl_dma_alloc_chan_resources;
  	fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources;
2187c269a   Zhang Wei   fsldma: Add devic...
1340
  	fdev->common.device_prep_dma_interrupt = fsl_dma_prep_interrupt;
173acc7ce   Zhang Wei   dmaengine: add dr...
1341
  	fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy;
c14330417   Ira Snyder   fsldma: implement...
1342
  	fdev->common.device_prep_dma_sg = fsl_dma_prep_sg;
079344818   Linus Walleij   DMAENGINE: generi...
1343
  	fdev->common.device_tx_status = fsl_tx_status;
173acc7ce   Zhang Wei   dmaengine: add dr...
1344
  	fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending;
bbea0b6e0   Ira Snyder   fsldma: Add DMA_S...
1345
  	fdev->common.device_prep_slave_sg = fsl_dma_prep_slave_sg;
c3635c78e   Linus Walleij   DMAENGINE: generi...
1346
  	fdev->common.device_control = fsl_dma_device_control;
e7a29151d   Ira Snyder   fsldma: clean up ...
1347
  	fdev->common.dev = &op->dev;
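  	/*
  	 * A minimal client-side sketch (illustrative only, not part of this
  	 * driver) of using the DMA_MEMCPY capability exported above; 'dst',
  	 * 'src' and 'len' are assumed to be already DMA-mapped addresses and
  	 * a transfer length:
  	 *
  	 *	dma_cap_mask_t mask;
  	 *	struct dma_chan *dchan;
  	 *	struct dma_async_tx_descriptor *tx;
  	 *	dma_cookie_t cookie;
  	 *
  	 *	dma_cap_zero(mask);
  	 *	dma_cap_set(DMA_MEMCPY, mask);
  	 *	dchan = dma_request_channel(mask, NULL, NULL);
  	 *	if (dchan) {
  	 *		tx = dchan->device->device_prep_dma_memcpy(dchan, dst,
  	 *				src, len, DMA_PREP_INTERRUPT);
  	 *		cookie = tx->tx_submit(tx);
  	 *		dma_async_issue_pending(dchan);
  	 *		...
  	 *		dma_release_channel(dchan);
  	 *	}
  	 */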
173acc7ce   Zhang Wei   dmaengine: add dr...
1348

e2c8e425b   Li Yang   fsldma: add suppo...
1349
  	dma_set_mask(&(op->dev), DMA_BIT_MASK(36));
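  	/* the 36-bit mask set above declares that the engine can address the full 36-bit physical space */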
e7a29151d   Ira Snyder   fsldma: clean up ...
1350
  	dev_set_drvdata(&op->dev, fdev);
77cd62e80   Timur Tabi   fsldma: allow Fre...
1351

e7a29151d   Ira Snyder   fsldma: clean up ...
1352
1353
1354
  	/*
  	 * We cannot use of_platform_bus_probe() because there is no
  	 * of_platform_bus_remove(). Instead, we manually instantiate every DMA
77cd62e80   Timur Tabi   fsldma: allow Fre...
1355
1356
  	 * channel object.
  	 */
61c7a080a   Grant Likely   of: Always use 's...
1357
  	for_each_child_of_node(op->dev.of_node, child) {
e7a29151d   Ira Snyder   fsldma: clean up ...
1358
  		if (of_device_is_compatible(child, "fsl,eloplus-dma-channel")) {
77cd62e80   Timur Tabi   fsldma: allow Fre...
1359
1360
1361
  			fsl_dma_chan_probe(fdev, child,
  				FSL_DMA_IP_85XX | FSL_DMA_BIG_ENDIAN,
  				"fsl,eloplus-dma-channel");
e7a29151d   Ira Snyder   fsldma: clean up ...
1362
1363
1364
  		}
  
  		if (of_device_is_compatible(child, "fsl,elo-dma-channel")) {
77cd62e80   Timur Tabi   fsldma: allow Fre...
1365
1366
1367
  			fsl_dma_chan_probe(fdev, child,
  				FSL_DMA_IP_83XX | FSL_DMA_LITTLE_ENDIAN,
  				"fsl,elo-dma-channel");
e7a29151d   Ira Snyder   fsldma: clean up ...
1368
  		}
77cd62e80   Timur Tabi   fsldma: allow Fre...
1369
  	}
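  	/*
  	 * For reference, an illustrative device tree fragment matched by the
  	 * loop above (register offsets, ranges and interrupt specifiers are
  	 * assumptions and vary per SoC; only the compatible strings come from
  	 * this driver):
  	 *
  	 *	dma@21300 {
  	 *		compatible = "fsl,eloplus-dma";
  	 *		reg = <0x21300 0x4>;
  	 *		ranges = <0x0 0x21100 0x200>;
  	 *		#address-cells = <1>;
  	 *		#size-cells = <1>;
  	 *
  	 *		dma-channel@0 {
  	 *			compatible = "fsl,eloplus-dma-channel";
  	 *			reg = <0x0 0x80>;
  	 *			interrupts = <20 2>;
  	 *		};
  	 *	};
  	 */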
173acc7ce   Zhang Wei   dmaengine: add dr...
1370

d3f620b2c   Ira Snyder   fsldma: simplify ...
1371
1372
1373
1374
1375
1376
1377
1378
1379
1380
1381
1382
1383
  	/*
  	 * Hookup the IRQ handler(s)
  	 *
  	 * If we have a per-controller interrupt, we prefer that to the
  	 * per-channel interrupts to reduce the number of shared interrupt
  	 * handlers on the same IRQ line
  	 */
  	err = fsldma_request_irqs(fdev);
  	if (err) {
  		dev_err(fdev->dev, "unable to request IRQs
  ");
  		goto out_free_fdev;
  	}
173acc7ce   Zhang Wei   dmaengine: add dr...
1384
1385
  	dma_async_device_register(&fdev->common);
  	return 0;
e7a29151d   Ira Snyder   fsldma: clean up ...
1386
  out_free_fdev:
d3f620b2c   Ira Snyder   fsldma: simplify ...
1387
  	irq_dispose_mapping(fdev->irq);
173acc7ce   Zhang Wei   dmaengine: add dr...
1388
  	kfree(fdev);
e7a29151d   Ira Snyder   fsldma: clean up ...
1389
  out_return:
173acc7ce   Zhang Wei   dmaengine: add dr...
1390
1391
  	return err;
  }
2dc115813   Grant Likely   of/device: Replac...
1392
  static int fsldma_of_remove(struct platform_device *op)
77cd62e80   Timur Tabi   fsldma: allow Fre...
1393
  {
a4f56d4b1   Ira Snyder   fsldma: rename st...
1394
  	struct fsldma_device *fdev;
77cd62e80   Timur Tabi   fsldma: allow Fre...
1395
  	unsigned int i;
e7a29151d   Ira Snyder   fsldma: clean up ...
1396
  	fdev = dev_get_drvdata(&op->dev);
77cd62e80   Timur Tabi   fsldma: allow Fre...
1397
  	dma_async_device_unregister(&fdev->common);
d3f620b2c   Ira Snyder   fsldma: simplify ...
1398
  	fsldma_free_irqs(fdev);
e7a29151d   Ira Snyder   fsldma: clean up ...
1399
  	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
77cd62e80   Timur Tabi   fsldma: allow Fre...
1400
1401
  		if (fdev->chan[i])
  			fsl_dma_chan_remove(fdev->chan[i]);
e7a29151d   Ira Snyder   fsldma: clean up ...
1402
  	}
77cd62e80   Timur Tabi   fsldma: allow Fre...
1403

e7a29151d   Ira Snyder   fsldma: clean up ...
1404
1405
  	iounmap(fdev->regs);
  	dev_set_drvdata(&op->dev, NULL);
77cd62e80   Timur Tabi   fsldma: allow Fre...
1406
  	kfree(fdev);
77cd62e80   Timur Tabi   fsldma: allow Fre...
1407
1408
1409
  
  	return 0;
  }
4b1cf1fac   Márton Németh   dma: make Open Fi...
1410
  static const struct of_device_id fsldma_of_ids[] = {
049c9d455   Kumar Gala   [POWERPC] fsldma:...
1411
1412
  	{ .compatible = "fsl,eloplus-dma", },
  	{ .compatible = "fsl,elo-dma", },
173acc7ce   Zhang Wei   dmaengine: add dr...
1413
1414
  	{}
  };
8faa7cf82   Ira W. Snyder   dt/fsldma: fix bu...
1415
  static struct platform_driver fsldma_of_driver = {
4018294b5   Grant Likely   of: Remove duplic...
1416
1417
1418
1419
1420
1421
1422
  	.driver = {
  		.name = "fsl-elo-dma",
  		.owner = THIS_MODULE,
  		.of_match_table = fsldma_of_ids,
  	},
  	.probe = fsldma_of_probe,
  	.remove = fsldma_of_remove,
173acc7ce   Zhang Wei   dmaengine: add dr...
1423
  };
a4f56d4b1   Ira Snyder   fsldma: rename st...
1424
1425
1426
1427
1428
  /*----------------------------------------------------------------------------*/
  /* Module Init / Exit                                                         */
  /*----------------------------------------------------------------------------*/
  
  static __init int fsldma_init(void)
173acc7ce   Zhang Wei   dmaengine: add dr...
1429
  {
77cd62e80   Timur Tabi   fsldma: allow Fre...
1430
1431
  	pr_info("Freescale Elo / Elo Plus DMA driver
  ");
000061245   Grant Likely   dt/powerpc: Elimi...
1432
  	return platform_driver_register(&fsldma_of_driver);
77cd62e80   Timur Tabi   fsldma: allow Fre...
1433
  }
a4f56d4b1   Ira Snyder   fsldma: rename st...
1434
  static void __exit fsldma_exit(void)
77cd62e80   Timur Tabi   fsldma: allow Fre...
1435
  {
000061245   Grant Likely   dt/powerpc: Elimi...
1436
  	platform_driver_unregister(&fsldma_of_driver);
173acc7ce   Zhang Wei   dmaengine: add dr...
1437
  }
a4f56d4b1   Ira Snyder   fsldma: rename st...
1438
1439
  subsys_initcall(fsldma_init);
  module_exit(fsldma_exit);
77cd62e80   Timur Tabi   fsldma: allow Fre...
1440
1441
1442
  
  MODULE_DESCRIPTION("Freescale Elo / Elo Plus DMA driver");
  MODULE_LICENSE("GPL");