Commit a1c03319018061304be28d131073ac13a5cb86fb
Committed by Dan Williams
1 parent d3f620b2c4
Exists in master and in 39 other branches
fsldma: rename fsl_chan to chan
The name fsl_chan seems too long, so it has been shortened to chan. There are only a few places where the higher level "struct dma_chan *chan" name conflicts. These have been changed to "struct dma_chan *dchan" instead.

Signed-off-by: Ira W. Snyder <iws@ovro.caltech.edu>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
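The convention is easiest to see around a function that receives a generic dmaengine channel: the "struct dma_chan *" parameter becomes dchan, and the driver-private "struct fsldma_chan *" derived from it keeps the short name chan (fsl_dma_alloc_chan_resources() in the diff below is a typical case). The following sketch is illustrative only; the struct fields and the to_fsl_chan() conversion are simplified stand-ins for the real definitions in fsldma.h:

#include <stddef.h>

/* Simplified stand-ins for the dmaengine and fsldma types (illustration only). */
struct dma_chan {
	int cookie;
};

struct fsldma_chan {
	struct dma_chan common;	/* generic channel embedded in the private one */
	int id;
};

/* Recover the driver-private channel from the embedded generic one;
 * the driver does this with a container_of-style helper. */
static struct fsldma_chan *to_fsl_chan(struct dma_chan *dchan)
{
	return (struct fsldma_chan *)((char *)dchan -
			offsetof(struct fsldma_chan, common));
}

/* Naming convention after this commit: "dchan" names the generic
 * dmaengine channel, "chan" names the Freescale-specific channel. */
static int example_chan_id(struct dma_chan *dchan)
{
	struct fsldma_chan *chan = to_fsl_chan(dchan);

	return chan->id;
}

int main(void)
{
	struct fsldma_chan fc = { .common = { .cookie = 0 }, .id = 3 };

	return example_chan_id(&fc.common);	/* returns 3 */
}

The same pattern repeats in the dmaengine callbacks touched by the diff (fsl_dma_alloc_chan_resources, fsl_dma_prep_memcpy, fsl_dma_prep_slave_sg, fsl_dma_memcpy_issue_pending, and so on), while the internal register and descriptor helpers keep taking struct fsldma_chan *chan directly.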
Showing 1 changed file with 275 additions and 275 deletions
drivers/dma/fsldma.c
... | ... | @@ -37,19 +37,19 @@ |
37 | 37 | #include <asm/fsldma.h> |
38 | 38 | #include "fsldma.h" |
39 | 39 | |
40 | -static void dma_init(struct fsldma_chan *fsl_chan) | |
40 | +static void dma_init(struct fsldma_chan *chan) | |
41 | 41 | { |
42 | 42 | /* Reset the channel */ |
43 | - DMA_OUT(fsl_chan, &fsl_chan->regs->mr, 0, 32); | |
43 | + DMA_OUT(chan, &chan->regs->mr, 0, 32); | |
44 | 44 | |
45 | - switch (fsl_chan->feature & FSL_DMA_IP_MASK) { | |
45 | + switch (chan->feature & FSL_DMA_IP_MASK) { | |
46 | 46 | case FSL_DMA_IP_85XX: |
47 | 47 | /* Set the channel to below modes: |
48 | 48 | * EIE - Error interrupt enable |
49 | 49 | * EOSIE - End of segments interrupt enable (basic mode) |
50 | 50 | * EOLNIE - End of links interrupt enable |
51 | 51 | */ |
52 | - DMA_OUT(fsl_chan, &fsl_chan->regs->mr, FSL_DMA_MR_EIE | |
52 | + DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_EIE | |
53 | 53 | | FSL_DMA_MR_EOLNIE | FSL_DMA_MR_EOSIE, 32); |
54 | 54 | break; |
55 | 55 | case FSL_DMA_IP_83XX: |
... | ... | @@ -57,154 +57,154 @@ |
57 | 57 | * EOTIE - End-of-transfer interrupt enable |
58 | 58 | * PRC_RM - PCI read multiple |
59 | 59 | */ |
60 | - DMA_OUT(fsl_chan, &fsl_chan->regs->mr, FSL_DMA_MR_EOTIE | |
60 | + DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_EOTIE | |
61 | 61 | | FSL_DMA_MR_PRC_RM, 32); |
62 | 62 | break; |
63 | 63 | } |
64 | 64 | |
65 | 65 | } |
66 | 66 | |
67 | -static void set_sr(struct fsldma_chan *fsl_chan, u32 val) | |
67 | +static void set_sr(struct fsldma_chan *chan, u32 val) | |
68 | 68 | { |
69 | - DMA_OUT(fsl_chan, &fsl_chan->regs->sr, val, 32); | |
69 | + DMA_OUT(chan, &chan->regs->sr, val, 32); | |
70 | 70 | } |
71 | 71 | |
72 | -static u32 get_sr(struct fsldma_chan *fsl_chan) | |
72 | +static u32 get_sr(struct fsldma_chan *chan) | |
73 | 73 | { |
74 | - return DMA_IN(fsl_chan, &fsl_chan->regs->sr, 32); | |
74 | + return DMA_IN(chan, &chan->regs->sr, 32); | |
75 | 75 | } |
76 | 76 | |
77 | -static void set_desc_cnt(struct fsldma_chan *fsl_chan, | |
77 | +static void set_desc_cnt(struct fsldma_chan *chan, | |
78 | 78 | struct fsl_dma_ld_hw *hw, u32 count) |
79 | 79 | { |
80 | - hw->count = CPU_TO_DMA(fsl_chan, count, 32); | |
80 | + hw->count = CPU_TO_DMA(chan, count, 32); | |
81 | 81 | } |
82 | 82 | |
83 | -static void set_desc_src(struct fsldma_chan *fsl_chan, | |
83 | +static void set_desc_src(struct fsldma_chan *chan, | |
84 | 84 | struct fsl_dma_ld_hw *hw, dma_addr_t src) |
85 | 85 | { |
86 | 86 | u64 snoop_bits; |
87 | 87 | |
88 | - snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) | |
88 | + snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) | |
89 | 89 | ? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0; |
90 | - hw->src_addr = CPU_TO_DMA(fsl_chan, snoop_bits | src, 64); | |
90 | + hw->src_addr = CPU_TO_DMA(chan, snoop_bits | src, 64); | |
91 | 91 | } |
92 | 92 | |
93 | -static void set_desc_dst(struct fsldma_chan *fsl_chan, | |
93 | +static void set_desc_dst(struct fsldma_chan *chan, | |
94 | 94 | struct fsl_dma_ld_hw *hw, dma_addr_t dst) |
95 | 95 | { |
96 | 96 | u64 snoop_bits; |
97 | 97 | |
98 | - snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) | |
98 | + snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) | |
99 | 99 | ? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0; |
100 | - hw->dst_addr = CPU_TO_DMA(fsl_chan, snoop_bits | dst, 64); | |
100 | + hw->dst_addr = CPU_TO_DMA(chan, snoop_bits | dst, 64); | |
101 | 101 | } |
102 | 102 | |
103 | -static void set_desc_next(struct fsldma_chan *fsl_chan, | |
103 | +static void set_desc_next(struct fsldma_chan *chan, | |
104 | 104 | struct fsl_dma_ld_hw *hw, dma_addr_t next) |
105 | 105 | { |
106 | 106 | u64 snoop_bits; |
107 | 107 | |
108 | - snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX) | |
108 | + snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX) | |
109 | 109 | ? FSL_DMA_SNEN : 0; |
110 | - hw->next_ln_addr = CPU_TO_DMA(fsl_chan, snoop_bits | next, 64); | |
110 | + hw->next_ln_addr = CPU_TO_DMA(chan, snoop_bits | next, 64); | |
111 | 111 | } |
112 | 112 | |
113 | -static void set_cdar(struct fsldma_chan *fsl_chan, dma_addr_t addr) | |
113 | +static void set_cdar(struct fsldma_chan *chan, dma_addr_t addr) | |
114 | 114 | { |
115 | - DMA_OUT(fsl_chan, &fsl_chan->regs->cdar, addr | FSL_DMA_SNEN, 64); | |
115 | + DMA_OUT(chan, &chan->regs->cdar, addr | FSL_DMA_SNEN, 64); | |
116 | 116 | } |
117 | 117 | |
118 | -static dma_addr_t get_cdar(struct fsldma_chan *fsl_chan) | |
118 | +static dma_addr_t get_cdar(struct fsldma_chan *chan) | |
119 | 119 | { |
120 | - return DMA_IN(fsl_chan, &fsl_chan->regs->cdar, 64) & ~FSL_DMA_SNEN; | |
120 | + return DMA_IN(chan, &chan->regs->cdar, 64) & ~FSL_DMA_SNEN; | |
121 | 121 | } |
122 | 122 | |
123 | -static void set_ndar(struct fsldma_chan *fsl_chan, dma_addr_t addr) | |
123 | +static void set_ndar(struct fsldma_chan *chan, dma_addr_t addr) | |
124 | 124 | { |
125 | - DMA_OUT(fsl_chan, &fsl_chan->regs->ndar, addr, 64); | |
125 | + DMA_OUT(chan, &chan->regs->ndar, addr, 64); | |
126 | 126 | } |
127 | 127 | |
128 | -static dma_addr_t get_ndar(struct fsldma_chan *fsl_chan) | |
128 | +static dma_addr_t get_ndar(struct fsldma_chan *chan) | |
129 | 129 | { |
130 | - return DMA_IN(fsl_chan, &fsl_chan->regs->ndar, 64); | |
130 | + return DMA_IN(chan, &chan->regs->ndar, 64); | |
131 | 131 | } |
132 | 132 | |
133 | -static u32 get_bcr(struct fsldma_chan *fsl_chan) | |
133 | +static u32 get_bcr(struct fsldma_chan *chan) | |
134 | 134 | { |
135 | - return DMA_IN(fsl_chan, &fsl_chan->regs->bcr, 32); | |
135 | + return DMA_IN(chan, &chan->regs->bcr, 32); | |
136 | 136 | } |
137 | 137 | |
138 | -static int dma_is_idle(struct fsldma_chan *fsl_chan) | |
138 | +static int dma_is_idle(struct fsldma_chan *chan) | |
139 | 139 | { |
140 | - u32 sr = get_sr(fsl_chan); | |
140 | + u32 sr = get_sr(chan); | |
141 | 141 | return (!(sr & FSL_DMA_SR_CB)) || (sr & FSL_DMA_SR_CH); |
142 | 142 | } |
143 | 143 | |
144 | -static void dma_start(struct fsldma_chan *fsl_chan) | |
144 | +static void dma_start(struct fsldma_chan *chan) | |
145 | 145 | { |
146 | 146 | u32 mode; |
147 | 147 | |
148 | - mode = DMA_IN(fsl_chan, &fsl_chan->regs->mr, 32); | |
148 | + mode = DMA_IN(chan, &chan->regs->mr, 32); | |
149 | 149 | |
150 | - if ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) { | |
151 | - if (fsl_chan->feature & FSL_DMA_CHAN_PAUSE_EXT) { | |
152 | - DMA_OUT(fsl_chan, &fsl_chan->regs->bcr, 0, 32); | |
150 | + if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) { | |
151 | + if (chan->feature & FSL_DMA_CHAN_PAUSE_EXT) { | |
152 | + DMA_OUT(chan, &chan->regs->bcr, 0, 32); | |
153 | 153 | mode |= FSL_DMA_MR_EMP_EN; |
154 | 154 | } else { |
155 | 155 | mode &= ~FSL_DMA_MR_EMP_EN; |
156 | 156 | } |
157 | 157 | } |
158 | 158 | |
159 | - if (fsl_chan->feature & FSL_DMA_CHAN_START_EXT) | |
159 | + if (chan->feature & FSL_DMA_CHAN_START_EXT) | |
160 | 160 | mode |= FSL_DMA_MR_EMS_EN; |
161 | 161 | else |
162 | 162 | mode |= FSL_DMA_MR_CS; |
163 | 163 | |
164 | - DMA_OUT(fsl_chan, &fsl_chan->regs->mr, mode, 32); | |
164 | + DMA_OUT(chan, &chan->regs->mr, mode, 32); | |
165 | 165 | } |
166 | 166 | |
167 | -static void dma_halt(struct fsldma_chan *fsl_chan) | |
167 | +static void dma_halt(struct fsldma_chan *chan) | |
168 | 168 | { |
169 | 169 | u32 mode; |
170 | 170 | int i; |
171 | 171 | |
172 | - mode = DMA_IN(fsl_chan, &fsl_chan->regs->mr, 32); | |
172 | + mode = DMA_IN(chan, &chan->regs->mr, 32); | |
173 | 173 | mode |= FSL_DMA_MR_CA; |
174 | - DMA_OUT(fsl_chan, &fsl_chan->regs->mr, mode, 32); | |
174 | + DMA_OUT(chan, &chan->regs->mr, mode, 32); | |
175 | 175 | |
176 | 176 | mode &= ~(FSL_DMA_MR_CS | FSL_DMA_MR_EMS_EN | FSL_DMA_MR_CA); |
177 | - DMA_OUT(fsl_chan, &fsl_chan->regs->mr, mode, 32); | |
177 | + DMA_OUT(chan, &chan->regs->mr, mode, 32); | |
178 | 178 | |
179 | 179 | for (i = 0; i < 100; i++) { |
180 | - if (dma_is_idle(fsl_chan)) | |
180 | + if (dma_is_idle(chan)) | |
181 | 181 | break; |
182 | 182 | udelay(10); |
183 | 183 | } |
184 | 184 | |
185 | - if (i >= 100 && !dma_is_idle(fsl_chan)) | |
186 | - dev_err(fsl_chan->dev, "DMA halt timeout!\n"); | |
185 | + if (i >= 100 && !dma_is_idle(chan)) | |
186 | + dev_err(chan->dev, "DMA halt timeout!\n"); | |
187 | 187 | } |
188 | 188 | |
189 | -static void set_ld_eol(struct fsldma_chan *fsl_chan, | |
189 | +static void set_ld_eol(struct fsldma_chan *chan, | |
190 | 190 | struct fsl_desc_sw *desc) |
191 | 191 | { |
192 | 192 | u64 snoop_bits; |
193 | 193 | |
194 | - snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX) | |
194 | + snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX) | |
195 | 195 | ? FSL_DMA_SNEN : 0; |
196 | 196 | |
197 | - desc->hw.next_ln_addr = CPU_TO_DMA(fsl_chan, | |
198 | - DMA_TO_CPU(fsl_chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL | |
197 | + desc->hw.next_ln_addr = CPU_TO_DMA(chan, | |
198 | + DMA_TO_CPU(chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL | |
199 | 199 | | snoop_bits, 64); |
200 | 200 | } |
201 | 201 | |
202 | -static void append_ld_queue(struct fsldma_chan *fsl_chan, | |
202 | +static void append_ld_queue(struct fsldma_chan *chan, | |
203 | 203 | struct fsl_desc_sw *new_desc) |
204 | 204 | { |
205 | - struct fsl_desc_sw *queue_tail = to_fsl_desc(fsl_chan->ld_queue.prev); | |
205 | + struct fsl_desc_sw *queue_tail = to_fsl_desc(chan->ld_queue.prev); | |
206 | 206 | |
207 | - if (list_empty(&fsl_chan->ld_queue)) | |
207 | + if (list_empty(&chan->ld_queue)) | |
208 | 208 | return; |
209 | 209 | |
210 | 210 | /* Link to the new descriptor physical address and |
... | ... | @@ -214,15 +214,15 @@ |
214 | 214 | * |
215 | 215 | * For FSL_DMA_IP_83xx, the snoop enable bit need be set. |
216 | 216 | */ |
217 | - queue_tail->hw.next_ln_addr = CPU_TO_DMA(fsl_chan, | |
217 | + queue_tail->hw.next_ln_addr = CPU_TO_DMA(chan, | |
218 | 218 | new_desc->async_tx.phys | FSL_DMA_EOSIE | |
219 | - (((fsl_chan->feature & FSL_DMA_IP_MASK) | |
219 | + (((chan->feature & FSL_DMA_IP_MASK) | |
220 | 220 | == FSL_DMA_IP_83XX) ? FSL_DMA_SNEN : 0), 64); |
221 | 221 | } |
222 | 222 | |
223 | 223 | /** |
224 | 224 | * fsl_chan_set_src_loop_size - Set source address hold transfer size |
225 | - * @fsl_chan : Freescale DMA channel | |
225 | + * @chan : Freescale DMA channel | |
226 | 226 | * @size : Address loop size, 0 for disable loop |
227 | 227 | * |
228 | 228 | * The set source address hold transfer size. The source |
... | ... | @@ -231,11 +231,11 @@ |
231 | 231 | * read data from SA, SA + 1, SA + 2, SA + 3, then loop back to SA, |
232 | 232 | * SA + 1 ... and so on. |
233 | 233 | */ |
234 | -static void fsl_chan_set_src_loop_size(struct fsldma_chan *fsl_chan, int size) | |
234 | +static void fsl_chan_set_src_loop_size(struct fsldma_chan *chan, int size) | |
235 | 235 | { |
236 | 236 | u32 mode; |
237 | 237 | |
238 | - mode = DMA_IN(fsl_chan, &fsl_chan->regs->mr, 32); | |
238 | + mode = DMA_IN(chan, &chan->regs->mr, 32); | |
239 | 239 | |
240 | 240 | switch (size) { |
241 | 241 | case 0: |
... | ... | @@ -249,12 +249,12 @@ |
249 | 249 | break; |
250 | 250 | } |
251 | 251 | |
252 | - DMA_OUT(fsl_chan, &fsl_chan->regs->mr, mode, 32); | |
252 | + DMA_OUT(chan, &chan->regs->mr, mode, 32); | |
253 | 253 | } |
254 | 254 | |
255 | 255 | /** |
256 | 256 | * fsl_chan_set_dst_loop_size - Set destination address hold transfer size |
257 | - * @fsl_chan : Freescale DMA channel | |
257 | + * @chan : Freescale DMA channel | |
258 | 258 | * @size : Address loop size, 0 for disable loop |
259 | 259 | * |
260 | 260 | * The set destination address hold transfer size. The destination |
... | ... | @@ -263,11 +263,11 @@ |
263 | 263 | * write data to TA, TA + 1, TA + 2, TA + 3, then loop back to TA, |
264 | 264 | * TA + 1 ... and so on. |
265 | 265 | */ |
266 | -static void fsl_chan_set_dst_loop_size(struct fsldma_chan *fsl_chan, int size) | |
266 | +static void fsl_chan_set_dst_loop_size(struct fsldma_chan *chan, int size) | |
267 | 267 | { |
268 | 268 | u32 mode; |
269 | 269 | |
270 | - mode = DMA_IN(fsl_chan, &fsl_chan->regs->mr, 32); | |
270 | + mode = DMA_IN(chan, &chan->regs->mr, 32); | |
271 | 271 | |
272 | 272 | switch (size) { |
273 | 273 | case 0: |
... | ... | @@ -281,12 +281,12 @@ |
281 | 281 | break; |
282 | 282 | } |
283 | 283 | |
284 | - DMA_OUT(fsl_chan, &fsl_chan->regs->mr, mode, 32); | |
284 | + DMA_OUT(chan, &chan->regs->mr, mode, 32); | |
285 | 285 | } |
286 | 286 | |
287 | 287 | /** |
288 | 288 | * fsl_chan_set_request_count - Set DMA Request Count for external control |
289 | - * @fsl_chan : Freescale DMA channel | |
289 | + * @chan : Freescale DMA channel | |
290 | 290 | * @size : Number of bytes to transfer in a single request |
291 | 291 | * |
292 | 292 | * The Freescale DMA channel can be controlled by the external signal DREQ#. |
... | ... | @@ -296,38 +296,38 @@ |
296 | 296 | * |
297 | 297 | * A size of 0 disables external pause control. The maximum size is 1024. |
298 | 298 | */ |
299 | -static void fsl_chan_set_request_count(struct fsldma_chan *fsl_chan, int size) | |
299 | +static void fsl_chan_set_request_count(struct fsldma_chan *chan, int size) | |
300 | 300 | { |
301 | 301 | u32 mode; |
302 | 302 | |
303 | 303 | BUG_ON(size > 1024); |
304 | 304 | |
305 | - mode = DMA_IN(fsl_chan, &fsl_chan->regs->mr, 32); | |
305 | + mode = DMA_IN(chan, &chan->regs->mr, 32); | |
306 | 306 | mode |= (__ilog2(size) << 24) & 0x0f000000; |
307 | 307 | |
308 | - DMA_OUT(fsl_chan, &fsl_chan->regs->mr, mode, 32); | |
308 | + DMA_OUT(chan, &chan->regs->mr, mode, 32); | |
309 | 309 | } |
310 | 310 | |
311 | 311 | /** |
312 | 312 | * fsl_chan_toggle_ext_pause - Toggle channel external pause status |
313 | - * @fsl_chan : Freescale DMA channel | |
313 | + * @chan : Freescale DMA channel | |
314 | 314 | * @enable : 0 is disabled, 1 is enabled. |
315 | 315 | * |
316 | 316 | * The Freescale DMA channel can be controlled by the external signal DREQ#. |
317 | 317 | * The DMA Request Count feature should be used in addition to this feature |
318 | 318 | * to set the number of bytes to transfer before pausing the channel. |
319 | 319 | */ |
320 | -static void fsl_chan_toggle_ext_pause(struct fsldma_chan *fsl_chan, int enable) | |
320 | +static void fsl_chan_toggle_ext_pause(struct fsldma_chan *chan, int enable) | |
321 | 321 | { |
322 | 322 | if (enable) |
323 | - fsl_chan->feature |= FSL_DMA_CHAN_PAUSE_EXT; | |
323 | + chan->feature |= FSL_DMA_CHAN_PAUSE_EXT; | |
324 | 324 | else |
325 | - fsl_chan->feature &= ~FSL_DMA_CHAN_PAUSE_EXT; | |
325 | + chan->feature &= ~FSL_DMA_CHAN_PAUSE_EXT; | |
326 | 326 | } |
327 | 327 | |
328 | 328 | /** |
329 | 329 | * fsl_chan_toggle_ext_start - Toggle channel external start status |
330 | - * @fsl_chan : Freescale DMA channel | |
330 | + * @chan : Freescale DMA channel | |
331 | 331 | * @enable : 0 is disabled, 1 is enabled. |
332 | 332 | * |
333 | 333 | * If enable the external start, the channel can be started by an |
... | ... | @@ -335,26 +335,26 @@ |
335 | 335 | * transfer immediately. The DMA channel will wait for the |
336 | 336 | * control pin asserted. |
337 | 337 | */ |
338 | -static void fsl_chan_toggle_ext_start(struct fsldma_chan *fsl_chan, int enable) | |
338 | +static void fsl_chan_toggle_ext_start(struct fsldma_chan *chan, int enable) | |
339 | 339 | { |
340 | 340 | if (enable) |
341 | - fsl_chan->feature |= FSL_DMA_CHAN_START_EXT; | |
341 | + chan->feature |= FSL_DMA_CHAN_START_EXT; | |
342 | 342 | else |
343 | - fsl_chan->feature &= ~FSL_DMA_CHAN_START_EXT; | |
343 | + chan->feature &= ~FSL_DMA_CHAN_START_EXT; | |
344 | 344 | } |
345 | 345 | |
346 | 346 | static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx) |
347 | 347 | { |
348 | - struct fsldma_chan *fsl_chan = to_fsl_chan(tx->chan); | |
348 | + struct fsldma_chan *chan = to_fsl_chan(tx->chan); | |
349 | 349 | struct fsl_desc_sw *desc = tx_to_fsl_desc(tx); |
350 | 350 | struct fsl_desc_sw *child; |
351 | 351 | unsigned long flags; |
352 | 352 | dma_cookie_t cookie; |
353 | 353 | |
354 | 354 | /* cookie increment and adding to ld_queue must be atomic */ |
355 | - spin_lock_irqsave(&fsl_chan->desc_lock, flags); | |
355 | + spin_lock_irqsave(&chan->desc_lock, flags); | |
356 | 356 | |
357 | - cookie = fsl_chan->common.cookie; | |
357 | + cookie = chan->common.cookie; | |
358 | 358 | list_for_each_entry(child, &desc->tx_list, node) { |
359 | 359 | cookie++; |
360 | 360 | if (cookie < 0) |
... | ... | @@ -363,33 +363,33 @@ |
363 | 363 | desc->async_tx.cookie = cookie; |
364 | 364 | } |
365 | 365 | |
366 | - fsl_chan->common.cookie = cookie; | |
367 | - append_ld_queue(fsl_chan, desc); | |
368 | - list_splice_init(&desc->tx_list, fsl_chan->ld_queue.prev); | |
366 | + chan->common.cookie = cookie; | |
367 | + append_ld_queue(chan, desc); | |
368 | + list_splice_init(&desc->tx_list, chan->ld_queue.prev); | |
369 | 369 | |
370 | - spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); | |
370 | + spin_unlock_irqrestore(&chan->desc_lock, flags); | |
371 | 371 | |
372 | 372 | return cookie; |
373 | 373 | } |
374 | 374 | |
375 | 375 | /** |
376 | 376 | * fsl_dma_alloc_descriptor - Allocate descriptor from channel's DMA pool. |
377 | - * @fsl_chan : Freescale DMA channel | |
377 | + * @chan : Freescale DMA channel | |
378 | 378 | * |
379 | 379 | * Return - The descriptor allocated. NULL for failed. |
380 | 380 | */ |
381 | 381 | static struct fsl_desc_sw *fsl_dma_alloc_descriptor( |
382 | - struct fsldma_chan *fsl_chan) | |
382 | + struct fsldma_chan *chan) | |
383 | 383 | { |
384 | 384 | dma_addr_t pdesc; |
385 | 385 | struct fsl_desc_sw *desc_sw; |
386 | 386 | |
387 | - desc_sw = dma_pool_alloc(fsl_chan->desc_pool, GFP_ATOMIC, &pdesc); | |
387 | + desc_sw = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &pdesc); | |
388 | 388 | if (desc_sw) { |
389 | 389 | memset(desc_sw, 0, sizeof(struct fsl_desc_sw)); |
390 | 390 | INIT_LIST_HEAD(&desc_sw->tx_list); |
391 | 391 | dma_async_tx_descriptor_init(&desc_sw->async_tx, |
392 | - &fsl_chan->common); | |
392 | + &chan->common); | |
393 | 393 | desc_sw->async_tx.tx_submit = fsl_dma_tx_submit; |
394 | 394 | desc_sw->async_tx.phys = pdesc; |
395 | 395 | } |
... | ... | @@ -400,29 +400,29 @@ |
400 | 400 | |
401 | 401 | /** |
402 | 402 | * fsl_dma_alloc_chan_resources - Allocate resources for DMA channel. |
403 | - * @fsl_chan : Freescale DMA channel | |
403 | + * @chan : Freescale DMA channel | |
404 | 404 | * |
405 | 405 | * This function will create a dma pool for descriptor allocation. |
406 | 406 | * |
407 | 407 | * Return - The number of descriptors allocated. |
408 | 408 | */ |
409 | -static int fsl_dma_alloc_chan_resources(struct dma_chan *chan) | |
409 | +static int fsl_dma_alloc_chan_resources(struct dma_chan *dchan) | |
410 | 410 | { |
411 | - struct fsldma_chan *fsl_chan = to_fsl_chan(chan); | |
411 | + struct fsldma_chan *chan = to_fsl_chan(dchan); | |
412 | 412 | |
413 | 413 | /* Has this channel already been allocated? */ |
414 | - if (fsl_chan->desc_pool) | |
414 | + if (chan->desc_pool) | |
415 | 415 | return 1; |
416 | 416 | |
417 | 417 | /* We need the descriptor to be aligned to 32bytes |
418 | 418 | * for meeting FSL DMA specification requirement. |
419 | 419 | */ |
420 | - fsl_chan->desc_pool = dma_pool_create("fsl_dma_engine_desc_pool", | |
421 | - fsl_chan->dev, sizeof(struct fsl_desc_sw), | |
420 | + chan->desc_pool = dma_pool_create("fsl_dma_engine_desc_pool", | |
421 | + chan->dev, sizeof(struct fsl_desc_sw), | |
422 | 422 | 32, 0); |
423 | - if (!fsl_chan->desc_pool) { | |
424 | - dev_err(fsl_chan->dev, "No memory for channel %d " | |
425 | - "descriptor dma pool.\n", fsl_chan->id); | |
423 | + if (!chan->desc_pool) { | |
424 | + dev_err(chan->dev, "No memory for channel %d " | |
425 | + "descriptor dma pool.\n", chan->id); | |
426 | 426 | return 0; |
427 | 427 | } |
428 | 428 | |
... | ... | @@ -431,45 +431,45 @@ |
431 | 431 | |
432 | 432 | /** |
433 | 433 | * fsl_dma_free_chan_resources - Free all resources of the channel. |
434 | - * @fsl_chan : Freescale DMA channel | |
434 | + * @chan : Freescale DMA channel | |
435 | 435 | */ |
436 | -static void fsl_dma_free_chan_resources(struct dma_chan *chan) | |
436 | +static void fsl_dma_free_chan_resources(struct dma_chan *dchan) | |
437 | 437 | { |
438 | - struct fsldma_chan *fsl_chan = to_fsl_chan(chan); | |
438 | + struct fsldma_chan *chan = to_fsl_chan(dchan); | |
439 | 439 | struct fsl_desc_sw *desc, *_desc; |
440 | 440 | unsigned long flags; |
441 | 441 | |
442 | - dev_dbg(fsl_chan->dev, "Free all channel resources.\n"); | |
443 | - spin_lock_irqsave(&fsl_chan->desc_lock, flags); | |
444 | - list_for_each_entry_safe(desc, _desc, &fsl_chan->ld_queue, node) { | |
442 | + dev_dbg(chan->dev, "Free all channel resources.\n"); | |
443 | + spin_lock_irqsave(&chan->desc_lock, flags); | |
444 | + list_for_each_entry_safe(desc, _desc, &chan->ld_queue, node) { | |
445 | 445 | #ifdef FSL_DMA_LD_DEBUG |
446 | - dev_dbg(fsl_chan->dev, | |
446 | + dev_dbg(chan->dev, | |
447 | 447 | "LD %p will be released.\n", desc); |
448 | 448 | #endif |
449 | 449 | list_del(&desc->node); |
450 | 450 | /* free link descriptor */ |
451 | - dma_pool_free(fsl_chan->desc_pool, desc, desc->async_tx.phys); | |
451 | + dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys); | |
452 | 452 | } |
453 | - spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); | |
454 | - dma_pool_destroy(fsl_chan->desc_pool); | |
453 | + spin_unlock_irqrestore(&chan->desc_lock, flags); | |
454 | + dma_pool_destroy(chan->desc_pool); | |
455 | 455 | |
456 | - fsl_chan->desc_pool = NULL; | |
456 | + chan->desc_pool = NULL; | |
457 | 457 | } |
458 | 458 | |
459 | 459 | static struct dma_async_tx_descriptor * |
460 | -fsl_dma_prep_interrupt(struct dma_chan *chan, unsigned long flags) | |
460 | +fsl_dma_prep_interrupt(struct dma_chan *dchan, unsigned long flags) | |
461 | 461 | { |
462 | - struct fsldma_chan *fsl_chan; | |
462 | + struct fsldma_chan *chan; | |
463 | 463 | struct fsl_desc_sw *new; |
464 | 464 | |
465 | - if (!chan) | |
465 | + if (!dchan) | |
466 | 466 | return NULL; |
467 | 467 | |
468 | - fsl_chan = to_fsl_chan(chan); | |
468 | + chan = to_fsl_chan(dchan); | |
469 | 469 | |
470 | - new = fsl_dma_alloc_descriptor(fsl_chan); | |
470 | + new = fsl_dma_alloc_descriptor(chan); | |
471 | 471 | if (!new) { |
472 | - dev_err(fsl_chan->dev, "No free memory for link descriptor\n"); | |
472 | + dev_err(chan->dev, "No free memory for link descriptor\n"); | |
473 | 473 | return NULL; |
474 | 474 | } |
475 | 475 | |
... | ... | @@ -480,51 +480,51 @@ |
480 | 480 | list_add_tail(&new->node, &new->tx_list); |
481 | 481 | |
482 | 482 | /* Set End-of-link to the last link descriptor of new list*/ |
483 | - set_ld_eol(fsl_chan, new); | |
483 | + set_ld_eol(chan, new); | |
484 | 484 | |
485 | 485 | return &new->async_tx; |
486 | 486 | } |
487 | 487 | |
488 | 488 | static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy( |
489 | - struct dma_chan *chan, dma_addr_t dma_dst, dma_addr_t dma_src, | |
489 | + struct dma_chan *dchan, dma_addr_t dma_dst, dma_addr_t dma_src, | |
490 | 490 | size_t len, unsigned long flags) |
491 | 491 | { |
492 | - struct fsldma_chan *fsl_chan; | |
492 | + struct fsldma_chan *chan; | |
493 | 493 | struct fsl_desc_sw *first = NULL, *prev = NULL, *new; |
494 | 494 | struct list_head *list; |
495 | 495 | size_t copy; |
496 | 496 | |
497 | - if (!chan) | |
497 | + if (!dchan) | |
498 | 498 | return NULL; |
499 | 499 | |
500 | 500 | if (!len) |
501 | 501 | return NULL; |
502 | 502 | |
503 | - fsl_chan = to_fsl_chan(chan); | |
503 | + chan = to_fsl_chan(dchan); | |
504 | 504 | |
505 | 505 | do { |
506 | 506 | |
507 | 507 | /* Allocate the link descriptor from DMA pool */ |
508 | - new = fsl_dma_alloc_descriptor(fsl_chan); | |
508 | + new = fsl_dma_alloc_descriptor(chan); | |
509 | 509 | if (!new) { |
510 | - dev_err(fsl_chan->dev, | |
510 | + dev_err(chan->dev, | |
511 | 511 | "No free memory for link descriptor\n"); |
512 | 512 | goto fail; |
513 | 513 | } |
514 | 514 | #ifdef FSL_DMA_LD_DEBUG |
515 | - dev_dbg(fsl_chan->dev, "new link desc alloc %p\n", new); | |
515 | + dev_dbg(chan->dev, "new link desc alloc %p\n", new); | |
516 | 516 | #endif |
517 | 517 | |
518 | 518 | copy = min(len, (size_t)FSL_DMA_BCR_MAX_CNT); |
519 | 519 | |
520 | - set_desc_cnt(fsl_chan, &new->hw, copy); | |
521 | - set_desc_src(fsl_chan, &new->hw, dma_src); | |
522 | - set_desc_dst(fsl_chan, &new->hw, dma_dst); | |
520 | + set_desc_cnt(chan, &new->hw, copy); | |
521 | + set_desc_src(chan, &new->hw, dma_src); | |
522 | + set_desc_dst(chan, &new->hw, dma_dst); | |
523 | 523 | |
524 | 524 | if (!first) |
525 | 525 | first = new; |
526 | 526 | else |
527 | - set_desc_next(fsl_chan, &prev->hw, new->async_tx.phys); | |
527 | + set_desc_next(chan, &prev->hw, new->async_tx.phys); | |
528 | 528 | |
529 | 529 | new->async_tx.cookie = 0; |
530 | 530 | async_tx_ack(&new->async_tx); |
... | ... | @@ -542,7 +542,7 @@ |
542 | 542 | new->async_tx.cookie = -EBUSY; |
543 | 543 | |
544 | 544 | /* Set End-of-link to the last link descriptor of new list*/ |
545 | - set_ld_eol(fsl_chan, new); | |
545 | + set_ld_eol(chan, new); | |
546 | 546 | |
547 | 547 | return &first->async_tx; |
548 | 548 | |
... | ... | @@ -553,7 +553,7 @@ |
553 | 553 | list = &first->tx_list; |
554 | 554 | list_for_each_entry_safe_reverse(new, prev, list, node) { |
555 | 555 | list_del(&new->node); |
556 | - dma_pool_free(fsl_chan->desc_pool, new, new->async_tx.phys); | |
556 | + dma_pool_free(chan->desc_pool, new, new->async_tx.phys); | |
557 | 557 | } |
558 | 558 | |
559 | 559 | return NULL; |
560 | 560 | |
... | ... | @@ -572,10 +572,10 @@ |
572 | 572 | * chan->private variable. |
573 | 573 | */ |
574 | 574 | static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg( |
575 | - struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len, | |
575 | + struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len, | |
576 | 576 | enum dma_data_direction direction, unsigned long flags) |
577 | 577 | { |
578 | - struct fsldma_chan *fsl_chan; | |
578 | + struct fsldma_chan *chan; | |
579 | 579 | struct fsl_desc_sw *first = NULL, *prev = NULL, *new = NULL; |
580 | 580 | struct fsl_dma_slave *slave; |
581 | 581 | struct list_head *tx_list; |
... | ... | @@ -588,14 +588,14 @@ |
588 | 588 | struct fsl_dma_hw_addr *hw; |
589 | 589 | dma_addr_t dma_dst, dma_src; |
590 | 590 | |
591 | - if (!chan) | |
591 | + if (!dchan) | |
592 | 592 | return NULL; |
593 | 593 | |
594 | - if (!chan->private) | |
594 | + if (!dchan->private) | |
595 | 595 | return NULL; |
596 | 596 | |
597 | - fsl_chan = to_fsl_chan(chan); | |
598 | - slave = chan->private; | |
597 | + chan = to_fsl_chan(dchan); | |
598 | + slave = dchan->private; | |
599 | 599 | |
600 | 600 | if (list_empty(&slave->addresses)) |
601 | 601 | return NULL; |
... | ... | @@ -644,14 +644,14 @@ |
644 | 644 | } |
645 | 645 | |
646 | 646 | /* Allocate the link descriptor from DMA pool */ |
647 | - new = fsl_dma_alloc_descriptor(fsl_chan); | |
647 | + new = fsl_dma_alloc_descriptor(chan); | |
648 | 648 | if (!new) { |
649 | - dev_err(fsl_chan->dev, "No free memory for " | |
649 | + dev_err(chan->dev, "No free memory for " | |
650 | 650 | "link descriptor\n"); |
651 | 651 | goto fail; |
652 | 652 | } |
653 | 653 | #ifdef FSL_DMA_LD_DEBUG |
654 | - dev_dbg(fsl_chan->dev, "new link desc alloc %p\n", new); | |
654 | + dev_dbg(chan->dev, "new link desc alloc %p\n", new); | |
655 | 655 | #endif |
656 | 656 | |
657 | 657 | /* |
... | ... | @@ -678,9 +678,9 @@ |
678 | 678 | } |
679 | 679 | |
680 | 680 | /* Fill in the descriptor */ |
681 | - set_desc_cnt(fsl_chan, &new->hw, copy); | |
682 | - set_desc_src(fsl_chan, &new->hw, dma_src); | |
683 | - set_desc_dst(fsl_chan, &new->hw, dma_dst); | |
681 | + set_desc_cnt(chan, &new->hw, copy); | |
682 | + set_desc_src(chan, &new->hw, dma_src); | |
683 | + set_desc_dst(chan, &new->hw, dma_dst); | |
684 | 684 | |
685 | 685 | /* |
686 | 686 | * If this is not the first descriptor, chain the |
... | ... | @@ -689,7 +689,7 @@ |
689 | 689 | if (!first) { |
690 | 690 | first = new; |
691 | 691 | } else { |
692 | - set_desc_next(fsl_chan, &prev->hw, | |
692 | + set_desc_next(chan, &prev->hw, | |
693 | 693 | new->async_tx.phys); |
694 | 694 | } |
695 | 695 | |
... | ... | @@ -715,23 +715,23 @@ |
715 | 715 | new->async_tx.cookie = -EBUSY; |
716 | 716 | |
717 | 717 | /* Set End-of-link to the last link descriptor of new list */ |
718 | - set_ld_eol(fsl_chan, new); | |
718 | + set_ld_eol(chan, new); | |
719 | 719 | |
720 | 720 | /* Enable extra controller features */ |
721 | - if (fsl_chan->set_src_loop_size) | |
722 | - fsl_chan->set_src_loop_size(fsl_chan, slave->src_loop_size); | |
721 | + if (chan->set_src_loop_size) | |
722 | + chan->set_src_loop_size(chan, slave->src_loop_size); | |
723 | 723 | |
724 | - if (fsl_chan->set_dst_loop_size) | |
725 | - fsl_chan->set_dst_loop_size(fsl_chan, slave->dst_loop_size); | |
724 | + if (chan->set_dst_loop_size) | |
725 | + chan->set_dst_loop_size(chan, slave->dst_loop_size); | |
726 | 726 | |
727 | - if (fsl_chan->toggle_ext_start) | |
728 | - fsl_chan->toggle_ext_start(fsl_chan, slave->external_start); | |
727 | + if (chan->toggle_ext_start) | |
728 | + chan->toggle_ext_start(chan, slave->external_start); | |
729 | 729 | |
730 | - if (fsl_chan->toggle_ext_pause) | |
731 | - fsl_chan->toggle_ext_pause(fsl_chan, slave->external_pause); | |
730 | + if (chan->toggle_ext_pause) | |
731 | + chan->toggle_ext_pause(chan, slave->external_pause); | |
732 | 732 | |
733 | - if (fsl_chan->set_request_count) | |
734 | - fsl_chan->set_request_count(fsl_chan, slave->request_count); | |
733 | + if (chan->set_request_count) | |
734 | + chan->set_request_count(chan, slave->request_count); | |
735 | 735 | |
736 | 736 | return &first->async_tx; |
737 | 737 | |
... | ... | @@ -751,62 +751,62 @@ |
751 | 751 | tx_list = &first->tx_list; |
752 | 752 | list_for_each_entry_safe_reverse(new, prev, tx_list, node) { |
753 | 753 | list_del_init(&new->node); |
754 | - dma_pool_free(fsl_chan->desc_pool, new, new->async_tx.phys); | |
754 | + dma_pool_free(chan->desc_pool, new, new->async_tx.phys); | |
755 | 755 | } |
756 | 756 | |
757 | 757 | return NULL; |
758 | 758 | } |
759 | 759 | |
760 | -static void fsl_dma_device_terminate_all(struct dma_chan *chan) | |
760 | +static void fsl_dma_device_terminate_all(struct dma_chan *dchan) | |
761 | 761 | { |
762 | - struct fsldma_chan *fsl_chan; | |
762 | + struct fsldma_chan *chan; | |
763 | 763 | struct fsl_desc_sw *desc, *tmp; |
764 | 764 | unsigned long flags; |
765 | 765 | |
766 | - if (!chan) | |
766 | + if (!dchan) | |
767 | 767 | return; |
768 | 768 | |
769 | - fsl_chan = to_fsl_chan(chan); | |
769 | + chan = to_fsl_chan(dchan); | |
770 | 770 | |
771 | 771 | /* Halt the DMA engine */ |
772 | - dma_halt(fsl_chan); | |
772 | + dma_halt(chan); | |
773 | 773 | |
774 | - spin_lock_irqsave(&fsl_chan->desc_lock, flags); | |
774 | + spin_lock_irqsave(&chan->desc_lock, flags); | |
775 | 775 | |
776 | 776 | /* Remove and free all of the descriptors in the LD queue */ |
777 | - list_for_each_entry_safe(desc, tmp, &fsl_chan->ld_queue, node) { | |
777 | + list_for_each_entry_safe(desc, tmp, &chan->ld_queue, node) { | |
778 | 778 | list_del(&desc->node); |
779 | - dma_pool_free(fsl_chan->desc_pool, desc, desc->async_tx.phys); | |
779 | + dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys); | |
780 | 780 | } |
781 | 781 | |
782 | - spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); | |
782 | + spin_unlock_irqrestore(&chan->desc_lock, flags); | |
783 | 783 | } |
784 | 784 | |
785 | 785 | /** |
786 | 786 | * fsl_dma_update_completed_cookie - Update the completed cookie. |
787 | - * @fsl_chan : Freescale DMA channel | |
787 | + * @chan : Freescale DMA channel | |
788 | 788 | */ |
789 | -static void fsl_dma_update_completed_cookie(struct fsldma_chan *fsl_chan) | |
789 | +static void fsl_dma_update_completed_cookie(struct fsldma_chan *chan) | |
790 | 790 | { |
791 | 791 | struct fsl_desc_sw *cur_desc, *desc; |
792 | 792 | dma_addr_t ld_phy; |
793 | 793 | |
794 | - ld_phy = get_cdar(fsl_chan) & FSL_DMA_NLDA_MASK; | |
794 | + ld_phy = get_cdar(chan) & FSL_DMA_NLDA_MASK; | |
795 | 795 | |
796 | 796 | if (ld_phy) { |
797 | 797 | cur_desc = NULL; |
798 | - list_for_each_entry(desc, &fsl_chan->ld_queue, node) | |
798 | + list_for_each_entry(desc, &chan->ld_queue, node) | |
799 | 799 | if (desc->async_tx.phys == ld_phy) { |
800 | 800 | cur_desc = desc; |
801 | 801 | break; |
802 | 802 | } |
803 | 803 | |
804 | 804 | if (cur_desc && cur_desc->async_tx.cookie) { |
805 | - if (dma_is_idle(fsl_chan)) | |
806 | - fsl_chan->completed_cookie = | |
805 | + if (dma_is_idle(chan)) | |
806 | + chan->completed_cookie = | |
807 | 807 | cur_desc->async_tx.cookie; |
808 | 808 | else |
809 | - fsl_chan->completed_cookie = | |
809 | + chan->completed_cookie = | |
810 | 810 | cur_desc->async_tx.cookie - 1; |
811 | 811 | } |
812 | 812 | } |
... | ... | @@ -814,27 +814,27 @@ |
814 | 814 | |
815 | 815 | /** |
816 | 816 | * fsl_chan_ld_cleanup - Clean up link descriptors |
817 | - * @fsl_chan : Freescale DMA channel | |
817 | + * @chan : Freescale DMA channel | |
818 | 818 | * |
819 | 819 | * This function clean up the ld_queue of DMA channel. |
820 | 820 | * If 'in_intr' is set, the function will move the link descriptor to |
821 | 821 | * the recycle list. Otherwise, free it directly. |
822 | 822 | */ |
823 | -static void fsl_chan_ld_cleanup(struct fsldma_chan *fsl_chan) | |
823 | +static void fsl_chan_ld_cleanup(struct fsldma_chan *chan) | |
824 | 824 | { |
825 | 825 | struct fsl_desc_sw *desc, *_desc; |
826 | 826 | unsigned long flags; |
827 | 827 | |
828 | - spin_lock_irqsave(&fsl_chan->desc_lock, flags); | |
828 | + spin_lock_irqsave(&chan->desc_lock, flags); | |
829 | 829 | |
830 | - dev_dbg(fsl_chan->dev, "chan completed_cookie = %d\n", | |
831 | - fsl_chan->completed_cookie); | |
832 | - list_for_each_entry_safe(desc, _desc, &fsl_chan->ld_queue, node) { | |
830 | + dev_dbg(chan->dev, "chan completed_cookie = %d\n", | |
831 | + chan->completed_cookie); | |
832 | + list_for_each_entry_safe(desc, _desc, &chan->ld_queue, node) { | |
833 | 833 | dma_async_tx_callback callback; |
834 | 834 | void *callback_param; |
835 | 835 | |
836 | 836 | if (dma_async_is_complete(desc->async_tx.cookie, |
837 | - fsl_chan->completed_cookie, fsl_chan->common.cookie) | |
837 | + chan->completed_cookie, chan->common.cookie) | |
838 | 838 | == DMA_IN_PROGRESS) |
839 | 839 | break; |
840 | 840 | |
841 | 841 | |
842 | 842 | |
843 | 843 | |
844 | 844 | |
845 | 845 | |
846 | 846 | |
847 | 847 | |
848 | 848 | |
849 | 849 | |
850 | 850 | |
851 | 851 | |
852 | 852 | |
853 | 853 | |
854 | 854 | |
855 | 855 | |
856 | 856 | |
857 | 857 | |
858 | 858 | |
859 | 859 | |
860 | 860 | |
861 | 861 | |
862 | 862 | |
863 | 863 | |
864 | 864 | |
865 | 865 | |
866 | 866 | |
867 | 867 | |
868 | 868 | |
869 | 869 | |
870 | 870 | |
... | ... | @@ -844,119 +844,119 @@ |
844 | 844 | /* Remove from ld_queue list */ |
845 | 845 | list_del(&desc->node); |
846 | 846 | |
847 | - dev_dbg(fsl_chan->dev, "link descriptor %p will be recycle.\n", | |
847 | + dev_dbg(chan->dev, "link descriptor %p will be recycle.\n", | |
848 | 848 | desc); |
849 | - dma_pool_free(fsl_chan->desc_pool, desc, desc->async_tx.phys); | |
849 | + dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys); | |
850 | 850 | |
851 | 851 | /* Run the link descriptor callback function */ |
852 | 852 | if (callback) { |
853 | - spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); | |
854 | - dev_dbg(fsl_chan->dev, "link descriptor %p callback\n", | |
853 | + spin_unlock_irqrestore(&chan->desc_lock, flags); | |
854 | + dev_dbg(chan->dev, "link descriptor %p callback\n", | |
855 | 855 | desc); |
856 | 856 | callback(callback_param); |
857 | - spin_lock_irqsave(&fsl_chan->desc_lock, flags); | |
857 | + spin_lock_irqsave(&chan->desc_lock, flags); | |
858 | 858 | } |
859 | 859 | } |
860 | - spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); | |
860 | + spin_unlock_irqrestore(&chan->desc_lock, flags); | |
861 | 861 | } |
862 | 862 | |
863 | 863 | /** |
864 | 864 | * fsl_chan_xfer_ld_queue - Transfer link descriptors in channel ld_queue. |
865 | - * @fsl_chan : Freescale DMA channel | |
865 | + * @chan : Freescale DMA channel | |
866 | 866 | */ |
867 | -static void fsl_chan_xfer_ld_queue(struct fsldma_chan *fsl_chan) | |
867 | +static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan) | |
868 | 868 | { |
869 | 869 | struct list_head *ld_node; |
870 | 870 | dma_addr_t next_dst_addr; |
871 | 871 | unsigned long flags; |
872 | 872 | |
873 | - spin_lock_irqsave(&fsl_chan->desc_lock, flags); | |
873 | + spin_lock_irqsave(&chan->desc_lock, flags); | |
874 | 874 | |
875 | - if (!dma_is_idle(fsl_chan)) | |
875 | + if (!dma_is_idle(chan)) | |
876 | 876 | goto out_unlock; |
877 | 877 | |
878 | - dma_halt(fsl_chan); | |
878 | + dma_halt(chan); | |
879 | 879 | |
880 | 880 | /* If there are some link descriptors |
881 | 881 | * not transfered in queue. We need to start it. |
882 | 882 | */ |
883 | 883 | |
884 | 884 | /* Find the first un-transfer desciptor */ |
885 | - for (ld_node = fsl_chan->ld_queue.next; | |
886 | - (ld_node != &fsl_chan->ld_queue) | |
885 | + for (ld_node = chan->ld_queue.next; | |
886 | + (ld_node != &chan->ld_queue) | |
887 | 887 | && (dma_async_is_complete( |
888 | 888 | to_fsl_desc(ld_node)->async_tx.cookie, |
889 | - fsl_chan->completed_cookie, | |
890 | - fsl_chan->common.cookie) == DMA_SUCCESS); | |
889 | + chan->completed_cookie, | |
890 | + chan->common.cookie) == DMA_SUCCESS); | |
891 | 891 | ld_node = ld_node->next); |
892 | 892 | |
893 | - if (ld_node != &fsl_chan->ld_queue) { | |
893 | + if (ld_node != &chan->ld_queue) { | |
894 | 894 | /* Get the ld start address from ld_queue */ |
895 | 895 | next_dst_addr = to_fsl_desc(ld_node)->async_tx.phys; |
896 | - dev_dbg(fsl_chan->dev, "xfer LDs staring from 0x%llx\n", | |
896 | + dev_dbg(chan->dev, "xfer LDs staring from 0x%llx\n", | |
897 | 897 | (unsigned long long)next_dst_addr); |
898 | - set_cdar(fsl_chan, next_dst_addr); | |
899 | - dma_start(fsl_chan); | |
898 | + set_cdar(chan, next_dst_addr); | |
899 | + dma_start(chan); | |
900 | 900 | } else { |
901 | - set_cdar(fsl_chan, 0); | |
902 | - set_ndar(fsl_chan, 0); | |
901 | + set_cdar(chan, 0); | |
902 | + set_ndar(chan, 0); | |
903 | 903 | } |
904 | 904 | |
905 | 905 | out_unlock: |
906 | - spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); | |
906 | + spin_unlock_irqrestore(&chan->desc_lock, flags); | |
907 | 907 | } |
908 | 908 | |
909 | 909 | /** |
910 | 910 | * fsl_dma_memcpy_issue_pending - Issue the DMA start command |
911 | - * @fsl_chan : Freescale DMA channel | |
911 | + * @chan : Freescale DMA channel | |
912 | 912 | */ |
913 | -static void fsl_dma_memcpy_issue_pending(struct dma_chan *chan) | |
913 | +static void fsl_dma_memcpy_issue_pending(struct dma_chan *dchan) | |
914 | 914 | { |
915 | - struct fsldma_chan *fsl_chan = to_fsl_chan(chan); | |
915 | + struct fsldma_chan *chan = to_fsl_chan(dchan); | |
916 | 916 | |
917 | 917 | #ifdef FSL_DMA_LD_DEBUG |
918 | 918 | struct fsl_desc_sw *ld; |
919 | 919 | unsigned long flags; |
920 | 920 | |
921 | - spin_lock_irqsave(&fsl_chan->desc_lock, flags); | |
922 | - if (list_empty(&fsl_chan->ld_queue)) { | |
923 | - spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); | |
921 | + spin_lock_irqsave(&chan->desc_lock, flags); | |
922 | + if (list_empty(&chan->ld_queue)) { | |
923 | + spin_unlock_irqrestore(&chan->desc_lock, flags); | |
924 | 924 | return; |
925 | 925 | } |
926 | 926 | |
927 | - dev_dbg(fsl_chan->dev, "--memcpy issue--\n"); | |
928 | - list_for_each_entry(ld, &fsl_chan->ld_queue, node) { | |
927 | + dev_dbg(chan->dev, "--memcpy issue--\n"); | |
928 | + list_for_each_entry(ld, &chan->ld_queue, node) { | |
929 | 929 | int i; |
930 | - dev_dbg(fsl_chan->dev, "Ch %d, LD %08x\n", | |
931 | - fsl_chan->id, ld->async_tx.phys); | |
930 | + dev_dbg(chan->dev, "Ch %d, LD %08x\n", | |
931 | + chan->id, ld->async_tx.phys); | |
932 | 932 | for (i = 0; i < 8; i++) |
933 | - dev_dbg(fsl_chan->dev, "LD offset %d: %08x\n", | |
933 | + dev_dbg(chan->dev, "LD offset %d: %08x\n", | |
934 | 934 | i, *(((u32 *)&ld->hw) + i)); |
935 | 935 | } |
936 | - dev_dbg(fsl_chan->dev, "----------------\n"); | |
937 | - spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); | |
936 | + dev_dbg(chan->dev, "----------------\n"); | |
937 | + spin_unlock_irqrestore(&chan->desc_lock, flags); | |
938 | 938 | #endif |
939 | 939 | |
940 | - fsl_chan_xfer_ld_queue(fsl_chan); | |
940 | + fsl_chan_xfer_ld_queue(chan); | |
941 | 941 | } |
942 | 942 | |
943 | 943 | /** |
944 | 944 | * fsl_dma_is_complete - Determine the DMA status |
945 | - * @fsl_chan : Freescale DMA channel | |
945 | + * @chan : Freescale DMA channel | |
946 | 946 | */ |
947 | -static enum dma_status fsl_dma_is_complete(struct dma_chan *chan, | |
947 | +static enum dma_status fsl_dma_is_complete(struct dma_chan *dchan, | |
948 | 948 | dma_cookie_t cookie, |
949 | 949 | dma_cookie_t *done, |
950 | 950 | dma_cookie_t *used) |
951 | 951 | { |
952 | - struct fsldma_chan *fsl_chan = to_fsl_chan(chan); | |
952 | + struct fsldma_chan *chan = to_fsl_chan(dchan); | |
953 | 953 | dma_cookie_t last_used; |
954 | 954 | dma_cookie_t last_complete; |
955 | 955 | |
956 | - fsl_chan_ld_cleanup(fsl_chan); | |
956 | + fsl_chan_ld_cleanup(chan); | |
957 | 957 | |
958 | - last_used = chan->cookie; | |
959 | - last_complete = fsl_chan->completed_cookie; | |
958 | + last_used = dchan->cookie; | |
959 | + last_complete = chan->completed_cookie; | |
960 | 960 | |
961 | 961 | if (done) |
962 | 962 | *done = last_complete; |
... | ... | @@ -973,30 +973,30 @@ |
973 | 973 | |
974 | 974 | static irqreturn_t fsldma_chan_irq(int irq, void *data) |
975 | 975 | { |
976 | - struct fsldma_chan *fsl_chan = data; | |
977 | - u32 stat; | |
976 | + struct fsldma_chan *chan = data; | |
978 | 977 | int update_cookie = 0; |
979 | 978 | int xfer_ld_q = 0; |
979 | + u32 stat; | |
980 | 980 | |
981 | - stat = get_sr(fsl_chan); | |
982 | - dev_dbg(fsl_chan->dev, "event: channel %d, stat = 0x%x\n", | |
983 | - fsl_chan->id, stat); | |
984 | - set_sr(fsl_chan, stat); /* Clear the event register */ | |
981 | + stat = get_sr(chan); | |
982 | + dev_dbg(chan->dev, "event: channel %d, stat = 0x%x\n", | |
983 | + chan->id, stat); | |
984 | + set_sr(chan, stat); /* Clear the event register */ | |
985 | 985 | |
986 | 986 | stat &= ~(FSL_DMA_SR_CB | FSL_DMA_SR_CH); |
987 | 987 | if (!stat) |
988 | 988 | return IRQ_NONE; |
989 | 989 | |
990 | 990 | if (stat & FSL_DMA_SR_TE) |
991 | - dev_err(fsl_chan->dev, "Transfer Error!\n"); | |
991 | + dev_err(chan->dev, "Transfer Error!\n"); | |
992 | 992 | |
993 | 993 | /* Programming Error |
994 | 994 | * The DMA_INTERRUPT async_tx is a NULL transfer, which will |
995 | 995 | * triger a PE interrupt. |
996 | 996 | */ |
997 | 997 | if (stat & FSL_DMA_SR_PE) { |
998 | - dev_dbg(fsl_chan->dev, "event: Programming Error INT\n"); | |
999 | - if (get_bcr(fsl_chan) == 0) { | |
998 | + dev_dbg(chan->dev, "event: Programming Error INT\n"); | |
999 | + if (get_bcr(chan) == 0) { | |
1000 | 1000 | /* BCR register is 0, this is a DMA_INTERRUPT async_tx. |
1001 | 1001 | * Now, update the completed cookie, and continue the |
1002 | 1002 | * next uncompleted transfer. |
... | ... | @@ -1011,10 +1011,10 @@ |
1011 | 1011 | * we will recycle the used descriptor. |
1012 | 1012 | */ |
1013 | 1013 | if (stat & FSL_DMA_SR_EOSI) { |
1014 | - dev_dbg(fsl_chan->dev, "event: End-of-segments INT\n"); | |
1015 | - dev_dbg(fsl_chan->dev, "event: clndar 0x%llx, nlndar 0x%llx\n", | |
1016 | - (unsigned long long)get_cdar(fsl_chan), | |
1017 | - (unsigned long long)get_ndar(fsl_chan)); | |
1014 | + dev_dbg(chan->dev, "event: End-of-segments INT\n"); | |
1015 | + dev_dbg(chan->dev, "event: clndar 0x%llx, nlndar 0x%llx\n", | |
1016 | + (unsigned long long)get_cdar(chan), | |
1017 | + (unsigned long long)get_ndar(chan)); | |
1018 | 1018 | stat &= ~FSL_DMA_SR_EOSI; |
1019 | 1019 | update_cookie = 1; |
1020 | 1020 | } |
... | ... | @@ -1023,7 +1023,7 @@ |
1023 | 1023 | * and start the next transfer if it exist. |
1024 | 1024 | */ |
1025 | 1025 | if (stat & FSL_DMA_SR_EOCDI) { |
1026 | - dev_dbg(fsl_chan->dev, "event: End-of-Chain link INT\n"); | |
1026 | + dev_dbg(chan->dev, "event: End-of-Chain link INT\n"); | |
1027 | 1027 | stat &= ~FSL_DMA_SR_EOCDI; |
1028 | 1028 | update_cookie = 1; |
1029 | 1029 | xfer_ld_q = 1; |
... | ... | @@ -1034,28 +1034,28 @@ |
1034 | 1034 | * prepare next transfer. |
1035 | 1035 | */ |
1036 | 1036 | if (stat & FSL_DMA_SR_EOLNI) { |
1037 | - dev_dbg(fsl_chan->dev, "event: End-of-link INT\n"); | |
1037 | + dev_dbg(chan->dev, "event: End-of-link INT\n"); | |
1038 | 1038 | stat &= ~FSL_DMA_SR_EOLNI; |
1039 | 1039 | xfer_ld_q = 1; |
1040 | 1040 | } |
1041 | 1041 | |
1042 | 1042 | if (update_cookie) |
1043 | - fsl_dma_update_completed_cookie(fsl_chan); | |
1043 | + fsl_dma_update_completed_cookie(chan); | |
1044 | 1044 | if (xfer_ld_q) |
1045 | - fsl_chan_xfer_ld_queue(fsl_chan); | |
1045 | + fsl_chan_xfer_ld_queue(chan); | |
1046 | 1046 | if (stat) |
1047 | - dev_dbg(fsl_chan->dev, "event: unhandled sr 0x%02x\n", | |
1047 | + dev_dbg(chan->dev, "event: unhandled sr 0x%02x\n", | |
1048 | 1048 | stat); |
1049 | 1049 | |
1050 | - dev_dbg(fsl_chan->dev, "event: Exit\n"); | |
1051 | - tasklet_schedule(&fsl_chan->tasklet); | |
1050 | + dev_dbg(chan->dev, "event: Exit\n"); | |
1051 | + tasklet_schedule(&chan->tasklet); | |
1052 | 1052 | return IRQ_HANDLED; |
1053 | 1053 | } |
1054 | 1054 | |
1055 | 1055 | static void dma_do_tasklet(unsigned long data) |
1056 | 1056 | { |
1057 | - struct fsldma_chan *fsl_chan = (struct fsldma_chan *)data; | |
1058 | - fsl_chan_ld_cleanup(fsl_chan); | |
1057 | + struct fsldma_chan *chan = (struct fsldma_chan *)data; | |
1058 | + fsl_chan_ld_cleanup(chan); | |
1059 | 1059 | } |
1060 | 1060 | |
1061 | 1061 | static irqreturn_t fsldma_ctrl_irq(int irq, void *data) |
... | ... | @@ -1171,24 +1171,24 @@ |
1171 | 1171 | static int __devinit fsl_dma_chan_probe(struct fsldma_device *fdev, |
1172 | 1172 | struct device_node *node, u32 feature, const char *compatible) |
1173 | 1173 | { |
1174 | - struct fsldma_chan *fchan; | |
1174 | + struct fsldma_chan *chan; | |
1175 | 1175 | struct resource res; |
1176 | 1176 | int err; |
1177 | 1177 | |
1178 | 1178 | /* alloc channel */ |
1179 | - fchan = kzalloc(sizeof(*fchan), GFP_KERNEL); | |
1180 | - if (!fchan) { | |
1179 | + chan = kzalloc(sizeof(*chan), GFP_KERNEL); | |
1180 | + if (!chan) { | |
1181 | 1181 | dev_err(fdev->dev, "no free memory for DMA channels!\n"); |
1182 | 1182 | err = -ENOMEM; |
1183 | 1183 | goto out_return; |
1184 | 1184 | } |
1185 | 1185 | |
1186 | 1186 | /* ioremap registers for use */ |
1187 | - fchan->regs = of_iomap(node, 0); | |
1188 | - if (!fchan->regs) { | |
1187 | + chan->regs = of_iomap(node, 0); | |
1188 | + if (!chan->regs) { | |
1189 | 1189 | dev_err(fdev->dev, "unable to ioremap registers\n"); |
1190 | 1190 | err = -ENOMEM; |
1191 | - goto out_free_fchan; | |
1191 | + goto out_free_chan; | |
1192 | 1192 | } |
1193 | 1193 | |
1194 | 1194 | err = of_address_to_resource(node, 0, &res); |
... | ... | @@ -1197,74 +1197,74 @@ |
1197 | 1197 | goto out_iounmap_regs; |
1198 | 1198 | } |
1199 | 1199 | |
1200 | - fchan->feature = feature; | |
1200 | + chan->feature = feature; | |
1201 | 1201 | if (!fdev->feature) |
1202 | - fdev->feature = fchan->feature; | |
1202 | + fdev->feature = chan->feature; | |
1203 | 1203 | |
1204 | 1204 | /* |
1205 | 1205 | * If the DMA device's feature is different than the feature |
1206 | 1206 | * of its channels, report the bug |
1207 | 1207 | */ |
1208 | - WARN_ON(fdev->feature != fchan->feature); | |
1208 | + WARN_ON(fdev->feature != chan->feature); | |
1209 | 1209 | |
1210 | - fchan->dev = fdev->dev; | |
1211 | - fchan->id = ((res.start - 0x100) & 0xfff) >> 7; | |
1212 | - if (fchan->id >= FSL_DMA_MAX_CHANS_PER_DEVICE) { | |
1210 | + chan->dev = fdev->dev; | |
1211 | + chan->id = ((res.start - 0x100) & 0xfff) >> 7; | |
1212 | + if (chan->id >= FSL_DMA_MAX_CHANS_PER_DEVICE) { | |
1213 | 1213 | dev_err(fdev->dev, "too many channels for device\n"); |
1214 | 1214 | err = -EINVAL; |
1215 | 1215 | goto out_iounmap_regs; |
1216 | 1216 | } |
1217 | 1217 | |
1218 | - fdev->chan[fchan->id] = fchan; | |
1219 | - tasklet_init(&fchan->tasklet, dma_do_tasklet, (unsigned long)fchan); | |
1218 | + fdev->chan[chan->id] = chan; | |
1219 | + tasklet_init(&chan->tasklet, dma_do_tasklet, (unsigned long)chan); | |
1220 | 1220 | |
1221 | 1221 | /* Initialize the channel */ |
1222 | - dma_init(fchan); | |
1222 | + dma_init(chan); | |
1223 | 1223 | |
1224 | 1224 | /* Clear cdar registers */ |
1225 | - set_cdar(fchan, 0); | |
1225 | + set_cdar(chan, 0); | |
1226 | 1226 | |
1227 | - switch (fchan->feature & FSL_DMA_IP_MASK) { | |
1227 | + switch (chan->feature & FSL_DMA_IP_MASK) { | |
1228 | 1228 | case FSL_DMA_IP_85XX: |
1229 | - fchan->toggle_ext_pause = fsl_chan_toggle_ext_pause; | |
1229 | + chan->toggle_ext_pause = fsl_chan_toggle_ext_pause; | |
1230 | 1230 | case FSL_DMA_IP_83XX: |
1231 | - fchan->toggle_ext_start = fsl_chan_toggle_ext_start; | |
1232 | - fchan->set_src_loop_size = fsl_chan_set_src_loop_size; | |
1233 | - fchan->set_dst_loop_size = fsl_chan_set_dst_loop_size; | |
1234 | - fchan->set_request_count = fsl_chan_set_request_count; | |
1231 | + chan->toggle_ext_start = fsl_chan_toggle_ext_start; | |
1232 | + chan->set_src_loop_size = fsl_chan_set_src_loop_size; | |
1233 | + chan->set_dst_loop_size = fsl_chan_set_dst_loop_size; | |
1234 | + chan->set_request_count = fsl_chan_set_request_count; | |
1235 | 1235 | } |
1236 | 1236 | |
1237 | - spin_lock_init(&fchan->desc_lock); | |
1238 | - INIT_LIST_HEAD(&fchan->ld_queue); | |
1237 | + spin_lock_init(&chan->desc_lock); | |
1238 | + INIT_LIST_HEAD(&chan->ld_queue); | |
1239 | 1239 | |
1240 | - fchan->common.device = &fdev->common; | |
1240 | + chan->common.device = &fdev->common; | |
1241 | 1241 | |
1242 | 1242 | /* find the IRQ line, if it exists in the device tree */ |
1243 | - fchan->irq = irq_of_parse_and_map(node, 0); | |
1243 | + chan->irq = irq_of_parse_and_map(node, 0); | |
1244 | 1244 | |
1245 | 1245 | /* Add the channel to DMA device channel list */ |
1246 | - list_add_tail(&fchan->common.device_node, &fdev->common.channels); | |
1246 | + list_add_tail(&chan->common.device_node, &fdev->common.channels); | |
1247 | 1247 | fdev->common.chancnt++; |
1248 | 1248 | |
1249 | - dev_info(fdev->dev, "#%d (%s), irq %d\n", fchan->id, compatible, | |
1250 | - fchan->irq != NO_IRQ ? fchan->irq : fdev->irq); | |
1249 | + dev_info(fdev->dev, "#%d (%s), irq %d\n", chan->id, compatible, | |
1250 | + chan->irq != NO_IRQ ? chan->irq : fdev->irq); | |
1251 | 1251 | |
1252 | 1252 | return 0; |
1253 | 1253 | |
1254 | 1254 | out_iounmap_regs: |
1255 | - iounmap(fchan->regs); | |
1256 | -out_free_fchan: | |
1257 | - kfree(fchan); | |
1255 | + iounmap(chan->regs); | |
1256 | +out_free_chan: | |
1257 | + kfree(chan); | |
1258 | 1258 | out_return: |
1259 | 1259 | return err; |
1260 | 1260 | } |
1261 | 1261 | |
1262 | -static void fsl_dma_chan_remove(struct fsldma_chan *fchan) | |
1262 | +static void fsl_dma_chan_remove(struct fsldma_chan *chan) | |
1263 | 1263 | { |
1264 | - irq_dispose_mapping(fchan->irq); | |
1265 | - list_del(&fchan->common.device_node); | |
1266 | - iounmap(fchan->regs); | |
1267 | - kfree(fchan); | |
1264 | + irq_dispose_mapping(chan->irq); | |
1265 | + list_del(&chan->common.device_node); | |
1266 | + iounmap(chan->regs); | |
1267 | + kfree(chan); | |
1268 | 1268 | } |
1269 | 1269 | |
1270 | 1270 | static int __devinit fsldma_of_probe(struct of_device *op, |