Commit f0dad6e701cb66301287074c39183c7312139530

Authored by Vinod Koul

Merge branch 'dma_complete' into next

Showing 30 changed files

crypto/async_tx/async_tx.c
/*
 * core routines for the asynchronous memory transfer/transform api
 *
 * Copyright © 2006, Intel Corporation.
 *
 *      Dan Williams <dan.j.williams@intel.com>
 *
 *      with architecture considerations by:
 *      Neil Brown <neilb@suse.de>
 *      Jeff Garzik <jeff@garzik.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 */
#include <linux/rculist.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/async_tx.h>

#ifdef CONFIG_DMA_ENGINE
static int __init async_tx_init(void)
{
        async_dmaengine_get();

        printk(KERN_INFO "async_tx: api initialized (async)\n");

        return 0;
}

static void __exit async_tx_exit(void)
{
        async_dmaengine_put();
}

module_init(async_tx_init);
module_exit(async_tx_exit);

/**
 * __async_tx_find_channel - find a channel to carry out the operation or let
 *      the transaction execute synchronously
 * @submit: transaction dependency and submission modifiers
 * @tx_type: transaction type
 */
struct dma_chan *
__async_tx_find_channel(struct async_submit_ctl *submit,
                        enum dma_transaction_type tx_type)
{
        struct dma_async_tx_descriptor *depend_tx = submit->depend_tx;

        /* see if we can keep the chain on one channel */
        if (depend_tx &&
            dma_has_cap(tx_type, depend_tx->chan->device->cap_mask))
                return depend_tx->chan;
        return async_dma_find_channel(tx_type);
}
EXPORT_SYMBOL_GPL(__async_tx_find_channel);
#endif


/**
 * async_tx_channel_switch - queue an interrupt descriptor with a dependency
 *      pre-attached.
 * @depend_tx: the operation that must finish before the new operation runs
 * @tx: the new operation
 */
static void
async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx,
                        struct dma_async_tx_descriptor *tx)
{
        struct dma_chan *chan = depend_tx->chan;
        struct dma_device *device = chan->device;
        struct dma_async_tx_descriptor *intr_tx = (void *) ~0;

        /* first check to see if we can still append to depend_tx */
        txd_lock(depend_tx);
        if (txd_parent(depend_tx) && depend_tx->chan == tx->chan) {
                txd_chain(depend_tx, tx);
                intr_tx = NULL;
        }
        txd_unlock(depend_tx);

        /* attached dependency, flush the parent channel */
        if (!intr_tx) {
                device->device_issue_pending(chan);
                return;
        }

        /* see if we can schedule an interrupt
         * otherwise poll for completion
         */
        if (dma_has_cap(DMA_INTERRUPT, device->cap_mask))
                intr_tx = device->device_prep_dma_interrupt(chan, 0);
        else
                intr_tx = NULL;

        if (intr_tx) {
                intr_tx->callback = NULL;
                intr_tx->callback_param = NULL;
                /* safe to chain outside the lock since we know we are
                 * not submitted yet
                 */
                txd_chain(intr_tx, tx);

                /* check if we need to append */
                txd_lock(depend_tx);
                if (txd_parent(depend_tx)) {
                        txd_chain(depend_tx, intr_tx);
                        async_tx_ack(intr_tx);
                        intr_tx = NULL;
                }
                txd_unlock(depend_tx);

                if (intr_tx) {
                        txd_clear_parent(intr_tx);
                        intr_tx->tx_submit(intr_tx);
                        async_tx_ack(intr_tx);
                }
                device->device_issue_pending(chan);
        } else {
-               if (dma_wait_for_async_tx(depend_tx) != DMA_SUCCESS)
+               if (dma_wait_for_async_tx(depend_tx) != DMA_COMPLETE)
                        panic("%s: DMA error waiting for depend_tx\n",
                              __func__);
                tx->tx_submit(tx);
        }
}


/**
 * submit_disposition - flags for routing an incoming operation
 * @ASYNC_TX_SUBMITTED: we were able to append the new operation under the lock
 * @ASYNC_TX_CHANNEL_SWITCH: when the lock is dropped schedule a channel switch
 * @ASYNC_TX_DIRECT_SUBMIT: when the lock is dropped submit directly
 *
 * while holding depend_tx->lock we must avoid submitting new operations
 * to prevent a circular locking dependency with drivers that already
 * hold a channel lock when calling async_tx_run_dependencies.
 */
enum submit_disposition {
        ASYNC_TX_SUBMITTED,
        ASYNC_TX_CHANNEL_SWITCH,
        ASYNC_TX_DIRECT_SUBMIT,
};

void
async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx,
                struct async_submit_ctl *submit)
{
        struct dma_async_tx_descriptor *depend_tx = submit->depend_tx;

        tx->callback = submit->cb_fn;
        tx->callback_param = submit->cb_param;

        if (depend_tx) {
                enum submit_disposition s;

                /* sanity check the dependency chain:
                 * 1/ if ack is already set then we cannot be sure
                 * we are referring to the correct operation
                 * 2/ dependencies are 1:1 i.e. two transactions can
                 * not depend on the same parent
                 */
                BUG_ON(async_tx_test_ack(depend_tx) || txd_next(depend_tx) ||
                       txd_parent(tx));

                /* the lock prevents async_tx_run_dependencies from missing
                 * the setting of ->next when ->parent != NULL
                 */
                txd_lock(depend_tx);
                if (txd_parent(depend_tx)) {
                        /* we have a parent so we can not submit directly
                         * if we are staying on the same channel: append
                         * else: channel switch
                         */
                        if (depend_tx->chan == chan) {
                                txd_chain(depend_tx, tx);
                                s = ASYNC_TX_SUBMITTED;
                        } else
                                s = ASYNC_TX_CHANNEL_SWITCH;
                } else {
                        /* we do not have a parent so we may be able to submit
                         * directly if we are staying on the same channel
                         */
                        if (depend_tx->chan == chan)
                                s = ASYNC_TX_DIRECT_SUBMIT;
                        else
                                s = ASYNC_TX_CHANNEL_SWITCH;
                }
                txd_unlock(depend_tx);

                switch (s) {
                case ASYNC_TX_SUBMITTED:
                        break;
                case ASYNC_TX_CHANNEL_SWITCH:
                        async_tx_channel_switch(depend_tx, tx);
                        break;
                case ASYNC_TX_DIRECT_SUBMIT:
                        txd_clear_parent(tx);
                        tx->tx_submit(tx);
                        break;
                }
        } else {
                txd_clear_parent(tx);
                tx->tx_submit(tx);
        }

        if (submit->flags & ASYNC_TX_ACK)
                async_tx_ack(tx);

        if (depend_tx)
                async_tx_ack(depend_tx);
}
EXPORT_SYMBOL_GPL(async_tx_submit);

/**
 * async_trigger_callback - schedules the callback function to be run
 * @submit: submission and completion parameters
 *
 * honored flags: ASYNC_TX_ACK
 *
 * The callback is run after any dependent operations have completed.
 */
struct dma_async_tx_descriptor *
async_trigger_callback(struct async_submit_ctl *submit)
{
        struct dma_chan *chan;
        struct dma_device *device;
        struct dma_async_tx_descriptor *tx;
        struct dma_async_tx_descriptor *depend_tx = submit->depend_tx;

        if (depend_tx) {
                chan = depend_tx->chan;
                device = chan->device;

                /* see if we can schedule an interrupt
                 * otherwise poll for completion
                 */
                if (device && !dma_has_cap(DMA_INTERRUPT, device->cap_mask))
                        device = NULL;

                tx = device ? device->device_prep_dma_interrupt(chan, 0) : NULL;
        } else
                tx = NULL;

        if (tx) {
                pr_debug("%s: (async)\n", __func__);

                async_tx_submit(chan, tx, submit);
        } else {
                pr_debug("%s: (sync)\n", __func__);

                /* wait for any prerequisite operations */
                async_tx_quiesce(&submit->depend_tx);

                async_tx_sync_epilog(submit);
        }

        return tx;
}
EXPORT_SYMBOL_GPL(async_trigger_callback);

/**
 * async_tx_quiesce - ensure tx is complete and freeable upon return
 * @tx - transaction to quiesce
 */
void async_tx_quiesce(struct dma_async_tx_descriptor **tx)
{
        if (*tx) {
                /* if ack is already set then we cannot be sure
                 * we are referring to the correct operation
                 */
                BUG_ON(async_tx_test_ack(*tx));
-               if (dma_wait_for_async_tx(*tx) != DMA_SUCCESS)
+               if (dma_wait_for_async_tx(*tx) != DMA_COMPLETE)
                        panic("%s: DMA error waiting for transaction\n",
                              __func__);
                async_tx_ack(*tx);
                *tx = NULL;
        }
}
EXPORT_SYMBOL_GPL(async_tx_quiesce);

MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Asynchronous Bulk Memory Transactions API");
MODULE_LICENSE("GPL");

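For orientation, a typical async_tx client wraps its dependency, flags, and completion callback in an async_submit_ctl and hands it to async_trigger_callback(); the callback then runs once the dependency chain has completed, whether the async (DMA) or synchronous path was taken. The sketch below only illustrates that calling pattern and is not part of this commit; my_done() and example_trigger() are hypothetical names, and no scribble buffer is needed for a plain callback trigger, so NULL is passed.

/* Illustrative async_tx client usage (not part of this commit). The
 * callback and the on-stack completion are hypothetical.
 */
#include <linux/async_tx.h>
#include <linux/completion.h>

static void my_done(void *param)
{
        complete(param);        /* wake up the waiter */
}

static void example_trigger(struct dma_async_tx_descriptor *depend_tx)
{
        struct async_submit_ctl submit;
        DECLARE_COMPLETION_ONSTACK(done);

        /* run my_done() once depend_tx (if any) has completed */
        init_async_submit(&submit, ASYNC_TX_ACK, depend_tx, my_done, &done,
                          NULL);
        async_trigger_callback(&submit);

        wait_for_completion(&done);
}

If no DMA channel with DMA_INTERRUPT capability is available, async_trigger_callback() falls back to the synchronous path shown above and the callback runs before it returns, so the wait completes immediately.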
drivers/dma/amba-pl08x.c
/*
 * Copyright (c) 2006 ARM Ltd.
 * Copyright (c) 2010 ST-Ericsson SA
 *
 * Author: Peter Pearse <peter.pearse@arm.com>
 * Author: Linus Walleij <linus.walleij@stericsson.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * The full GNU General Public License is in this distribution in the file
 * called COPYING.
 *
 * Documentation: ARM DDI 0196G == PL080
 * Documentation: ARM DDI 0218E == PL081
 * Documentation: S3C6410 User's Manual == PL080S
 *
 * PL080 & PL081 both have 16 sets of DMA signals that can be routed to any
 * channel.
 *
 * The PL080 has 8 channels available for simultaneous use, and the PL081
 * has only two channels. So on these DMA controllers the number of channels
 * and the number of incoming DMA signals are two totally different things.
 * It is usually not possible to theoretically handle all physical signals,
 * so a multiplexing scheme with possible denial of use is necessary.
 *
 * The PL080 has a dual bus master, PL081 has a single master.
 *
 * PL080S is a version modified by Samsung and used in S3C64xx SoCs.
 * It differs in following aspects:
 * - CH_CONFIG register at different offset,
 * - separate CH_CONTROL2 register for transfer size,
 * - bigger maximum transfer size,
 * - 8-word aligned LLI, instead of 4-word, due to extra CCTL2 word,
 * - no support for peripheral flow control.
 *
 * Memory to peripheral transfer may be visualized as
 *      Get data from memory to DMAC
 *      Until no data left
 *              On burst request from peripheral
 *                      Destination burst from DMAC to peripheral
 *                      Clear burst request
 *      Raise terminal count interrupt
 *
 * For peripherals with a FIFO:
 * Source burst size == half the depth of the peripheral FIFO
 * Destination burst size == the depth of the peripheral FIFO
 *
 * (Bursts are irrelevant for mem to mem transfers - there are no burst
 * signals, the DMA controller will simply facilitate its AHB master.)
 *
 * ASSUMES default (little) endianness for DMA transfers
 *
 * The PL08x has two flow control settings:
 *  - DMAC flow control: the transfer size defines the number of transfers
 *    which occur for the current LLI entry, and the DMAC raises TC at the
 *    end of every LLI entry. Observed behaviour shows the DMAC listening
 *    to both the BREQ and SREQ signals (contrary to documented),
 *    transferring data if either is active. The LBREQ and LSREQ signals
 *    are ignored.
 *
 *  - Peripheral flow control: the transfer size is ignored (and should be
 *    zero). The data is transferred from the current LLI entry, until
 *    after the final transfer signalled by LBREQ or LSREQ. The DMAC
 *    will then move to the next LLI entry. Unsupported by PL080S.
 */
#include <linux/amba/bus.h>
#include <linux/amba/pl08x.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/amba/pl080.h>

#include "dmaengine.h"
#include "virt-dma.h"

#define DRIVER_NAME     "pl08xdmac"

static struct amba_driver pl08x_amba_driver;
struct pl08x_driver_data;

/**
 * struct vendor_data - vendor-specific config parameters for PL08x derivatives
 * @channels: the number of channels available in this variant
 * @dualmaster: whether this version supports dual AHB masters or not.
 * @nomadik: whether the channels have Nomadik security extension bits
 *      that need to be checked for permission before use and some registers are
 *      missing
 * @pl080s: whether this version is a PL080S, which has separate register and
 *      LLI word for transfer size.
 */
struct vendor_data {
        u8 config_offset;
        u8 channels;
        bool dualmaster;
        bool nomadik;
        bool pl080s;
        u32 max_transfer_size;
};

/**
 * struct pl08x_bus_data - information of source or destination
 * busses for a transfer
 * @addr: current address
 * @maxwidth: the maximum width of a transfer on this bus
 * @buswidth: the width of this bus in bytes: 1, 2 or 4
 */
struct pl08x_bus_data {
        dma_addr_t addr;
        u8 maxwidth;
        u8 buswidth;
};

#define IS_BUS_ALIGNED(bus) IS_ALIGNED((bus)->addr, (bus)->buswidth)

/**
 * struct pl08x_phy_chan - holder for the physical channels
 * @id: physical index to this channel
 * @lock: a lock to use when altering an instance of this struct
 * @serving: the virtual channel currently being served by this physical
 * channel
 * @locked: channel unavailable for the system, e.g. dedicated to secure
 * world
 */
struct pl08x_phy_chan {
        unsigned int id;
        void __iomem *base;
        void __iomem *reg_config;
        spinlock_t lock;
        struct pl08x_dma_chan *serving;
        bool locked;
};

/**
 * struct pl08x_sg - structure containing data per sg
 * @src_addr: src address of sg
 * @dst_addr: dst address of sg
 * @len: transfer len in bytes
 * @node: node for txd's dsg_list
 */
struct pl08x_sg {
        dma_addr_t src_addr;
        dma_addr_t dst_addr;
        size_t len;
        struct list_head node;
};

/**
 * struct pl08x_txd - wrapper for struct dma_async_tx_descriptor
 * @vd: virtual DMA descriptor
 * @dsg_list: list of children sg's
 * @llis_bus: DMA memory address (physical) start for the LLIs
 * @llis_va: virtual memory address start for the LLIs
 * @cctl: control reg values for current txd
 * @ccfg: config reg values for current txd
 * @done: this marks completed descriptors, which should not have their
 *      mux released.
 * @cyclic: indicate cyclic transfers
 */
struct pl08x_txd {
        struct virt_dma_desc vd;
        struct list_head dsg_list;
        dma_addr_t llis_bus;
        u32 *llis_va;
        /* Default cctl value for LLIs */
        u32 cctl;
        /*
         * Settings to be put into the physical channel when we
         * trigger this txd. Other registers are in llis_va[0].
         */
        u32 ccfg;
        bool done;
        bool cyclic;
};

/**
 * struct pl08x_dma_chan_state - holds the PL08x specific virtual channel
 * states
 * @PL08X_CHAN_IDLE: the channel is idle
 * @PL08X_CHAN_RUNNING: the channel has allocated a physical transport
 * channel and is running a transfer on it
 * @PL08X_CHAN_PAUSED: the channel has allocated a physical transport
 * channel, but the transfer is currently paused
 * @PL08X_CHAN_WAITING: the channel is waiting for a physical transport
 * channel to become available (only pertains to memcpy channels)
 */
enum pl08x_dma_chan_state {
        PL08X_CHAN_IDLE,
        PL08X_CHAN_RUNNING,
        PL08X_CHAN_PAUSED,
        PL08X_CHAN_WAITING,
};

/**
 * struct pl08x_dma_chan - this structure wraps a DMA ENGINE channel
 * @vc: wrapped virtual channel
 * @phychan: the physical channel utilized by this channel, if there is one
 * @name: name of channel
 * @cd: channel platform data
 * @runtime_addr: address for RX/TX according to the runtime config
 * @at: active transaction on this channel
 * @lock: a lock for this channel data
 * @host: a pointer to the host (internal use)
 * @state: whether the channel is idle, paused, running etc
 * @slave: whether this channel is a device (slave) or for memcpy
 * @signal: the physical DMA request signal which this channel is using
 * @mux_use: count of descriptors using this DMA request signal setting
 */
struct pl08x_dma_chan {
        struct virt_dma_chan vc;
        struct pl08x_phy_chan *phychan;
        const char *name;
        const struct pl08x_channel_data *cd;
        struct dma_slave_config cfg;
        struct pl08x_txd *at;
        struct pl08x_driver_data *host;
        enum pl08x_dma_chan_state state;
        bool slave;
        int signal;
        unsigned mux_use;
};

/**
 * struct pl08x_driver_data - the local state holder for the PL08x
 * @slave: slave engine for this instance
 * @memcpy: memcpy engine for this instance
 * @base: virtual memory base (remapped) for the PL08x
 * @adev: the corresponding AMBA (PrimeCell) bus entry
 * @vd: vendor data for this PL08x variant
 * @pd: platform data passed in from the platform/machine
 * @phy_chans: array of data for the physical channels
 * @pool: a pool for the LLI descriptors
 * @lli_buses: bitmask to or in to LLI pointer selecting AHB port for LLI
 * fetches
 * @mem_buses: set to indicate memory transfers on AHB2.
 * @lock: a spinlock for this struct
 */
struct pl08x_driver_data {
        struct dma_device slave;
        struct dma_device memcpy;
        void __iomem *base;
        struct amba_device *adev;
        const struct vendor_data *vd;
        struct pl08x_platform_data *pd;
        struct pl08x_phy_chan *phy_chans;
        struct dma_pool *pool;
        u8 lli_buses;
        u8 mem_buses;
        u8 lli_words;
};

/*
 * PL08X specific defines
 */

/* The order of words in an LLI. */
#define PL080_LLI_SRC           0
#define PL080_LLI_DST           1
#define PL080_LLI_LLI           2
#define PL080_LLI_CCTL          3
#define PL080S_LLI_CCTL2        4

/* Total words in an LLI. */
#define PL080_LLI_WORDS         4
#define PL080S_LLI_WORDS        8

/*
 * Number of LLIs in each LLI buffer allocated for one transfer
 * (maximum times we call dma_pool_alloc on this pool without freeing)
 */
#define MAX_NUM_TSFR_LLIS       512
#define PL08X_ALIGN             8

static inline struct pl08x_dma_chan *to_pl08x_chan(struct dma_chan *chan)
{
        return container_of(chan, struct pl08x_dma_chan, vc.chan);
}

static inline struct pl08x_txd *to_pl08x_txd(struct dma_async_tx_descriptor *tx)
{
        return container_of(tx, struct pl08x_txd, vd.tx);
}

/*
 * Mux handling.
 *
 * This gives us the DMA request input to the PL08x primecell which the
 * peripheral described by the channel data will be routed to, possibly
 * via a board/SoC specific external MUX. One important point to note
 * here is that this does not depend on the physical channel.
 */
static int pl08x_request_mux(struct pl08x_dma_chan *plchan)
{
        const struct pl08x_platform_data *pd = plchan->host->pd;
        int ret;

        if (plchan->mux_use++ == 0 && pd->get_xfer_signal) {
                ret = pd->get_xfer_signal(plchan->cd);
                if (ret < 0) {
                        plchan->mux_use = 0;
                        return ret;
                }

                plchan->signal = ret;
        }
        return 0;
}

static void pl08x_release_mux(struct pl08x_dma_chan *plchan)
{
        const struct pl08x_platform_data *pd = plchan->host->pd;

        if (plchan->signal >= 0) {
                WARN_ON(plchan->mux_use == 0);

                if (--plchan->mux_use == 0 && pd->put_xfer_signal) {
                        pd->put_xfer_signal(plchan->cd, plchan->signal);
                        plchan->signal = -1;
                }
        }
}

/*
 * Physical channel handling
 */

/* Whether a certain channel is busy or not */
static int pl08x_phy_channel_busy(struct pl08x_phy_chan *ch)
{
        unsigned int val;

        val = readl(ch->reg_config);
        return val & PL080_CONFIG_ACTIVE;
}

static void pl08x_write_lli(struct pl08x_driver_data *pl08x,
                struct pl08x_phy_chan *phychan, const u32 *lli, u32 ccfg)
{
        if (pl08x->vd->pl080s)
                dev_vdbg(&pl08x->adev->dev,
                        "WRITE channel %d: csrc=0x%08x, cdst=0x%08x, "
                        "clli=0x%08x, cctl=0x%08x, cctl2=0x%08x, ccfg=0x%08x\n",
                        phychan->id, lli[PL080_LLI_SRC], lli[PL080_LLI_DST],
                        lli[PL080_LLI_LLI], lli[PL080_LLI_CCTL],
                        lli[PL080S_LLI_CCTL2], ccfg);
        else
                dev_vdbg(&pl08x->adev->dev,
                        "WRITE channel %d: csrc=0x%08x, cdst=0x%08x, "
                        "clli=0x%08x, cctl=0x%08x, ccfg=0x%08x\n",
                        phychan->id, lli[PL080_LLI_SRC], lli[PL080_LLI_DST],
                        lli[PL080_LLI_LLI], lli[PL080_LLI_CCTL], ccfg);

        writel_relaxed(lli[PL080_LLI_SRC], phychan->base + PL080_CH_SRC_ADDR);
        writel_relaxed(lli[PL080_LLI_DST], phychan->base + PL080_CH_DST_ADDR);
        writel_relaxed(lli[PL080_LLI_LLI], phychan->base + PL080_CH_LLI);
        writel_relaxed(lli[PL080_LLI_CCTL], phychan->base + PL080_CH_CONTROL);

        if (pl08x->vd->pl080s)
                writel_relaxed(lli[PL080S_LLI_CCTL2],
                                phychan->base + PL080S_CH_CONTROL2);

        writel(ccfg, phychan->reg_config);
}

/*
 * Set the initial DMA register values i.e. those for the first LLI
 * The next LLI pointer and the configuration interrupt bit have
 * been set when the LLIs were constructed. Poke them into the hardware
 * and start the transfer.
 */
static void pl08x_start_next_txd(struct pl08x_dma_chan *plchan)
{
        struct pl08x_driver_data *pl08x = plchan->host;
        struct pl08x_phy_chan *phychan = plchan->phychan;
        struct virt_dma_desc *vd = vchan_next_desc(&plchan->vc);
        struct pl08x_txd *txd = to_pl08x_txd(&vd->tx);
        u32 val;

        list_del(&txd->vd.node);

        plchan->at = txd;

        /* Wait for channel inactive */
        while (pl08x_phy_channel_busy(phychan))
                cpu_relax();

        pl08x_write_lli(pl08x, phychan, &txd->llis_va[0], txd->ccfg);

        /* Enable the DMA channel */
        /* Do not access config register until channel shows as disabled */
        while (readl(pl08x->base + PL080_EN_CHAN) & (1 << phychan->id))
                cpu_relax();

        /* Do not access config register until channel shows as inactive */
        val = readl(phychan->reg_config);
        while ((val & PL080_CONFIG_ACTIVE) || (val & PL080_CONFIG_ENABLE))
                val = readl(phychan->reg_config);

        writel(val | PL080_CONFIG_ENABLE, phychan->reg_config);
}

/*
 * Pause the channel by setting the HALT bit.
 *
 * For M->P transfers, pause the DMAC first and then stop the peripheral -
 * the FIFO can only drain if the peripheral is still requesting data.
 * (note: this can still timeout if the DMAC FIFO never drains of data.)
 *
 * For P->M transfers, disable the peripheral first to stop it filling
 * the DMAC FIFO, and then pause the DMAC.
 */
static void pl08x_pause_phy_chan(struct pl08x_phy_chan *ch)
{
        u32 val;
        int timeout;

        /* Set the HALT bit and wait for the FIFO to drain */
        val = readl(ch->reg_config);
        val |= PL080_CONFIG_HALT;
        writel(val, ch->reg_config);

        /* Wait for channel inactive */
        for (timeout = 1000; timeout; timeout--) {
                if (!pl08x_phy_channel_busy(ch))
                        break;
                udelay(1);
        }
        if (pl08x_phy_channel_busy(ch))
                pr_err("pl08x: channel%u timeout waiting for pause\n", ch->id);
}

static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch)
{
        u32 val;

        /* Clear the HALT bit */
        val = readl(ch->reg_config);
        val &= ~PL080_CONFIG_HALT;
        writel(val, ch->reg_config);
}

/*
 * pl08x_terminate_phy_chan() stops the channel, clears the FIFO and
 * clears any pending interrupt status. This should not be used for
 * an on-going transfer, but as a method of shutting down a channel
 * (eg, when it's no longer used) or terminating a transfer.
 */
static void pl08x_terminate_phy_chan(struct pl08x_driver_data *pl08x,
        struct pl08x_phy_chan *ch)
{
        u32 val = readl(ch->reg_config);

        val &= ~(PL080_CONFIG_ENABLE | PL080_CONFIG_ERR_IRQ_MASK |
                 PL080_CONFIG_TC_IRQ_MASK);

        writel(val, ch->reg_config);

        writel(1 << ch->id, pl08x->base + PL080_ERR_CLEAR);
        writel(1 << ch->id, pl08x->base + PL080_TC_CLEAR);
}

static inline u32 get_bytes_in_cctl(u32 cctl)
{
        /* The source width defines the number of bytes */
        u32 bytes = cctl & PL080_CONTROL_TRANSFER_SIZE_MASK;

        cctl &= PL080_CONTROL_SWIDTH_MASK;

        switch (cctl >> PL080_CONTROL_SWIDTH_SHIFT) {
        case PL080_WIDTH_8BIT:
                break;
        case PL080_WIDTH_16BIT:
                bytes *= 2;
                break;
        case PL080_WIDTH_32BIT:
                bytes *= 4;
                break;
        }
        return bytes;
}

static inline u32 get_bytes_in_cctl_pl080s(u32 cctl, u32 cctl1)
{
        /* The source width defines the number of bytes */
        u32 bytes = cctl1 & PL080S_CONTROL_TRANSFER_SIZE_MASK;

        cctl &= PL080_CONTROL_SWIDTH_MASK;

        switch (cctl >> PL080_CONTROL_SWIDTH_SHIFT) {
        case PL080_WIDTH_8BIT:
                break;
        case PL080_WIDTH_16BIT:
                bytes *= 2;
                break;
        case PL080_WIDTH_32BIT:
                bytes *= 4;
                break;
        }
        return bytes;
}

/* The channel should be paused when calling this */
static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan)
{
        struct pl08x_driver_data *pl08x = plchan->host;
        const u32 *llis_va, *llis_va_limit;
        struct pl08x_phy_chan *ch;
        dma_addr_t llis_bus;
        struct pl08x_txd *txd;
        u32 llis_max_words;
        size_t bytes;
        u32 clli;

        ch = plchan->phychan;
        txd = plchan->at;

        if (!ch || !txd)
                return 0;

        /*
         * Follow the LLIs to get the number of remaining
         * bytes in the currently active transaction.
         */
        clli = readl(ch->base + PL080_CH_LLI) & ~PL080_LLI_LM_AHB2;

        /* First get the remaining bytes in the active transfer */
        if (pl08x->vd->pl080s)
                bytes = get_bytes_in_cctl_pl080s(
                                readl(ch->base + PL080_CH_CONTROL),
                                readl(ch->base + PL080S_CH_CONTROL2));
        else
                bytes = get_bytes_in_cctl(readl(ch->base + PL080_CH_CONTROL));

        if (!clli)
                return bytes;

        llis_va = txd->llis_va;
        llis_bus = txd->llis_bus;

        llis_max_words = pl08x->lli_words * MAX_NUM_TSFR_LLIS;
        BUG_ON(clli < llis_bus || clli >= llis_bus +
                sizeof(u32) * llis_max_words);

        /*
         * Locate the next LLI - as this is an array,
         * it's simple maths to find.
         */
        llis_va += (clli - llis_bus) / sizeof(u32);

        llis_va_limit = llis_va + llis_max_words;

        for (; llis_va < llis_va_limit; llis_va += pl08x->lli_words) {
                if (pl08x->vd->pl080s)
                        bytes += get_bytes_in_cctl_pl080s(
                                llis_va[PL080_LLI_CCTL],
                                llis_va[PL080S_LLI_CCTL2]);
                else
                        bytes += get_bytes_in_cctl(llis_va[PL080_LLI_CCTL]);

                /*
                 * A LLI pointer going backward terminates the LLI list
                 */
                if (llis_va[PL080_LLI_LLI] <= clli)
                        break;
        }

        return bytes;
}

/*
 * Allocate a physical channel for a virtual channel
 *
 * Try to locate a physical channel to be used for this transfer. If all
 * are taken return NULL and the requester will have to cope by using
 * some fallback PIO mode or retrying later.
 */
static struct pl08x_phy_chan *
pl08x_get_phy_channel(struct pl08x_driver_data *pl08x,
                      struct pl08x_dma_chan *virt_chan)
{
        struct pl08x_phy_chan *ch = NULL;
        unsigned long flags;
        int i;

        for (i = 0; i < pl08x->vd->channels; i++) {
                ch = &pl08x->phy_chans[i];

                spin_lock_irqsave(&ch->lock, flags);

                if (!ch->locked && !ch->serving) {
                        ch->serving = virt_chan;
                        spin_unlock_irqrestore(&ch->lock, flags);
                        break;
                }

                spin_unlock_irqrestore(&ch->lock, flags);
        }

        if (i == pl08x->vd->channels) {
                /* No physical channel available, cope with it */
                return NULL;
        }

        return ch;
}

/* Mark the physical channel as free. Note, this write is atomic. */
static inline void pl08x_put_phy_channel(struct pl08x_driver_data *pl08x,
                                         struct pl08x_phy_chan *ch)
{
        ch->serving = NULL;
}

/*
 * Try to allocate a physical channel. When successful, assign it to
 * this virtual channel, and initiate the next descriptor. The
 * virtual channel lock must be held at this point.
 */
static void pl08x_phy_alloc_and_start(struct pl08x_dma_chan *plchan)
{
        struct pl08x_driver_data *pl08x = plchan->host;
        struct pl08x_phy_chan *ch;

        ch = pl08x_get_phy_channel(pl08x, plchan);
        if (!ch) {
                dev_dbg(&pl08x->adev->dev, "no physical channel available for xfer on %s\n", plchan->name);
                plchan->state = PL08X_CHAN_WAITING;
                return;
        }

        dev_dbg(&pl08x->adev->dev, "allocated physical channel %d for xfer on %s\n",
                ch->id, plchan->name);

        plchan->phychan = ch;
        plchan->state = PL08X_CHAN_RUNNING;
        pl08x_start_next_txd(plchan);
}

static void pl08x_phy_reassign_start(struct pl08x_phy_chan *ch,
        struct pl08x_dma_chan *plchan)
{
        struct pl08x_driver_data *pl08x = plchan->host;

        dev_dbg(&pl08x->adev->dev, "reassigned physical channel %d for xfer on %s\n",
                ch->id, plchan->name);

        /*
         * We do this without taking the lock; we're really only concerned
         * about whether this pointer is NULL or not, and we're guaranteed
         * that this will only be called when it _already_ is non-NULL.
         */
        ch->serving = plchan;
        plchan->phychan = ch;
        plchan->state = PL08X_CHAN_RUNNING;
        pl08x_start_next_txd(plchan);
}

373 writel_relaxed(lli[PL080_LLI_SRC], phychan->base + PL080_CH_SRC_ADDR); 373 writel_relaxed(lli[PL080_LLI_SRC], phychan->base + PL080_CH_SRC_ADDR);
374 writel_relaxed(lli[PL080_LLI_DST], phychan->base + PL080_CH_DST_ADDR); 374 writel_relaxed(lli[PL080_LLI_DST], phychan->base + PL080_CH_DST_ADDR);
375 writel_relaxed(lli[PL080_LLI_LLI], phychan->base + PL080_CH_LLI); 375 writel_relaxed(lli[PL080_LLI_LLI], phychan->base + PL080_CH_LLI);
376 writel_relaxed(lli[PL080_LLI_CCTL], phychan->base + PL080_CH_CONTROL); 376 writel_relaxed(lli[PL080_LLI_CCTL], phychan->base + PL080_CH_CONTROL);
377 377
378 if (pl08x->vd->pl080s) 378 if (pl08x->vd->pl080s)
379 writel_relaxed(lli[PL080S_LLI_CCTL2], 379 writel_relaxed(lli[PL080S_LLI_CCTL2],
380 phychan->base + PL080S_CH_CONTROL2); 380 phychan->base + PL080S_CH_CONTROL2);
381 381
382 writel(ccfg, phychan->reg_config); 382 writel(ccfg, phychan->reg_config);
383 } 383 }
384 384
385 /* 385 /*
386 * Set the initial DMA register values i.e. those for the first LLI 386 * Set the initial DMA register values i.e. those for the first LLI
387 * The next LLI pointer and the configuration interrupt bit have 387 * The next LLI pointer and the configuration interrupt bit have
388 * been set when the LLIs were constructed. Poke them into the hardware 388 * been set when the LLIs were constructed. Poke them into the hardware
389 * and start the transfer. 389 * and start the transfer.
390 */ 390 */
391 static void pl08x_start_next_txd(struct pl08x_dma_chan *plchan) 391 static void pl08x_start_next_txd(struct pl08x_dma_chan *plchan)
392 { 392 {
393 struct pl08x_driver_data *pl08x = plchan->host; 393 struct pl08x_driver_data *pl08x = plchan->host;
394 struct pl08x_phy_chan *phychan = plchan->phychan; 394 struct pl08x_phy_chan *phychan = plchan->phychan;
395 struct virt_dma_desc *vd = vchan_next_desc(&plchan->vc); 395 struct virt_dma_desc *vd = vchan_next_desc(&plchan->vc);
396 struct pl08x_txd *txd = to_pl08x_txd(&vd->tx); 396 struct pl08x_txd *txd = to_pl08x_txd(&vd->tx);
397 u32 val; 397 u32 val;
398 398
399 list_del(&txd->vd.node); 399 list_del(&txd->vd.node);
400 400
401 plchan->at = txd; 401 plchan->at = txd;
402 402
403 /* Wait for channel inactive */ 403 /* Wait for channel inactive */
404 while (pl08x_phy_channel_busy(phychan)) 404 while (pl08x_phy_channel_busy(phychan))
405 cpu_relax(); 405 cpu_relax();
406 406
407 pl08x_write_lli(pl08x, phychan, &txd->llis_va[0], txd->ccfg); 407 pl08x_write_lli(pl08x, phychan, &txd->llis_va[0], txd->ccfg);
408 408
409 /* Enable the DMA channel */ 409 /* Enable the DMA channel */
410 /* Do not access config register until channel shows as disabled */ 410 /* Do not access config register until channel shows as disabled */
411 while (readl(pl08x->base + PL080_EN_CHAN) & (1 << phychan->id)) 411 while (readl(pl08x->base + PL080_EN_CHAN) & (1 << phychan->id))
412 cpu_relax(); 412 cpu_relax();
413 413
414 /* Do not access config register until channel shows as inactive */ 414 /* Do not access config register until channel shows as inactive */
415 val = readl(phychan->reg_config); 415 val = readl(phychan->reg_config);
416 while ((val & PL080_CONFIG_ACTIVE) || (val & PL080_CONFIG_ENABLE)) 416 while ((val & PL080_CONFIG_ACTIVE) || (val & PL080_CONFIG_ENABLE))
417 val = readl(phychan->reg_config); 417 val = readl(phychan->reg_config);
418 418
419 writel(val | PL080_CONFIG_ENABLE, phychan->reg_config); 419 writel(val | PL080_CONFIG_ENABLE, phychan->reg_config);
420 } 420 }
421 421
422 /* 422 /*
423 * Pause the channel by setting the HALT bit. 423 * Pause the channel by setting the HALT bit.
424 * 424 *
425 * For M->P transfers, pause the DMAC first and then stop the peripheral - 425 * For M->P transfers, pause the DMAC first and then stop the peripheral -
426 * the FIFO can only drain if the peripheral is still requesting data. 426 * the FIFO can only drain if the peripheral is still requesting data.
427 * (note: this can still timeout if the DMAC FIFO never drains of data.) 427 * (note: this can still timeout if the DMAC FIFO never drains of data.)
428 * 428 *
429 * For P->M transfers, disable the peripheral first to stop it filling 429 * For P->M transfers, disable the peripheral first to stop it filling
430 * the DMAC FIFO, and then pause the DMAC. 430 * the DMAC FIFO, and then pause the DMAC.
431 */ 431 */
432 static void pl08x_pause_phy_chan(struct pl08x_phy_chan *ch) 432 static void pl08x_pause_phy_chan(struct pl08x_phy_chan *ch)
433 { 433 {
434 u32 val; 434 u32 val;
435 int timeout; 435 int timeout;
436 436
437 /* Set the HALT bit and wait for the FIFO to drain */ 437 /* Set the HALT bit and wait for the FIFO to drain */
438 val = readl(ch->reg_config); 438 val = readl(ch->reg_config);
439 val |= PL080_CONFIG_HALT; 439 val |= PL080_CONFIG_HALT;
440 writel(val, ch->reg_config); 440 writel(val, ch->reg_config);
441 441
442 /* Wait for channel inactive */ 442 /* Wait for channel inactive */
443 for (timeout = 1000; timeout; timeout--) { 443 for (timeout = 1000; timeout; timeout--) {
444 if (!pl08x_phy_channel_busy(ch)) 444 if (!pl08x_phy_channel_busy(ch))
445 break; 445 break;
446 udelay(1); 446 udelay(1);
447 } 447 }
448 if (pl08x_phy_channel_busy(ch)) 448 if (pl08x_phy_channel_busy(ch))
449 pr_err("pl08x: channel%u timeout waiting for pause\n", ch->id); 449 pr_err("pl08x: channel%u timeout waiting for pause\n", ch->id);
450 } 450 }
451 451
452 static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch) 452 static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch)
453 { 453 {
454 u32 val; 454 u32 val;
455 455
456 /* Clear the HALT bit */ 456 /* Clear the HALT bit */
457 val = readl(ch->reg_config); 457 val = readl(ch->reg_config);
458 val &= ~PL080_CONFIG_HALT; 458 val &= ~PL080_CONFIG_HALT;
459 writel(val, ch->reg_config); 459 writel(val, ch->reg_config);
460 } 460 }
461 461
462 /* 462 /*
463 * pl08x_terminate_phy_chan() stops the channel, clears the FIFO and 463 * pl08x_terminate_phy_chan() stops the channel, clears the FIFO and
464 * clears any pending interrupt status. This should not be used for 464 * clears any pending interrupt status. This should not be used for
465 * an on-going transfer, but as a method of shutting down a channel 465 * an on-going transfer, but as a method of shutting down a channel
466 * (eg, when it's no longer used) or terminating a transfer. 466 * (eg, when it's no longer used) or terminating a transfer.
467 */ 467 */
468 static void pl08x_terminate_phy_chan(struct pl08x_driver_data *pl08x, 468 static void pl08x_terminate_phy_chan(struct pl08x_driver_data *pl08x,
469 struct pl08x_phy_chan *ch) 469 struct pl08x_phy_chan *ch)
470 { 470 {
471 u32 val = readl(ch->reg_config); 471 u32 val = readl(ch->reg_config);
472 472
473 val &= ~(PL080_CONFIG_ENABLE | PL080_CONFIG_ERR_IRQ_MASK | 473 val &= ~(PL080_CONFIG_ENABLE | PL080_CONFIG_ERR_IRQ_MASK |
474 PL080_CONFIG_TC_IRQ_MASK); 474 PL080_CONFIG_TC_IRQ_MASK);
475 475
476 writel(val, ch->reg_config); 476 writel(val, ch->reg_config);
477 477
478 writel(1 << ch->id, pl08x->base + PL080_ERR_CLEAR); 478 writel(1 << ch->id, pl08x->base + PL080_ERR_CLEAR);
479 writel(1 << ch->id, pl08x->base + PL080_TC_CLEAR); 479 writel(1 << ch->id, pl08x->base + PL080_TC_CLEAR);
480 } 480 }
481 481
482 static inline u32 get_bytes_in_cctl(u32 cctl) 482 static inline u32 get_bytes_in_cctl(u32 cctl)
483 { 483 {
484 /* The source width defines the number of bytes */ 484 /* The source width defines the number of bytes */
485 u32 bytes = cctl & PL080_CONTROL_TRANSFER_SIZE_MASK; 485 u32 bytes = cctl & PL080_CONTROL_TRANSFER_SIZE_MASK;
486 486
487 cctl &= PL080_CONTROL_SWIDTH_MASK; 487 cctl &= PL080_CONTROL_SWIDTH_MASK;
488 488
489 switch (cctl >> PL080_CONTROL_SWIDTH_SHIFT) { 489 switch (cctl >> PL080_CONTROL_SWIDTH_SHIFT) {
490 case PL080_WIDTH_8BIT: 490 case PL080_WIDTH_8BIT:
491 break; 491 break;
492 case PL080_WIDTH_16BIT: 492 case PL080_WIDTH_16BIT:
493 bytes *= 2; 493 bytes *= 2;
494 break; 494 break;
495 case PL080_WIDTH_32BIT: 495 case PL080_WIDTH_32BIT:
496 bytes *= 4; 496 bytes *= 4;
497 break; 497 break;
498 } 498 }
499 return bytes; 499 return bytes;
500 } 500 }
501 501
502 static inline u32 get_bytes_in_cctl_pl080s(u32 cctl, u32 cctl1) 502 static inline u32 get_bytes_in_cctl_pl080s(u32 cctl, u32 cctl1)
503 { 503 {
504 /* The source width defines the number of bytes */ 504 /* The source width defines the number of bytes */
505 u32 bytes = cctl1 & PL080S_CONTROL_TRANSFER_SIZE_MASK; 505 u32 bytes = cctl1 & PL080S_CONTROL_TRANSFER_SIZE_MASK;
506 506
507 cctl &= PL080_CONTROL_SWIDTH_MASK; 507 cctl &= PL080_CONTROL_SWIDTH_MASK;
508 508
509 switch (cctl >> PL080_CONTROL_SWIDTH_SHIFT) { 509 switch (cctl >> PL080_CONTROL_SWIDTH_SHIFT) {
510 case PL080_WIDTH_8BIT: 510 case PL080_WIDTH_8BIT:
511 break; 511 break;
512 case PL080_WIDTH_16BIT: 512 case PL080_WIDTH_16BIT:
513 bytes *= 2; 513 bytes *= 2;
514 break; 514 break;
515 case PL080_WIDTH_32BIT: 515 case PL080_WIDTH_32BIT:
516 bytes *= 4; 516 bytes *= 4;
517 break; 517 break;
518 } 518 }
519 return bytes; 519 return bytes;
520 } 520 }
521 521
522 /* The channel should be paused when calling this */ 522 /* The channel should be paused when calling this */
523 static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan) 523 static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan)
524 { 524 {
525 struct pl08x_driver_data *pl08x = plchan->host; 525 struct pl08x_driver_data *pl08x = plchan->host;
526 const u32 *llis_va, *llis_va_limit; 526 const u32 *llis_va, *llis_va_limit;
527 struct pl08x_phy_chan *ch; 527 struct pl08x_phy_chan *ch;
528 dma_addr_t llis_bus; 528 dma_addr_t llis_bus;
529 struct pl08x_txd *txd; 529 struct pl08x_txd *txd;
530 u32 llis_max_words; 530 u32 llis_max_words;
531 size_t bytes; 531 size_t bytes;
532 u32 clli; 532 u32 clli;
533 533
534 ch = plchan->phychan; 534 ch = plchan->phychan;
535 txd = plchan->at; 535 txd = plchan->at;
536 536
537 if (!ch || !txd) 537 if (!ch || !txd)
538 return 0; 538 return 0;
539 539
540 /* 540 /*
541 * Follow the LLIs to get the number of remaining 541 * Follow the LLIs to get the number of remaining
542 * bytes in the currently active transaction. 542 * bytes in the currently active transaction.
543 */ 543 */
544 clli = readl(ch->base + PL080_CH_LLI) & ~PL080_LLI_LM_AHB2; 544 clli = readl(ch->base + PL080_CH_LLI) & ~PL080_LLI_LM_AHB2;
545 545
546 /* First get the remaining bytes in the active transfer */ 546 /* First get the remaining bytes in the active transfer */
547 if (pl08x->vd->pl080s) 547 if (pl08x->vd->pl080s)
548 bytes = get_bytes_in_cctl_pl080s( 548 bytes = get_bytes_in_cctl_pl080s(
549 readl(ch->base + PL080_CH_CONTROL), 549 readl(ch->base + PL080_CH_CONTROL),
550 readl(ch->base + PL080S_CH_CONTROL2)); 550 readl(ch->base + PL080S_CH_CONTROL2));
551 else 551 else
552 bytes = get_bytes_in_cctl(readl(ch->base + PL080_CH_CONTROL)); 552 bytes = get_bytes_in_cctl(readl(ch->base + PL080_CH_CONTROL));
553 553
554 if (!clli) 554 if (!clli)
555 return bytes; 555 return bytes;
556 556
557 llis_va = txd->llis_va; 557 llis_va = txd->llis_va;
558 llis_bus = txd->llis_bus; 558 llis_bus = txd->llis_bus;
559 559
560 llis_max_words = pl08x->lli_words * MAX_NUM_TSFR_LLIS; 560 llis_max_words = pl08x->lli_words * MAX_NUM_TSFR_LLIS;
561 BUG_ON(clli < llis_bus || clli >= llis_bus + 561 BUG_ON(clli < llis_bus || clli >= llis_bus +
562 sizeof(u32) * llis_max_words); 562 sizeof(u32) * llis_max_words);
563 563
564 /* 564 /*
565 * Locate the next LLI - as this is an array, 565 * Locate the next LLI - as this is an array,
566 * it's simple maths to find. 566 * it's simple maths to find.
567 */ 567 */
568 llis_va += (clli - llis_bus) / sizeof(u32); 568 llis_va += (clli - llis_bus) / sizeof(u32);
569 569
570 llis_va_limit = llis_va + llis_max_words; 570 llis_va_limit = llis_va + llis_max_words;
571 571
572 for (; llis_va < llis_va_limit; llis_va += pl08x->lli_words) { 572 for (; llis_va < llis_va_limit; llis_va += pl08x->lli_words) {
573 if (pl08x->vd->pl080s) 573 if (pl08x->vd->pl080s)
574 bytes += get_bytes_in_cctl_pl080s( 574 bytes += get_bytes_in_cctl_pl080s(
575 llis_va[PL080_LLI_CCTL], 575 llis_va[PL080_LLI_CCTL],
576 llis_va[PL080S_LLI_CCTL2]); 576 llis_va[PL080S_LLI_CCTL2]);
577 else 577 else
578 bytes += get_bytes_in_cctl(llis_va[PL080_LLI_CCTL]); 578 bytes += get_bytes_in_cctl(llis_va[PL080_LLI_CCTL]);
579 579
580 /* 580 /*
581 * A LLI pointer going backward terminates the LLI list 581 * A LLI pointer going backward terminates the LLI list
582 */ 582 */
583 if (llis_va[PL080_LLI_LLI] <= clli) 583 if (llis_va[PL080_LLI_LLI] <= clli)
584 break; 584 break;
585 } 585 }
586 586
587 return bytes; 587 return bytes;
588 } 588 }
589 589
590 /* 590 /*
591 * Allocate a physical channel for a virtual channel 591 * Allocate a physical channel for a virtual channel
592 * 592 *
593 * Try to locate a physical channel to be used for this transfer. If all 593 * Try to locate a physical channel to be used for this transfer. If all
594 * are taken return NULL and the requester will have to cope by using 594 * are taken return NULL and the requester will have to cope by using
595 * some fallback PIO mode or retrying later. 595 * some fallback PIO mode or retrying later.
596 */ 596 */
597 static struct pl08x_phy_chan * 597 static struct pl08x_phy_chan *
598 pl08x_get_phy_channel(struct pl08x_driver_data *pl08x, 598 pl08x_get_phy_channel(struct pl08x_driver_data *pl08x,
599 struct pl08x_dma_chan *virt_chan) 599 struct pl08x_dma_chan *virt_chan)
600 { 600 {
601 struct pl08x_phy_chan *ch = NULL; 601 struct pl08x_phy_chan *ch = NULL;
602 unsigned long flags; 602 unsigned long flags;
603 int i; 603 int i;
604 604
605 for (i = 0; i < pl08x->vd->channels; i++) { 605 for (i = 0; i < pl08x->vd->channels; i++) {
606 ch = &pl08x->phy_chans[i]; 606 ch = &pl08x->phy_chans[i];
607 607
608 spin_lock_irqsave(&ch->lock, flags); 608 spin_lock_irqsave(&ch->lock, flags);
609 609
610 if (!ch->locked && !ch->serving) { 610 if (!ch->locked && !ch->serving) {
611 ch->serving = virt_chan; 611 ch->serving = virt_chan;
612 spin_unlock_irqrestore(&ch->lock, flags); 612 spin_unlock_irqrestore(&ch->lock, flags);
613 break; 613 break;
614 } 614 }
615 615
616 spin_unlock_irqrestore(&ch->lock, flags); 616 spin_unlock_irqrestore(&ch->lock, flags);
617 } 617 }
618 618
619 if (i == pl08x->vd->channels) { 619 if (i == pl08x->vd->channels) {
620 /* No physical channel available, cope with it */ 620 /* No physical channel available, cope with it */
621 return NULL; 621 return NULL;
622 } 622 }
623 623
624 return ch; 624 return ch;
625 } 625 }
626 626
627 /* Mark the physical channel as free. Note, this write is atomic. */ 627 /* Mark the physical channel as free. Note, this write is atomic. */
628 static inline void pl08x_put_phy_channel(struct pl08x_driver_data *pl08x, 628 static inline void pl08x_put_phy_channel(struct pl08x_driver_data *pl08x,
629 struct pl08x_phy_chan *ch) 629 struct pl08x_phy_chan *ch)
630 { 630 {
631 ch->serving = NULL; 631 ch->serving = NULL;
632 } 632 }
633 633
634 /* 634 /*
635 * Try to allocate a physical channel. When successful, assign it to 635 * Try to allocate a physical channel. When successful, assign it to
636 * this virtual channel, and initiate the next descriptor. The 636 * this virtual channel, and initiate the next descriptor. The
637 * virtual channel lock must be held at this point. 637 * virtual channel lock must be held at this point.
638 */ 638 */
639 static void pl08x_phy_alloc_and_start(struct pl08x_dma_chan *plchan) 639 static void pl08x_phy_alloc_and_start(struct pl08x_dma_chan *plchan)
640 { 640 {
641 struct pl08x_driver_data *pl08x = plchan->host; 641 struct pl08x_driver_data *pl08x = plchan->host;
642 struct pl08x_phy_chan *ch; 642 struct pl08x_phy_chan *ch;
643 643
644 ch = pl08x_get_phy_channel(pl08x, plchan); 644 ch = pl08x_get_phy_channel(pl08x, plchan);
645 if (!ch) { 645 if (!ch) {
646 dev_dbg(&pl08x->adev->dev, "no physical channel available for xfer on %s\n", plchan->name); 646 dev_dbg(&pl08x->adev->dev, "no physical channel available for xfer on %s\n", plchan->name);
647 plchan->state = PL08X_CHAN_WAITING; 647 plchan->state = PL08X_CHAN_WAITING;
648 return; 648 return;
649 } 649 }
650 650
651 dev_dbg(&pl08x->adev->dev, "allocated physical channel %d for xfer on %s\n", 651 dev_dbg(&pl08x->adev->dev, "allocated physical channel %d for xfer on %s\n",
652 ch->id, plchan->name); 652 ch->id, plchan->name);
653 653
654 plchan->phychan = ch; 654 plchan->phychan = ch;
655 plchan->state = PL08X_CHAN_RUNNING; 655 plchan->state = PL08X_CHAN_RUNNING;
656 pl08x_start_next_txd(plchan); 656 pl08x_start_next_txd(plchan);
657 } 657 }
658 658
659 static void pl08x_phy_reassign_start(struct pl08x_phy_chan *ch, 659 static void pl08x_phy_reassign_start(struct pl08x_phy_chan *ch,
660 struct pl08x_dma_chan *plchan) 660 struct pl08x_dma_chan *plchan)
661 { 661 {
662 struct pl08x_driver_data *pl08x = plchan->host; 662 struct pl08x_driver_data *pl08x = plchan->host;
663 663
664 dev_dbg(&pl08x->adev->dev, "reassigned physical channel %d for xfer on %s\n", 664 dev_dbg(&pl08x->adev->dev, "reassigned physical channel %d for xfer on %s\n",
665 ch->id, plchan->name); 665 ch->id, plchan->name);
666 666
667 /* 667 /*
668 * We do this without taking the lock; we're really only concerned 668 * We do this without taking the lock; we're really only concerned
669 * about whether this pointer is NULL or not, and we're guaranteed 669 * about whether this pointer is NULL or not, and we're guaranteed
670 * that this will only be called when it _already_ is non-NULL. 670 * that this will only be called when it _already_ is non-NULL.
671 */ 671 */
672 ch->serving = plchan; 672 ch->serving = plchan;
673 plchan->phychan = ch; 673 plchan->phychan = ch;
674 plchan->state = PL08X_CHAN_RUNNING; 674 plchan->state = PL08X_CHAN_RUNNING;
675 pl08x_start_next_txd(plchan); 675 pl08x_start_next_txd(plchan);
676 } 676 }
677 677
678 /* 678 /*
679 * Free a physical DMA channel, potentially reallocating it to another 679 * Free a physical DMA channel, potentially reallocating it to another
680 * virtual channel if we have any pending. 680 * virtual channel if we have any pending.
681 */ 681 */
682 static void pl08x_phy_free(struct pl08x_dma_chan *plchan) 682 static void pl08x_phy_free(struct pl08x_dma_chan *plchan)
683 { 683 {
684 struct pl08x_driver_data *pl08x = plchan->host; 684 struct pl08x_driver_data *pl08x = plchan->host;
685 struct pl08x_dma_chan *p, *next; 685 struct pl08x_dma_chan *p, *next;
686 686
687 retry: 687 retry:
688 next = NULL; 688 next = NULL;
689 689
690 /* Find a waiting virtual channel for the next transfer. */ 690 /* Find a waiting virtual channel for the next transfer. */
691 list_for_each_entry(p, &pl08x->memcpy.channels, vc.chan.device_node) 691 list_for_each_entry(p, &pl08x->memcpy.channels, vc.chan.device_node)
692 if (p->state == PL08X_CHAN_WAITING) { 692 if (p->state == PL08X_CHAN_WAITING) {
693 next = p; 693 next = p;
694 break; 694 break;
695 } 695 }
696 696
697 if (!next) { 697 if (!next) {
698 list_for_each_entry(p, &pl08x->slave.channels, vc.chan.device_node) 698 list_for_each_entry(p, &pl08x->slave.channels, vc.chan.device_node)
699 if (p->state == PL08X_CHAN_WAITING) { 699 if (p->state == PL08X_CHAN_WAITING) {
700 next = p; 700 next = p;
701 break; 701 break;
702 } 702 }
703 } 703 }
704 704
705 /* Ensure that the physical channel is stopped */ 705 /* Ensure that the physical channel is stopped */
706 pl08x_terminate_phy_chan(pl08x, plchan->phychan); 706 pl08x_terminate_phy_chan(pl08x, plchan->phychan);
707 707
708 if (next) { 708 if (next) {
709 bool success; 709 bool success;
710 710
711 /* 711 /*
712 * Eww. We know this isn't going to deadlock 712 * Eww. We know this isn't going to deadlock
713 * but lockdep probably doesn't. 713 * but lockdep probably doesn't.
714 */ 714 */
715 spin_lock(&next->vc.lock); 715 spin_lock(&next->vc.lock);
716 /* Re-check the state now that we have the lock */ 716 /* Re-check the state now that we have the lock */
717 success = next->state == PL08X_CHAN_WAITING; 717 success = next->state == PL08X_CHAN_WAITING;
718 if (success) 718 if (success)
719 pl08x_phy_reassign_start(plchan->phychan, next); 719 pl08x_phy_reassign_start(plchan->phychan, next);
720 spin_unlock(&next->vc.lock); 720 spin_unlock(&next->vc.lock);
721 721
722 /* If the state changed, try to find another channel */ 722 /* If the state changed, try to find another channel */
723 if (!success) 723 if (!success)
724 goto retry; 724 goto retry;
725 } else { 725 } else {
726 /* No more jobs, so free up the physical channel */ 726 /* No more jobs, so free up the physical channel */
727 pl08x_put_phy_channel(pl08x, plchan->phychan); 727 pl08x_put_phy_channel(pl08x, plchan->phychan);
728 } 728 }
729 729
730 plchan->phychan = NULL; 730 plchan->phychan = NULL;
731 plchan->state = PL08X_CHAN_IDLE; 731 plchan->state = PL08X_CHAN_IDLE;
732 } 732 }
733 733
734 /* 734 /*
735 * LLI handling 735 * LLI handling
736 */ 736 */
737 737
738 static inline unsigned int pl08x_get_bytes_for_cctl(unsigned int coded) 738 static inline unsigned int pl08x_get_bytes_for_cctl(unsigned int coded)
739 { 739 {
740 switch (coded) { 740 switch (coded) {
741 case PL080_WIDTH_8BIT: 741 case PL080_WIDTH_8BIT:
742 return 1; 742 return 1;
743 case PL080_WIDTH_16BIT: 743 case PL080_WIDTH_16BIT:
744 return 2; 744 return 2;
745 case PL080_WIDTH_32BIT: 745 case PL080_WIDTH_32BIT:
746 return 4; 746 return 4;
747 default: 747 default:
748 break; 748 break;
749 } 749 }
750 BUG(); 750 BUG();
751 return 0; 751 return 0;
752 } 752 }
753 753
754 static inline u32 pl08x_cctl_bits(u32 cctl, u8 srcwidth, u8 dstwidth, 754 static inline u32 pl08x_cctl_bits(u32 cctl, u8 srcwidth, u8 dstwidth,
755 size_t tsize) 755 size_t tsize)
756 { 756 {
757 u32 retbits = cctl; 757 u32 retbits = cctl;
758 758
759 /* Remove all src, dst and transfer size bits */ 759 /* Remove all src, dst and transfer size bits */
760 retbits &= ~PL080_CONTROL_DWIDTH_MASK; 760 retbits &= ~PL080_CONTROL_DWIDTH_MASK;
761 retbits &= ~PL080_CONTROL_SWIDTH_MASK; 761 retbits &= ~PL080_CONTROL_SWIDTH_MASK;
762 retbits &= ~PL080_CONTROL_TRANSFER_SIZE_MASK; 762 retbits &= ~PL080_CONTROL_TRANSFER_SIZE_MASK;
763 763
764 /* Then set the bits according to the parameters */ 764 /* Then set the bits according to the parameters */
765 switch (srcwidth) { 765 switch (srcwidth) {
766 case 1: 766 case 1:
767 retbits |= PL080_WIDTH_8BIT << PL080_CONTROL_SWIDTH_SHIFT; 767 retbits |= PL080_WIDTH_8BIT << PL080_CONTROL_SWIDTH_SHIFT;
768 break; 768 break;
769 case 2: 769 case 2:
770 retbits |= PL080_WIDTH_16BIT << PL080_CONTROL_SWIDTH_SHIFT; 770 retbits |= PL080_WIDTH_16BIT << PL080_CONTROL_SWIDTH_SHIFT;
771 break; 771 break;
772 case 4: 772 case 4:
773 retbits |= PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT; 773 retbits |= PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT;
774 break; 774 break;
775 default: 775 default:
776 BUG(); 776 BUG();
777 break; 777 break;
778 } 778 }
779 779
780 switch (dstwidth) { 780 switch (dstwidth) {
781 case 1: 781 case 1:
782 retbits |= PL080_WIDTH_8BIT << PL080_CONTROL_DWIDTH_SHIFT; 782 retbits |= PL080_WIDTH_8BIT << PL080_CONTROL_DWIDTH_SHIFT;
783 break; 783 break;
784 case 2: 784 case 2:
785 retbits |= PL080_WIDTH_16BIT << PL080_CONTROL_DWIDTH_SHIFT; 785 retbits |= PL080_WIDTH_16BIT << PL080_CONTROL_DWIDTH_SHIFT;
786 break; 786 break;
787 case 4: 787 case 4:
788 retbits |= PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT; 788 retbits |= PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT;
789 break; 789 break;
790 default: 790 default:
791 BUG(); 791 BUG();
792 break; 792 break;
793 } 793 }
794 794
795 tsize &= PL080_CONTROL_TRANSFER_SIZE_MASK; 795 tsize &= PL080_CONTROL_TRANSFER_SIZE_MASK;
796 retbits |= tsize << PL080_CONTROL_TRANSFER_SIZE_SHIFT; 796 retbits |= tsize << PL080_CONTROL_TRANSFER_SIZE_SHIFT;
797 return retbits; 797 return retbits;
798 } 798 }
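The same clear-then-set pattern in a standalone sketch: the source width, destination width and transfer size occupy separate fields of the 32-bit control word, so re-encoding is a mask-out followed by an OR-in. The shift and mask values here are illustrative stand-ins, not the PL080 header definitions.

#include <stdio.h>
#include <stdint.h>

/* Illustrative field positions (the driver takes the real ones from pl080.h) */
#define SWIDTH_SHIFT  18
#define DWIDTH_SHIFT  21
#define WIDTH_MASK    0x7u
#define TSIZE_MASK    0xfffu

#define WIDTH_8BIT    0
#define WIDTH_16BIT   1
#define WIDTH_32BIT   2

static uint32_t cctl_bits(uint32_t cctl, unsigned swidth_code,
                          unsigned dwidth_code, unsigned tsize)
{
        /* Remove the src width, dst width and transfer size fields */
        cctl &= ~(WIDTH_MASK << SWIDTH_SHIFT);
        cctl &= ~(WIDTH_MASK << DWIDTH_SHIFT);
        cctl &= ~TSIZE_MASK;

        /* Then set them from the parameters */
        cctl |= swidth_code << SWIDTH_SHIFT;
        cctl |= dwidth_code << DWIDTH_SHIFT;
        cctl |= tsize & TSIZE_MASK;
        return cctl;
}

int main(void)
{
        /* 32-bit source, 16-bit destination, 256 source-width units */
        printf("cctl = 0x%08x\n",
               (unsigned)cctl_bits(0, WIDTH_32BIT, WIDTH_16BIT, 256));
        return 0;
}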
799 799
800 struct pl08x_lli_build_data { 800 struct pl08x_lli_build_data {
801 struct pl08x_txd *txd; 801 struct pl08x_txd *txd;
802 struct pl08x_bus_data srcbus; 802 struct pl08x_bus_data srcbus;
803 struct pl08x_bus_data dstbus; 803 struct pl08x_bus_data dstbus;
804 size_t remainder; 804 size_t remainder;
805 u32 lli_bus; 805 u32 lli_bus;
806 }; 806 };
807 807
808 /* 808 /*
809 * Autoselect a master bus to use for the transfer. The slave bus is 809 * Autoselect a master bus to use for the transfer. The slave bus is
810 * chosen as the victim when src & dst are not similarly aligned, i.e. 810 * chosen as the victim when src & dst are not similarly aligned, i.e.
811 * if, after aligning the master's address to the transfer width (by 811 * if, after aligning the master's address to the transfer width (by
812 * sending a few bytes one at a time), the slave is still not aligned, 812 * sending a few bytes one at a time), the slave is still not aligned,
813 * then its width is reduced to BYTE. 813 * then its width is reduced to BYTE.
814 * - prefers the destination bus if both are available 814 * - prefers the destination bus if both are available
815 * - prefers the bus with a fixed address (i.e. a peripheral) 815 * - prefers the bus with a fixed address (i.e. a peripheral)
816 */ 816 */
817 static void pl08x_choose_master_bus(struct pl08x_lli_build_data *bd, 817 static void pl08x_choose_master_bus(struct pl08x_lli_build_data *bd,
818 struct pl08x_bus_data **mbus, struct pl08x_bus_data **sbus, u32 cctl) 818 struct pl08x_bus_data **mbus, struct pl08x_bus_data **sbus, u32 cctl)
819 { 819 {
820 if (!(cctl & PL080_CONTROL_DST_INCR)) { 820 if (!(cctl & PL080_CONTROL_DST_INCR)) {
821 *mbus = &bd->dstbus; 821 *mbus = &bd->dstbus;
822 *sbus = &bd->srcbus; 822 *sbus = &bd->srcbus;
823 } else if (!(cctl & PL080_CONTROL_SRC_INCR)) { 823 } else if (!(cctl & PL080_CONTROL_SRC_INCR)) {
824 *mbus = &bd->srcbus; 824 *mbus = &bd->srcbus;
825 *sbus = &bd->dstbus; 825 *sbus = &bd->dstbus;
826 } else { 826 } else {
827 if (bd->dstbus.buswidth >= bd->srcbus.buswidth) { 827 if (bd->dstbus.buswidth >= bd->srcbus.buswidth) {
828 *mbus = &bd->dstbus; 828 *mbus = &bd->dstbus;
829 *sbus = &bd->srcbus; 829 *sbus = &bd->srcbus;
830 } else { 830 } else {
831 *mbus = &bd->srcbus; 831 *mbus = &bd->srcbus;
832 *sbus = &bd->dstbus; 832 *sbus = &bd->dstbus;
833 } 833 }
834 } 834 }
835 } 835 }
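A compact sketch of that decision order, with hypothetical helper names: a bus whose address does not increment (a peripheral FIFO) is always the master; when both sides increment (memcpy), the wider bus wins, preferring the destination on a tie.

#include <stdio.h>
#include <stdbool.h>

/* Return which side should be the master bus for one transfer. */
static const char *choose_master(bool src_incr, bool dst_incr,
                                 unsigned srcwidth, unsigned dstwidth)
{
        if (!dst_incr)          /* fixed destination address: peripheral */
                return "dst";
        if (!src_incr)          /* fixed source address: peripheral */
                return "src";
        /* both increment (memcpy): wider bus wins, destination on a tie */
        return dstwidth >= srcwidth ? "dst" : "src";
}

int main(void)
{
        printf("mem -> periph         : master = %s\n",
               choose_master(true, false, 4, 2));
        printf("memcpy, src 4 / dst 4 : master = %s\n",
               choose_master(true, true, 4, 4));
        printf("memcpy, src 4 / dst 1 : master = %s\n",
               choose_master(true, true, 4, 1));
        return 0;
}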
836 836
837 /* 837 /*
838 * Fill in one LLI for a given transfer descriptor and advance the counter 838 * Fill in one LLI for a given transfer descriptor and advance the counter
839 */ 839 */
840 static void pl08x_fill_lli_for_desc(struct pl08x_driver_data *pl08x, 840 static void pl08x_fill_lli_for_desc(struct pl08x_driver_data *pl08x,
841 struct pl08x_lli_build_data *bd, 841 struct pl08x_lli_build_data *bd,
842 int num_llis, int len, u32 cctl, u32 cctl2) 842 int num_llis, int len, u32 cctl, u32 cctl2)
843 { 843 {
844 u32 offset = num_llis * pl08x->lli_words; 844 u32 offset = num_llis * pl08x->lli_words;
845 u32 *llis_va = bd->txd->llis_va + offset; 845 u32 *llis_va = bd->txd->llis_va + offset;
846 dma_addr_t llis_bus = bd->txd->llis_bus; 846 dma_addr_t llis_bus = bd->txd->llis_bus;
847 847
848 BUG_ON(num_llis >= MAX_NUM_TSFR_LLIS); 848 BUG_ON(num_llis >= MAX_NUM_TSFR_LLIS);
849 849
850 /* Advance the offset to next LLI. */ 850 /* Advance the offset to next LLI. */
851 offset += pl08x->lli_words; 851 offset += pl08x->lli_words;
852 852
853 llis_va[PL080_LLI_SRC] = bd->srcbus.addr; 853 llis_va[PL080_LLI_SRC] = bd->srcbus.addr;
854 llis_va[PL080_LLI_DST] = bd->dstbus.addr; 854 llis_va[PL080_LLI_DST] = bd->dstbus.addr;
855 llis_va[PL080_LLI_LLI] = (llis_bus + sizeof(u32) * offset); 855 llis_va[PL080_LLI_LLI] = (llis_bus + sizeof(u32) * offset);
856 llis_va[PL080_LLI_LLI] |= bd->lli_bus; 856 llis_va[PL080_LLI_LLI] |= bd->lli_bus;
857 llis_va[PL080_LLI_CCTL] = cctl; 857 llis_va[PL080_LLI_CCTL] = cctl;
858 if (pl08x->vd->pl080s) 858 if (pl08x->vd->pl080s)
859 llis_va[PL080S_LLI_CCTL2] = cctl2; 859 llis_va[PL080S_LLI_CCTL2] = cctl2;
860 860
861 if (cctl & PL080_CONTROL_SRC_INCR) 861 if (cctl & PL080_CONTROL_SRC_INCR)
862 bd->srcbus.addr += len; 862 bd->srcbus.addr += len;
863 if (cctl & PL080_CONTROL_DST_INCR) 863 if (cctl & PL080_CONTROL_DST_INCR)
864 bd->dstbus.addr += len; 864 bd->dstbus.addr += len;
865 865
866 BUG_ON(bd->remainder < len); 866 BUG_ON(bd->remainder < len);
867 867
868 bd->remainder -= len; 868 bd->remainder -= len;
869 } 869 }
870 870
871 static inline void prep_byte_width_lli(struct pl08x_driver_data *pl08x, 871 static inline void prep_byte_width_lli(struct pl08x_driver_data *pl08x,
872 struct pl08x_lli_build_data *bd, u32 *cctl, u32 len, 872 struct pl08x_lli_build_data *bd, u32 *cctl, u32 len,
873 int num_llis, size_t *total_bytes) 873 int num_llis, size_t *total_bytes)
874 { 874 {
875 *cctl = pl08x_cctl_bits(*cctl, 1, 1, len); 875 *cctl = pl08x_cctl_bits(*cctl, 1, 1, len);
876 pl08x_fill_lli_for_desc(pl08x, bd, num_llis, len, *cctl, len); 876 pl08x_fill_lli_for_desc(pl08x, bd, num_llis, len, *cctl, len);
877 (*total_bytes) += len; 877 (*total_bytes) += len;
878 } 878 }
879 879
880 #ifdef VERBOSE_DEBUG 880 #ifdef VERBOSE_DEBUG
881 static void pl08x_dump_lli(struct pl08x_driver_data *pl08x, 881 static void pl08x_dump_lli(struct pl08x_driver_data *pl08x,
882 const u32 *llis_va, int num_llis) 882 const u32 *llis_va, int num_llis)
883 { 883 {
884 int i; 884 int i;
885 885
886 if (pl08x->vd->pl080s) { 886 if (pl08x->vd->pl080s) {
887 dev_vdbg(&pl08x->adev->dev, 887 dev_vdbg(&pl08x->adev->dev,
888 "%-3s %-9s %-10s %-10s %-10s %-10s %s\n", 888 "%-3s %-9s %-10s %-10s %-10s %-10s %s\n",
889 "lli", "", "csrc", "cdst", "clli", "cctl", "cctl2"); 889 "lli", "", "csrc", "cdst", "clli", "cctl", "cctl2");
890 for (i = 0; i < num_llis; i++) { 890 for (i = 0; i < num_llis; i++) {
891 dev_vdbg(&pl08x->adev->dev, 891 dev_vdbg(&pl08x->adev->dev,
892 "%3d @%p: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", 892 "%3d @%p: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
893 i, llis_va, llis_va[PL080_LLI_SRC], 893 i, llis_va, llis_va[PL080_LLI_SRC],
894 llis_va[PL080_LLI_DST], llis_va[PL080_LLI_LLI], 894 llis_va[PL080_LLI_DST], llis_va[PL080_LLI_LLI],
895 llis_va[PL080_LLI_CCTL], 895 llis_va[PL080_LLI_CCTL],
896 llis_va[PL080S_LLI_CCTL2]); 896 llis_va[PL080S_LLI_CCTL2]);
897 llis_va += pl08x->lli_words; 897 llis_va += pl08x->lli_words;
898 } 898 }
899 } else { 899 } else {
900 dev_vdbg(&pl08x->adev->dev, 900 dev_vdbg(&pl08x->adev->dev,
901 "%-3s %-9s %-10s %-10s %-10s %s\n", 901 "%-3s %-9s %-10s %-10s %-10s %s\n",
902 "lli", "", "csrc", "cdst", "clli", "cctl"); 902 "lli", "", "csrc", "cdst", "clli", "cctl");
903 for (i = 0; i < num_llis; i++) { 903 for (i = 0; i < num_llis; i++) {
904 dev_vdbg(&pl08x->adev->dev, 904 dev_vdbg(&pl08x->adev->dev,
905 "%3d @%p: 0x%08x 0x%08x 0x%08x 0x%08x\n", 905 "%3d @%p: 0x%08x 0x%08x 0x%08x 0x%08x\n",
906 i, llis_va, llis_va[PL080_LLI_SRC], 906 i, llis_va, llis_va[PL080_LLI_SRC],
907 llis_va[PL080_LLI_DST], llis_va[PL080_LLI_LLI], 907 llis_va[PL080_LLI_DST], llis_va[PL080_LLI_LLI],
908 llis_va[PL080_LLI_CCTL]); 908 llis_va[PL080_LLI_CCTL]);
909 llis_va += pl08x->lli_words; 909 llis_va += pl08x->lli_words;
910 } 910 }
911 } 911 }
912 } 912 }
913 #else 913 #else
914 static inline void pl08x_dump_lli(struct pl08x_driver_data *pl08x, 914 static inline void pl08x_dump_lli(struct pl08x_driver_data *pl08x,
915 const u32 *llis_va, int num_llis) {} 915 const u32 *llis_va, int num_llis) {}
916 #endif 916 #endif
917 917
918 /* 918 /*
919 * This fills in the table of LLIs for the transfer descriptor. 919 * This fills in the table of LLIs for the transfer descriptor.
920 * Note that we assume we never have to change the burst sizes. 920 * Note that we assume we never have to change the burst sizes.
921 * Returns 0 on error. 921 * Returns 0 on error.
922 */ 922 */
923 static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x, 923 static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
924 struct pl08x_txd *txd) 924 struct pl08x_txd *txd)
925 { 925 {
926 struct pl08x_bus_data *mbus, *sbus; 926 struct pl08x_bus_data *mbus, *sbus;
927 struct pl08x_lli_build_data bd; 927 struct pl08x_lli_build_data bd;
928 int num_llis = 0; 928 int num_llis = 0;
929 u32 cctl, early_bytes = 0; 929 u32 cctl, early_bytes = 0;
930 size_t max_bytes_per_lli, total_bytes; 930 size_t max_bytes_per_lli, total_bytes;
931 u32 *llis_va, *last_lli; 931 u32 *llis_va, *last_lli;
932 struct pl08x_sg *dsg; 932 struct pl08x_sg *dsg;
933 933
934 txd->llis_va = dma_pool_alloc(pl08x->pool, GFP_NOWAIT, &txd->llis_bus); 934 txd->llis_va = dma_pool_alloc(pl08x->pool, GFP_NOWAIT, &txd->llis_bus);
935 if (!txd->llis_va) { 935 if (!txd->llis_va) {
936 dev_err(&pl08x->adev->dev, "%s no memory for llis\n", __func__); 936 dev_err(&pl08x->adev->dev, "%s no memory for llis\n", __func__);
937 return 0; 937 return 0;
938 } 938 }
939 939
940 bd.txd = txd; 940 bd.txd = txd;
941 bd.lli_bus = (pl08x->lli_buses & PL08X_AHB2) ? PL080_LLI_LM_AHB2 : 0; 941 bd.lli_bus = (pl08x->lli_buses & PL08X_AHB2) ? PL080_LLI_LM_AHB2 : 0;
942 cctl = txd->cctl; 942 cctl = txd->cctl;
943 943
944 /* Find maximum width of the source bus */ 944 /* Find maximum width of the source bus */
945 bd.srcbus.maxwidth = 945 bd.srcbus.maxwidth =
946 pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_SWIDTH_MASK) >> 946 pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_SWIDTH_MASK) >>
947 PL080_CONTROL_SWIDTH_SHIFT); 947 PL080_CONTROL_SWIDTH_SHIFT);
948 948
949 /* Find maximum width of the destination bus */ 949 /* Find maximum width of the destination bus */
950 bd.dstbus.maxwidth = 950 bd.dstbus.maxwidth =
951 pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_DWIDTH_MASK) >> 951 pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_DWIDTH_MASK) >>
952 PL080_CONTROL_DWIDTH_SHIFT); 952 PL080_CONTROL_DWIDTH_SHIFT);
953 953
954 list_for_each_entry(dsg, &txd->dsg_list, node) { 954 list_for_each_entry(dsg, &txd->dsg_list, node) {
955 total_bytes = 0; 955 total_bytes = 0;
956 cctl = txd->cctl; 956 cctl = txd->cctl;
957 957
958 bd.srcbus.addr = dsg->src_addr; 958 bd.srcbus.addr = dsg->src_addr;
959 bd.dstbus.addr = dsg->dst_addr; 959 bd.dstbus.addr = dsg->dst_addr;
960 bd.remainder = dsg->len; 960 bd.remainder = dsg->len;
961 bd.srcbus.buswidth = bd.srcbus.maxwidth; 961 bd.srcbus.buswidth = bd.srcbus.maxwidth;
962 bd.dstbus.buswidth = bd.dstbus.maxwidth; 962 bd.dstbus.buswidth = bd.dstbus.maxwidth;
963 963
964 pl08x_choose_master_bus(&bd, &mbus, &sbus, cctl); 964 pl08x_choose_master_bus(&bd, &mbus, &sbus, cctl);
965 965
966 dev_vdbg(&pl08x->adev->dev, 966 dev_vdbg(&pl08x->adev->dev,
967 "src=0x%08llx%s/%u dst=0x%08llx%s/%u len=%zu\n", 967 "src=0x%08llx%s/%u dst=0x%08llx%s/%u len=%zu\n",
968 (u64)bd.srcbus.addr, 968 (u64)bd.srcbus.addr,
969 cctl & PL080_CONTROL_SRC_INCR ? "+" : "", 969 cctl & PL080_CONTROL_SRC_INCR ? "+" : "",
970 bd.srcbus.buswidth, 970 bd.srcbus.buswidth,
971 (u64)bd.dstbus.addr, 971 (u64)bd.dstbus.addr,
972 cctl & PL080_CONTROL_DST_INCR ? "+" : "", 972 cctl & PL080_CONTROL_DST_INCR ? "+" : "",
973 bd.dstbus.buswidth, 973 bd.dstbus.buswidth,
974 bd.remainder); 974 bd.remainder);
975 dev_vdbg(&pl08x->adev->dev, "mbus=%s sbus=%s\n", 975 dev_vdbg(&pl08x->adev->dev, "mbus=%s sbus=%s\n",
976 mbus == &bd.srcbus ? "src" : "dst", 976 mbus == &bd.srcbus ? "src" : "dst",
977 sbus == &bd.srcbus ? "src" : "dst"); 977 sbus == &bd.srcbus ? "src" : "dst");
978 978
979 /* 979 /*
980 * Zero length is only allowed if all these requirements are 980 * Zero length is only allowed if all these requirements are
981 * met: 981 * met:
982 * - flow controller is peripheral. 982 * - flow controller is peripheral.
983 * - src.addr is aligned to src.width 983 * - src.addr is aligned to src.width
984 * - dst.addr is aligned to dst.width 984 * - dst.addr is aligned to dst.width
985 * 985 *
986 * sg_len == 1 should be true, as there are two cases here: 986 * sg_len == 1 should be true, as there are two cases here:
987 * 987 *
988 * - Memory addresses are contiguous, not scattered. 988 * - Memory addresses are contiguous, not scattered.
989 * Here only one sg is passed by the user driver, with a 989 * Here only one sg is passed by the user driver, with a
990 * memory address and zero length. We pass this to the 990 * memory address and zero length. We pass this to the
991 * controller, and after the transfer it receives the last 991 * controller, and after the transfer it receives the last
992 * burst request from the peripheral, so the transfer finishes. 992 * burst request from the peripheral, so the transfer finishes.
993 * 993 *
994 * - Memory addresses are scattered, not contiguous. 994 * - Memory addresses are scattered, not contiguous.
995 * Since the DMA controller does not know when an LLI's 995 * Since the DMA controller does not know when an LLI's
996 * transfer is over, it cannot load the next LLI. So in this 996 * transfer is over, it cannot load the next LLI. So in this
997 * case we have to assume that only one LLI is supported, 997 * case we have to assume that only one LLI is supported,
998 * and thus we cannot have scattered addresses. 998 * and thus we cannot have scattered addresses.
999 */ 999 */
1000 if (!bd.remainder) { 1000 if (!bd.remainder) {
1001 u32 fc = (txd->ccfg & PL080_CONFIG_FLOW_CONTROL_MASK) >> 1001 u32 fc = (txd->ccfg & PL080_CONFIG_FLOW_CONTROL_MASK) >>
1002 PL080_CONFIG_FLOW_CONTROL_SHIFT; 1002 PL080_CONFIG_FLOW_CONTROL_SHIFT;
1003 if (!((fc >= PL080_FLOW_SRC2DST_DST) && 1003 if (!((fc >= PL080_FLOW_SRC2DST_DST) &&
1004 (fc <= PL080_FLOW_SRC2DST_SRC))) { 1004 (fc <= PL080_FLOW_SRC2DST_SRC))) {
1005 dev_err(&pl08x->adev->dev, "%s sg len can't be zero", 1005 dev_err(&pl08x->adev->dev, "%s sg len can't be zero",
1006 __func__); 1006 __func__);
1007 return 0; 1007 return 0;
1008 } 1008 }
1009 1009
1010 if (!IS_BUS_ALIGNED(&bd.srcbus) || 1010 if (!IS_BUS_ALIGNED(&bd.srcbus) ||
1011 !IS_BUS_ALIGNED(&bd.dstbus)) { 1011 !IS_BUS_ALIGNED(&bd.dstbus)) {
1012 dev_err(&pl08x->adev->dev, 1012 dev_err(&pl08x->adev->dev,
1013 "%s src & dst address must be aligned to src" 1013 "%s src & dst address must be aligned to src"
1014 " & dst width if peripheral is flow controller", 1014 " & dst width if peripheral is flow controller",
1015 __func__); 1015 __func__);
1016 return 0; 1016 return 0;
1017 } 1017 }
1018 1018
1019 cctl = pl08x_cctl_bits(cctl, bd.srcbus.buswidth, 1019 cctl = pl08x_cctl_bits(cctl, bd.srcbus.buswidth,
1020 bd.dstbus.buswidth, 0); 1020 bd.dstbus.buswidth, 0);
1021 pl08x_fill_lli_for_desc(pl08x, &bd, num_llis++, 1021 pl08x_fill_lli_for_desc(pl08x, &bd, num_llis++,
1022 0, cctl, 0); 1022 0, cctl, 0);
1023 break; 1023 break;
1024 } 1024 }
1025 1025
1026 /* 1026 /*
1027 * Send byte by byte in the following cases: 1027 * Send byte by byte in the following cases:
1028 * - less than one bus width is available 1028 * - less than one bus width is available
1029 * - until the master bus is aligned 1029 * - until the master bus is aligned
1030 */ 1030 */
1031 if (bd.remainder < mbus->buswidth) 1031 if (bd.remainder < mbus->buswidth)
1032 early_bytes = bd.remainder; 1032 early_bytes = bd.remainder;
1033 else if (!IS_BUS_ALIGNED(mbus)) { 1033 else if (!IS_BUS_ALIGNED(mbus)) {
1034 early_bytes = mbus->buswidth - 1034 early_bytes = mbus->buswidth -
1035 (mbus->addr & (mbus->buswidth - 1)); 1035 (mbus->addr & (mbus->buswidth - 1));
1036 if ((bd.remainder - early_bytes) < mbus->buswidth) 1036 if ((bd.remainder - early_bytes) < mbus->buswidth)
1037 early_bytes = bd.remainder; 1037 early_bytes = bd.remainder;
1038 } 1038 }
1039 1039
1040 if (early_bytes) { 1040 if (early_bytes) {
1041 dev_vdbg(&pl08x->adev->dev, 1041 dev_vdbg(&pl08x->adev->dev,
1042 "%s byte width LLIs (remain 0x%08x)\n", 1042 "%s byte width LLIs (remain 0x%08x)\n",
1043 __func__, bd.remainder); 1043 __func__, bd.remainder);
1044 prep_byte_width_lli(pl08x, &bd, &cctl, early_bytes, 1044 prep_byte_width_lli(pl08x, &bd, &cctl, early_bytes,
1045 num_llis++, &total_bytes); 1045 num_llis++, &total_bytes);
1046 } 1046 }
1047 1047
1048 if (bd.remainder) { 1048 if (bd.remainder) {
1049 /* 1049 /*
1050 * The master is now aligned 1050 * The master is now aligned
1051 * - if the slave is not, we must set its width down 1051 * - if the slave is not, we must set its width down
1052 */ 1052 */
1053 if (!IS_BUS_ALIGNED(sbus)) { 1053 if (!IS_BUS_ALIGNED(sbus)) {
1054 dev_dbg(&pl08x->adev->dev, 1054 dev_dbg(&pl08x->adev->dev,
1055 "%s set down bus width to one byte\n", 1055 "%s set down bus width to one byte\n",
1056 __func__); 1056 __func__);
1057 1057
1058 sbus->buswidth = 1; 1058 sbus->buswidth = 1;
1059 } 1059 }
1060 1060
1061 /* 1061 /*
1062 * Bytes transferred = tsize * src width, not 1062 * Bytes transferred = tsize * src width, not
1063 * MIN(buswidths) 1063 * MIN(buswidths)
1064 */ 1064 */
1065 max_bytes_per_lli = bd.srcbus.buswidth * 1065 max_bytes_per_lli = bd.srcbus.buswidth *
1066 pl08x->vd->max_transfer_size; 1066 pl08x->vd->max_transfer_size;
1067 dev_vdbg(&pl08x->adev->dev, 1067 dev_vdbg(&pl08x->adev->dev,
1068 "%s max bytes per lli = %zu\n", 1068 "%s max bytes per lli = %zu\n",
1069 __func__, max_bytes_per_lli); 1069 __func__, max_bytes_per_lli);
1070 1070
1071 /* 1071 /*
1072 * Make largest possible LLIs until less than one bus 1072 * Make largest possible LLIs until less than one bus
1073 * width left 1073 * width left
1074 */ 1074 */
1075 while (bd.remainder > (mbus->buswidth - 1)) { 1075 while (bd.remainder > (mbus->buswidth - 1)) {
1076 size_t lli_len, tsize, width; 1076 size_t lli_len, tsize, width;
1077 1077
1078 /* 1078 /*
1079 * If enough left try to send max possible, 1079 * If enough left try to send max possible,
1080 * otherwise try to send the remainder 1080 * otherwise try to send the remainder
1081 */ 1081 */
1082 lli_len = min(bd.remainder, max_bytes_per_lli); 1082 lli_len = min(bd.remainder, max_bytes_per_lli);
1083 1083
1084 /* 1084 /*
1085 * Check against maximum bus alignment: 1085 * Check against maximum bus alignment:
1086 * Calculate actual transfer size in relation to 1086 * Calculate actual transfer size in relation to
1087 * bus width and get a maximum remainder of the 1087 * bus width and get a maximum remainder of the
1088 * highest bus width - 1 1088 * highest bus width - 1
1089 */ 1089 */
1090 width = max(mbus->buswidth, sbus->buswidth); 1090 width = max(mbus->buswidth, sbus->buswidth);
1091 lli_len = (lli_len / width) * width; 1091 lli_len = (lli_len / width) * width;
1092 tsize = lli_len / bd.srcbus.buswidth; 1092 tsize = lli_len / bd.srcbus.buswidth;
1093 1093
1094 dev_vdbg(&pl08x->adev->dev, 1094 dev_vdbg(&pl08x->adev->dev,
1095 "%s fill lli with single lli chunk of " 1095 "%s fill lli with single lli chunk of "
1096 "size 0x%08zx (remainder 0x%08zx)\n", 1096 "size 0x%08zx (remainder 0x%08zx)\n",
1097 __func__, lli_len, bd.remainder); 1097 __func__, lli_len, bd.remainder);
1098 1098
1099 cctl = pl08x_cctl_bits(cctl, bd.srcbus.buswidth, 1099 cctl = pl08x_cctl_bits(cctl, bd.srcbus.buswidth,
1100 bd.dstbus.buswidth, tsize); 1100 bd.dstbus.buswidth, tsize);
1101 pl08x_fill_lli_for_desc(pl08x, &bd, num_llis++, 1101 pl08x_fill_lli_for_desc(pl08x, &bd, num_llis++,
1102 lli_len, cctl, tsize); 1102 lli_len, cctl, tsize);
1103 total_bytes += lli_len; 1103 total_bytes += lli_len;
1104 } 1104 }
1105 1105
1106 /* 1106 /*
1107 * Send any odd bytes 1107 * Send any odd bytes
1108 */ 1108 */
1109 if (bd.remainder) { 1109 if (bd.remainder) {
1110 dev_vdbg(&pl08x->adev->dev, 1110 dev_vdbg(&pl08x->adev->dev,
1111 "%s align with boundary, send odd bytes (remain %zu)\n", 1111 "%s align with boundary, send odd bytes (remain %zu)\n",
1112 __func__, bd.remainder); 1112 __func__, bd.remainder);
1113 prep_byte_width_lli(pl08x, &bd, &cctl, 1113 prep_byte_width_lli(pl08x, &bd, &cctl,
1114 bd.remainder, num_llis++, &total_bytes); 1114 bd.remainder, num_llis++, &total_bytes);
1115 } 1115 }
1116 } 1116 }
1117 1117
1118 if (total_bytes != dsg->len) { 1118 if (total_bytes != dsg->len) {
1119 dev_err(&pl08x->adev->dev, 1119 dev_err(&pl08x->adev->dev,
1120 "%s size of encoded lli:s don't match total txd, transferred 0x%08zx from size 0x%08zx\n", 1120 "%s size of encoded lli:s don't match total txd, transferred 0x%08zx from size 0x%08zx\n",
1121 __func__, total_bytes, dsg->len); 1121 __func__, total_bytes, dsg->len);
1122 return 0; 1122 return 0;
1123 } 1123 }
1124 1124
1125 if (num_llis >= MAX_NUM_TSFR_LLIS) { 1125 if (num_llis >= MAX_NUM_TSFR_LLIS) {
1126 dev_err(&pl08x->adev->dev, 1126 dev_err(&pl08x->adev->dev,
1127 "%s need to increase MAX_NUM_TSFR_LLIS from 0x%08x\n", 1127 "%s need to increase MAX_NUM_TSFR_LLIS from 0x%08x\n",
1128 __func__, MAX_NUM_TSFR_LLIS); 1128 __func__, MAX_NUM_TSFR_LLIS);
1129 return 0; 1129 return 0;
1130 } 1130 }
1131 } 1131 }
1132 1132
1133 llis_va = txd->llis_va; 1133 llis_va = txd->llis_va;
1134 last_lli = llis_va + (num_llis - 1) * pl08x->lli_words; 1134 last_lli = llis_va + (num_llis - 1) * pl08x->lli_words;
1135 1135
1136 if (txd->cyclic) { 1136 if (txd->cyclic) {
1137 /* Link back to the first LLI. */ 1137 /* Link back to the first LLI. */
1138 last_lli[PL080_LLI_LLI] = txd->llis_bus | bd.lli_bus; 1138 last_lli[PL080_LLI_LLI] = txd->llis_bus | bd.lli_bus;
1139 } else { 1139 } else {
1140 /* The final LLI terminates the LLI list. */ 1140 /* The final LLI terminates the LLI list. */
1141 last_lli[PL080_LLI_LLI] = 0; 1141 last_lli[PL080_LLI_LLI] = 0;
1142 /* The final LLI element shall also fire an interrupt. */ 1142 /* The final LLI element shall also fire an interrupt. */
1143 last_lli[PL080_LLI_CCTL] |= PL080_CONTROL_TC_IRQ_EN; 1143 last_lli[PL080_LLI_CCTL] |= PL080_CONTROL_TC_IRQ_EN;
1144 } 1144 }
1145 1145
1146 pl08x_dump_lli(pl08x, llis_va, num_llis); 1146 pl08x_dump_lli(pl08x, llis_va, num_llis);
1147 1147
1148 return num_llis; 1148 return num_llis;
1149 } 1149 }
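The overall splitting policy per segment can be summarised in a small standalone sketch (illustrative only; it ignores the zero-length and PL080S special cases and assumes max_per_lli is at least one bus width): a byte-wide head until the master address is aligned, then maximally sized width-aligned LLIs, then a byte-wide tail.

#include <stdio.h>

/* Split one segment into the three phases used above. */
static void split_segment(unsigned long addr, unsigned long len,
                          unsigned buswidth, unsigned long max_per_lli)
{
        unsigned long early = 0, full = 0, tail;

        /* byte-wide transfers until the master address is aligned */
        if (len < buswidth) {
                early = len;
        } else if (addr & (buswidth - 1)) {
                early = buswidth - (addr & (buswidth - 1));
                if (len - early < buswidth)
                        early = len;
        }
        len -= early;

        /* full-width LLIs, each a multiple of the bus width */
        while (len >= buswidth) {
                unsigned long chunk = len < max_per_lli ? len : max_per_lli;

                chunk = (chunk / buswidth) * buswidth;
                len -= chunk;
                full++;
        }
        tail = len;     /* any odd bytes go byte by byte */

        printf("early=%lu bytes, %lu full LLIs, tail=%lu bytes\n",
               early, full, tail);
}

int main(void)
{
        /* e.g. an unaligned 4-byte-wide copy of 4099 bytes starting at ...3:
         * prints "early=1 bytes, 1 full LLIs, tail=2 bytes" */
        split_segment(0x1003, 4099, 4, 4096);
        return 0;
}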
1150 1150
1151 static void pl08x_free_txd(struct pl08x_driver_data *pl08x, 1151 static void pl08x_free_txd(struct pl08x_driver_data *pl08x,
1152 struct pl08x_txd *txd) 1152 struct pl08x_txd *txd)
1153 { 1153 {
1154 struct pl08x_sg *dsg, *_dsg; 1154 struct pl08x_sg *dsg, *_dsg;
1155 1155
1156 if (txd->llis_va) 1156 if (txd->llis_va)
1157 dma_pool_free(pl08x->pool, txd->llis_va, txd->llis_bus); 1157 dma_pool_free(pl08x->pool, txd->llis_va, txd->llis_bus);
1158 1158
1159 list_for_each_entry_safe(dsg, _dsg, &txd->dsg_list, node) { 1159 list_for_each_entry_safe(dsg, _dsg, &txd->dsg_list, node) {
1160 list_del(&dsg->node); 1160 list_del(&dsg->node);
1161 kfree(dsg); 1161 kfree(dsg);
1162 } 1162 }
1163 1163
1164 kfree(txd); 1164 kfree(txd);
1165 } 1165 }
1166 1166
1167 static void pl08x_unmap_buffers(struct pl08x_txd *txd) 1167 static void pl08x_unmap_buffers(struct pl08x_txd *txd)
1168 { 1168 {
1169 struct device *dev = txd->vd.tx.chan->device->dev; 1169 struct device *dev = txd->vd.tx.chan->device->dev;
1170 struct pl08x_sg *dsg; 1170 struct pl08x_sg *dsg;
1171 1171
1172 if (!(txd->vd.tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) { 1172 if (!(txd->vd.tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
1173 if (txd->vd.tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE) 1173 if (txd->vd.tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE)
1174 list_for_each_entry(dsg, &txd->dsg_list, node) 1174 list_for_each_entry(dsg, &txd->dsg_list, node)
1175 dma_unmap_single(dev, dsg->src_addr, dsg->len, 1175 dma_unmap_single(dev, dsg->src_addr, dsg->len,
1176 DMA_TO_DEVICE); 1176 DMA_TO_DEVICE);
1177 else { 1177 else {
1178 list_for_each_entry(dsg, &txd->dsg_list, node) 1178 list_for_each_entry(dsg, &txd->dsg_list, node)
1179 dma_unmap_page(dev, dsg->src_addr, dsg->len, 1179 dma_unmap_page(dev, dsg->src_addr, dsg->len,
1180 DMA_TO_DEVICE); 1180 DMA_TO_DEVICE);
1181 } 1181 }
1182 } 1182 }
1183 if (!(txd->vd.tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) { 1183 if (!(txd->vd.tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
1184 if (txd->vd.tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE) 1184 if (txd->vd.tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE)
1185 list_for_each_entry(dsg, &txd->dsg_list, node) 1185 list_for_each_entry(dsg, &txd->dsg_list, node)
1186 dma_unmap_single(dev, dsg->dst_addr, dsg->len, 1186 dma_unmap_single(dev, dsg->dst_addr, dsg->len,
1187 DMA_FROM_DEVICE); 1187 DMA_FROM_DEVICE);
1188 else 1188 else
1189 list_for_each_entry(dsg, &txd->dsg_list, node) 1189 list_for_each_entry(dsg, &txd->dsg_list, node)
1190 dma_unmap_page(dev, dsg->dst_addr, dsg->len, 1190 dma_unmap_page(dev, dsg->dst_addr, dsg->len,
1191 DMA_FROM_DEVICE); 1191 DMA_FROM_DEVICE);
1192 } 1192 }
1193 } 1193 }
1194 1194
1195 static void pl08x_desc_free(struct virt_dma_desc *vd) 1195 static void pl08x_desc_free(struct virt_dma_desc *vd)
1196 { 1196 {
1197 struct pl08x_txd *txd = to_pl08x_txd(&vd->tx); 1197 struct pl08x_txd *txd = to_pl08x_txd(&vd->tx);
1198 struct pl08x_dma_chan *plchan = to_pl08x_chan(vd->tx.chan); 1198 struct pl08x_dma_chan *plchan = to_pl08x_chan(vd->tx.chan);
1199 1199
1200 if (!plchan->slave) 1200 if (!plchan->slave)
1201 pl08x_unmap_buffers(txd); 1201 pl08x_unmap_buffers(txd);
1202 1202
1203 if (!txd->done) 1203 if (!txd->done)
1204 pl08x_release_mux(plchan); 1204 pl08x_release_mux(plchan);
1205 1205
1206 pl08x_free_txd(plchan->host, txd); 1206 pl08x_free_txd(plchan->host, txd);
1207 } 1207 }
1208 1208
1209 static void pl08x_free_txd_list(struct pl08x_driver_data *pl08x, 1209 static void pl08x_free_txd_list(struct pl08x_driver_data *pl08x,
1210 struct pl08x_dma_chan *plchan) 1210 struct pl08x_dma_chan *plchan)
1211 { 1211 {
1212 LIST_HEAD(head); 1212 LIST_HEAD(head);
1213 1213
1214 vchan_get_all_descriptors(&plchan->vc, &head); 1214 vchan_get_all_descriptors(&plchan->vc, &head);
1215 vchan_dma_desc_free_list(&plchan->vc, &head); 1215 vchan_dma_desc_free_list(&plchan->vc, &head);
1216 } 1216 }
1217 1217
1218 /* 1218 /*
1219 * The DMA ENGINE API 1219 * The DMA ENGINE API
1220 */ 1220 */
1221 static int pl08x_alloc_chan_resources(struct dma_chan *chan) 1221 static int pl08x_alloc_chan_resources(struct dma_chan *chan)
1222 { 1222 {
1223 return 0; 1223 return 0;
1224 } 1224 }
1225 1225
1226 static void pl08x_free_chan_resources(struct dma_chan *chan) 1226 static void pl08x_free_chan_resources(struct dma_chan *chan)
1227 { 1227 {
1228 /* Ensure all queued descriptors are freed */ 1228 /* Ensure all queued descriptors are freed */
1229 vchan_free_chan_resources(to_virt_chan(chan)); 1229 vchan_free_chan_resources(to_virt_chan(chan));
1230 } 1230 }
1231 1231
1232 static struct dma_async_tx_descriptor *pl08x_prep_dma_interrupt( 1232 static struct dma_async_tx_descriptor *pl08x_prep_dma_interrupt(
1233 struct dma_chan *chan, unsigned long flags) 1233 struct dma_chan *chan, unsigned long flags)
1234 { 1234 {
1235 struct dma_async_tx_descriptor *retval = NULL; 1235 struct dma_async_tx_descriptor *retval = NULL;
1236 1236
1237 return retval; 1237 return retval;
1238 } 1238 }
1239 1239
1240 /* 1240 /*
1241 * Code accessing dma_async_is_complete() in a tight loop may cause problems. 1241 * Code accessing dma_async_is_complete() in a tight loop may cause problems.
1242 * If slaves are relying on interrupts to signal completion this function 1242 * If slaves are relying on interrupts to signal completion this function
1243 * must not be called with interrupts disabled. 1243 * must not be called with interrupts disabled.
1244 */ 1244 */
1245 static enum dma_status pl08x_dma_tx_status(struct dma_chan *chan, 1245 static enum dma_status pl08x_dma_tx_status(struct dma_chan *chan,
1246 dma_cookie_t cookie, struct dma_tx_state *txstate) 1246 dma_cookie_t cookie, struct dma_tx_state *txstate)
1247 { 1247 {
1248 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); 1248 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
1249 struct virt_dma_desc *vd; 1249 struct virt_dma_desc *vd;
1250 unsigned long flags; 1250 unsigned long flags;
1251 enum dma_status ret; 1251 enum dma_status ret;
1252 size_t bytes = 0; 1252 size_t bytes = 0;
1253 1253
1254 ret = dma_cookie_status(chan, cookie, txstate); 1254 ret = dma_cookie_status(chan, cookie, txstate);
1255 if (ret == DMA_SUCCESS) 1255 if (ret == DMA_COMPLETE)
1256 return ret; 1256 return ret;
1257 1257
1258 /* 1258 /*
1259 * There's no point calculating the residue if there's 1259 * There's no point calculating the residue if there's
1260 * no txstate to store the value. 1260 * no txstate to store the value.
1261 */ 1261 */
1262 if (!txstate) { 1262 if (!txstate) {
1263 if (plchan->state == PL08X_CHAN_PAUSED) 1263 if (plchan->state == PL08X_CHAN_PAUSED)
1264 ret = DMA_PAUSED; 1264 ret = DMA_PAUSED;
1265 return ret; 1265 return ret;
1266 } 1266 }
1267 1267
1268 spin_lock_irqsave(&plchan->vc.lock, flags); 1268 spin_lock_irqsave(&plchan->vc.lock, flags);
1269 ret = dma_cookie_status(chan, cookie, txstate); 1269 ret = dma_cookie_status(chan, cookie, txstate);
1270 if (ret != DMA_SUCCESS) { 1270 if (ret != DMA_COMPLETE) {
1271 vd = vchan_find_desc(&plchan->vc, cookie); 1271 vd = vchan_find_desc(&plchan->vc, cookie);
1272 if (vd) { 1272 if (vd) {
1273 /* On the issued list, so hasn't been processed yet */ 1273 /* On the issued list, so hasn't been processed yet */
1274 struct pl08x_txd *txd = to_pl08x_txd(&vd->tx); 1274 struct pl08x_txd *txd = to_pl08x_txd(&vd->tx);
1275 struct pl08x_sg *dsg; 1275 struct pl08x_sg *dsg;
1276 1276
1277 list_for_each_entry(dsg, &txd->dsg_list, node) 1277 list_for_each_entry(dsg, &txd->dsg_list, node)
1278 bytes += dsg->len; 1278 bytes += dsg->len;
1279 } else { 1279 } else {
1280 bytes = pl08x_getbytes_chan(plchan); 1280 bytes = pl08x_getbytes_chan(plchan);
1281 } 1281 }
1282 } 1282 }
1283 spin_unlock_irqrestore(&plchan->vc.lock, flags); 1283 spin_unlock_irqrestore(&plchan->vc.lock, flags);
1284 1284
1285 /* 1285 /*
1286 * This cookie is not complete yet. 1286 * This cookie is not complete yet.
1287 * Get the number of bytes left in the active transactions and queue. 1287 * Get the number of bytes left in the active transactions and queue.
1288 */ 1288 */
1289 dma_set_residue(txstate, bytes); 1289 dma_set_residue(txstate, bytes);
1290 1290
1291 if (plchan->state == PL08X_CHAN_PAUSED && ret == DMA_IN_PROGRESS) 1291 if (plchan->state == PL08X_CHAN_PAUSED && ret == DMA_IN_PROGRESS)
1292 ret = DMA_PAUSED; 1292 ret = DMA_PAUSED;
1293 1293
1294 /* Whether waiting or running, we're in progress */ 1294 /* Whether waiting or running, we're in progress */
1295 return ret; 1295 return ret;
1296 } 1296 }
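From a client driver's point of view, the residue computed here surfaces through the generic dmaengine status call. A hypothetical consumer fragment might look like the sketch below; it is not buildable on its own, and check_progress is an invented name, but dmaengine_tx_status() and struct dma_tx_state are the standard API of this era.

#include <linux/dmaengine.h>

/* Hypothetical fragment: chan and cookie come from an earlier
 * dma_request_slave_channel() / dmaengine_submit() in the client. */
static void check_progress(struct dma_chan *chan, dma_cookie_t cookie)
{
        struct dma_tx_state state;
        enum dma_status status;

        status = dmaengine_tx_status(chan, cookie, &state);
        if (status == DMA_COMPLETE)
                pr_info("transfer complete\n");
        else
                pr_info("status %d, residue %u bytes\n", status, state.residue);
}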
1297 1297
1298 /* PrimeCell DMA extension */ 1298 /* PrimeCell DMA extension */
1299 struct burst_table { 1299 struct burst_table {
1300 u32 burstwords; 1300 u32 burstwords;
1301 u32 reg; 1301 u32 reg;
1302 }; 1302 };
1303 1303
1304 static const struct burst_table burst_sizes[] = { 1304 static const struct burst_table burst_sizes[] = {
1305 { 1305 {
1306 .burstwords = 256, 1306 .burstwords = 256,
1307 .reg = PL080_BSIZE_256, 1307 .reg = PL080_BSIZE_256,
1308 }, 1308 },
1309 { 1309 {
1310 .burstwords = 128, 1310 .burstwords = 128,
1311 .reg = PL080_BSIZE_128, 1311 .reg = PL080_BSIZE_128,
1312 }, 1312 },
1313 { 1313 {
1314 .burstwords = 64, 1314 .burstwords = 64,
1315 .reg = PL080_BSIZE_64, 1315 .reg = PL080_BSIZE_64,
1316 }, 1316 },
1317 { 1317 {
1318 .burstwords = 32, 1318 .burstwords = 32,
1319 .reg = PL080_BSIZE_32, 1319 .reg = PL080_BSIZE_32,
1320 }, 1320 },
1321 { 1321 {
1322 .burstwords = 16, 1322 .burstwords = 16,
1323 .reg = PL080_BSIZE_16, 1323 .reg = PL080_BSIZE_16,
1324 }, 1324 },
1325 { 1325 {
1326 .burstwords = 8, 1326 .burstwords = 8,
1327 .reg = PL080_BSIZE_8, 1327 .reg = PL080_BSIZE_8,
1328 }, 1328 },
1329 { 1329 {
1330 .burstwords = 4, 1330 .burstwords = 4,
1331 .reg = PL080_BSIZE_4, 1331 .reg = PL080_BSIZE_4,
1332 }, 1332 },
1333 { 1333 {
1334 .burstwords = 0, 1334 .burstwords = 0,
1335 .reg = PL080_BSIZE_1, 1335 .reg = PL080_BSIZE_1,
1336 }, 1336 },
1337 }; 1337 };
1338 1338
1339 /* 1339 /*
1340 * Given the source and destination available bus masks, select which 1340 * Given the source and destination available bus masks, select which
1341 * will be routed to each port. We try to have source and destination 1341 * will be routed to each port. We try to have source and destination
1342 * on separate ports, but always respect the allowable settings. 1342 * on separate ports, but always respect the allowable settings.
1343 */ 1343 */
1344 static u32 pl08x_select_bus(u8 src, u8 dst) 1344 static u32 pl08x_select_bus(u8 src, u8 dst)
1345 { 1345 {
1346 u32 cctl = 0; 1346 u32 cctl = 0;
1347 1347
1348 if (!(dst & PL08X_AHB1) || ((dst & PL08X_AHB2) && (src & PL08X_AHB1))) 1348 if (!(dst & PL08X_AHB1) || ((dst & PL08X_AHB2) && (src & PL08X_AHB1)))
1349 cctl |= PL080_CONTROL_DST_AHB2; 1349 cctl |= PL080_CONTROL_DST_AHB2;
1350 if (!(src & PL08X_AHB1) || ((src & PL08X_AHB2) && !(dst & PL08X_AHB2))) 1350 if (!(src & PL08X_AHB1) || ((src & PL08X_AHB2) && !(dst & PL08X_AHB2)))
1351 cctl |= PL080_CONTROL_SRC_AHB2; 1351 cctl |= PL080_CONTROL_SRC_AHB2;
1352 1352
1353 return cctl; 1353 return cctl;
1354 } 1354 }
1355 1355
1356 static u32 pl08x_cctl(u32 cctl) 1356 static u32 pl08x_cctl(u32 cctl)
1357 { 1357 {
1358 cctl &= ~(PL080_CONTROL_SRC_AHB2 | PL080_CONTROL_DST_AHB2 | 1358 cctl &= ~(PL080_CONTROL_SRC_AHB2 | PL080_CONTROL_DST_AHB2 |
1359 PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR | 1359 PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR |
1360 PL080_CONTROL_PROT_MASK); 1360 PL080_CONTROL_PROT_MASK);
1361 1361
1362 /* Access the cell in privileged mode, non-bufferable, non-cacheable */ 1362 /* Access the cell in privileged mode, non-bufferable, non-cacheable */
1363 return cctl | PL080_CONTROL_PROT_SYS; 1363 return cctl | PL080_CONTROL_PROT_SYS;
1364 } 1364 }
1365 1365
1366 static u32 pl08x_width(enum dma_slave_buswidth width) 1366 static u32 pl08x_width(enum dma_slave_buswidth width)
1367 { 1367 {
1368 switch (width) { 1368 switch (width) {
1369 case DMA_SLAVE_BUSWIDTH_1_BYTE: 1369 case DMA_SLAVE_BUSWIDTH_1_BYTE:
1370 return PL080_WIDTH_8BIT; 1370 return PL080_WIDTH_8BIT;
1371 case DMA_SLAVE_BUSWIDTH_2_BYTES: 1371 case DMA_SLAVE_BUSWIDTH_2_BYTES:
1372 return PL080_WIDTH_16BIT; 1372 return PL080_WIDTH_16BIT;
1373 case DMA_SLAVE_BUSWIDTH_4_BYTES: 1373 case DMA_SLAVE_BUSWIDTH_4_BYTES:
1374 return PL080_WIDTH_32BIT; 1374 return PL080_WIDTH_32BIT;
1375 default: 1375 default:
1376 return ~0; 1376 return ~0;
1377 } 1377 }
1378 } 1378 }
1379 1379
1380 static u32 pl08x_burst(u32 maxburst) 1380 static u32 pl08x_burst(u32 maxburst)
1381 { 1381 {
1382 int i; 1382 int i;
1383 1383
1384 for (i = 0; i < ARRAY_SIZE(burst_sizes); i++) 1384 for (i = 0; i < ARRAY_SIZE(burst_sizes); i++)
1385 if (burst_sizes[i].burstwords <= maxburst) 1385 if (burst_sizes[i].burstwords <= maxburst)
1386 break; 1386 break;
1387 1387
1388 return burst_sizes[i].reg; 1388 return burst_sizes[i].reg;
1389 } 1389 }
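The descending table lookup is easy to see in isolation: pick the largest burst size that does not exceed the requested maxburst, falling through to the final single-transfer entry. The sketch below mirrors that loop with illustrative names instead of the PL080 register values.

#include <stdio.h>

/* Descending table; the terminating { 0, ... } entry guarantees a match. */
static const struct { unsigned words; const char *name; } bursts[] = {
        { 256, "BSIZE_256" }, { 128, "BSIZE_128" }, { 64, "BSIZE_64" },
        { 32,  "BSIZE_32"  }, { 16,  "BSIZE_16"  }, { 8,  "BSIZE_8"  },
        { 4,   "BSIZE_4"   }, { 0,   "BSIZE_1"   },
};

static const char *pick_burst(unsigned maxburst)
{
        unsigned i;

        for (i = 0; i < sizeof(bursts) / sizeof(bursts[0]); i++)
                if (bursts[i].words <= maxburst)
                        break;
        return bursts[i].name;
}

int main(void)
{
        printf("maxburst 100 -> %s\n", pick_burst(100));  /* BSIZE_64 */
        printf("maxburst 1   -> %s\n", pick_burst(1));    /* BSIZE_1  */
        return 0;
}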
1390 1390
1391 static u32 pl08x_get_cctl(struct pl08x_dma_chan *plchan, 1391 static u32 pl08x_get_cctl(struct pl08x_dma_chan *plchan,
1392 enum dma_slave_buswidth addr_width, u32 maxburst) 1392 enum dma_slave_buswidth addr_width, u32 maxburst)
1393 { 1393 {
1394 u32 width, burst, cctl = 0; 1394 u32 width, burst, cctl = 0;
1395 1395
1396 width = pl08x_width(addr_width); 1396 width = pl08x_width(addr_width);
1397 if (width == ~0) 1397 if (width == ~0)
1398 return ~0; 1398 return ~0;
1399 1399
1400 cctl |= width << PL080_CONTROL_SWIDTH_SHIFT; 1400 cctl |= width << PL080_CONTROL_SWIDTH_SHIFT;
1401 cctl |= width << PL080_CONTROL_DWIDTH_SHIFT; 1401 cctl |= width << PL080_CONTROL_DWIDTH_SHIFT;
1402 1402
1403 /* 1403 /*
1404 * If this channel will only request single transfers, set this 1404 * If this channel will only request single transfers, set this
1405 * down to ONE element. Also select one element if no maxburst 1405 * down to ONE element. Also select one element if no maxburst
1406 * is specified. 1406 * is specified.
1407 */ 1407 */
1408 if (plchan->cd->single) 1408 if (plchan->cd->single)
1409 maxburst = 1; 1409 maxburst = 1;
1410 1410
1411 burst = pl08x_burst(maxburst); 1411 burst = pl08x_burst(maxburst);
1412 cctl |= burst << PL080_CONTROL_SB_SIZE_SHIFT; 1412 cctl |= burst << PL080_CONTROL_SB_SIZE_SHIFT;
1413 cctl |= burst << PL080_CONTROL_DB_SIZE_SHIFT; 1413 cctl |= burst << PL080_CONTROL_DB_SIZE_SHIFT;
1414 1414
1415 return pl08x_cctl(cctl); 1415 return pl08x_cctl(cctl);
1416 } 1416 }
1417 1417
1418 static int dma_set_runtime_config(struct dma_chan *chan, 1418 static int dma_set_runtime_config(struct dma_chan *chan,
1419 struct dma_slave_config *config) 1419 struct dma_slave_config *config)
1420 { 1420 {
1421 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); 1421 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
1422 struct pl08x_driver_data *pl08x = plchan->host; 1422 struct pl08x_driver_data *pl08x = plchan->host;
1423 1423
1424 if (!plchan->slave) 1424 if (!plchan->slave)
1425 return -EINVAL; 1425 return -EINVAL;
1426 1426
1427 /* Reject definitely invalid configurations */ 1427 /* Reject definitely invalid configurations */
1428 if (config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES || 1428 if (config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
1429 config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES) 1429 config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
1430 return -EINVAL; 1430 return -EINVAL;
1431 1431
1432 if (config->device_fc && pl08x->vd->pl080s) { 1432 if (config->device_fc && pl08x->vd->pl080s) {
1433 dev_err(&pl08x->adev->dev, 1433 dev_err(&pl08x->adev->dev,
1434 "%s: PL080S does not support peripheral flow control\n", 1434 "%s: PL080S does not support peripheral flow control\n",
1435 __func__); 1435 __func__);
1436 return -EINVAL; 1436 return -EINVAL;
1437 } 1437 }
1438 1438
1439 plchan->cfg = *config; 1439 plchan->cfg = *config;
1440 1440
1441 return 0; 1441 return 0;
1442 } 1442 }
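On the other side of this call, a peripheral driver fills in a struct dma_slave_config and hands it over through the generic dmaengine helper. A hedged, hypothetical fragment (the FIFO address, width and burst are invented for illustration, and the function is not buildable standalone):

#include <linux/dmaengine.h>

/* Hypothetical fragment: configure an RX channel for a 32-bit FIFO. */
static int example_cfg_rx(struct dma_chan *chan, dma_addr_t fifo_addr)
{
        struct dma_slave_config cfg = {
                .direction      = DMA_DEV_TO_MEM,
                .src_addr       = fifo_addr,
                .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
                .src_maxburst   = 4,
        };

        /* Reaches dma_set_runtime_config() above via the driver's control hook */
        return dmaengine_slave_config(chan, &cfg);
}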
1443 1443
1444 /* 1444 /*
1445 * Slave transactions call back to the slave device to allow 1445 * Slave transactions call back to the slave device to allow
1446 * synchronization of slave DMA signals with the DMAC enable 1446 * synchronization of slave DMA signals with the DMAC enable
1447 */ 1447 */
1448 static void pl08x_issue_pending(struct dma_chan *chan) 1448 static void pl08x_issue_pending(struct dma_chan *chan)
1449 { 1449 {
1450 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); 1450 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
1451 unsigned long flags; 1451 unsigned long flags;
1452 1452
1453 spin_lock_irqsave(&plchan->vc.lock, flags); 1453 spin_lock_irqsave(&plchan->vc.lock, flags);
1454 if (vchan_issue_pending(&plchan->vc)) { 1454 if (vchan_issue_pending(&plchan->vc)) {
1455 if (!plchan->phychan && plchan->state != PL08X_CHAN_WAITING) 1455 if (!plchan->phychan && plchan->state != PL08X_CHAN_WAITING)
1456 pl08x_phy_alloc_and_start(plchan); 1456 pl08x_phy_alloc_and_start(plchan);
1457 } 1457 }
1458 spin_unlock_irqrestore(&plchan->vc.lock, flags); 1458 spin_unlock_irqrestore(&plchan->vc.lock, flags);
1459 } 1459 }
1460 1460
1461 static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan) 1461 static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan)
1462 { 1462 {
1463 struct pl08x_txd *txd = kzalloc(sizeof(*txd), GFP_NOWAIT); 1463 struct pl08x_txd *txd = kzalloc(sizeof(*txd), GFP_NOWAIT);
1464 1464
1465 if (txd) { 1465 if (txd) {
1466 INIT_LIST_HEAD(&txd->dsg_list); 1466 INIT_LIST_HEAD(&txd->dsg_list);
1467 1467
1468 /* Always enable error and terminal interrupts */ 1468 /* Always enable error and terminal interrupts */
1469 txd->ccfg = PL080_CONFIG_ERR_IRQ_MASK | 1469 txd->ccfg = PL080_CONFIG_ERR_IRQ_MASK |
1470 PL080_CONFIG_TC_IRQ_MASK; 1470 PL080_CONFIG_TC_IRQ_MASK;
1471 } 1471 }
1472 return txd; 1472 return txd;
1473 } 1473 }
1474 1474
1475 /* 1475 /*
1476 * Initialize a descriptor to be used by memcpy submit 1476 * Initialize a descriptor to be used by memcpy submit
1477 */ 1477 */
1478 static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy( 1478 static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
1479 struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, 1479 struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
1480 size_t len, unsigned long flags) 1480 size_t len, unsigned long flags)
1481 { 1481 {
1482 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); 1482 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
1483 struct pl08x_driver_data *pl08x = plchan->host; 1483 struct pl08x_driver_data *pl08x = plchan->host;
1484 struct pl08x_txd *txd; 1484 struct pl08x_txd *txd;
1485 struct pl08x_sg *dsg; 1485 struct pl08x_sg *dsg;
1486 int ret; 1486 int ret;
1487 1487
1488 txd = pl08x_get_txd(plchan); 1488 txd = pl08x_get_txd(plchan);
1489 if (!txd) { 1489 if (!txd) {
1490 dev_err(&pl08x->adev->dev, 1490 dev_err(&pl08x->adev->dev,
1491 "%s no memory for descriptor\n", __func__); 1491 "%s no memory for descriptor\n", __func__);
1492 return NULL; 1492 return NULL;
1493 } 1493 }
1494 1494
1495 dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT); 1495 dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT);
1496 if (!dsg) { 1496 if (!dsg) {
1497 pl08x_free_txd(pl08x, txd); 1497 pl08x_free_txd(pl08x, txd);
1498 dev_err(&pl08x->adev->dev, "%s no memory for pl080 sg\n", 1498 dev_err(&pl08x->adev->dev, "%s no memory for pl080 sg\n",
1499 __func__); 1499 __func__);
1500 return NULL; 1500 return NULL;
1501 } 1501 }
1502 list_add_tail(&dsg->node, &txd->dsg_list); 1502 list_add_tail(&dsg->node, &txd->dsg_list);
1503 1503
1504 dsg->src_addr = src; 1504 dsg->src_addr = src;
1505 dsg->dst_addr = dest; 1505 dsg->dst_addr = dest;
1506 dsg->len = len; 1506 dsg->len = len;
1507 1507
1508 /* Set platform data for m2m */ 1508 /* Set platform data for m2m */
1509 txd->ccfg |= PL080_FLOW_MEM2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT; 1509 txd->ccfg |= PL080_FLOW_MEM2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT;
1510 txd->cctl = pl08x->pd->memcpy_channel.cctl_memcpy & 1510 txd->cctl = pl08x->pd->memcpy_channel.cctl_memcpy &
1511 ~(PL080_CONTROL_DST_AHB2 | PL080_CONTROL_SRC_AHB2); 1511 ~(PL080_CONTROL_DST_AHB2 | PL080_CONTROL_SRC_AHB2);
1512 1512
1513 /* Both must be incremented or the code will break */ 1513 /* Both must be incremented or the code will break */
1514 txd->cctl |= PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR; 1514 txd->cctl |= PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR;
1515 1515
1516 if (pl08x->vd->dualmaster) 1516 if (pl08x->vd->dualmaster)
1517 txd->cctl |= pl08x_select_bus(pl08x->mem_buses, 1517 txd->cctl |= pl08x_select_bus(pl08x->mem_buses,
1518 pl08x->mem_buses); 1518 pl08x->mem_buses);
1519 1519
1520 ret = pl08x_fill_llis_for_desc(plchan->host, txd); 1520 ret = pl08x_fill_llis_for_desc(plchan->host, txd);
1521 if (!ret) { 1521 if (!ret) {
1522 pl08x_free_txd(pl08x, txd); 1522 pl08x_free_txd(pl08x, txd);
1523 return NULL; 1523 return NULL;
1524 } 1524 }
1525 1525
1526 return vchan_tx_prep(&plchan->vc, &txd->vd, flags); 1526 return vchan_tx_prep(&plchan->vc, &txd->vd, flags);
1527 } 1527 }
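For completeness, a hypothetical client-side fragment showing how such a memcpy descriptor is typically obtained, submitted and kicked. Error handling is trimmed, the helper name is made up, and the prep callback is reached through the channel's device as generic code of this era does; this is a sketch, not the driver's own usage.

#include <linux/dmaengine.h>

/* Hypothetical fragment: one DMA memcpy on an already requested channel.
 * dst and src are DMA addresses obtained from the DMA mapping API. */
static dma_cookie_t example_issue_copy(struct dma_chan *chan,
                                       dma_addr_t dst, dma_addr_t src,
                                       size_t len)
{
        struct dma_async_tx_descriptor *tx;
        dma_cookie_t cookie;

        tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
                                                  DMA_CTRL_ACK);
        if (!tx)
                return -ENOMEM;

        cookie = dmaengine_submit(tx);  /* queue on the virtual channel */
        dma_async_issue_pending(chan);  /* lets issue_pending start it */
        return cookie;
}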
1528 1528
1529 static struct pl08x_txd *pl08x_init_txd( 1529 static struct pl08x_txd *pl08x_init_txd(
1530 struct dma_chan *chan, 1530 struct dma_chan *chan,
1531 enum dma_transfer_direction direction, 1531 enum dma_transfer_direction direction,
1532 dma_addr_t *slave_addr) 1532 dma_addr_t *slave_addr)
1533 { 1533 {
1534 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); 1534 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
1535 struct pl08x_driver_data *pl08x = plchan->host; 1535 struct pl08x_driver_data *pl08x = plchan->host;
1536 struct pl08x_txd *txd; 1536 struct pl08x_txd *txd;
1537 enum dma_slave_buswidth addr_width; 1537 enum dma_slave_buswidth addr_width;
1538 int ret, tmp; 1538 int ret, tmp;
1539 u8 src_buses, dst_buses; 1539 u8 src_buses, dst_buses;
1540 u32 maxburst, cctl; 1540 u32 maxburst, cctl;
1541 1541
1542 txd = pl08x_get_txd(plchan); 1542 txd = pl08x_get_txd(plchan);
1543 if (!txd) { 1543 if (!txd) {
1544 dev_err(&pl08x->adev->dev, "%s no txd\n", __func__); 1544 dev_err(&pl08x->adev->dev, "%s no txd\n", __func__);
1545 return NULL; 1545 return NULL;
1546 } 1546 }
1547 1547
1548 /* 1548 /*
1549 * Set up the addresses; the PrimeCell-configured address 1549 * Set up the addresses; the PrimeCell-configured address
1550 * takes precedence, since this may configure the 1550 * takes precedence, since this may configure the
1551 * channel target address dynamically at runtime. 1551 * channel target address dynamically at runtime.
1552 */ 1552 */
1553 if (direction == DMA_MEM_TO_DEV) { 1553 if (direction == DMA_MEM_TO_DEV) {
1554 cctl = PL080_CONTROL_SRC_INCR; 1554 cctl = PL080_CONTROL_SRC_INCR;
1555 *slave_addr = plchan->cfg.dst_addr; 1555 *slave_addr = plchan->cfg.dst_addr;
1556 addr_width = plchan->cfg.dst_addr_width; 1556 addr_width = plchan->cfg.dst_addr_width;
1557 maxburst = plchan->cfg.dst_maxburst; 1557 maxburst = plchan->cfg.dst_maxburst;
1558 src_buses = pl08x->mem_buses; 1558 src_buses = pl08x->mem_buses;
1559 dst_buses = plchan->cd->periph_buses; 1559 dst_buses = plchan->cd->periph_buses;
1560 } else if (direction == DMA_DEV_TO_MEM) { 1560 } else if (direction == DMA_DEV_TO_MEM) {
1561 cctl = PL080_CONTROL_DST_INCR; 1561 cctl = PL080_CONTROL_DST_INCR;
1562 *slave_addr = plchan->cfg.src_addr; 1562 *slave_addr = plchan->cfg.src_addr;
1563 addr_width = plchan->cfg.src_addr_width; 1563 addr_width = plchan->cfg.src_addr_width;
1564 maxburst = plchan->cfg.src_maxburst; 1564 maxburst = plchan->cfg.src_maxburst;
1565 src_buses = plchan->cd->periph_buses; 1565 src_buses = plchan->cd->periph_buses;
1566 dst_buses = pl08x->mem_buses; 1566 dst_buses = pl08x->mem_buses;
1567 } else { 1567 } else {
1568 pl08x_free_txd(pl08x, txd); 1568 pl08x_free_txd(pl08x, txd);
1569 dev_err(&pl08x->adev->dev, 1569 dev_err(&pl08x->adev->dev,
1570 "%s direction unsupported\n", __func__); 1570 "%s direction unsupported\n", __func__);
1571 return NULL; 1571 return NULL;
1572 } 1572 }
1573 1573
1574 cctl |= pl08x_get_cctl(plchan, addr_width, maxburst); 1574 cctl |= pl08x_get_cctl(plchan, addr_width, maxburst);
1575 if (cctl == ~0) { 1575 if (cctl == ~0) {
1576 pl08x_free_txd(pl08x, txd); 1576 pl08x_free_txd(pl08x, txd);
1577 dev_err(&pl08x->adev->dev, 1577 dev_err(&pl08x->adev->dev,
1578 "DMA slave configuration botched?\n"); 1578 "DMA slave configuration botched?\n");
1579 return NULL; 1579 return NULL;
1580 } 1580 }
1581 1581
1582 txd->cctl = cctl | pl08x_select_bus(src_buses, dst_buses); 1582 txd->cctl = cctl | pl08x_select_bus(src_buses, dst_buses);
1583 1583
1584 if (plchan->cfg.device_fc) 1584 if (plchan->cfg.device_fc)
1585 tmp = (direction == DMA_MEM_TO_DEV) ? PL080_FLOW_MEM2PER_PER : 1585 tmp = (direction == DMA_MEM_TO_DEV) ? PL080_FLOW_MEM2PER_PER :
1586 PL080_FLOW_PER2MEM_PER; 1586 PL080_FLOW_PER2MEM_PER;
1587 else 1587 else
1588 tmp = (direction == DMA_MEM_TO_DEV) ? PL080_FLOW_MEM2PER : 1588 tmp = (direction == DMA_MEM_TO_DEV) ? PL080_FLOW_MEM2PER :
1589 PL080_FLOW_PER2MEM; 1589 PL080_FLOW_PER2MEM;
1590 1590
1591 txd->ccfg |= tmp << PL080_CONFIG_FLOW_CONTROL_SHIFT; 1591 txd->ccfg |= tmp << PL080_CONFIG_FLOW_CONTROL_SHIFT;
1592 1592
1593 ret = pl08x_request_mux(plchan); 1593 ret = pl08x_request_mux(plchan);
1594 if (ret < 0) { 1594 if (ret < 0) {
1595 pl08x_free_txd(pl08x, txd); 1595 pl08x_free_txd(pl08x, txd);
1596 dev_dbg(&pl08x->adev->dev, 1596 dev_dbg(&pl08x->adev->dev,
1597 "unable to mux for transfer on %s due to platform restrictions\n", 1597 "unable to mux for transfer on %s due to platform restrictions\n",
1598 plchan->name); 1598 plchan->name);
1599 return NULL; 1599 return NULL;
1600 } 1600 }
1601 1601
1602 dev_dbg(&pl08x->adev->dev, "allocated DMA request signal %d for xfer on %s\n", 1602 dev_dbg(&pl08x->adev->dev, "allocated DMA request signal %d for xfer on %s\n",
1603 plchan->signal, plchan->name); 1603 plchan->signal, plchan->name);
1604 1604
1605 /* Assign the flow control signal to this channel */ 1605 /* Assign the flow control signal to this channel */
1606 if (direction == DMA_MEM_TO_DEV) 1606 if (direction == DMA_MEM_TO_DEV)
1607 txd->ccfg |= plchan->signal << PL080_CONFIG_DST_SEL_SHIFT; 1607 txd->ccfg |= plchan->signal << PL080_CONFIG_DST_SEL_SHIFT;
1608 else 1608 else
1609 txd->ccfg |= plchan->signal << PL080_CONFIG_SRC_SEL_SHIFT; 1609 txd->ccfg |= plchan->signal << PL080_CONFIG_SRC_SEL_SHIFT;
1610 1610
1611 return txd; 1611 return txd;
1612 } 1612 }
1613 1613
1614 static int pl08x_tx_add_sg(struct pl08x_txd *txd, 1614 static int pl08x_tx_add_sg(struct pl08x_txd *txd,
1615 enum dma_transfer_direction direction, 1615 enum dma_transfer_direction direction,
1616 dma_addr_t slave_addr, 1616 dma_addr_t slave_addr,
1617 dma_addr_t buf_addr, 1617 dma_addr_t buf_addr,
1618 unsigned int len) 1618 unsigned int len)
1619 { 1619 {
1620 struct pl08x_sg *dsg; 1620 struct pl08x_sg *dsg;
1621 1621
1622 dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT); 1622 dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT);
1623 if (!dsg) 1623 if (!dsg)
1624 return -ENOMEM; 1624 return -ENOMEM;
1625 1625
1626 list_add_tail(&dsg->node, &txd->dsg_list); 1626 list_add_tail(&dsg->node, &txd->dsg_list);
1627 1627
1628 dsg->len = len; 1628 dsg->len = len;
1629 if (direction == DMA_MEM_TO_DEV) { 1629 if (direction == DMA_MEM_TO_DEV) {
1630 dsg->src_addr = buf_addr; 1630 dsg->src_addr = buf_addr;
1631 dsg->dst_addr = slave_addr; 1631 dsg->dst_addr = slave_addr;
1632 } else { 1632 } else {
1633 dsg->src_addr = slave_addr; 1633 dsg->src_addr = slave_addr;
1634 dsg->dst_addr = buf_addr; 1634 dsg->dst_addr = buf_addr;
1635 } 1635 }
1636 1636
1637 return 0; 1637 return 0;
1638 } 1638 }
1639 1639
1640 static struct dma_async_tx_descriptor *pl08x_prep_slave_sg( 1640 static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
1641 struct dma_chan *chan, struct scatterlist *sgl, 1641 struct dma_chan *chan, struct scatterlist *sgl,
1642 unsigned int sg_len, enum dma_transfer_direction direction, 1642 unsigned int sg_len, enum dma_transfer_direction direction,
1643 unsigned long flags, void *context) 1643 unsigned long flags, void *context)
1644 { 1644 {
1645 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); 1645 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
1646 struct pl08x_driver_data *pl08x = plchan->host; 1646 struct pl08x_driver_data *pl08x = plchan->host;
1647 struct pl08x_txd *txd; 1647 struct pl08x_txd *txd;
1648 struct scatterlist *sg; 1648 struct scatterlist *sg;
1649 int ret, tmp; 1649 int ret, tmp;
1650 dma_addr_t slave_addr; 1650 dma_addr_t slave_addr;
1651 1651
1652 dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n", 1652 dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n",
1653 __func__, sg_dma_len(sgl), plchan->name); 1653 __func__, sg_dma_len(sgl), plchan->name);
1654 1654
1655 txd = pl08x_init_txd(chan, direction, &slave_addr); 1655 txd = pl08x_init_txd(chan, direction, &slave_addr);
1656 if (!txd) 1656 if (!txd)
1657 return NULL; 1657 return NULL;
1658 1658
1659 for_each_sg(sgl, sg, sg_len, tmp) { 1659 for_each_sg(sgl, sg, sg_len, tmp) {
1660 ret = pl08x_tx_add_sg(txd, direction, slave_addr, 1660 ret = pl08x_tx_add_sg(txd, direction, slave_addr,
1661 sg_dma_address(sg), 1661 sg_dma_address(sg),
1662 sg_dma_len(sg)); 1662 sg_dma_len(sg));
1663 if (ret) { 1663 if (ret) {
1664 pl08x_release_mux(plchan); 1664 pl08x_release_mux(plchan);
1665 pl08x_free_txd(pl08x, txd); 1665 pl08x_free_txd(pl08x, txd);
1666 dev_err(&pl08x->adev->dev, "%s no mem for pl080 sg\n", 1666 dev_err(&pl08x->adev->dev, "%s no mem for pl080 sg\n",
1667 __func__); 1667 __func__);
1668 return NULL; 1668 return NULL;
1669 } 1669 }
1670 } 1670 }
1671 1671
1672 ret = pl08x_fill_llis_for_desc(plchan->host, txd); 1672 ret = pl08x_fill_llis_for_desc(plchan->host, txd);
1673 if (!ret) { 1673 if (!ret) {
1674 pl08x_release_mux(plchan); 1674 pl08x_release_mux(plchan);
1675 pl08x_free_txd(pl08x, txd); 1675 pl08x_free_txd(pl08x, txd);
1676 return NULL; 1676 return NULL;
1677 } 1677 }
1678 1678
1679 return vchan_tx_prep(&plchan->vc, &txd->vd, flags); 1679 return vchan_tx_prep(&plchan->vc, &txd->vd, flags);
1680 } 1680 }
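A client never calls pl08x_prep_slave_sg() directly; it reaches this hook through the generic dmaengine API. A minimal sketch of that path, assuming an already requested channel 'chan', a DMA-mapped scatterlist 'sgl'/'sg_len', a made-up FIFO address and a made-up completion callback:

/* Hypothetical client-side sketch (assumes <linux/dmaengine.h>) */
struct dma_slave_config cfg = {
        .direction      = DMA_MEM_TO_DEV,
        .dst_addr       = fifo_phys_addr,               /* assumed peripheral FIFO address */
        .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
        .dst_maxburst   = 4,
};
struct dma_async_tx_descriptor *desc;

dmaengine_slave_config(chan, &cfg);             /* routed to pl08x_control() */
desc = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_MEM_TO_DEV,
                               DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (desc) {
        desc->callback = my_xfer_done;          /* assumed completion callback */
        dmaengine_submit(desc);
        dma_async_issue_pending(chan);
}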
1681 1681
1682 static struct dma_async_tx_descriptor *pl08x_prep_dma_cyclic( 1682 static struct dma_async_tx_descriptor *pl08x_prep_dma_cyclic(
1683 struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, 1683 struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
1684 size_t period_len, enum dma_transfer_direction direction, 1684 size_t period_len, enum dma_transfer_direction direction,
1685 unsigned long flags, void *context) 1685 unsigned long flags, void *context)
1686 { 1686 {
1687 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); 1687 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
1688 struct pl08x_driver_data *pl08x = plchan->host; 1688 struct pl08x_driver_data *pl08x = plchan->host;
1689 struct pl08x_txd *txd; 1689 struct pl08x_txd *txd;
1690 int ret, tmp; 1690 int ret, tmp;
1691 dma_addr_t slave_addr; 1691 dma_addr_t slave_addr;
1692 1692
1693 dev_dbg(&pl08x->adev->dev, 1693 dev_dbg(&pl08x->adev->dev,
1694 "%s prepare cyclic transaction of %d/%d bytes %s %s\n", 1694 "%s prepare cyclic transaction of %d/%d bytes %s %s\n",
1695 __func__, period_len, buf_len, 1695 __func__, period_len, buf_len,
1696 direction == DMA_MEM_TO_DEV ? "to" : "from", 1696 direction == DMA_MEM_TO_DEV ? "to" : "from",
1697 plchan->name); 1697 plchan->name);
1698 1698
1699 txd = pl08x_init_txd(chan, direction, &slave_addr); 1699 txd = pl08x_init_txd(chan, direction, &slave_addr);
1700 if (!txd) 1700 if (!txd)
1701 return NULL; 1701 return NULL;
1702 1702
1703 txd->cyclic = true; 1703 txd->cyclic = true;
1704 txd->cctl |= PL080_CONTROL_TC_IRQ_EN; 1704 txd->cctl |= PL080_CONTROL_TC_IRQ_EN;
1705 for (tmp = 0; tmp < buf_len; tmp += period_len) { 1705 for (tmp = 0; tmp < buf_len; tmp += period_len) {
1706 ret = pl08x_tx_add_sg(txd, direction, slave_addr, 1706 ret = pl08x_tx_add_sg(txd, direction, slave_addr,
1707 buf_addr + tmp, period_len); 1707 buf_addr + tmp, period_len);
1708 if (ret) { 1708 if (ret) {
1709 pl08x_release_mux(plchan); 1709 pl08x_release_mux(plchan);
1710 pl08x_free_txd(pl08x, txd); 1710 pl08x_free_txd(pl08x, txd);
1711 return NULL; 1711 return NULL;
1712 } 1712 }
1713 } 1713 }
1714 1714
1715 ret = pl08x_fill_llis_for_desc(plchan->host, txd); 1715 ret = pl08x_fill_llis_for_desc(plchan->host, txd);
1716 if (!ret) { 1716 if (!ret) {
1717 pl08x_release_mux(plchan); 1717 pl08x_release_mux(plchan);
1718 pl08x_free_txd(pl08x, txd); 1718 pl08x_free_txd(pl08x, txd);
1719 return NULL; 1719 return NULL;
1720 } 1720 }
1721 1721
1722 return vchan_tx_prep(&plchan->vc, &txd->vd, flags); 1722 return vchan_tx_prep(&plchan->vc, &txd->vd, flags);
1723 } 1723 }
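Cyclic descriptors prepared here are typically used by ring-buffer clients such as audio. A hedged sketch, assuming a DMA-mapped ring buffer 'buf_dma' made of 'periods' periods of 'period_len' bytes each and a made-up per-period callback:

/* Hypothetical cyclic client sketch; buffer layout and callback names are made up */
struct dma_async_tx_descriptor *desc;

desc = dmaengine_prep_dma_cyclic(chan, buf_dma, periods * period_len,
                                 period_len, DMA_MEM_TO_DEV,
                                 DMA_PREP_INTERRUPT);
if (desc) {
        desc->callback = my_period_elapsed;     /* invoked once per completed period */
        desc->callback_param = my_private_data; /* assumed private data */
        dmaengine_submit(desc);
        dma_async_issue_pending(chan);          /* runs until dmaengine_terminate_all() */
}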
1724 1724
1725 static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, 1725 static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
1726 unsigned long arg) 1726 unsigned long arg)
1727 { 1727 {
1728 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); 1728 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
1729 struct pl08x_driver_data *pl08x = plchan->host; 1729 struct pl08x_driver_data *pl08x = plchan->host;
1730 unsigned long flags; 1730 unsigned long flags;
1731 int ret = 0; 1731 int ret = 0;
1732 1732
1733 /* Controls applicable to inactive channels */ 1733 /* Controls applicable to inactive channels */
1734 if (cmd == DMA_SLAVE_CONFIG) { 1734 if (cmd == DMA_SLAVE_CONFIG) {
1735 return dma_set_runtime_config(chan, 1735 return dma_set_runtime_config(chan,
1736 (struct dma_slave_config *)arg); 1736 (struct dma_slave_config *)arg);
1737 } 1737 }
1738 1738
1739 /* 1739 /*
1740 * Anything succeeds on channels with no physical allocation and 1740 * Anything succeeds on channels with no physical allocation and
1741 * no queued transfers. 1741 * no queued transfers.
1742 */ 1742 */
1743 spin_lock_irqsave(&plchan->vc.lock, flags); 1743 spin_lock_irqsave(&plchan->vc.lock, flags);
1744 if (!plchan->phychan && !plchan->at) { 1744 if (!plchan->phychan && !plchan->at) {
1745 spin_unlock_irqrestore(&plchan->vc.lock, flags); 1745 spin_unlock_irqrestore(&plchan->vc.lock, flags);
1746 return 0; 1746 return 0;
1747 } 1747 }
1748 1748
1749 switch (cmd) { 1749 switch (cmd) {
1750 case DMA_TERMINATE_ALL: 1750 case DMA_TERMINATE_ALL:
1751 plchan->state = PL08X_CHAN_IDLE; 1751 plchan->state = PL08X_CHAN_IDLE;
1752 1752
1753 if (plchan->phychan) { 1753 if (plchan->phychan) {
1754 /* 1754 /*
1755 * Mark physical channel as free and free any slave 1755 * Mark physical channel as free and free any slave
1756 * signal 1756 * signal
1757 */ 1757 */
1758 pl08x_phy_free(plchan); 1758 pl08x_phy_free(plchan);
1759 } 1759 }
1760 /* Dequeue jobs and free LLIs */ 1760 /* Dequeue jobs and free LLIs */
1761 if (plchan->at) { 1761 if (plchan->at) {
1762 pl08x_desc_free(&plchan->at->vd); 1762 pl08x_desc_free(&plchan->at->vd);
1763 plchan->at = NULL; 1763 plchan->at = NULL;
1764 } 1764 }
1765 /* Dequeue jobs not yet fired as well */ 1765 /* Dequeue jobs not yet fired as well */
1766 pl08x_free_txd_list(pl08x, plchan); 1766 pl08x_free_txd_list(pl08x, plchan);
1767 break; 1767 break;
1768 case DMA_PAUSE: 1768 case DMA_PAUSE:
1769 pl08x_pause_phy_chan(plchan->phychan); 1769 pl08x_pause_phy_chan(plchan->phychan);
1770 plchan->state = PL08X_CHAN_PAUSED; 1770 plchan->state = PL08X_CHAN_PAUSED;
1771 break; 1771 break;
1772 case DMA_RESUME: 1772 case DMA_RESUME:
1773 pl08x_resume_phy_chan(plchan->phychan); 1773 pl08x_resume_phy_chan(plchan->phychan);
1774 plchan->state = PL08X_CHAN_RUNNING; 1774 plchan->state = PL08X_CHAN_RUNNING;
1775 break; 1775 break;
1776 default: 1776 default:
1777 /* Unknown command */ 1777 /* Unknown command */
1778 ret = -ENXIO; 1778 ret = -ENXIO;
1779 break; 1779 break;
1780 } 1780 }
1781 1781
1782 spin_unlock_irqrestore(&plchan->vc.lock, flags); 1782 spin_unlock_irqrestore(&plchan->vc.lock, flags);
1783 1783
1784 return ret; 1784 return ret;
1785 } 1785 }
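The command codes handled above are not issued by clients directly; the dmaengine core forwards the generic helpers to this device_control hook. A short sketch of the mapping, assuming a valid channel 'chan' and slave config 'cfg':

/* Hypothetical sketch: generic wrappers that end up in pl08x_control() */
dmaengine_slave_config(chan, &cfg);     /* DMA_SLAVE_CONFIG  */
dmaengine_pause(chan);                  /* DMA_PAUSE         */
dmaengine_resume(chan);                 /* DMA_RESUME        */
dmaengine_terminate_all(chan);          /* DMA_TERMINATE_ALL */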
1786 1786
1787 bool pl08x_filter_id(struct dma_chan *chan, void *chan_id) 1787 bool pl08x_filter_id(struct dma_chan *chan, void *chan_id)
1788 { 1788 {
1789 struct pl08x_dma_chan *plchan; 1789 struct pl08x_dma_chan *plchan;
1790 char *name = chan_id; 1790 char *name = chan_id;
1791 1791
1792 /* Reject channels for devices not bound to this driver */ 1792 /* Reject channels for devices not bound to this driver */
1793 if (chan->device->dev->driver != &pl08x_amba_driver.drv) 1793 if (chan->device->dev->driver != &pl08x_amba_driver.drv)
1794 return false; 1794 return false;
1795 1795
1796 plchan = to_pl08x_chan(chan); 1796 plchan = to_pl08x_chan(chan);
1797 1797
1798 /* Match on the channel name */ 1798 /* Match on the channel name */
1799 if (!strcmp(plchan->name, name)) 1799 if (!strcmp(plchan->name, name))
1800 return true; 1800 return true;
1801 1801
1802 return false; 1802 return false;
1803 } 1803 }
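pl08x_filter_id() is meant to be passed to dma_request_channel() by a client that knows the bus_id string from the platform data. A sketch under that assumption ("uart0_tx" is a made-up name that would have to match a slave channel's bus_id):

/* Hypothetical sketch; "uart0_tx" must match a bus_id in the platform data */
dma_cap_mask_t mask;
struct dma_chan *chan;

dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);

chan = dma_request_channel(mask, pl08x_filter_id, "uart0_tx");
if (!chan)
        return -ENODEV;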
1804 1804
1805 /* 1805 /*
1806 * Just check that the device is there and active 1806 * Just check that the device is there and active
1807 * TODO: turn this bit on/off depending on the number of physical channels 1807 * TODO: turn this bit on/off depending on the number of physical channels
1808 * actually used; if it is zero, shut it off to save some power and cut 1808 * actually used; if it is zero, shut it off to save some power and cut
1809 * the clock at the same time. 1809 * the clock at the same time.
1810 */ 1810 */
1811 static void pl08x_ensure_on(struct pl08x_driver_data *pl08x) 1811 static void pl08x_ensure_on(struct pl08x_driver_data *pl08x)
1812 { 1812 {
1813 /* The Nomadik variant does not have the config register */ 1813 /* The Nomadik variant does not have the config register */
1814 if (pl08x->vd->nomadik) 1814 if (pl08x->vd->nomadik)
1815 return; 1815 return;
1816 writel(PL080_CONFIG_ENABLE, pl08x->base + PL080_CONFIG); 1816 writel(PL080_CONFIG_ENABLE, pl08x->base + PL080_CONFIG);
1817 } 1817 }
1818 1818
1819 static irqreturn_t pl08x_irq(int irq, void *dev) 1819 static irqreturn_t pl08x_irq(int irq, void *dev)
1820 { 1820 {
1821 struct pl08x_driver_data *pl08x = dev; 1821 struct pl08x_driver_data *pl08x = dev;
1822 u32 mask = 0, err, tc, i; 1822 u32 mask = 0, err, tc, i;
1823 1823
1824 /* check & clear - ERR & TC interrupts */ 1824 /* check & clear - ERR & TC interrupts */
1825 err = readl(pl08x->base + PL080_ERR_STATUS); 1825 err = readl(pl08x->base + PL080_ERR_STATUS);
1826 if (err) { 1826 if (err) {
1827 dev_err(&pl08x->adev->dev, "%s error interrupt, register value 0x%08x\n", 1827 dev_err(&pl08x->adev->dev, "%s error interrupt, register value 0x%08x\n",
1828 __func__, err); 1828 __func__, err);
1829 writel(err, pl08x->base + PL080_ERR_CLEAR); 1829 writel(err, pl08x->base + PL080_ERR_CLEAR);
1830 } 1830 }
1831 tc = readl(pl08x->base + PL080_TC_STATUS); 1831 tc = readl(pl08x->base + PL080_TC_STATUS);
1832 if (tc) 1832 if (tc)
1833 writel(tc, pl08x->base + PL080_TC_CLEAR); 1833 writel(tc, pl08x->base + PL080_TC_CLEAR);
1834 1834
1835 if (!err && !tc) 1835 if (!err && !tc)
1836 return IRQ_NONE; 1836 return IRQ_NONE;
1837 1837
1838 for (i = 0; i < pl08x->vd->channels; i++) { 1838 for (i = 0; i < pl08x->vd->channels; i++) {
1839 if (((1 << i) & err) || ((1 << i) & tc)) { 1839 if (((1 << i) & err) || ((1 << i) & tc)) {
1840 /* Locate physical channel */ 1840 /* Locate physical channel */
1841 struct pl08x_phy_chan *phychan = &pl08x->phy_chans[i]; 1841 struct pl08x_phy_chan *phychan = &pl08x->phy_chans[i];
1842 struct pl08x_dma_chan *plchan = phychan->serving; 1842 struct pl08x_dma_chan *plchan = phychan->serving;
1843 struct pl08x_txd *tx; 1843 struct pl08x_txd *tx;
1844 1844
1845 if (!plchan) { 1845 if (!plchan) {
1846 dev_err(&pl08x->adev->dev, 1846 dev_err(&pl08x->adev->dev,
1847 "%s Error TC interrupt on unused channel: 0x%08x\n", 1847 "%s Error TC interrupt on unused channel: 0x%08x\n",
1848 __func__, i); 1848 __func__, i);
1849 continue; 1849 continue;
1850 } 1850 }
1851 1851
1852 spin_lock(&plchan->vc.lock); 1852 spin_lock(&plchan->vc.lock);
1853 tx = plchan->at; 1853 tx = plchan->at;
1854 if (tx && tx->cyclic) { 1854 if (tx && tx->cyclic) {
1855 vchan_cyclic_callback(&tx->vd); 1855 vchan_cyclic_callback(&tx->vd);
1856 } else if (tx) { 1856 } else if (tx) {
1857 plchan->at = NULL; 1857 plchan->at = NULL;
1858 /* 1858 /*
1859 * This descriptor is done, release its mux 1859 * This descriptor is done, release its mux
1860 * reservation. 1860 * reservation.
1861 */ 1861 */
1862 pl08x_release_mux(plchan); 1862 pl08x_release_mux(plchan);
1863 tx->done = true; 1863 tx->done = true;
1864 vchan_cookie_complete(&tx->vd); 1864 vchan_cookie_complete(&tx->vd);
1865 1865
1866 /* 1866 /*
1867 * And start the next descriptor (if any), 1867 * And start the next descriptor (if any),
1868 * otherwise free this channel. 1868 * otherwise free this channel.
1869 */ 1869 */
1870 if (vchan_next_desc(&plchan->vc)) 1870 if (vchan_next_desc(&plchan->vc))
1871 pl08x_start_next_txd(plchan); 1871 pl08x_start_next_txd(plchan);
1872 else 1872 else
1873 pl08x_phy_free(plchan); 1873 pl08x_phy_free(plchan);
1874 } 1874 }
1875 spin_unlock(&plchan->vc.lock); 1875 spin_unlock(&plchan->vc.lock);
1876 1876
1877 mask |= (1 << i); 1877 mask |= (1 << i);
1878 } 1878 }
1879 } 1879 }
1880 1880
1881 return mask ? IRQ_HANDLED : IRQ_NONE; 1881 return mask ? IRQ_HANDLED : IRQ_NONE;
1882 } 1882 }
1883 1883
1884 static void pl08x_dma_slave_init(struct pl08x_dma_chan *chan) 1884 static void pl08x_dma_slave_init(struct pl08x_dma_chan *chan)
1885 { 1885 {
1886 chan->slave = true; 1886 chan->slave = true;
1887 chan->name = chan->cd->bus_id; 1887 chan->name = chan->cd->bus_id;
1888 chan->cfg.src_addr = chan->cd->addr; 1888 chan->cfg.src_addr = chan->cd->addr;
1889 chan->cfg.dst_addr = chan->cd->addr; 1889 chan->cfg.dst_addr = chan->cd->addr;
1890 } 1890 }
1891 1891
1892 /* 1892 /*
1893 * Initialise the DMAC memcpy/slave channels. 1893 * Initialise the DMAC memcpy/slave channels.
1894 * Make a local wrapper to hold required data 1894 * Make a local wrapper to hold required data
1895 */ 1895 */
1896 static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x, 1896 static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x,
1897 struct dma_device *dmadev, unsigned int channels, bool slave) 1897 struct dma_device *dmadev, unsigned int channels, bool slave)
1898 { 1898 {
1899 struct pl08x_dma_chan *chan; 1899 struct pl08x_dma_chan *chan;
1900 int i; 1900 int i;
1901 1901
1902 INIT_LIST_HEAD(&dmadev->channels); 1902 INIT_LIST_HEAD(&dmadev->channels);
1903 1903
1904 /* 1904 /*
1905 * Register as many memcpy channels as we have physical channels; 1905 * Register as many memcpy channels as we have physical channels;
1906 * we won't always be able to use them all, but the code will have 1906 * we won't always be able to use them all, but the code will have
1907 * to cope with that situation. 1907 * to cope with that situation.
1908 */ 1908 */
1909 for (i = 0; i < channels; i++) { 1909 for (i = 0; i < channels; i++) {
1910 chan = kzalloc(sizeof(*chan), GFP_KERNEL); 1910 chan = kzalloc(sizeof(*chan), GFP_KERNEL);
1911 if (!chan) { 1911 if (!chan) {
1912 dev_err(&pl08x->adev->dev, 1912 dev_err(&pl08x->adev->dev,
1913 "%s no memory for channel\n", __func__); 1913 "%s no memory for channel\n", __func__);
1914 return -ENOMEM; 1914 return -ENOMEM;
1915 } 1915 }
1916 1916
1917 chan->host = pl08x; 1917 chan->host = pl08x;
1918 chan->state = PL08X_CHAN_IDLE; 1918 chan->state = PL08X_CHAN_IDLE;
1919 chan->signal = -1; 1919 chan->signal = -1;
1920 1920
1921 if (slave) { 1921 if (slave) {
1922 chan->cd = &pl08x->pd->slave_channels[i]; 1922 chan->cd = &pl08x->pd->slave_channels[i];
1923 pl08x_dma_slave_init(chan); 1923 pl08x_dma_slave_init(chan);
1924 } else { 1924 } else {
1925 chan->cd = &pl08x->pd->memcpy_channel; 1925 chan->cd = &pl08x->pd->memcpy_channel;
1926 chan->name = kasprintf(GFP_KERNEL, "memcpy%d", i); 1926 chan->name = kasprintf(GFP_KERNEL, "memcpy%d", i);
1927 if (!chan->name) { 1927 if (!chan->name) {
1928 kfree(chan); 1928 kfree(chan);
1929 return -ENOMEM; 1929 return -ENOMEM;
1930 } 1930 }
1931 } 1931 }
1932 dev_dbg(&pl08x->adev->dev, 1932 dev_dbg(&pl08x->adev->dev,
1933 "initialize virtual channel \"%s\"\n", 1933 "initialize virtual channel \"%s\"\n",
1934 chan->name); 1934 chan->name);
1935 1935
1936 chan->vc.desc_free = pl08x_desc_free; 1936 chan->vc.desc_free = pl08x_desc_free;
1937 vchan_init(&chan->vc, dmadev); 1937 vchan_init(&chan->vc, dmadev);
1938 } 1938 }
1939 dev_info(&pl08x->adev->dev, "initialized %d virtual %s channels\n", 1939 dev_info(&pl08x->adev->dev, "initialized %d virtual %s channels\n",
1940 i, slave ? "slave" : "memcpy"); 1940 i, slave ? "slave" : "memcpy");
1941 return i; 1941 return i;
1942 } 1942 }
1943 1943
1944 static void pl08x_free_virtual_channels(struct dma_device *dmadev) 1944 static void pl08x_free_virtual_channels(struct dma_device *dmadev)
1945 { 1945 {
1946 struct pl08x_dma_chan *chan = NULL; 1946 struct pl08x_dma_chan *chan = NULL;
1947 struct pl08x_dma_chan *next; 1947 struct pl08x_dma_chan *next;
1948 1948
1949 list_for_each_entry_safe(chan, 1949 list_for_each_entry_safe(chan,
1950 next, &dmadev->channels, vc.chan.device_node) { 1950 next, &dmadev->channels, vc.chan.device_node) {
1951 list_del(&chan->vc.chan.device_node); 1951 list_del(&chan->vc.chan.device_node);
1952 kfree(chan); 1952 kfree(chan);
1953 } 1953 }
1954 } 1954 }
1955 1955
1956 #ifdef CONFIG_DEBUG_FS 1956 #ifdef CONFIG_DEBUG_FS
1957 static const char *pl08x_state_str(enum pl08x_dma_chan_state state) 1957 static const char *pl08x_state_str(enum pl08x_dma_chan_state state)
1958 { 1958 {
1959 switch (state) { 1959 switch (state) {
1960 case PL08X_CHAN_IDLE: 1960 case PL08X_CHAN_IDLE:
1961 return "idle"; 1961 return "idle";
1962 case PL08X_CHAN_RUNNING: 1962 case PL08X_CHAN_RUNNING:
1963 return "running"; 1963 return "running";
1964 case PL08X_CHAN_PAUSED: 1964 case PL08X_CHAN_PAUSED:
1965 return "paused"; 1965 return "paused";
1966 case PL08X_CHAN_WAITING: 1966 case PL08X_CHAN_WAITING:
1967 return "waiting"; 1967 return "waiting";
1968 default: 1968 default:
1969 break; 1969 break;
1970 } 1970 }
1971 return "UNKNOWN STATE"; 1971 return "UNKNOWN STATE";
1972 } 1972 }
1973 1973
1974 static int pl08x_debugfs_show(struct seq_file *s, void *data) 1974 static int pl08x_debugfs_show(struct seq_file *s, void *data)
1975 { 1975 {
1976 struct pl08x_driver_data *pl08x = s->private; 1976 struct pl08x_driver_data *pl08x = s->private;
1977 struct pl08x_dma_chan *chan; 1977 struct pl08x_dma_chan *chan;
1978 struct pl08x_phy_chan *ch; 1978 struct pl08x_phy_chan *ch;
1979 unsigned long flags; 1979 unsigned long flags;
1980 int i; 1980 int i;
1981 1981
1982 seq_printf(s, "PL08x physical channels:\n"); 1982 seq_printf(s, "PL08x physical channels:\n");
1983 seq_printf(s, "CHANNEL:\tUSER:\n"); 1983 seq_printf(s, "CHANNEL:\tUSER:\n");
1984 seq_printf(s, "--------\t-----\n"); 1984 seq_printf(s, "--------\t-----\n");
1985 for (i = 0; i < pl08x->vd->channels; i++) { 1985 for (i = 0; i < pl08x->vd->channels; i++) {
1986 struct pl08x_dma_chan *virt_chan; 1986 struct pl08x_dma_chan *virt_chan;
1987 1987
1988 ch = &pl08x->phy_chans[i]; 1988 ch = &pl08x->phy_chans[i];
1989 1989
1990 spin_lock_irqsave(&ch->lock, flags); 1990 spin_lock_irqsave(&ch->lock, flags);
1991 virt_chan = ch->serving; 1991 virt_chan = ch->serving;
1992 1992
1993 seq_printf(s, "%d\t\t%s%s\n", 1993 seq_printf(s, "%d\t\t%s%s\n",
1994 ch->id, 1994 ch->id,
1995 virt_chan ? virt_chan->name : "(none)", 1995 virt_chan ? virt_chan->name : "(none)",
1996 ch->locked ? " LOCKED" : ""); 1996 ch->locked ? " LOCKED" : "");
1997 1997
1998 spin_unlock_irqrestore(&ch->lock, flags); 1998 spin_unlock_irqrestore(&ch->lock, flags);
1999 } 1999 }
2000 2000
2001 seq_printf(s, "\nPL08x virtual memcpy channels:\n"); 2001 seq_printf(s, "\nPL08x virtual memcpy channels:\n");
2002 seq_printf(s, "CHANNEL:\tSTATE:\n"); 2002 seq_printf(s, "CHANNEL:\tSTATE:\n");
2003 seq_printf(s, "--------\t------\n"); 2003 seq_printf(s, "--------\t------\n");
2004 list_for_each_entry(chan, &pl08x->memcpy.channels, vc.chan.device_node) { 2004 list_for_each_entry(chan, &pl08x->memcpy.channels, vc.chan.device_node) {
2005 seq_printf(s, "%s\t\t%s\n", chan->name, 2005 seq_printf(s, "%s\t\t%s\n", chan->name,
2006 pl08x_state_str(chan->state)); 2006 pl08x_state_str(chan->state));
2007 } 2007 }
2008 2008
2009 seq_printf(s, "\nPL08x virtual slave channels:\n"); 2009 seq_printf(s, "\nPL08x virtual slave channels:\n");
2010 seq_printf(s, "CHANNEL:\tSTATE:\n"); 2010 seq_printf(s, "CHANNEL:\tSTATE:\n");
2011 seq_printf(s, "--------\t------\n"); 2011 seq_printf(s, "--------\t------\n");
2012 list_for_each_entry(chan, &pl08x->slave.channels, vc.chan.device_node) { 2012 list_for_each_entry(chan, &pl08x->slave.channels, vc.chan.device_node) {
2013 seq_printf(s, "%s\t\t%s\n", chan->name, 2013 seq_printf(s, "%s\t\t%s\n", chan->name,
2014 pl08x_state_str(chan->state)); 2014 pl08x_state_str(chan->state));
2015 } 2015 }
2016 2016
2017 return 0; 2017 return 0;
2018 } 2018 }
2019 2019
2020 static int pl08x_debugfs_open(struct inode *inode, struct file *file) 2020 static int pl08x_debugfs_open(struct inode *inode, struct file *file)
2021 { 2021 {
2022 return single_open(file, pl08x_debugfs_show, inode->i_private); 2022 return single_open(file, pl08x_debugfs_show, inode->i_private);
2023 } 2023 }
2024 2024
2025 static const struct file_operations pl08x_debugfs_operations = { 2025 static const struct file_operations pl08x_debugfs_operations = {
2026 .open = pl08x_debugfs_open, 2026 .open = pl08x_debugfs_open,
2027 .read = seq_read, 2027 .read = seq_read,
2028 .llseek = seq_lseek, 2028 .llseek = seq_lseek,
2029 .release = single_release, 2029 .release = single_release,
2030 }; 2030 };
2031 2031
2032 static void init_pl08x_debugfs(struct pl08x_driver_data *pl08x) 2032 static void init_pl08x_debugfs(struct pl08x_driver_data *pl08x)
2033 { 2033 {
2034 /* Expose a simple debugfs interface to view all channels */ 2034 /* Expose a simple debugfs interface to view all channels */
2035 (void) debugfs_create_file(dev_name(&pl08x->adev->dev), 2035 (void) debugfs_create_file(dev_name(&pl08x->adev->dev),
2036 S_IFREG | S_IRUGO, NULL, pl08x, 2036 S_IFREG | S_IRUGO, NULL, pl08x,
2037 &pl08x_debugfs_operations); 2037 &pl08x_debugfs_operations);
2038 } 2038 }
2039 2039
2040 #else 2040 #else
2041 static inline void init_pl08x_debugfs(struct pl08x_driver_data *pl08x) 2041 static inline void init_pl08x_debugfs(struct pl08x_driver_data *pl08x)
2042 { 2042 {
2043 } 2043 }
2044 #endif 2044 #endif
2045 2045
2046 static int pl08x_probe(struct amba_device *adev, const struct amba_id *id) 2046 static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
2047 { 2047 {
2048 struct pl08x_driver_data *pl08x; 2048 struct pl08x_driver_data *pl08x;
2049 const struct vendor_data *vd = id->data; 2049 const struct vendor_data *vd = id->data;
2050 u32 tsfr_size; 2050 u32 tsfr_size;
2051 int ret = 0; 2051 int ret = 0;
2052 int i; 2052 int i;
2053 2053
2054 ret = amba_request_regions(adev, NULL); 2054 ret = amba_request_regions(adev, NULL);
2055 if (ret) 2055 if (ret)
2056 return ret; 2056 return ret;
2057 2057
2058 /* Create the driver state holder */ 2058 /* Create the driver state holder */
2059 pl08x = kzalloc(sizeof(*pl08x), GFP_KERNEL); 2059 pl08x = kzalloc(sizeof(*pl08x), GFP_KERNEL);
2060 if (!pl08x) { 2060 if (!pl08x) {
2061 ret = -ENOMEM; 2061 ret = -ENOMEM;
2062 goto out_no_pl08x; 2062 goto out_no_pl08x;
2063 } 2063 }
2064 2064
2065 /* Initialize memcpy engine */ 2065 /* Initialize memcpy engine */
2066 dma_cap_set(DMA_MEMCPY, pl08x->memcpy.cap_mask); 2066 dma_cap_set(DMA_MEMCPY, pl08x->memcpy.cap_mask);
2067 pl08x->memcpy.dev = &adev->dev; 2067 pl08x->memcpy.dev = &adev->dev;
2068 pl08x->memcpy.device_alloc_chan_resources = pl08x_alloc_chan_resources; 2068 pl08x->memcpy.device_alloc_chan_resources = pl08x_alloc_chan_resources;
2069 pl08x->memcpy.device_free_chan_resources = pl08x_free_chan_resources; 2069 pl08x->memcpy.device_free_chan_resources = pl08x_free_chan_resources;
2070 pl08x->memcpy.device_prep_dma_memcpy = pl08x_prep_dma_memcpy; 2070 pl08x->memcpy.device_prep_dma_memcpy = pl08x_prep_dma_memcpy;
2071 pl08x->memcpy.device_prep_dma_interrupt = pl08x_prep_dma_interrupt; 2071 pl08x->memcpy.device_prep_dma_interrupt = pl08x_prep_dma_interrupt;
2072 pl08x->memcpy.device_tx_status = pl08x_dma_tx_status; 2072 pl08x->memcpy.device_tx_status = pl08x_dma_tx_status;
2073 pl08x->memcpy.device_issue_pending = pl08x_issue_pending; 2073 pl08x->memcpy.device_issue_pending = pl08x_issue_pending;
2074 pl08x->memcpy.device_control = pl08x_control; 2074 pl08x->memcpy.device_control = pl08x_control;
2075 2075
2076 /* Initialize slave engine */ 2076 /* Initialize slave engine */
2077 dma_cap_set(DMA_SLAVE, pl08x->slave.cap_mask); 2077 dma_cap_set(DMA_SLAVE, pl08x->slave.cap_mask);
2078 dma_cap_set(DMA_CYCLIC, pl08x->slave.cap_mask); 2078 dma_cap_set(DMA_CYCLIC, pl08x->slave.cap_mask);
2079 pl08x->slave.dev = &adev->dev; 2079 pl08x->slave.dev = &adev->dev;
2080 pl08x->slave.device_alloc_chan_resources = pl08x_alloc_chan_resources; 2080 pl08x->slave.device_alloc_chan_resources = pl08x_alloc_chan_resources;
2081 pl08x->slave.device_free_chan_resources = pl08x_free_chan_resources; 2081 pl08x->slave.device_free_chan_resources = pl08x_free_chan_resources;
2082 pl08x->slave.device_prep_dma_interrupt = pl08x_prep_dma_interrupt; 2082 pl08x->slave.device_prep_dma_interrupt = pl08x_prep_dma_interrupt;
2083 pl08x->slave.device_tx_status = pl08x_dma_tx_status; 2083 pl08x->slave.device_tx_status = pl08x_dma_tx_status;
2084 pl08x->slave.device_issue_pending = pl08x_issue_pending; 2084 pl08x->slave.device_issue_pending = pl08x_issue_pending;
2085 pl08x->slave.device_prep_slave_sg = pl08x_prep_slave_sg; 2085 pl08x->slave.device_prep_slave_sg = pl08x_prep_slave_sg;
2086 pl08x->slave.device_prep_dma_cyclic = pl08x_prep_dma_cyclic; 2086 pl08x->slave.device_prep_dma_cyclic = pl08x_prep_dma_cyclic;
2087 pl08x->slave.device_control = pl08x_control; 2087 pl08x->slave.device_control = pl08x_control;
2088 2088
2089 /* Get the platform data */ 2089 /* Get the platform data */
2090 pl08x->pd = dev_get_platdata(&adev->dev); 2090 pl08x->pd = dev_get_platdata(&adev->dev);
2091 if (!pl08x->pd) { 2091 if (!pl08x->pd) {
2092 dev_err(&adev->dev, "no platform data supplied\n"); 2092 dev_err(&adev->dev, "no platform data supplied\n");
2093 ret = -EINVAL; 2093 ret = -EINVAL;
2094 goto out_no_platdata; 2094 goto out_no_platdata;
2095 } 2095 }
2096 2096
2097 /* Assign useful pointers to the driver state */ 2097 /* Assign useful pointers to the driver state */
2098 pl08x->adev = adev; 2098 pl08x->adev = adev;
2099 pl08x->vd = vd; 2099 pl08x->vd = vd;
2100 2100
2101 /* By default, AHB1 only. If dualmaster, from platform */ 2101 /* By default, AHB1 only. If dualmaster, from platform */
2102 pl08x->lli_buses = PL08X_AHB1; 2102 pl08x->lli_buses = PL08X_AHB1;
2103 pl08x->mem_buses = PL08X_AHB1; 2103 pl08x->mem_buses = PL08X_AHB1;
2104 if (pl08x->vd->dualmaster) { 2104 if (pl08x->vd->dualmaster) {
2105 pl08x->lli_buses = pl08x->pd->lli_buses; 2105 pl08x->lli_buses = pl08x->pd->lli_buses;
2106 pl08x->mem_buses = pl08x->pd->mem_buses; 2106 pl08x->mem_buses = pl08x->pd->mem_buses;
2107 } 2107 }
2108 2108
2109 if (vd->pl080s) 2109 if (vd->pl080s)
2110 pl08x->lli_words = PL080S_LLI_WORDS; 2110 pl08x->lli_words = PL080S_LLI_WORDS;
2111 else 2111 else
2112 pl08x->lli_words = PL080_LLI_WORDS; 2112 pl08x->lli_words = PL080_LLI_WORDS;
2113 tsfr_size = MAX_NUM_TSFR_LLIS * pl08x->lli_words * sizeof(u32); 2113 tsfr_size = MAX_NUM_TSFR_LLIS * pl08x->lli_words * sizeof(u32);
2114 2114
2115 /* A DMA memory pool for LLIs, aligned to a PL08X_ALIGN boundary */ 2115 /* A DMA memory pool for LLIs, aligned to a PL08X_ALIGN boundary */
2116 pl08x->pool = dma_pool_create(DRIVER_NAME, &pl08x->adev->dev, 2116 pl08x->pool = dma_pool_create(DRIVER_NAME, &pl08x->adev->dev,
2117 tsfr_size, PL08X_ALIGN, 0); 2117 tsfr_size, PL08X_ALIGN, 0);
2118 if (!pl08x->pool) { 2118 if (!pl08x->pool) {
2119 ret = -ENOMEM; 2119 ret = -ENOMEM;
2120 goto out_no_lli_pool; 2120 goto out_no_lli_pool;
2121 } 2121 }
2122 2122
2123 pl08x->base = ioremap(adev->res.start, resource_size(&adev->res)); 2123 pl08x->base = ioremap(adev->res.start, resource_size(&adev->res));
2124 if (!pl08x->base) { 2124 if (!pl08x->base) {
2125 ret = -ENOMEM; 2125 ret = -ENOMEM;
2126 goto out_no_ioremap; 2126 goto out_no_ioremap;
2127 } 2127 }
2128 2128
2129 /* Turn on the PL08x */ 2129 /* Turn on the PL08x */
2130 pl08x_ensure_on(pl08x); 2130 pl08x_ensure_on(pl08x);
2131 2131
2132 /* Attach the interrupt handler */ 2132 /* Attach the interrupt handler */
2133 writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR); 2133 writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR);
2134 writel(0x000000FF, pl08x->base + PL080_TC_CLEAR); 2134 writel(0x000000FF, pl08x->base + PL080_TC_CLEAR);
2135 2135
2136 ret = request_irq(adev->irq[0], pl08x_irq, 0, DRIVER_NAME, pl08x); 2136 ret = request_irq(adev->irq[0], pl08x_irq, 0, DRIVER_NAME, pl08x);
2137 if (ret) { 2137 if (ret) {
2138 dev_err(&adev->dev, "%s failed to request interrupt %d\n", 2138 dev_err(&adev->dev, "%s failed to request interrupt %d\n",
2139 __func__, adev->irq[0]); 2139 __func__, adev->irq[0]);
2140 goto out_no_irq; 2140 goto out_no_irq;
2141 } 2141 }
2142 2142
2143 /* Initialize physical channels */ 2143 /* Initialize physical channels */
2144 pl08x->phy_chans = kzalloc((vd->channels * sizeof(*pl08x->phy_chans)), 2144 pl08x->phy_chans = kzalloc((vd->channels * sizeof(*pl08x->phy_chans)),
2145 GFP_KERNEL); 2145 GFP_KERNEL);
2146 if (!pl08x->phy_chans) { 2146 if (!pl08x->phy_chans) {
2147 dev_err(&adev->dev, "%s failed to allocate " 2147 dev_err(&adev->dev, "%s failed to allocate "
2148 "physical channel holders\n", 2148 "physical channel holders\n",
2149 __func__); 2149 __func__);
2150 ret = -ENOMEM; 2150 ret = -ENOMEM;
2151 goto out_no_phychans; 2151 goto out_no_phychans;
2152 } 2152 }
2153 2153
2154 for (i = 0; i < vd->channels; i++) { 2154 for (i = 0; i < vd->channels; i++) {
2155 struct pl08x_phy_chan *ch = &pl08x->phy_chans[i]; 2155 struct pl08x_phy_chan *ch = &pl08x->phy_chans[i];
2156 2156
2157 ch->id = i; 2157 ch->id = i;
2158 ch->base = pl08x->base + PL080_Cx_BASE(i); 2158 ch->base = pl08x->base + PL080_Cx_BASE(i);
2159 ch->reg_config = ch->base + vd->config_offset; 2159 ch->reg_config = ch->base + vd->config_offset;
2160 spin_lock_init(&ch->lock); 2160 spin_lock_init(&ch->lock);
2161 2161
2162 /* 2162 /*
2163 * Nomadik variants can have channels that are locked 2163 * Nomadik variants can have channels that are locked
2164 * down for the secure world only. Lock up these channels 2164 * down for the secure world only. Lock up these channels
2165 * by perpetually serving a dummy virtual channel. 2165 * by perpetually serving a dummy virtual channel.
2166 */ 2166 */
2167 if (vd->nomadik) { 2167 if (vd->nomadik) {
2168 u32 val; 2168 u32 val;
2169 2169
2170 val = readl(ch->reg_config); 2170 val = readl(ch->reg_config);
2171 if (val & (PL080N_CONFIG_ITPROT | PL080N_CONFIG_SECPROT)) { 2171 if (val & (PL080N_CONFIG_ITPROT | PL080N_CONFIG_SECPROT)) {
2172 dev_info(&adev->dev, "physical channel %d reserved for secure access only\n", i); 2172 dev_info(&adev->dev, "physical channel %d reserved for secure access only\n", i);
2173 ch->locked = true; 2173 ch->locked = true;
2174 } 2174 }
2175 } 2175 }
2176 2176
2177 dev_dbg(&adev->dev, "physical channel %d is %s\n", 2177 dev_dbg(&adev->dev, "physical channel %d is %s\n",
2178 i, pl08x_phy_channel_busy(ch) ? "BUSY" : "FREE"); 2178 i, pl08x_phy_channel_busy(ch) ? "BUSY" : "FREE");
2179 } 2179 }
2180 2180
2181 /* Register as many memcpy channels as there are physical channels */ 2181 /* Register as many memcpy channels as there are physical channels */
2182 ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->memcpy, 2182 ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->memcpy,
2183 pl08x->vd->channels, false); 2183 pl08x->vd->channels, false);
2184 if (ret <= 0) { 2184 if (ret <= 0) {
2185 dev_warn(&pl08x->adev->dev, 2185 dev_warn(&pl08x->adev->dev,
2186 "%s failed to enumerate memcpy channels - %d\n", 2186 "%s failed to enumerate memcpy channels - %d\n",
2187 __func__, ret); 2187 __func__, ret);
2188 goto out_no_memcpy; 2188 goto out_no_memcpy;
2189 } 2189 }
2190 pl08x->memcpy.chancnt = ret; 2190 pl08x->memcpy.chancnt = ret;
2191 2191
2192 /* Register slave channels */ 2192 /* Register slave channels */
2193 ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->slave, 2193 ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->slave,
2194 pl08x->pd->num_slave_channels, true); 2194 pl08x->pd->num_slave_channels, true);
2195 if (ret <= 0) { 2195 if (ret <= 0) {
2196 dev_warn(&pl08x->adev->dev, 2196 dev_warn(&pl08x->adev->dev,
2197 "%s failed to enumerate slave channels - %d\n", 2197 "%s failed to enumerate slave channels - %d\n",
2198 __func__, ret); 2198 __func__, ret);
2199 goto out_no_slave; 2199 goto out_no_slave;
2200 } 2200 }
2201 pl08x->slave.chancnt = ret; 2201 pl08x->slave.chancnt = ret;
2202 2202
2203 ret = dma_async_device_register(&pl08x->memcpy); 2203 ret = dma_async_device_register(&pl08x->memcpy);
2204 if (ret) { 2204 if (ret) {
2205 dev_warn(&pl08x->adev->dev, 2205 dev_warn(&pl08x->adev->dev,
2206 "%s failed to register memcpy as an async device - %d\n", 2206 "%s failed to register memcpy as an async device - %d\n",
2207 __func__, ret); 2207 __func__, ret);
2208 goto out_no_memcpy_reg; 2208 goto out_no_memcpy_reg;
2209 } 2209 }
2210 2210
2211 ret = dma_async_device_register(&pl08x->slave); 2211 ret = dma_async_device_register(&pl08x->slave);
2212 if (ret) { 2212 if (ret) {
2213 dev_warn(&pl08x->adev->dev, 2213 dev_warn(&pl08x->adev->dev,
2214 "%s failed to register slave as an async device - %d\n", 2214 "%s failed to register slave as an async device - %d\n",
2215 __func__, ret); 2215 __func__, ret);
2216 goto out_no_slave_reg; 2216 goto out_no_slave_reg;
2217 } 2217 }
2218 2218
2219 amba_set_drvdata(adev, pl08x); 2219 amba_set_drvdata(adev, pl08x);
2220 init_pl08x_debugfs(pl08x); 2220 init_pl08x_debugfs(pl08x);
2221 dev_info(&pl08x->adev->dev, "DMA: PL%03x%s rev%u at 0x%08llx irq %d\n", 2221 dev_info(&pl08x->adev->dev, "DMA: PL%03x%s rev%u at 0x%08llx irq %d\n",
2222 amba_part(adev), pl08x->vd->pl080s ? "s" : "", amba_rev(adev), 2222 amba_part(adev), pl08x->vd->pl080s ? "s" : "", amba_rev(adev),
2223 (unsigned long long)adev->res.start, adev->irq[0]); 2223 (unsigned long long)adev->res.start, adev->irq[0]);
2224 2224
2225 return 0; 2225 return 0;
2226 2226
2227 out_no_slave_reg: 2227 out_no_slave_reg:
2228 dma_async_device_unregister(&pl08x->memcpy); 2228 dma_async_device_unregister(&pl08x->memcpy);
2229 out_no_memcpy_reg: 2229 out_no_memcpy_reg:
2230 pl08x_free_virtual_channels(&pl08x->slave); 2230 pl08x_free_virtual_channels(&pl08x->slave);
2231 out_no_slave: 2231 out_no_slave:
2232 pl08x_free_virtual_channels(&pl08x->memcpy); 2232 pl08x_free_virtual_channels(&pl08x->memcpy);
2233 out_no_memcpy: 2233 out_no_memcpy:
2234 kfree(pl08x->phy_chans); 2234 kfree(pl08x->phy_chans);
2235 out_no_phychans: 2235 out_no_phychans:
2236 free_irq(adev->irq[0], pl08x); 2236 free_irq(adev->irq[0], pl08x);
2237 out_no_irq: 2237 out_no_irq:
2238 iounmap(pl08x->base); 2238 iounmap(pl08x->base);
2239 out_no_ioremap: 2239 out_no_ioremap:
2240 dma_pool_destroy(pl08x->pool); 2240 dma_pool_destroy(pl08x->pool);
2241 out_no_lli_pool: 2241 out_no_lli_pool:
2242 out_no_platdata: 2242 out_no_platdata:
2243 kfree(pl08x); 2243 kfree(pl08x);
2244 out_no_pl08x: 2244 out_no_pl08x:
2245 amba_release_regions(adev); 2245 amba_release_regions(adev);
2246 return ret; 2246 return ret;
2247 } 2247 }
2248 2248
2249 /* The PL080 has 8 channels and the PL081 has just 2 */ 2249 /* The PL080 has 8 channels and the PL081 has just 2 */
2250 static struct vendor_data vendor_pl080 = { 2250 static struct vendor_data vendor_pl080 = {
2251 .config_offset = PL080_CH_CONFIG, 2251 .config_offset = PL080_CH_CONFIG,
2252 .channels = 8, 2252 .channels = 8,
2253 .dualmaster = true, 2253 .dualmaster = true,
2254 .max_transfer_size = PL080_CONTROL_TRANSFER_SIZE_MASK, 2254 .max_transfer_size = PL080_CONTROL_TRANSFER_SIZE_MASK,
2255 }; 2255 };
2256 2256
2257 static struct vendor_data vendor_nomadik = { 2257 static struct vendor_data vendor_nomadik = {
2258 .config_offset = PL080_CH_CONFIG, 2258 .config_offset = PL080_CH_CONFIG,
2259 .channels = 8, 2259 .channels = 8,
2260 .dualmaster = true, 2260 .dualmaster = true,
2261 .nomadik = true, 2261 .nomadik = true,
2262 .max_transfer_size = PL080_CONTROL_TRANSFER_SIZE_MASK, 2262 .max_transfer_size = PL080_CONTROL_TRANSFER_SIZE_MASK,
2263 }; 2263 };
2264 2264
2265 static struct vendor_data vendor_pl080s = { 2265 static struct vendor_data vendor_pl080s = {
2266 .config_offset = PL080S_CH_CONFIG, 2266 .config_offset = PL080S_CH_CONFIG,
2267 .channels = 8, 2267 .channels = 8,
2268 .pl080s = true, 2268 .pl080s = true,
2269 .max_transfer_size = PL080S_CONTROL_TRANSFER_SIZE_MASK, 2269 .max_transfer_size = PL080S_CONTROL_TRANSFER_SIZE_MASK,
2270 }; 2270 };
2271 2271
2272 static struct vendor_data vendor_pl081 = { 2272 static struct vendor_data vendor_pl081 = {
2273 .config_offset = PL080_CH_CONFIG, 2273 .config_offset = PL080_CH_CONFIG,
2274 .channels = 2, 2274 .channels = 2,
2275 .dualmaster = false, 2275 .dualmaster = false,
2276 .max_transfer_size = PL080_CONTROL_TRANSFER_SIZE_MASK, 2276 .max_transfer_size = PL080_CONTROL_TRANSFER_SIZE_MASK,
2277 }; 2277 };
2278 2278
2279 static struct amba_id pl08x_ids[] = { 2279 static struct amba_id pl08x_ids[] = {
2280 /* Samsung PL080S variant */ 2280 /* Samsung PL080S variant */
2281 { 2281 {
2282 .id = 0x0a141080, 2282 .id = 0x0a141080,
2283 .mask = 0xffffffff, 2283 .mask = 0xffffffff,
2284 .data = &vendor_pl080s, 2284 .data = &vendor_pl080s,
2285 }, 2285 },
2286 /* PL080 */ 2286 /* PL080 */
2287 { 2287 {
2288 .id = 0x00041080, 2288 .id = 0x00041080,
2289 .mask = 0x000fffff, 2289 .mask = 0x000fffff,
2290 .data = &vendor_pl080, 2290 .data = &vendor_pl080,
2291 }, 2291 },
2292 /* PL081 */ 2292 /* PL081 */
2293 { 2293 {
2294 .id = 0x00041081, 2294 .id = 0x00041081,
2295 .mask = 0x000fffff, 2295 .mask = 0x000fffff,
2296 .data = &vendor_pl081, 2296 .data = &vendor_pl081,
2297 }, 2297 },
2298 /* Nomadik 8815 PL080 variant */ 2298 /* Nomadik 8815 PL080 variant */
2299 { 2299 {
2300 .id = 0x00280080, 2300 .id = 0x00280080,
2301 .mask = 0x00ffffff, 2301 .mask = 0x00ffffff,
2302 .data = &vendor_nomadik, 2302 .data = &vendor_nomadik,
2303 }, 2303 },
2304 { 0, 0 }, 2304 { 0, 0 },
2305 }; 2305 };
2306 2306
2307 MODULE_DEVICE_TABLE(amba, pl08x_ids); 2307 MODULE_DEVICE_TABLE(amba, pl08x_ids);
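The table entries are matched against the peripheral ID read from the device: roughly, an entry applies when (periphid & mask) == id, which is why the plain PL080 and PL081 entries mask off the revision and manufacturer nibbles (0x000fffff) while the Samsung PL080S entry requires the full 32-bit ID. A rough restatement of that rule, not the actual AMBA core code:

/* Hypothetical restatement of the AMBA bus match rule used with pl08x_ids[] */
static bool pl08x_id_matches(const struct amba_id *entry, u32 periphid)
{
        return (periphid & entry->mask) == entry->id;
}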
2308 2308
2309 static struct amba_driver pl08x_amba_driver = { 2309 static struct amba_driver pl08x_amba_driver = {
2310 .drv.name = DRIVER_NAME, 2310 .drv.name = DRIVER_NAME,
2311 .id_table = pl08x_ids, 2311 .id_table = pl08x_ids,
2312 .probe = pl08x_probe, 2312 .probe = pl08x_probe,
2313 }; 2313 };
2314 2314
2315 static int __init pl08x_init(void) 2315 static int __init pl08x_init(void)
2316 { 2316 {
2317 int retval; 2317 int retval;
2318 retval = amba_driver_register(&pl08x_amba_driver); 2318 retval = amba_driver_register(&pl08x_amba_driver);
2319 if (retval) 2319 if (retval)
2320 printk(KERN_WARNING DRIVER_NAME 2320 printk(KERN_WARNING DRIVER_NAME
2321 "failed to register as an AMBA device (%d)\n", 2321 "failed to register as an AMBA device (%d)\n",
2322 retval); 2322 retval);
2323 return retval; 2323 return retval;
2324 } 2324 }
2325 subsys_initcall(pl08x_init); 2325 subsys_initcall(pl08x_init);
2326 2326
drivers/dma/at_hdmac.c
1 /* 1 /*
2 * Driver for the Atmel AHB DMA Controller (aka HDMA or DMAC on AT91 systems) 2 * Driver for the Atmel AHB DMA Controller (aka HDMA or DMAC on AT91 systems)
3 * 3 *
4 * Copyright (C) 2008 Atmel Corporation 4 * Copyright (C) 2008 Atmel Corporation
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by 7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or 8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version. 9 * (at your option) any later version.
10 * 10 *
11 * 11 *
12 * This supports the Atmel AHB DMA Controller found in several Atmel SoCs. 12 * This supports the Atmel AHB DMA Controller found in several Atmel SoCs.
13 * The only Atmel DMA Controller that is not covered by this driver is the one 13 * The only Atmel DMA Controller that is not covered by this driver is the one
14 * found on AT91SAM9263. 14 * found on AT91SAM9263.
15 */ 15 */
16 16
17 #include <dt-bindings/dma/at91.h> 17 #include <dt-bindings/dma/at91.h>
18 #include <linux/clk.h> 18 #include <linux/clk.h>
19 #include <linux/dmaengine.h> 19 #include <linux/dmaengine.h>
20 #include <linux/dma-mapping.h> 20 #include <linux/dma-mapping.h>
21 #include <linux/dmapool.h> 21 #include <linux/dmapool.h>
22 #include <linux/interrupt.h> 22 #include <linux/interrupt.h>
23 #include <linux/module.h> 23 #include <linux/module.h>
24 #include <linux/platform_device.h> 24 #include <linux/platform_device.h>
25 #include <linux/slab.h> 25 #include <linux/slab.h>
26 #include <linux/of.h> 26 #include <linux/of.h>
27 #include <linux/of_device.h> 27 #include <linux/of_device.h>
28 #include <linux/of_dma.h> 28 #include <linux/of_dma.h>
29 29
30 #include "at_hdmac_regs.h" 30 #include "at_hdmac_regs.h"
31 #include "dmaengine.h" 31 #include "dmaengine.h"
32 32
33 /* 33 /*
34 * Glossary 34 * Glossary
35 * -------- 35 * --------
36 * 36 *
37 * at_hdmac : Name of the Atmel AHB DMA Controller 37 * at_hdmac : Name of the Atmel AHB DMA Controller
38 * at_dma_ / atdma : Atmel DMA controller entity related 38 * at_dma_ / atdma : Atmel DMA controller entity related
39 * atc_ / atchan : Atmel DMA Channel entity related 39 * atc_ / atchan : Atmel DMA Channel entity related
40 */ 40 */
41 41
42 #define ATC_DEFAULT_CFG (ATC_FIFOCFG_HALFFIFO) 42 #define ATC_DEFAULT_CFG (ATC_FIFOCFG_HALFFIFO)
43 #define ATC_DEFAULT_CTRLB (ATC_SIF(AT_DMA_MEM_IF) \ 43 #define ATC_DEFAULT_CTRLB (ATC_SIF(AT_DMA_MEM_IF) \
44 |ATC_DIF(AT_DMA_MEM_IF)) 44 |ATC_DIF(AT_DMA_MEM_IF))
45 45
46 /* 46 /*
47 * Initial number of descriptors to allocate for each channel. This could 47 * Initial number of descriptors to allocate for each channel. This could
48 * be increased during dma usage. 48 * be increased during dma usage.
49 */ 49 */
50 static unsigned int init_nr_desc_per_channel = 64; 50 static unsigned int init_nr_desc_per_channel = 64;
51 module_param(init_nr_desc_per_channel, uint, 0644); 51 module_param(init_nr_desc_per_channel, uint, 0644);
52 MODULE_PARM_DESC(init_nr_desc_per_channel, 52 MODULE_PARM_DESC(init_nr_desc_per_channel,
53 "initial descriptors per channel (default: 64)"); 53 "initial descriptors per channel (default: 64)");
54 54
55 55
56 /* prototypes */ 56 /* prototypes */
57 static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx); 57 static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx);
58 static void atc_issue_pending(struct dma_chan *chan); 58 static void atc_issue_pending(struct dma_chan *chan);
59 59
60 60
61 /*----------------------------------------------------------------------*/ 61 /*----------------------------------------------------------------------*/
62 62
63 static struct at_desc *atc_first_active(struct at_dma_chan *atchan) 63 static struct at_desc *atc_first_active(struct at_dma_chan *atchan)
64 { 64 {
65 return list_first_entry(&atchan->active_list, 65 return list_first_entry(&atchan->active_list,
66 struct at_desc, desc_node); 66 struct at_desc, desc_node);
67 } 67 }
68 68
69 static struct at_desc *atc_first_queued(struct at_dma_chan *atchan) 69 static struct at_desc *atc_first_queued(struct at_dma_chan *atchan)
70 { 70 {
71 return list_first_entry(&atchan->queue, 71 return list_first_entry(&atchan->queue,
72 struct at_desc, desc_node); 72 struct at_desc, desc_node);
73 } 73 }
74 74
75 /** 75 /**
76 * atc_alloc_descriptor - allocate and return an initialized descriptor 76 * atc_alloc_descriptor - allocate and return an initialized descriptor
77 * @chan: the channel to allocate descriptors for 77 * @chan: the channel to allocate descriptors for
78 * @gfp_flags: GFP allocation flags 78 * @gfp_flags: GFP allocation flags
79 * 79 *
80 * Note: The ack-bit is set in the descriptor flags at creation time 80 * Note: The ack-bit is set in the descriptor flags at creation time
81 * to make the initial allocation more convenient. This bit will be cleared 81 * to make the initial allocation more convenient. This bit will be cleared
82 * and control will be given to the client at usage time (during 82 * and control will be given to the client at usage time (during
83 * the preparation functions). 83 * the preparation functions).
84 */ 84 */
85 static struct at_desc *atc_alloc_descriptor(struct dma_chan *chan, 85 static struct at_desc *atc_alloc_descriptor(struct dma_chan *chan,
86 gfp_t gfp_flags) 86 gfp_t gfp_flags)
87 { 87 {
88 struct at_desc *desc = NULL; 88 struct at_desc *desc = NULL;
89 struct at_dma *atdma = to_at_dma(chan->device); 89 struct at_dma *atdma = to_at_dma(chan->device);
90 dma_addr_t phys; 90 dma_addr_t phys;
91 91
92 desc = dma_pool_alloc(atdma->dma_desc_pool, gfp_flags, &phys); 92 desc = dma_pool_alloc(atdma->dma_desc_pool, gfp_flags, &phys);
93 if (desc) { 93 if (desc) {
94 memset(desc, 0, sizeof(struct at_desc)); 94 memset(desc, 0, sizeof(struct at_desc));
95 INIT_LIST_HEAD(&desc->tx_list); 95 INIT_LIST_HEAD(&desc->tx_list);
96 dma_async_tx_descriptor_init(&desc->txd, chan); 96 dma_async_tx_descriptor_init(&desc->txd, chan);
97 /* txd.flags will be overwritten in prep functions */ 97 /* txd.flags will be overwritten in prep functions */
98 desc->txd.flags = DMA_CTRL_ACK; 98 desc->txd.flags = DMA_CTRL_ACK;
99 desc->txd.tx_submit = atc_tx_submit; 99 desc->txd.tx_submit = atc_tx_submit;
100 desc->txd.phys = phys; 100 desc->txd.phys = phys;
101 } 101 }
102 102
103 return desc; 103 return desc;
104 } 104 }
105 105
106 /** 106 /**
107 * atc_desc_get - get an unused descriptor from free_list 107 * atc_desc_get - get an unused descriptor from free_list
108 * @atchan: channel we want a new descriptor for 108 * @atchan: channel we want a new descriptor for
109 */ 109 */
110 static struct at_desc *atc_desc_get(struct at_dma_chan *atchan) 110 static struct at_desc *atc_desc_get(struct at_dma_chan *atchan)
111 { 111 {
112 struct at_desc *desc, *_desc; 112 struct at_desc *desc, *_desc;
113 struct at_desc *ret = NULL; 113 struct at_desc *ret = NULL;
114 unsigned long flags; 114 unsigned long flags;
115 unsigned int i = 0; 115 unsigned int i = 0;
116 LIST_HEAD(tmp_list); 116 LIST_HEAD(tmp_list);
117 117
118 spin_lock_irqsave(&atchan->lock, flags); 118 spin_lock_irqsave(&atchan->lock, flags);
119 list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) { 119 list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
120 i++; 120 i++;
121 if (async_tx_test_ack(&desc->txd)) { 121 if (async_tx_test_ack(&desc->txd)) {
122 list_del(&desc->desc_node); 122 list_del(&desc->desc_node);
123 ret = desc; 123 ret = desc;
124 break; 124 break;
125 } 125 }
126 dev_dbg(chan2dev(&atchan->chan_common), 126 dev_dbg(chan2dev(&atchan->chan_common),
127 "desc %p not ACKed\n", desc); 127 "desc %p not ACKed\n", desc);
128 } 128 }
129 spin_unlock_irqrestore(&atchan->lock, flags); 129 spin_unlock_irqrestore(&atchan->lock, flags);
130 dev_vdbg(chan2dev(&atchan->chan_common), 130 dev_vdbg(chan2dev(&atchan->chan_common),
131 "scanned %u descriptors on freelist\n", i); 131 "scanned %u descriptors on freelist\n", i);
132 132
133 /* no more descriptors available in the initial pool: create one more */ 133 /* no more descriptors available in the initial pool: create one more */
134 if (!ret) { 134 if (!ret) {
135 ret = atc_alloc_descriptor(&atchan->chan_common, GFP_ATOMIC); 135 ret = atc_alloc_descriptor(&atchan->chan_common, GFP_ATOMIC);
136 if (ret) { 136 if (ret) {
137 spin_lock_irqsave(&atchan->lock, flags); 137 spin_lock_irqsave(&atchan->lock, flags);
138 atchan->descs_allocated++; 138 atchan->descs_allocated++;
139 spin_unlock_irqrestore(&atchan->lock, flags); 139 spin_unlock_irqrestore(&atchan->lock, flags);
140 } else { 140 } else {
141 dev_err(chan2dev(&atchan->chan_common), 141 dev_err(chan2dev(&atchan->chan_common),
142 "not enough descriptors available\n"); 142 "not enough descriptors available\n");
143 } 143 }
144 } 144 }
145 145
146 return ret; 146 return ret;
147 } 147 }
148 148
149 /** 149 /**
150 * atc_desc_put - move a descriptor, including any children, to the free list 150 * atc_desc_put - move a descriptor, including any children, to the free list
151 * @atchan: channel we work on 151 * @atchan: channel we work on
152 * @desc: descriptor, at the head of a chain, to move to free list 152 * @desc: descriptor, at the head of a chain, to move to free list
153 */ 153 */
154 static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc) 154 static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc)
155 { 155 {
156 if (desc) { 156 if (desc) {
157 struct at_desc *child; 157 struct at_desc *child;
158 unsigned long flags; 158 unsigned long flags;
159 159
160 spin_lock_irqsave(&atchan->lock, flags); 160 spin_lock_irqsave(&atchan->lock, flags);
161 list_for_each_entry(child, &desc->tx_list, desc_node) 161 list_for_each_entry(child, &desc->tx_list, desc_node)
162 dev_vdbg(chan2dev(&atchan->chan_common), 162 dev_vdbg(chan2dev(&atchan->chan_common),
163 "moving child desc %p to freelist\n", 163 "moving child desc %p to freelist\n",
164 child); 164 child);
165 list_splice_init(&desc->tx_list, &atchan->free_list); 165 list_splice_init(&desc->tx_list, &atchan->free_list);
166 dev_vdbg(chan2dev(&atchan->chan_common), 166 dev_vdbg(chan2dev(&atchan->chan_common),
167 "moving desc %p to freelist\n", desc); 167 "moving desc %p to freelist\n", desc);
168 list_add(&desc->desc_node, &atchan->free_list); 168 list_add(&desc->desc_node, &atchan->free_list);
169 spin_unlock_irqrestore(&atchan->lock, flags); 169 spin_unlock_irqrestore(&atchan->lock, flags);
170 } 170 }
171 } 171 }
172 172
173 /** 173 /**
174 * atc_desc_chain - build chain adding a descriptor 174 * atc_desc_chain - build chain adding a descriptor
175 * @first: address of first descriptor of the chain 175 * @first: address of first descriptor of the chain
176 * @prev: address of previous descriptor of the chain 176 * @prev: address of previous descriptor of the chain
177 * @desc: descriptor to queue 177 * @desc: descriptor to queue
178 * 178 *
179 * Called from prep_* functions 179 * Called from prep_* functions
180 */ 180 */
181 static void atc_desc_chain(struct at_desc **first, struct at_desc **prev, 181 static void atc_desc_chain(struct at_desc **first, struct at_desc **prev,
182 struct at_desc *desc) 182 struct at_desc *desc)
183 { 183 {
184 if (!(*first)) { 184 if (!(*first)) {
185 *first = desc; 185 *first = desc;
186 } else { 186 } else {
187 /* inform the HW lli about chaining */ 187 /* inform the HW lli about chaining */
188 (*prev)->lli.dscr = desc->txd.phys; 188 (*prev)->lli.dscr = desc->txd.phys;
189 /* insert the link descriptor to the LD ring */ 189 /* insert the link descriptor to the LD ring */
190 list_add_tail(&desc->desc_node, 190 list_add_tail(&desc->desc_node,
191 &(*first)->tx_list); 191 &(*first)->tx_list);
192 } 192 }
193 *prev = desc; 193 *prev = desc;
194 } 194 }
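atc_desc_chain() is the building block the prep_*() routines use to stitch descriptors into a hardware LLI chain. A sketch of that usage pattern, where 'nr_segments' and the per-segment programming are placeholders rather than code from this driver:

/* Hypothetical sketch of how a prep_*() routine builds a chain */
struct at_desc *first = NULL, *prev = NULL, *desc;
unsigned int i;

for (i = 0; i < nr_segments; i++) {
        desc = atc_desc_get(atchan);
        if (!desc)
                goto err_desc_get;      /* caller would atc_desc_put(atchan, first) */
        /* ... program desc->lli for segment i ... */
        atc_desc_chain(&first, &prev, desc);
}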
195 195
196 /** 196 /**
197 * atc_dostart - starts the DMA engine for real 197 * atc_dostart - starts the DMA engine for real
198 * @atchan: the channel we want to start 198 * @atchan: the channel we want to start
199 * @first: first descriptor in the list we want to begin with 199 * @first: first descriptor in the list we want to begin with
200 * 200 *
201 * Called with atchan->lock held and bh disabled 201 * Called with atchan->lock held and bh disabled
202 */ 202 */
203 static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first) 203 static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first)
204 { 204 {
205 struct at_dma *atdma = to_at_dma(atchan->chan_common.device); 205 struct at_dma *atdma = to_at_dma(atchan->chan_common.device);
206 206
207 /* ASSERT: channel is idle */ 207 /* ASSERT: channel is idle */
208 if (atc_chan_is_enabled(atchan)) { 208 if (atc_chan_is_enabled(atchan)) {
209 dev_err(chan2dev(&atchan->chan_common), 209 dev_err(chan2dev(&atchan->chan_common),
210 "BUG: Attempted to start non-idle channel\n"); 210 "BUG: Attempted to start non-idle channel\n");
211 dev_err(chan2dev(&atchan->chan_common), 211 dev_err(chan2dev(&atchan->chan_common),
212 " channel: s0x%x d0x%x ctrl0x%x:0x%x l0x%x\n", 212 " channel: s0x%x d0x%x ctrl0x%x:0x%x l0x%x\n",
213 channel_readl(atchan, SADDR), 213 channel_readl(atchan, SADDR),
214 channel_readl(atchan, DADDR), 214 channel_readl(atchan, DADDR),
215 channel_readl(atchan, CTRLA), 215 channel_readl(atchan, CTRLA),
216 channel_readl(atchan, CTRLB), 216 channel_readl(atchan, CTRLB),
217 channel_readl(atchan, DSCR)); 217 channel_readl(atchan, DSCR));
218 218
219 /* The tasklet will hopefully advance the queue... */ 219 /* The tasklet will hopefully advance the queue... */
220 return; 220 return;
221 } 221 }
222 222
223 vdbg_dump_regs(atchan); 223 vdbg_dump_regs(atchan);
224 224
225 channel_writel(atchan, SADDR, 0); 225 channel_writel(atchan, SADDR, 0);
226 channel_writel(atchan, DADDR, 0); 226 channel_writel(atchan, DADDR, 0);
227 channel_writel(atchan, CTRLA, 0); 227 channel_writel(atchan, CTRLA, 0);
228 channel_writel(atchan, CTRLB, 0); 228 channel_writel(atchan, CTRLB, 0);
229 channel_writel(atchan, DSCR, first->txd.phys); 229 channel_writel(atchan, DSCR, first->txd.phys);
230 dma_writel(atdma, CHER, atchan->mask); 230 dma_writel(atdma, CHER, atchan->mask);
231 231
232 vdbg_dump_regs(atchan); 232 vdbg_dump_regs(atchan);
233 } 233 }
234 234
235 /* 235 /*
236 * atc_get_current_descriptors - 236 * atc_get_current_descriptors -
237 * locate the descriptor whose lli.dscr matches the physical address in DSCR 237 * locate the descriptor whose lli.dscr matches the physical address in DSCR
238 * @atchan: the channel we are working on 238 * @atchan: the channel we are working on
239 * @dscr_addr: physical descriptor address in DSCR 239 * @dscr_addr: physical descriptor address in DSCR
240 */ 240 */
241 static struct at_desc *atc_get_current_descriptors(struct at_dma_chan *atchan, 241 static struct at_desc *atc_get_current_descriptors(struct at_dma_chan *atchan,
242 u32 dscr_addr) 242 u32 dscr_addr)
243 { 243 {
244 struct at_desc *desc, *_desc, *child, *desc_cur = NULL; 244 struct at_desc *desc, *_desc, *child, *desc_cur = NULL;
245 245
246 list_for_each_entry_safe(desc, _desc, &atchan->active_list, desc_node) { 246 list_for_each_entry_safe(desc, _desc, &atchan->active_list, desc_node) {
247 if (desc->lli.dscr == dscr_addr) { 247 if (desc->lli.dscr == dscr_addr) {
248 desc_cur = desc; 248 desc_cur = desc;
249 break; 249 break;
250 } 250 }
251 251
252 list_for_each_entry(child, &desc->tx_list, desc_node) { 252 list_for_each_entry(child, &desc->tx_list, desc_node) {
253 if (child->lli.dscr == dscr_addr) { 253 if (child->lli.dscr == dscr_addr) {
254 desc_cur = child; 254 desc_cur = child;
255 break; 255 break;
256 } 256 }
257 } 257 }
258 } 258 }
259 259
260 return desc_cur; 260 return desc_cur;
261 } 261 }
262 262
263 /* 263 /*
264 * atc_get_bytes_left - 264 * atc_get_bytes_left -
265 * Get the number of residual bytes in the DMA buffer 265 * Get the number of residual bytes in the DMA buffer
266 * @chan: the channel to query 266 * @chan: the channel to query
267 */ 267 */
268 static int atc_get_bytes_left(struct dma_chan *chan) 268 static int atc_get_bytes_left(struct dma_chan *chan)
269 { 269 {
270 struct at_dma_chan *atchan = to_at_dma_chan(chan); 270 struct at_dma_chan *atchan = to_at_dma_chan(chan);
271 struct at_dma *atdma = to_at_dma(chan->device); 271 struct at_dma *atdma = to_at_dma(chan->device);
272 int chan_id = atchan->chan_common.chan_id; 272 int chan_id = atchan->chan_common.chan_id;
273 struct at_desc *desc_first = atc_first_active(atchan); 273 struct at_desc *desc_first = atc_first_active(atchan);
274 struct at_desc *desc_cur; 274 struct at_desc *desc_cur;
275 int ret = 0, count = 0; 275 int ret = 0, count = 0;
276 276
277 /* 277 /*
278 * Initialize the bookkeeping the first time through: 278 * Initialize the bookkeeping the first time through:
279 * remain_desc records the remaining descriptor length. 279 * remain_desc records the remaining descriptor length.
280 */ 280 */
281 if (atchan->remain_desc == 0) 281 if (atchan->remain_desc == 0)
282 /* First descriptor embeds the transaction length */ 282 /* First descriptor embeds the transaction length */
283 atchan->remain_desc = desc_first->len; 283 atchan->remain_desc = desc_first->len;
284 284
285 /* 285 /*
286 * This happens when the current descriptor transfer is complete: 286 * This happens when the current descriptor transfer is complete:
287 * reduce the residual buffer size by the completed descriptor length. 287 * reduce the residual buffer size by the completed descriptor length.
288 */ 288 */
289 if (unlikely(test_bit(ATC_IS_BTC, &atchan->status))) { 289 if (unlikely(test_bit(ATC_IS_BTC, &atchan->status))) {
290 clear_bit(ATC_IS_BTC, &atchan->status); 290 clear_bit(ATC_IS_BTC, &atchan->status);
291 desc_cur = atc_get_current_descriptors(atchan, 291 desc_cur = atc_get_current_descriptors(atchan,
292 channel_readl(atchan, DSCR)); 292 channel_readl(atchan, DSCR));
293 if (!desc_cur) { 293 if (!desc_cur) {
294 ret = -EINVAL; 294 ret = -EINVAL;
295 goto out; 295 goto out;
296 } 296 }
297 atchan->remain_desc -= (desc_cur->lli.ctrla & ATC_BTSIZE_MAX) 297 atchan->remain_desc -= (desc_cur->lli.ctrla & ATC_BTSIZE_MAX)
298 << (desc_first->tx_width); 298 << (desc_first->tx_width);
299 if (atchan->remain_desc < 0) { 299 if (atchan->remain_desc < 0) {
300 ret = -EINVAL; 300 ret = -EINVAL;
301 goto out; 301 goto out;
302 } else { 302 } else {
303 ret = atchan->remain_desc; 303 ret = atchan->remain_desc;
304 } 304 }
305 } else { 305 } else {
306 /* 306 /*
307 * Get the residual bytes while the current 307 * Get the residual bytes while the current
308 * descriptor transfer is in progress. 308 * descriptor transfer is in progress.
309 */ 309 */
310 count = (channel_readl(atchan, CTRLA) & ATC_BTSIZE_MAX) 310 count = (channel_readl(atchan, CTRLA) & ATC_BTSIZE_MAX)
311 << (desc_first->tx_width); 311 << (desc_first->tx_width);
312 ret = atchan->remain_desc - count; 312 ret = atchan->remain_desc - count;
313 } 313 }
314 /* 314 /*
315 * Check whether the FIFO is empty. 315 * Check whether the FIFO is empty.
316 */ 316 */
317 if (!(dma_readl(atdma, CHSR) & AT_DMA_EMPT(chan_id))) 317 if (!(dma_readl(atdma, CHSR) & AT_DMA_EMPT(chan_id)))
318 atc_issue_pending(chan); 318 atc_issue_pending(chan);
319 319
320 out: 320 out:
321 return ret; 321 return ret;
322 } 322 }
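/*
 * Editor's note (illustrative arithmetic only, restating the code above):
 *
 *   transfer in progress:  ret = remain_desc - ((CTRLA & ATC_BTSIZE_MAX) << tx_width)
 *   after a BTC event:     remain_desc -= (desc_cur->lli.ctrla & ATC_BTSIZE_MAX) << tx_width
 *
 * For example, with hypothetical numbers, a 4096-byte word-wide transfer
 * (tx_width = 2) whose CTRLA block-size field reads 256 reports
 * 4096 - (256 << 2) = 3072 bytes of residue.
 */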
323 323
324 /** 324 /**
325 * atc_chain_complete - finish work for one transaction chain 325 * atc_chain_complete - finish work for one transaction chain
326 * @atchan: channel we work on 326 * @atchan: channel we work on
327 * @desc: descriptor at the head of the chain we want to complete 327 * @desc: descriptor at the head of the chain we want to complete
328 * 328 *
329 * Called with atchan->lock held and bh disabled */ 329 * Called with atchan->lock held and bh disabled */
330 static void 330 static void
331 atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc) 331 atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
332 { 332 {
333 struct dma_async_tx_descriptor *txd = &desc->txd; 333 struct dma_async_tx_descriptor *txd = &desc->txd;
334 334
335 dev_vdbg(chan2dev(&atchan->chan_common), 335 dev_vdbg(chan2dev(&atchan->chan_common),
336 "descriptor %u complete\n", txd->cookie); 336 "descriptor %u complete\n", txd->cookie);
337 337
338 /* mark the descriptor as complete for non cyclic cases only */ 338 /* mark the descriptor as complete for non cyclic cases only */
339 if (!atc_chan_is_cyclic(atchan)) 339 if (!atc_chan_is_cyclic(atchan))
340 dma_cookie_complete(txd); 340 dma_cookie_complete(txd);
341 341
342 /* move children to free_list */ 342 /* move children to free_list */
343 list_splice_init(&desc->tx_list, &atchan->free_list); 343 list_splice_init(&desc->tx_list, &atchan->free_list);
344 /* move myself to free_list */ 344 /* move myself to free_list */
345 list_move(&desc->desc_node, &atchan->free_list); 345 list_move(&desc->desc_node, &atchan->free_list);
346 346
347 /* unmap dma addresses (not on slave channels) */ 347 /* unmap dma addresses (not on slave channels) */
348 if (!atchan->chan_common.private) { 348 if (!atchan->chan_common.private) {
349 struct device *parent = chan2parent(&atchan->chan_common); 349 struct device *parent = chan2parent(&atchan->chan_common);
350 if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) { 350 if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
351 if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE) 351 if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
352 dma_unmap_single(parent, 352 dma_unmap_single(parent,
353 desc->lli.daddr, 353 desc->lli.daddr,
354 desc->len, DMA_FROM_DEVICE); 354 desc->len, DMA_FROM_DEVICE);
355 else 355 else
356 dma_unmap_page(parent, 356 dma_unmap_page(parent,
357 desc->lli.daddr, 357 desc->lli.daddr,
358 desc->len, DMA_FROM_DEVICE); 358 desc->len, DMA_FROM_DEVICE);
359 } 359 }
360 if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) { 360 if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
361 if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE) 361 if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
362 dma_unmap_single(parent, 362 dma_unmap_single(parent,
363 desc->lli.saddr, 363 desc->lli.saddr,
364 desc->len, DMA_TO_DEVICE); 364 desc->len, DMA_TO_DEVICE);
365 else 365 else
366 dma_unmap_page(parent, 366 dma_unmap_page(parent,
367 desc->lli.saddr, 367 desc->lli.saddr,
368 desc->len, DMA_TO_DEVICE); 368 desc->len, DMA_TO_DEVICE);
369 } 369 }
370 } 370 }
371 371
372 /* for cyclic transfers, 372 /* for cyclic transfers,
373 * there is no need to replay the callback function while stopping */ 373 * there is no need to replay the callback function while stopping */
374 if (!atc_chan_is_cyclic(atchan)) { 374 if (!atc_chan_is_cyclic(atchan)) {
375 dma_async_tx_callback callback = txd->callback; 375 dma_async_tx_callback callback = txd->callback;
376 void *param = txd->callback_param; 376 void *param = txd->callback_param;
377 377
378 /* 378 /*
379 * The API requires that no submissions are done from a 379 * The API requires that no submissions are done from a
380 * callback, so we don't need to drop the lock here 380 * callback, so we don't need to drop the lock here
381 */ 381 */
382 if (callback) 382 if (callback)
383 callback(param); 383 callback(param);
384 } 384 }
385 385
386 dma_run_dependencies(txd); 386 dma_run_dependencies(txd);
387 } 387 }
388 388
389 /** 389 /**
390 * atc_complete_all - finish work for all transactions 390 * atc_complete_all - finish work for all transactions
391 * @atchan: channel to complete transactions for 391 * @atchan: channel to complete transactions for
392 * 392 *
393 * Also submit queued descriptors, if any 393 * Also submit queued descriptors, if any
394 * 394 *
395 * Assume channel is idle while calling this function 395 * Assume channel is idle while calling this function
396 * Called with atchan->lock held and bh disabled 396 * Called with atchan->lock held and bh disabled
397 */ 397 */
398 static void atc_complete_all(struct at_dma_chan *atchan) 398 static void atc_complete_all(struct at_dma_chan *atchan)
399 { 399 {
400 struct at_desc *desc, *_desc; 400 struct at_desc *desc, *_desc;
401 LIST_HEAD(list); 401 LIST_HEAD(list);
402 402
403 dev_vdbg(chan2dev(&atchan->chan_common), "complete all\n"); 403 dev_vdbg(chan2dev(&atchan->chan_common), "complete all\n");
404 404
405 /* 405 /*
406 * Submit queued descriptors ASAP, i.e. before we go through 406 * Submit queued descriptors ASAP, i.e. before we go through
407 * the completed ones. 407 * the completed ones.
408 */ 408 */
409 if (!list_empty(&atchan->queue)) 409 if (!list_empty(&atchan->queue))
410 atc_dostart(atchan, atc_first_queued(atchan)); 410 atc_dostart(atchan, atc_first_queued(atchan));
411 /* empty active_list now that it is completed */ 411 /* empty active_list now that it is completed */
412 list_splice_init(&atchan->active_list, &list); 412 list_splice_init(&atchan->active_list, &list);
413 /* empty queue list by moving descriptors (if any) to active_list */ 413 /* empty queue list by moving descriptors (if any) to active_list */
414 list_splice_init(&atchan->queue, &atchan->active_list); 414 list_splice_init(&atchan->queue, &atchan->active_list);
415 415
416 list_for_each_entry_safe(desc, _desc, &list, desc_node) 416 list_for_each_entry_safe(desc, _desc, &list, desc_node)
417 atc_chain_complete(atchan, desc); 417 atc_chain_complete(atchan, desc);
418 } 418 }
419 419
420 /** 420 /**
421 * atc_advance_work - at the end of a transaction, move forward 421 * atc_advance_work - at the end of a transaction, move forward
422 * @atchan: channel where the transaction ended 422 * @atchan: channel where the transaction ended
423 * 423 *
424 * Called with atchan->lock held and bh disabled 424 * Called with atchan->lock held and bh disabled
425 */ 425 */
426 static void atc_advance_work(struct at_dma_chan *atchan) 426 static void atc_advance_work(struct at_dma_chan *atchan)
427 { 427 {
428 dev_vdbg(chan2dev(&atchan->chan_common), "advance_work\n"); 428 dev_vdbg(chan2dev(&atchan->chan_common), "advance_work\n");
429 429
430 if (atc_chan_is_enabled(atchan)) 430 if (atc_chan_is_enabled(atchan))
431 return; 431 return;
432 432
433 if (list_empty(&atchan->active_list) || 433 if (list_empty(&atchan->active_list) ||
434 list_is_singular(&atchan->active_list)) { 434 list_is_singular(&atchan->active_list)) {
435 atc_complete_all(atchan); 435 atc_complete_all(atchan);
436 } else { 436 } else {
437 atc_chain_complete(atchan, atc_first_active(atchan)); 437 atc_chain_complete(atchan, atc_first_active(atchan));
438 /* advance work */ 438 /* advance work */
439 atc_dostart(atchan, atc_first_active(atchan)); 439 atc_dostart(atchan, atc_first_active(atchan));
440 } 440 }
441 } 441 }
442 442
443 443
444 /** 444 /**
445 * atc_handle_error - handle errors reported by DMA controller 445 * atc_handle_error - handle errors reported by DMA controller
446 * @atchan: channel where error occurs 446 * @atchan: channel where error occurs
447 * 447 *
448 * Called with atchan->lock held and bh disabled 448 * Called with atchan->lock held and bh disabled
449 */ 449 */
450 static void atc_handle_error(struct at_dma_chan *atchan) 450 static void atc_handle_error(struct at_dma_chan *atchan)
451 { 451 {
452 struct at_desc *bad_desc; 452 struct at_desc *bad_desc;
453 struct at_desc *child; 453 struct at_desc *child;
454 454
455 /* 455 /*
456 * The descriptor currently at the head of the active list is 456 * The descriptor currently at the head of the active list is
457 * broken. Since we don't have any way to report errors, we'll 457 * broken. Since we don't have any way to report errors, we'll
458 * just have to scream loudly and try to carry on. 458 * just have to scream loudly and try to carry on.
459 */ 459 */
460 bad_desc = atc_first_active(atchan); 460 bad_desc = atc_first_active(atchan);
461 list_del_init(&bad_desc->desc_node); 461 list_del_init(&bad_desc->desc_node);
462 462
463 /* As we are stopped, take the opportunity to push queued descriptors 463 /* As we are stopped, take the opportunity to push queued descriptors
464 * onto the active_list */ 464 * onto the active_list */
465 list_splice_init(&atchan->queue, atchan->active_list.prev); 465 list_splice_init(&atchan->queue, atchan->active_list.prev);
466 466
467 /* Try to restart the controller */ 467 /* Try to restart the controller */
468 if (!list_empty(&atchan->active_list)) 468 if (!list_empty(&atchan->active_list))
469 atc_dostart(atchan, atc_first_active(atchan)); 469 atc_dostart(atchan, atc_first_active(atchan));
470 470
471 /* 471 /*
472 * KERN_CRIT may seem harsh, but since this only happens 472 * KERN_CRIT may seem harsh, but since this only happens
473 * when someone submits a bad physical address in a 473 * when someone submits a bad physical address in a
474 * descriptor, we should consider ourselves lucky that the 474 * descriptor, we should consider ourselves lucky that the
475 * controller flagged an error instead of scribbling over 475 * controller flagged an error instead of scribbling over
476 * random memory locations. 476 * random memory locations.
477 */ 477 */
478 dev_crit(chan2dev(&atchan->chan_common), 478 dev_crit(chan2dev(&atchan->chan_common),
479 "Bad descriptor submitted for DMA!\n"); 479 "Bad descriptor submitted for DMA!\n");
480 dev_crit(chan2dev(&atchan->chan_common), 480 dev_crit(chan2dev(&atchan->chan_common),
481 " cookie: %d\n", bad_desc->txd.cookie); 481 " cookie: %d\n", bad_desc->txd.cookie);
482 atc_dump_lli(atchan, &bad_desc->lli); 482 atc_dump_lli(atchan, &bad_desc->lli);
483 list_for_each_entry(child, &bad_desc->tx_list, desc_node) 483 list_for_each_entry(child, &bad_desc->tx_list, desc_node)
484 atc_dump_lli(atchan, &child->lli); 484 atc_dump_lli(atchan, &child->lli);
485 485
486 /* Pretend the descriptor completed successfully */ 486 /* Pretend the descriptor completed successfully */
487 atc_chain_complete(atchan, bad_desc); 487 atc_chain_complete(atchan, bad_desc);
488 } 488 }
489 489
490 /** 490 /**
491 * atc_handle_cyclic - at the end of a period, run callback function 491 * atc_handle_cyclic - at the end of a period, run callback function
492 * @atchan: channel used for cyclic operations 492 * @atchan: channel used for cyclic operations
493 * 493 *
494 * Called with atchan->lock held and bh disabled 494 * Called with atchan->lock held and bh disabled
495 */ 495 */
496 static void atc_handle_cyclic(struct at_dma_chan *atchan) 496 static void atc_handle_cyclic(struct at_dma_chan *atchan)
497 { 497 {
498 struct at_desc *first = atc_first_active(atchan); 498 struct at_desc *first = atc_first_active(atchan);
499 struct dma_async_tx_descriptor *txd = &first->txd; 499 struct dma_async_tx_descriptor *txd = &first->txd;
500 dma_async_tx_callback callback = txd->callback; 500 dma_async_tx_callback callback = txd->callback;
501 void *param = txd->callback_param; 501 void *param = txd->callback_param;
502 502
503 dev_vdbg(chan2dev(&atchan->chan_common), 503 dev_vdbg(chan2dev(&atchan->chan_common),
504 "new cyclic period llp 0x%08x\n", 504 "new cyclic period llp 0x%08x\n",
505 channel_readl(atchan, DSCR)); 505 channel_readl(atchan, DSCR));
506 506
507 if (callback) 507 if (callback)
508 callback(param); 508 callback(param);
509 } 509 }
510 510
511 /*-- IRQ & Tasklet ---------------------------------------------------*/ 511 /*-- IRQ & Tasklet ---------------------------------------------------*/
512 512
513 static void atc_tasklet(unsigned long data) 513 static void atc_tasklet(unsigned long data)
514 { 514 {
515 struct at_dma_chan *atchan = (struct at_dma_chan *)data; 515 struct at_dma_chan *atchan = (struct at_dma_chan *)data;
516 unsigned long flags; 516 unsigned long flags;
517 517
518 spin_lock_irqsave(&atchan->lock, flags); 518 spin_lock_irqsave(&atchan->lock, flags);
519 if (test_and_clear_bit(ATC_IS_ERROR, &atchan->status)) 519 if (test_and_clear_bit(ATC_IS_ERROR, &atchan->status))
520 atc_handle_error(atchan); 520 atc_handle_error(atchan);
521 else if (atc_chan_is_cyclic(atchan)) 521 else if (atc_chan_is_cyclic(atchan))
522 atc_handle_cyclic(atchan); 522 atc_handle_cyclic(atchan);
523 else 523 else
524 atc_advance_work(atchan); 524 atc_advance_work(atchan);
525 525
526 spin_unlock_irqrestore(&atchan->lock, flags); 526 spin_unlock_irqrestore(&atchan->lock, flags);
527 } 527 }
528 528
529 static irqreturn_t at_dma_interrupt(int irq, void *dev_id) 529 static irqreturn_t at_dma_interrupt(int irq, void *dev_id)
530 { 530 {
531 struct at_dma *atdma = (struct at_dma *)dev_id; 531 struct at_dma *atdma = (struct at_dma *)dev_id;
532 struct at_dma_chan *atchan; 532 struct at_dma_chan *atchan;
533 int i; 533 int i;
534 u32 status, pending, imr; 534 u32 status, pending, imr;
535 int ret = IRQ_NONE; 535 int ret = IRQ_NONE;
536 536
537 do { 537 do {
538 imr = dma_readl(atdma, EBCIMR); 538 imr = dma_readl(atdma, EBCIMR);
539 status = dma_readl(atdma, EBCISR); 539 status = dma_readl(atdma, EBCISR);
540 pending = status & imr; 540 pending = status & imr;
541 541
542 if (!pending) 542 if (!pending)
543 break; 543 break;
544 544
545 dev_vdbg(atdma->dma_common.dev, 545 dev_vdbg(atdma->dma_common.dev,
546 "interrupt: status = 0x%08x, 0x%08x, 0x%08x\n", 546 "interrupt: status = 0x%08x, 0x%08x, 0x%08x\n",
547 status, imr, pending); 547 status, imr, pending);
548 548
549 for (i = 0; i < atdma->dma_common.chancnt; i++) { 549 for (i = 0; i < atdma->dma_common.chancnt; i++) {
550 atchan = &atdma->chan[i]; 550 atchan = &atdma->chan[i];
551 if (pending & (AT_DMA_BTC(i) | AT_DMA_ERR(i))) { 551 if (pending & (AT_DMA_BTC(i) | AT_DMA_ERR(i))) {
552 if (pending & AT_DMA_ERR(i)) { 552 if (pending & AT_DMA_ERR(i)) {
553 /* Disable channel on AHB error */ 553 /* Disable channel on AHB error */
554 dma_writel(atdma, CHDR, 554 dma_writel(atdma, CHDR,
555 AT_DMA_RES(i) | atchan->mask); 555 AT_DMA_RES(i) | atchan->mask);
556 /* Give information to tasklet */ 556 /* Give information to tasklet */
557 set_bit(ATC_IS_ERROR, &atchan->status); 557 set_bit(ATC_IS_ERROR, &atchan->status);
558 } 558 }
559 if (pending & AT_DMA_BTC(i)) 559 if (pending & AT_DMA_BTC(i))
560 set_bit(ATC_IS_BTC, &atchan->status); 560 set_bit(ATC_IS_BTC, &atchan->status);
561 tasklet_schedule(&atchan->tasklet); 561 tasklet_schedule(&atchan->tasklet);
562 ret = IRQ_HANDLED; 562 ret = IRQ_HANDLED;
563 } 563 }
564 } 564 }
565 565
566 } while (pending); 566 } while (pending);
567 567
568 return ret; 568 return ret;
569 } 569 }
570 570
571 571
572 /*-- DMA Engine API --------------------------------------------------*/ 572 /*-- DMA Engine API --------------------------------------------------*/
573 573
574 /** 574 /**
575 * atc_tx_submit - set the prepared descriptor(s) to be executed by the engine 575 * atc_tx_submit - set the prepared descriptor(s) to be executed by the engine
576 * @desc: descriptor at the head of the transaction chain 576 * @desc: descriptor at the head of the transaction chain
577 * 577 *
578 * Queue chain if DMA engine is working already 578 * Queue chain if DMA engine is working already
579 * 579 *
580 * Cookie increment and adding to active_list or queue must be atomic 580 * Cookie increment and adding to active_list or queue must be atomic
581 */ 581 */
582 static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx) 582 static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx)
583 { 583 {
584 struct at_desc *desc = txd_to_at_desc(tx); 584 struct at_desc *desc = txd_to_at_desc(tx);
585 struct at_dma_chan *atchan = to_at_dma_chan(tx->chan); 585 struct at_dma_chan *atchan = to_at_dma_chan(tx->chan);
586 dma_cookie_t cookie; 586 dma_cookie_t cookie;
587 unsigned long flags; 587 unsigned long flags;
588 588
589 spin_lock_irqsave(&atchan->lock, flags); 589 spin_lock_irqsave(&atchan->lock, flags);
590 cookie = dma_cookie_assign(tx); 590 cookie = dma_cookie_assign(tx);
591 591
592 if (list_empty(&atchan->active_list)) { 592 if (list_empty(&atchan->active_list)) {
593 dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n", 593 dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n",
594 desc->txd.cookie); 594 desc->txd.cookie);
595 atc_dostart(atchan, desc); 595 atc_dostart(atchan, desc);
596 list_add_tail(&desc->desc_node, &atchan->active_list); 596 list_add_tail(&desc->desc_node, &atchan->active_list);
597 } else { 597 } else {
598 dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n", 598 dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
599 desc->txd.cookie); 599 desc->txd.cookie);
600 list_add_tail(&desc->desc_node, &atchan->queue); 600 list_add_tail(&desc->desc_node, &atchan->queue);
601 } 601 }
602 602
603 spin_unlock_irqrestore(&atchan->lock, flags); 603 spin_unlock_irqrestore(&atchan->lock, flags);
604 604
605 return cookie; 605 return cookie;
606 } 606 }
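/*
 * Editor's sketch (hedged, not part of the original source): how a generic
 * dmaengine client reaches atc_tx_submit() above.  dmaengine_submit()
 * invokes the descriptor's tx_submit callback, i.e. atc_tx_submit(), and
 * dma_async_issue_pending() later lands in atc_issue_pending().  The
 * wrapper function name is hypothetical.
 */
static int atc_submit_sketch(struct dma_chan *chan,
			     struct dma_async_tx_descriptor *txd)
{
	dma_cookie_t cookie;

	cookie = dmaengine_submit(txd);
	if (dma_submit_error(cookie))
		return -EINVAL;

	dma_async_issue_pending(chan);

	return 0;
}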
607 607
608 /** 608 /**
609 * atc_prep_dma_memcpy - prepare a memcpy operation 609 * atc_prep_dma_memcpy - prepare a memcpy operation
610 * @chan: the channel to prepare operation on 610 * @chan: the channel to prepare operation on
611 * @dest: operation virtual destination address 611 * @dest: operation virtual destination address
612 * @src: operation virtual source address 612 * @src: operation virtual source address
613 * @len: operation length 613 * @len: operation length
614 * @flags: tx descriptor status flags 614 * @flags: tx descriptor status flags
615 */ 615 */
616 static struct dma_async_tx_descriptor * 616 static struct dma_async_tx_descriptor *
617 atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, 617 atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
618 size_t len, unsigned long flags) 618 size_t len, unsigned long flags)
619 { 619 {
620 struct at_dma_chan *atchan = to_at_dma_chan(chan); 620 struct at_dma_chan *atchan = to_at_dma_chan(chan);
621 struct at_desc *desc = NULL; 621 struct at_desc *desc = NULL;
622 struct at_desc *first = NULL; 622 struct at_desc *first = NULL;
623 struct at_desc *prev = NULL; 623 struct at_desc *prev = NULL;
624 size_t xfer_count; 624 size_t xfer_count;
625 size_t offset; 625 size_t offset;
626 unsigned int src_width; 626 unsigned int src_width;
627 unsigned int dst_width; 627 unsigned int dst_width;
628 u32 ctrla; 628 u32 ctrla;
629 u32 ctrlb; 629 u32 ctrlb;
630 630
631 dev_vdbg(chan2dev(chan), "prep_dma_memcpy: d0x%x s0x%x l0x%zx f0x%lx\n", 631 dev_vdbg(chan2dev(chan), "prep_dma_memcpy: d0x%x s0x%x l0x%zx f0x%lx\n",
632 dest, src, len, flags); 632 dest, src, len, flags);
633 633
634 if (unlikely(!len)) { 634 if (unlikely(!len)) {
635 dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n"); 635 dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
636 return NULL; 636 return NULL;
637 } 637 }
638 638
639 ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN 639 ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN
640 | ATC_SRC_ADDR_MODE_INCR 640 | ATC_SRC_ADDR_MODE_INCR
641 | ATC_DST_ADDR_MODE_INCR 641 | ATC_DST_ADDR_MODE_INCR
642 | ATC_FC_MEM2MEM; 642 | ATC_FC_MEM2MEM;
643 643
644 /* 644 /*
645 * We can be a lot more clever here, but this should take care 645 * We can be a lot more clever here, but this should take care
646 * of the most common optimization. 646 * of the most common optimization.
647 */ 647 */
648 if (!((src | dest | len) & 3)) { 648 if (!((src | dest | len) & 3)) {
649 ctrla = ATC_SRC_WIDTH_WORD | ATC_DST_WIDTH_WORD; 649 ctrla = ATC_SRC_WIDTH_WORD | ATC_DST_WIDTH_WORD;
650 src_width = dst_width = 2; 650 src_width = dst_width = 2;
651 } else if (!((src | dest | len) & 1)) { 651 } else if (!((src | dest | len) & 1)) {
652 ctrla = ATC_SRC_WIDTH_HALFWORD | ATC_DST_WIDTH_HALFWORD; 652 ctrla = ATC_SRC_WIDTH_HALFWORD | ATC_DST_WIDTH_HALFWORD;
653 src_width = dst_width = 1; 653 src_width = dst_width = 1;
654 } else { 654 } else {
655 ctrla = ATC_SRC_WIDTH_BYTE | ATC_DST_WIDTH_BYTE; 655 ctrla = ATC_SRC_WIDTH_BYTE | ATC_DST_WIDTH_BYTE;
656 src_width = dst_width = 0; 656 src_width = dst_width = 0;
657 } 657 }
658 658
659 for (offset = 0; offset < len; offset += xfer_count << src_width) { 659 for (offset = 0; offset < len; offset += xfer_count << src_width) {
660 xfer_count = min_t(size_t, (len - offset) >> src_width, 660 xfer_count = min_t(size_t, (len - offset) >> src_width,
661 ATC_BTSIZE_MAX); 661 ATC_BTSIZE_MAX);
662 662
663 desc = atc_desc_get(atchan); 663 desc = atc_desc_get(atchan);
664 if (!desc) 664 if (!desc)
665 goto err_desc_get; 665 goto err_desc_get;
666 666
667 desc->lli.saddr = src + offset; 667 desc->lli.saddr = src + offset;
668 desc->lli.daddr = dest + offset; 668 desc->lli.daddr = dest + offset;
669 desc->lli.ctrla = ctrla | xfer_count; 669 desc->lli.ctrla = ctrla | xfer_count;
670 desc->lli.ctrlb = ctrlb; 670 desc->lli.ctrlb = ctrlb;
671 671
672 desc->txd.cookie = 0; 672 desc->txd.cookie = 0;
673 673
674 atc_desc_chain(&first, &prev, desc); 674 atc_desc_chain(&first, &prev, desc);
675 } 675 }
676 676
677 /* First descriptor of the chain embeds additional information */ 677 /* First descriptor of the chain embeds additional information */
678 first->txd.cookie = -EBUSY; 678 first->txd.cookie = -EBUSY;
679 first->len = len; 679 first->len = len;
680 first->tx_width = src_width; 680 first->tx_width = src_width;
681 681
682 /* set end-of-link to the last link descriptor of the list */ 682 /* set end-of-link to the last link descriptor of the list */
683 set_desc_eol(desc); 683 set_desc_eol(desc);
684 684
685 first->txd.flags = flags; /* client is in control of this ack */ 685 first->txd.flags = flags; /* client is in control of this ack */
686 686
687 return &first->txd; 687 return &first->txd;
688 688
689 err_desc_get: 689 err_desc_get:
690 atc_desc_put(atchan, first); 690 atc_desc_put(atchan, first);
691 return NULL; 691 return NULL;
692 } 692 }
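/*
 * Editor's note: a worked example with hypothetical numbers for the
 * chunking done by atc_prep_dma_memcpy().  With source, destination and
 * length all word aligned, src_width = dst_width = 2 and each LLI moves at
 * most ATC_BTSIZE_MAX words, so (assuming ATC_BTSIZE_MAX is 0xffff) a
 * 1 MiB copy needs
 *
 *   DIV_ROUND_UP((1 << 20) >> 2, 0xffff) = DIV_ROUND_UP(262144, 65535) = 5
 *
 * link descriptors; the first carries the total length and the last gets
 * the end-of-link bit via set_desc_eol().
 */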
693 693
694 694
695 /** 695 /**
696 * atc_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction 696 * atc_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
697 * @chan: DMA channel 697 * @chan: DMA channel
698 * @sgl: scatterlist to transfer to/from 698 * @sgl: scatterlist to transfer to/from
699 * @sg_len: number of entries in @sgl 699 * @sg_len: number of entries in @sgl
700 * @direction: DMA direction 700 * @direction: DMA direction
701 * @flags: tx descriptor status flags 701 * @flags: tx descriptor status flags
702 * @context: transaction context (ignored) 702 * @context: transaction context (ignored)
703 */ 703 */
704 static struct dma_async_tx_descriptor * 704 static struct dma_async_tx_descriptor *
705 atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, 705 atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
706 unsigned int sg_len, enum dma_transfer_direction direction, 706 unsigned int sg_len, enum dma_transfer_direction direction,
707 unsigned long flags, void *context) 707 unsigned long flags, void *context)
708 { 708 {
709 struct at_dma_chan *atchan = to_at_dma_chan(chan); 709 struct at_dma_chan *atchan = to_at_dma_chan(chan);
710 struct at_dma_slave *atslave = chan->private; 710 struct at_dma_slave *atslave = chan->private;
711 struct dma_slave_config *sconfig = &atchan->dma_sconfig; 711 struct dma_slave_config *sconfig = &atchan->dma_sconfig;
712 struct at_desc *first = NULL; 712 struct at_desc *first = NULL;
713 struct at_desc *prev = NULL; 713 struct at_desc *prev = NULL;
714 u32 ctrla; 714 u32 ctrla;
715 u32 ctrlb; 715 u32 ctrlb;
716 dma_addr_t reg; 716 dma_addr_t reg;
717 unsigned int reg_width; 717 unsigned int reg_width;
718 unsigned int mem_width; 718 unsigned int mem_width;
719 unsigned int i; 719 unsigned int i;
720 struct scatterlist *sg; 720 struct scatterlist *sg;
721 size_t total_len = 0; 721 size_t total_len = 0;
722 722
723 dev_vdbg(chan2dev(chan), "prep_slave_sg (%d): %s f0x%lx\n", 723 dev_vdbg(chan2dev(chan), "prep_slave_sg (%d): %s f0x%lx\n",
724 sg_len, 724 sg_len,
725 direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE", 725 direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE",
726 flags); 726 flags);
727 727
728 if (unlikely(!atslave || !sg_len)) { 728 if (unlikely(!atslave || !sg_len)) {
729 dev_dbg(chan2dev(chan), "prep_slave_sg: sg length is zero!\n"); 729 dev_dbg(chan2dev(chan), "prep_slave_sg: sg length is zero!\n");
730 return NULL; 730 return NULL;
731 } 731 }
732 732
733 ctrla = ATC_SCSIZE(sconfig->src_maxburst) 733 ctrla = ATC_SCSIZE(sconfig->src_maxburst)
734 | ATC_DCSIZE(sconfig->dst_maxburst); 734 | ATC_DCSIZE(sconfig->dst_maxburst);
735 ctrlb = ATC_IEN; 735 ctrlb = ATC_IEN;
736 736
737 switch (direction) { 737 switch (direction) {
738 case DMA_MEM_TO_DEV: 738 case DMA_MEM_TO_DEV:
739 reg_width = convert_buswidth(sconfig->dst_addr_width); 739 reg_width = convert_buswidth(sconfig->dst_addr_width);
740 ctrla |= ATC_DST_WIDTH(reg_width); 740 ctrla |= ATC_DST_WIDTH(reg_width);
741 ctrlb |= ATC_DST_ADDR_MODE_FIXED 741 ctrlb |= ATC_DST_ADDR_MODE_FIXED
742 | ATC_SRC_ADDR_MODE_INCR 742 | ATC_SRC_ADDR_MODE_INCR
743 | ATC_FC_MEM2PER 743 | ATC_FC_MEM2PER
744 | ATC_SIF(atchan->mem_if) | ATC_DIF(atchan->per_if); 744 | ATC_SIF(atchan->mem_if) | ATC_DIF(atchan->per_if);
745 reg = sconfig->dst_addr; 745 reg = sconfig->dst_addr;
746 for_each_sg(sgl, sg, sg_len, i) { 746 for_each_sg(sgl, sg, sg_len, i) {
747 struct at_desc *desc; 747 struct at_desc *desc;
748 u32 len; 748 u32 len;
749 u32 mem; 749 u32 mem;
750 750
751 desc = atc_desc_get(atchan); 751 desc = atc_desc_get(atchan);
752 if (!desc) 752 if (!desc)
753 goto err_desc_get; 753 goto err_desc_get;
754 754
755 mem = sg_dma_address(sg); 755 mem = sg_dma_address(sg);
756 len = sg_dma_len(sg); 756 len = sg_dma_len(sg);
757 if (unlikely(!len)) { 757 if (unlikely(!len)) {
758 dev_dbg(chan2dev(chan), 758 dev_dbg(chan2dev(chan),
759 "prep_slave_sg: sg(%d) data length is zero\n", i); 759 "prep_slave_sg: sg(%d) data length is zero\n", i);
760 goto err; 760 goto err;
761 } 761 }
762 mem_width = 2; 762 mem_width = 2;
763 if (unlikely(mem & 3 || len & 3)) 763 if (unlikely(mem & 3 || len & 3))
764 mem_width = 0; 764 mem_width = 0;
765 765
766 desc->lli.saddr = mem; 766 desc->lli.saddr = mem;
767 desc->lli.daddr = reg; 767 desc->lli.daddr = reg;
768 desc->lli.ctrla = ctrla 768 desc->lli.ctrla = ctrla
769 | ATC_SRC_WIDTH(mem_width) 769 | ATC_SRC_WIDTH(mem_width)
770 | len >> mem_width; 770 | len >> mem_width;
771 desc->lli.ctrlb = ctrlb; 771 desc->lli.ctrlb = ctrlb;
772 772
773 atc_desc_chain(&first, &prev, desc); 773 atc_desc_chain(&first, &prev, desc);
774 total_len += len; 774 total_len += len;
775 } 775 }
776 break; 776 break;
777 case DMA_DEV_TO_MEM: 777 case DMA_DEV_TO_MEM:
778 reg_width = convert_buswidth(sconfig->src_addr_width); 778 reg_width = convert_buswidth(sconfig->src_addr_width);
779 ctrla |= ATC_SRC_WIDTH(reg_width); 779 ctrla |= ATC_SRC_WIDTH(reg_width);
780 ctrlb |= ATC_DST_ADDR_MODE_INCR 780 ctrlb |= ATC_DST_ADDR_MODE_INCR
781 | ATC_SRC_ADDR_MODE_FIXED 781 | ATC_SRC_ADDR_MODE_FIXED
782 | ATC_FC_PER2MEM 782 | ATC_FC_PER2MEM
783 | ATC_SIF(atchan->per_if) | ATC_DIF(atchan->mem_if); 783 | ATC_SIF(atchan->per_if) | ATC_DIF(atchan->mem_if);
784 784
785 reg = sconfig->src_addr; 785 reg = sconfig->src_addr;
786 for_each_sg(sgl, sg, sg_len, i) { 786 for_each_sg(sgl, sg, sg_len, i) {
787 struct at_desc *desc; 787 struct at_desc *desc;
788 u32 len; 788 u32 len;
789 u32 mem; 789 u32 mem;
790 790
791 desc = atc_desc_get(atchan); 791 desc = atc_desc_get(atchan);
792 if (!desc) 792 if (!desc)
793 goto err_desc_get; 793 goto err_desc_get;
794 794
795 mem = sg_dma_address(sg); 795 mem = sg_dma_address(sg);
796 len = sg_dma_len(sg); 796 len = sg_dma_len(sg);
797 if (unlikely(!len)) { 797 if (unlikely(!len)) {
798 dev_dbg(chan2dev(chan), 798 dev_dbg(chan2dev(chan),
799 "prep_slave_sg: sg(%d) data length is zero\n", i); 799 "prep_slave_sg: sg(%d) data length is zero\n", i);
800 goto err; 800 goto err;
801 } 801 }
802 mem_width = 2; 802 mem_width = 2;
803 if (unlikely(mem & 3 || len & 3)) 803 if (unlikely(mem & 3 || len & 3))
804 mem_width = 0; 804 mem_width = 0;
805 805
806 desc->lli.saddr = reg; 806 desc->lli.saddr = reg;
807 desc->lli.daddr = mem; 807 desc->lli.daddr = mem;
808 desc->lli.ctrla = ctrla 808 desc->lli.ctrla = ctrla
809 | ATC_DST_WIDTH(mem_width) 809 | ATC_DST_WIDTH(mem_width)
810 | len >> reg_width; 810 | len >> reg_width;
811 desc->lli.ctrlb = ctrlb; 811 desc->lli.ctrlb = ctrlb;
812 812
813 atc_desc_chain(&first, &prev, desc); 813 atc_desc_chain(&first, &prev, desc);
814 total_len += len; 814 total_len += len;
815 } 815 }
816 break; 816 break;
817 default: 817 default:
818 return NULL; 818 return NULL;
819 } 819 }
820 820
821 /* set end-of-link to the last link descriptor of the list */ 821 /* set end-of-link to the last link descriptor of the list */
822 set_desc_eol(prev); 822 set_desc_eol(prev);
823 823
824 /* First descriptor of the chain embeds additional information */ 824 /* First descriptor of the chain embeds additional information */
825 first->txd.cookie = -EBUSY; 825 first->txd.cookie = -EBUSY;
826 first->len = total_len; 826 first->len = total_len;
827 first->tx_width = reg_width; 827 first->tx_width = reg_width;
828 828
829 /* the first link descriptor of the list is responsible for the flags */ 829 /* the first link descriptor of the list is responsible for the flags */
830 first->txd.flags = flags; /* client is in control of this ack */ 830 first->txd.flags = flags; /* client is in control of this ack */
831 831
832 return &first->txd; 832 return &first->txd;
833 833
834 err_desc_get: 834 err_desc_get:
835 dev_err(chan2dev(chan), "not enough descriptors available\n"); 835 dev_err(chan2dev(chan), "not enough descriptors available\n");
836 err: 836 err:
837 atc_desc_put(atchan, first); 837 atc_desc_put(atchan, first);
838 return NULL; 838 return NULL;
839 } 839 }
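/*
 * Editor's sketch (hedged): typical client-side usage that ends up in
 * set_runtime_config() and atc_prep_slave_sg() above via the generic
 * dmaengine wrappers.  The FIFO address, bus width and burst size are
 * placeholders, not values taken from this driver.
 */
static struct dma_async_tx_descriptor *
atc_slave_tx_sketch(struct dma_chan *chan, struct scatterlist *sgl,
		    unsigned int sg_len, dma_addr_t fifo_addr)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_MEM_TO_DEV,
		.dst_addr	= fifo_addr,
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst	= 16,
	};

	/* routed to atc_control(chan, DMA_SLAVE_CONFIG, ...) */
	if (dmaengine_slave_config(chan, &cfg))
		return NULL;

	return dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_MEM_TO_DEV,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
}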
840 840
841 /** 841 /**
842 * atc_dma_cyclic_check_values 842 * atc_dma_cyclic_check_values
843 * Check for too big/unaligned periods and unaligned DMA buffer 843 * Check for too big/unaligned periods and unaligned DMA buffer
844 */ 844 */
845 static int 845 static int
846 atc_dma_cyclic_check_values(unsigned int reg_width, dma_addr_t buf_addr, 846 atc_dma_cyclic_check_values(unsigned int reg_width, dma_addr_t buf_addr,
847 size_t period_len) 847 size_t period_len)
848 { 848 {
849 if (period_len > (ATC_BTSIZE_MAX << reg_width)) 849 if (period_len > (ATC_BTSIZE_MAX << reg_width))
850 goto err_out; 850 goto err_out;
851 if (unlikely(period_len & ((1 << reg_width) - 1))) 851 if (unlikely(period_len & ((1 << reg_width) - 1)))
852 goto err_out; 852 goto err_out;
853 if (unlikely(buf_addr & ((1 << reg_width) - 1))) 853 if (unlikely(buf_addr & ((1 << reg_width) - 1)))
854 goto err_out; 854 goto err_out;
855 855
856 return 0; 856 return 0;
857 857
858 err_out: 858 err_out:
859 return -EINVAL; 859 return -EINVAL;
860 } 860 }
861 861
862 /** 862 /**
863 * atc_dma_cyclic_fill_desc - Fill one period descriptor 863 * atc_dma_cyclic_fill_desc - Fill one period descriptor
864 */ 864 */
865 static int 865 static int
866 atc_dma_cyclic_fill_desc(struct dma_chan *chan, struct at_desc *desc, 866 atc_dma_cyclic_fill_desc(struct dma_chan *chan, struct at_desc *desc,
867 unsigned int period_index, dma_addr_t buf_addr, 867 unsigned int period_index, dma_addr_t buf_addr,
868 unsigned int reg_width, size_t period_len, 868 unsigned int reg_width, size_t period_len,
869 enum dma_transfer_direction direction) 869 enum dma_transfer_direction direction)
870 { 870 {
871 struct at_dma_chan *atchan = to_at_dma_chan(chan); 871 struct at_dma_chan *atchan = to_at_dma_chan(chan);
872 struct dma_slave_config *sconfig = &atchan->dma_sconfig; 872 struct dma_slave_config *sconfig = &atchan->dma_sconfig;
873 u32 ctrla; 873 u32 ctrla;
874 874
875 /* prepare common CTRLA value */ 875 /* prepare common CTRLA value */
876 ctrla = ATC_SCSIZE(sconfig->src_maxburst) 876 ctrla = ATC_SCSIZE(sconfig->src_maxburst)
877 | ATC_DCSIZE(sconfig->dst_maxburst) 877 | ATC_DCSIZE(sconfig->dst_maxburst)
878 | ATC_DST_WIDTH(reg_width) 878 | ATC_DST_WIDTH(reg_width)
879 | ATC_SRC_WIDTH(reg_width) 879 | ATC_SRC_WIDTH(reg_width)
880 | period_len >> reg_width; 880 | period_len >> reg_width;
881 881
882 switch (direction) { 882 switch (direction) {
883 case DMA_MEM_TO_DEV: 883 case DMA_MEM_TO_DEV:
884 desc->lli.saddr = buf_addr + (period_len * period_index); 884 desc->lli.saddr = buf_addr + (period_len * period_index);
885 desc->lli.daddr = sconfig->dst_addr; 885 desc->lli.daddr = sconfig->dst_addr;
886 desc->lli.ctrla = ctrla; 886 desc->lli.ctrla = ctrla;
887 desc->lli.ctrlb = ATC_DST_ADDR_MODE_FIXED 887 desc->lli.ctrlb = ATC_DST_ADDR_MODE_FIXED
888 | ATC_SRC_ADDR_MODE_INCR 888 | ATC_SRC_ADDR_MODE_INCR
889 | ATC_FC_MEM2PER 889 | ATC_FC_MEM2PER
890 | ATC_SIF(atchan->mem_if) 890 | ATC_SIF(atchan->mem_if)
891 | ATC_DIF(atchan->per_if); 891 | ATC_DIF(atchan->per_if);
892 break; 892 break;
893 893
894 case DMA_DEV_TO_MEM: 894 case DMA_DEV_TO_MEM:
895 desc->lli.saddr = sconfig->src_addr; 895 desc->lli.saddr = sconfig->src_addr;
896 desc->lli.daddr = buf_addr + (period_len * period_index); 896 desc->lli.daddr = buf_addr + (period_len * period_index);
897 desc->lli.ctrla = ctrla; 897 desc->lli.ctrla = ctrla;
898 desc->lli.ctrlb = ATC_DST_ADDR_MODE_INCR 898 desc->lli.ctrlb = ATC_DST_ADDR_MODE_INCR
899 | ATC_SRC_ADDR_MODE_FIXED 899 | ATC_SRC_ADDR_MODE_FIXED
900 | ATC_FC_PER2MEM 900 | ATC_FC_PER2MEM
901 | ATC_SIF(atchan->per_if) 901 | ATC_SIF(atchan->per_if)
902 | ATC_DIF(atchan->mem_if); 902 | ATC_DIF(atchan->mem_if);
903 break; 903 break;
904 904
905 default: 905 default:
906 return -EINVAL; 906 return -EINVAL;
907 } 907 }
908 908
909 return 0; 909 return 0;
910 } 910 }
911 911
912 /** 912 /**
913 * atc_prep_dma_cyclic - prepare the cyclic DMA transfer 913 * atc_prep_dma_cyclic - prepare the cyclic DMA transfer
914 * @chan: the DMA channel to prepare 914 * @chan: the DMA channel to prepare
915 * @buf_addr: physical DMA address where the buffer starts 915 * @buf_addr: physical DMA address where the buffer starts
916 * @buf_len: total number of bytes for the entire buffer 916 * @buf_len: total number of bytes for the entire buffer
917 * @period_len: number of bytes for each period 917 * @period_len: number of bytes for each period
918 * @direction: transfer direction, to or from device 918 * @direction: transfer direction, to or from device
919 * @flags: tx descriptor status flags 919 * @flags: tx descriptor status flags
920 * @context: transfer context (ignored) 920 * @context: transfer context (ignored)
921 */ 921 */
922 static struct dma_async_tx_descriptor * 922 static struct dma_async_tx_descriptor *
923 atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, 923 atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
924 size_t period_len, enum dma_transfer_direction direction, 924 size_t period_len, enum dma_transfer_direction direction,
925 unsigned long flags, void *context) 925 unsigned long flags, void *context)
926 { 926 {
927 struct at_dma_chan *atchan = to_at_dma_chan(chan); 927 struct at_dma_chan *atchan = to_at_dma_chan(chan);
928 struct at_dma_slave *atslave = chan->private; 928 struct at_dma_slave *atslave = chan->private;
929 struct dma_slave_config *sconfig = &atchan->dma_sconfig; 929 struct dma_slave_config *sconfig = &atchan->dma_sconfig;
930 struct at_desc *first = NULL; 930 struct at_desc *first = NULL;
931 struct at_desc *prev = NULL; 931 struct at_desc *prev = NULL;
932 unsigned long was_cyclic; 932 unsigned long was_cyclic;
933 unsigned int reg_width; 933 unsigned int reg_width;
934 unsigned int periods = buf_len / period_len; 934 unsigned int periods = buf_len / period_len;
935 unsigned int i; 935 unsigned int i;
936 936
937 dev_vdbg(chan2dev(chan), "prep_dma_cyclic: %s buf@0x%08x - %d (%d/%d)\n", 937 dev_vdbg(chan2dev(chan), "prep_dma_cyclic: %s buf@0x%08x - %d (%d/%d)\n",
938 direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE", 938 direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE",
939 buf_addr, 939 buf_addr,
940 periods, buf_len, period_len); 940 periods, buf_len, period_len);
941 941
942 if (unlikely(!atslave || !buf_len || !period_len)) { 942 if (unlikely(!atslave || !buf_len || !period_len)) {
943 dev_dbg(chan2dev(chan), "prep_dma_cyclic: length is zero!\n"); 943 dev_dbg(chan2dev(chan), "prep_dma_cyclic: length is zero!\n");
944 return NULL; 944 return NULL;
945 } 945 }
946 946
947 was_cyclic = test_and_set_bit(ATC_IS_CYCLIC, &atchan->status); 947 was_cyclic = test_and_set_bit(ATC_IS_CYCLIC, &atchan->status);
948 if (was_cyclic) { 948 if (was_cyclic) {
949 dev_dbg(chan2dev(chan), "prep_dma_cyclic: channel in use!\n"); 949 dev_dbg(chan2dev(chan), "prep_dma_cyclic: channel in use!\n");
950 return NULL; 950 return NULL;
951 } 951 }
952 952
953 if (unlikely(!is_slave_direction(direction))) 953 if (unlikely(!is_slave_direction(direction)))
954 goto err_out; 954 goto err_out;
955 955
956 if (sconfig->direction == DMA_MEM_TO_DEV) 956 if (sconfig->direction == DMA_MEM_TO_DEV)
957 reg_width = convert_buswidth(sconfig->dst_addr_width); 957 reg_width = convert_buswidth(sconfig->dst_addr_width);
958 else 958 else
959 reg_width = convert_buswidth(sconfig->src_addr_width); 959 reg_width = convert_buswidth(sconfig->src_addr_width);
960 960
961 /* Check for too big/unaligned periods and unaligned DMA buffer */ 961 /* Check for too big/unaligned periods and unaligned DMA buffer */
962 if (atc_dma_cyclic_check_values(reg_width, buf_addr, period_len)) 962 if (atc_dma_cyclic_check_values(reg_width, buf_addr, period_len))
963 goto err_out; 963 goto err_out;
964 964
965 /* build cyclic linked list */ 965 /* build cyclic linked list */
966 for (i = 0; i < periods; i++) { 966 for (i = 0; i < periods; i++) {
967 struct at_desc *desc; 967 struct at_desc *desc;
968 968
969 desc = atc_desc_get(atchan); 969 desc = atc_desc_get(atchan);
970 if (!desc) 970 if (!desc)
971 goto err_desc_get; 971 goto err_desc_get;
972 972
973 if (atc_dma_cyclic_fill_desc(chan, desc, i, buf_addr, 973 if (atc_dma_cyclic_fill_desc(chan, desc, i, buf_addr,
974 reg_width, period_len, direction)) 974 reg_width, period_len, direction))
975 goto err_desc_get; 975 goto err_desc_get;
976 976
977 atc_desc_chain(&first, &prev, desc); 977 atc_desc_chain(&first, &prev, desc);
978 } 978 }
979 979
980 /* let's make the list cyclic */ 980 /* let's make the list cyclic */
981 prev->lli.dscr = first->txd.phys; 981 prev->lli.dscr = first->txd.phys;
982 982
983 /* First descriptor of the chain embeds additional information */ 983 /* First descriptor of the chain embeds additional information */
984 first->txd.cookie = -EBUSY; 984 first->txd.cookie = -EBUSY;
985 first->len = buf_len; 985 first->len = buf_len;
986 first->tx_width = reg_width; 986 first->tx_width = reg_width;
987 987
988 return &first->txd; 988 return &first->txd;
989 989
990 err_desc_get: 990 err_desc_get:
991 dev_err(chan2dev(chan), "not enough descriptors available\n"); 991 dev_err(chan2dev(chan), "not enough descriptors available\n");
992 atc_desc_put(atchan, first); 992 atc_desc_put(atchan, first);
993 err_out: 993 err_out:
994 clear_bit(ATC_IS_CYCLIC, &atchan->status); 994 clear_bit(ATC_IS_CYCLIC, &atchan->status);
995 return NULL; 995 return NULL;
996 } 996 }
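/*
 * Editor's note: illustrative numbers only.  atc_prep_dma_cyclic() builds
 * one LLI per period and links the last one back to the first, so e.g. an
 * audio ring buffer with buf_len = 16384 and period_len = 4096 yields
 * periods = 16384 / 4096 = 4 chained descriptors, and the BTC interrupt at
 * each period boundary ends up in atc_handle_cyclic() to run the client's
 * callback.
 */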
997 997
998 static int set_runtime_config(struct dma_chan *chan, 998 static int set_runtime_config(struct dma_chan *chan,
999 struct dma_slave_config *sconfig) 999 struct dma_slave_config *sconfig)
1000 { 1000 {
1001 struct at_dma_chan *atchan = to_at_dma_chan(chan); 1001 struct at_dma_chan *atchan = to_at_dma_chan(chan);
1002 1002
1003 /* Check if the channel is configured for slave transfers */ 1003 /* Check if the channel is configured for slave transfers */
1004 if (!chan->private) 1004 if (!chan->private)
1005 return -EINVAL; 1005 return -EINVAL;
1006 1006
1007 memcpy(&atchan->dma_sconfig, sconfig, sizeof(*sconfig)); 1007 memcpy(&atchan->dma_sconfig, sconfig, sizeof(*sconfig));
1008 1008
1009 convert_burst(&atchan->dma_sconfig.src_maxburst); 1009 convert_burst(&atchan->dma_sconfig.src_maxburst);
1010 convert_burst(&atchan->dma_sconfig.dst_maxburst); 1010 convert_burst(&atchan->dma_sconfig.dst_maxburst);
1011 1011
1012 return 0; 1012 return 0;
1013 } 1013 }
1014 1014
1015 1015
1016 static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, 1016 static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
1017 unsigned long arg) 1017 unsigned long arg)
1018 { 1018 {
1019 struct at_dma_chan *atchan = to_at_dma_chan(chan); 1019 struct at_dma_chan *atchan = to_at_dma_chan(chan);
1020 struct at_dma *atdma = to_at_dma(chan->device); 1020 struct at_dma *atdma = to_at_dma(chan->device);
1021 int chan_id = atchan->chan_common.chan_id; 1021 int chan_id = atchan->chan_common.chan_id;
1022 unsigned long flags; 1022 unsigned long flags;
1023 1023
1024 LIST_HEAD(list); 1024 LIST_HEAD(list);
1025 1025
1026 dev_vdbg(chan2dev(chan), "atc_control (%d)\n", cmd); 1026 dev_vdbg(chan2dev(chan), "atc_control (%d)\n", cmd);
1027 1027
1028 if (cmd == DMA_PAUSE) { 1028 if (cmd == DMA_PAUSE) {
1029 spin_lock_irqsave(&atchan->lock, flags); 1029 spin_lock_irqsave(&atchan->lock, flags);
1030 1030
1031 dma_writel(atdma, CHER, AT_DMA_SUSP(chan_id)); 1031 dma_writel(atdma, CHER, AT_DMA_SUSP(chan_id));
1032 set_bit(ATC_IS_PAUSED, &atchan->status); 1032 set_bit(ATC_IS_PAUSED, &atchan->status);
1033 1033
1034 spin_unlock_irqrestore(&atchan->lock, flags); 1034 spin_unlock_irqrestore(&atchan->lock, flags);
1035 } else if (cmd == DMA_RESUME) { 1035 } else if (cmd == DMA_RESUME) {
1036 if (!atc_chan_is_paused(atchan)) 1036 if (!atc_chan_is_paused(atchan))
1037 return 0; 1037 return 0;
1038 1038
1039 spin_lock_irqsave(&atchan->lock, flags); 1039 spin_lock_irqsave(&atchan->lock, flags);
1040 1040
1041 dma_writel(atdma, CHDR, AT_DMA_RES(chan_id)); 1041 dma_writel(atdma, CHDR, AT_DMA_RES(chan_id));
1042 clear_bit(ATC_IS_PAUSED, &atchan->status); 1042 clear_bit(ATC_IS_PAUSED, &atchan->status);
1043 1043
1044 spin_unlock_irqrestore(&atchan->lock, flags); 1044 spin_unlock_irqrestore(&atchan->lock, flags);
1045 } else if (cmd == DMA_TERMINATE_ALL) { 1045 } else if (cmd == DMA_TERMINATE_ALL) {
1046 struct at_desc *desc, *_desc; 1046 struct at_desc *desc, *_desc;
1047 /* 1047 /*
1048 * This is only called when something went wrong elsewhere, so 1048 * This is only called when something went wrong elsewhere, so
1049 * we don't really care about the data. Just disable the 1049 * we don't really care about the data. Just disable the
1050 * channel. We still have to poll the channel enable bit due 1050 * channel. We still have to poll the channel enable bit due
1051 * to AHB/HSB limitations. 1051 * to AHB/HSB limitations.
1052 */ 1052 */
1053 spin_lock_irqsave(&atchan->lock, flags); 1053 spin_lock_irqsave(&atchan->lock, flags);
1054 1054
1055 /* disabling channel: must also remove suspend state */ 1055 /* disabling channel: must also remove suspend state */
1056 dma_writel(atdma, CHDR, AT_DMA_RES(chan_id) | atchan->mask); 1056 dma_writel(atdma, CHDR, AT_DMA_RES(chan_id) | atchan->mask);
1057 1057
1058 /* confirm that this channel is disabled */ 1058 /* confirm that this channel is disabled */
1059 while (dma_readl(atdma, CHSR) & atchan->mask) 1059 while (dma_readl(atdma, CHSR) & atchan->mask)
1060 cpu_relax(); 1060 cpu_relax();
1061 1061
1062 /* active_list entries will end up before queued entries */ 1062 /* active_list entries will end up before queued entries */
1063 list_splice_init(&atchan->queue, &list); 1063 list_splice_init(&atchan->queue, &list);
1064 list_splice_init(&atchan->active_list, &list); 1064 list_splice_init(&atchan->active_list, &list);
1065 1065
1066 /* Flush all pending and queued descriptors */ 1066 /* Flush all pending and queued descriptors */
1067 list_for_each_entry_safe(desc, _desc, &list, desc_node) 1067 list_for_each_entry_safe(desc, _desc, &list, desc_node)
1068 atc_chain_complete(atchan, desc); 1068 atc_chain_complete(atchan, desc);
1069 1069
1070 clear_bit(ATC_IS_PAUSED, &atchan->status); 1070 clear_bit(ATC_IS_PAUSED, &atchan->status);
1071 /* if channel dedicated to cyclic operations, free it */ 1071 /* if channel dedicated to cyclic operations, free it */
1072 clear_bit(ATC_IS_CYCLIC, &atchan->status); 1072 clear_bit(ATC_IS_CYCLIC, &atchan->status);
1073 1073
1074 spin_unlock_irqrestore(&atchan->lock, flags); 1074 spin_unlock_irqrestore(&atchan->lock, flags);
1075 } else if (cmd == DMA_SLAVE_CONFIG) { 1075 } else if (cmd == DMA_SLAVE_CONFIG) {
1076 return set_runtime_config(chan, (struct dma_slave_config *)arg); 1076 return set_runtime_config(chan, (struct dma_slave_config *)arg);
1077 } else { 1077 } else {
1078 return -ENXIO; 1078 return -ENXIO;
1079 } 1079 }
1080 1080
1081 return 0; 1081 return 0;
1082 } 1082 }
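/*
 * Editor's sketch (hedged): the generic dmaengine wrappers that funnel
 * into atc_control() above.  The teardown sequence and the wrapper name
 * are hypothetical.
 */
static int atc_teardown_sketch(struct dma_chan *chan)
{
	int ret;

	ret = dmaengine_pause(chan);		/* cmd == DMA_PAUSE */
	if (ret)
		return ret;

	/* ... drain or inspect residue here, then abort everything ... */

	return dmaengine_terminate_all(chan);	/* cmd == DMA_TERMINATE_ALL */
}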
1083 1083
1084 /** 1084 /**
1085 * atc_tx_status - poll for transaction completion 1085 * atc_tx_status - poll for transaction completion
1086 * @chan: DMA channel 1086 * @chan: DMA channel
1087 * @cookie: transaction identifier to check status of 1087 * @cookie: transaction identifier to check status of
1088 * @txstate: if not %NULL updated with transaction state 1088 * @txstate: if not %NULL updated with transaction state
1089 * 1089 *
1090 * If @txstate is passed in, upon return it reflects the driver's 1090 * If @txstate is passed in, upon return it reflects the driver's
1091 * internal state and can be used with dma_async_is_complete() to check 1091 * internal state and can be used with dma_async_is_complete() to check
1092 * the status of multiple cookies without re-checking hardware state. 1092 * the status of multiple cookies without re-checking hardware state.
1093 */ 1093 */
1094 static enum dma_status 1094 static enum dma_status
1095 atc_tx_status(struct dma_chan *chan, 1095 atc_tx_status(struct dma_chan *chan,
1096 dma_cookie_t cookie, 1096 dma_cookie_t cookie,
1097 struct dma_tx_state *txstate) 1097 struct dma_tx_state *txstate)
1098 { 1098 {
1099 struct at_dma_chan *atchan = to_at_dma_chan(chan); 1099 struct at_dma_chan *atchan = to_at_dma_chan(chan);
1100 unsigned long flags; 1100 unsigned long flags;
1101 enum dma_status ret; 1101 enum dma_status ret;
1102 int bytes = 0; 1102 int bytes = 0;
1103 1103
1104 ret = dma_cookie_status(chan, cookie, txstate); 1104 ret = dma_cookie_status(chan, cookie, txstate);
1105 if (ret == DMA_SUCCESS) 1105 if (ret == DMA_COMPLETE)
1106 return ret; 1106 return ret;
1107 /* 1107 /*
1108 * There's no point calculating the residue if there's 1108 * There's no point calculating the residue if there's
1109 * no txstate to store the value. 1109 * no txstate to store the value.
1110 */ 1110 */
1111 if (!txstate) 1111 if (!txstate)
1112 return DMA_ERROR; 1112 return DMA_ERROR;
1113 1113
1114 spin_lock_irqsave(&atchan->lock, flags); 1114 spin_lock_irqsave(&atchan->lock, flags);
1115 1115
1116 /* Get number of bytes left in the active transactions */ 1116 /* Get number of bytes left in the active transactions */
1117 bytes = atc_get_bytes_left(chan); 1117 bytes = atc_get_bytes_left(chan);
1118 1118
1119 spin_unlock_irqrestore(&atchan->lock, flags); 1119 spin_unlock_irqrestore(&atchan->lock, flags);
1120 1120
1121 if (unlikely(bytes < 0)) { 1121 if (unlikely(bytes < 0)) {
1122 dev_vdbg(chan2dev(chan), "get residual bytes error\n"); 1122 dev_vdbg(chan2dev(chan), "get residual bytes error\n");
1123 return DMA_ERROR; 1123 return DMA_ERROR;
1124 } else { 1124 } else {
1125 dma_set_residue(txstate, bytes); 1125 dma_set_residue(txstate, bytes);
1126 } 1126 }
1127 1127
1128 dev_vdbg(chan2dev(chan), "tx_status %d: cookie = %d residue = %d\n", 1128 dev_vdbg(chan2dev(chan), "tx_status %d: cookie = %d residue = %d\n",
1129 ret, cookie, bytes); 1129 ret, cookie, bytes);
1130 1130
1131 return ret; 1131 return ret;
1132 } 1132 }
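/*
 * Editor's sketch (hedged): polling atc_tx_status() above through the
 * generic helpers.  dmaengine_tx_status() fills a struct dma_tx_state so
 * the residue computed by atc_get_bytes_left() can be read back; the
 * wrapper function name is hypothetical.
 */
static bool atc_poll_sketch(struct dma_chan *chan, dma_cookie_t cookie,
			    u32 *residue)
{
	struct dma_tx_state state;
	enum dma_status status;

	status = dmaengine_tx_status(chan, cookie, &state);
	if (residue)
		*residue = state.residue;

	return status == DMA_COMPLETE;
}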
1133 1133
1134 /** 1134 /**
1135 * atc_issue_pending - try to finish work 1135 * atc_issue_pending - try to finish work
1136 * @chan: target DMA channel 1136 * @chan: target DMA channel
1137 */ 1137 */
1138 static void atc_issue_pending(struct dma_chan *chan) 1138 static void atc_issue_pending(struct dma_chan *chan)
1139 { 1139 {
1140 struct at_dma_chan *atchan = to_at_dma_chan(chan); 1140 struct at_dma_chan *atchan = to_at_dma_chan(chan);
1141 unsigned long flags; 1141 unsigned long flags;
1142 1142
1143 dev_vdbg(chan2dev(chan), "issue_pending\n"); 1143 dev_vdbg(chan2dev(chan), "issue_pending\n");
1144 1144
1145 /* Not needed for cyclic transfers */ 1145 /* Not needed for cyclic transfers */
1146 if (atc_chan_is_cyclic(atchan)) 1146 if (atc_chan_is_cyclic(atchan))
1147 return; 1147 return;
1148 1148
1149 spin_lock_irqsave(&atchan->lock, flags); 1149 spin_lock_irqsave(&atchan->lock, flags);
1150 atc_advance_work(atchan); 1150 atc_advance_work(atchan);
1151 spin_unlock_irqrestore(&atchan->lock, flags); 1151 spin_unlock_irqrestore(&atchan->lock, flags);
1152 } 1152 }
1153 1153
1154 /** 1154 /**
1155 * atc_alloc_chan_resources - allocate resources for DMA channel 1155 * atc_alloc_chan_resources - allocate resources for DMA channel
1156 * @chan: allocate descriptor resources for this channel 1156 * @chan: allocate descriptor resources for this channel
1157 * @client: current client requesting the channel be ready for requests 1157 * @client: current client requesting the channel be ready for requests
1158 * 1158 *
1159 * return - the number of allocated descriptors 1159 * return - the number of allocated descriptors
1160 */ 1160 */
1161 static int atc_alloc_chan_resources(struct dma_chan *chan) 1161 static int atc_alloc_chan_resources(struct dma_chan *chan)
1162 { 1162 {
1163 struct at_dma_chan *atchan = to_at_dma_chan(chan); 1163 struct at_dma_chan *atchan = to_at_dma_chan(chan);
1164 struct at_dma *atdma = to_at_dma(chan->device); 1164 struct at_dma *atdma = to_at_dma(chan->device);
1165 struct at_desc *desc; 1165 struct at_desc *desc;
1166 struct at_dma_slave *atslave; 1166 struct at_dma_slave *atslave;
1167 unsigned long flags; 1167 unsigned long flags;
1168 int i; 1168 int i;
1169 u32 cfg; 1169 u32 cfg;
1170 LIST_HEAD(tmp_list); 1170 LIST_HEAD(tmp_list);
1171 1171
1172 dev_vdbg(chan2dev(chan), "alloc_chan_resources\n"); 1172 dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");
1173 1173
1174 /* ASSERT: channel is idle */ 1174 /* ASSERT: channel is idle */
1175 if (atc_chan_is_enabled(atchan)) { 1175 if (atc_chan_is_enabled(atchan)) {
1176 dev_dbg(chan2dev(chan), "DMA channel not idle ?\n"); 1176 dev_dbg(chan2dev(chan), "DMA channel not idle ?\n");
1177 return -EIO; 1177 return -EIO;
1178 } 1178 }
1179 1179
1180 cfg = ATC_DEFAULT_CFG; 1180 cfg = ATC_DEFAULT_CFG;
1181 1181
1182 atslave = chan->private; 1182 atslave = chan->private;
1183 if (atslave) { 1183 if (atslave) {
1184 /* 1184 /*
1185 * We need controller-specific data to set up slave 1185 * We need controller-specific data to set up slave
1186 * transfers. 1186 * transfers.
1187 */ 1187 */
1188 BUG_ON(!atslave->dma_dev || atslave->dma_dev != atdma->dma_common.dev); 1188 BUG_ON(!atslave->dma_dev || atslave->dma_dev != atdma->dma_common.dev);
1189 1189
1190 /* if cfg configuration specified take it instead of default */ 1190 /* if cfg configuration specified take it instead of default */
1191 if (atslave->cfg) 1191 if (atslave->cfg)
1192 cfg = atslave->cfg; 1192 cfg = atslave->cfg;
1193 } 1193 }
1194 1194
1195 /* have we already been set up? 1195 /* have we already been set up?
1196 * if so, reconfigure the channel but do not reallocate descriptors */ 1196 * if so, reconfigure the channel but do not reallocate descriptors */
1197 if (!list_empty(&atchan->free_list)) 1197 if (!list_empty(&atchan->free_list))
1198 return atchan->descs_allocated; 1198 return atchan->descs_allocated;
1199 1199
1200 /* Allocate initial pool of descriptors */ 1200 /* Allocate initial pool of descriptors */
1201 for (i = 0; i < init_nr_desc_per_channel; i++) { 1201 for (i = 0; i < init_nr_desc_per_channel; i++) {
1202 desc = atc_alloc_descriptor(chan, GFP_KERNEL); 1202 desc = atc_alloc_descriptor(chan, GFP_KERNEL);
1203 if (!desc) { 1203 if (!desc) {
1204 dev_err(atdma->dma_common.dev, 1204 dev_err(atdma->dma_common.dev,
1205 "Only %d initial descriptors\n", i); 1205 "Only %d initial descriptors\n", i);
1206 break; 1206 break;
1207 } 1207 }
1208 list_add_tail(&desc->desc_node, &tmp_list); 1208 list_add_tail(&desc->desc_node, &tmp_list);
1209 } 1209 }
1210 1210
1211 spin_lock_irqsave(&atchan->lock, flags); 1211 spin_lock_irqsave(&atchan->lock, flags);
1212 atchan->descs_allocated = i; 1212 atchan->descs_allocated = i;
1213 atchan->remain_desc = 0; 1213 atchan->remain_desc = 0;
1214 list_splice(&tmp_list, &atchan->free_list); 1214 list_splice(&tmp_list, &atchan->free_list);
1215 dma_cookie_init(chan); 1215 dma_cookie_init(chan);
1216 spin_unlock_irqrestore(&atchan->lock, flags); 1216 spin_unlock_irqrestore(&atchan->lock, flags);
1217 1217
1218 /* channel parameters */ 1218 /* channel parameters */
1219 channel_writel(atchan, CFG, cfg); 1219 channel_writel(atchan, CFG, cfg);
1220 1220
1221 dev_dbg(chan2dev(chan), 1221 dev_dbg(chan2dev(chan),
1222 "alloc_chan_resources: allocated %d descriptors\n", 1222 "alloc_chan_resources: allocated %d descriptors\n",
1223 atchan->descs_allocated); 1223 atchan->descs_allocated);
1224 1224
1225 return atchan->descs_allocated; 1225 return atchan->descs_allocated;
1226 } 1226 }
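/*
 * An illustrative sketch, not part of this driver: the dmaengine core
 * calls atc_alloc_chan_resources() when a client obtains a channel, and
 * the slave parameters reach it through chan->private (the atslave
 * handling above). Assuming a platform-provided struct at_dma_slave
 * named "my_atslave" and a filter "my_filter" that matches
 * atslave->dma_dev against chan->device->dev (as at_dma_filter() below
 * does), a legacy non-DT request would be roughly:
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, my_filter, &my_atslave);
 *	if (!chan)
 *		return -EBUSY;
 *	...
 *	dma_release_channel(chan);	// ends up in atc_free_chan_resources()
 */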
1227 1227
1228 /** 1228 /**
1229 * atc_free_chan_resources - free all channel resources 1229 * atc_free_chan_resources - free all channel resources
1230 * @chan: DMA channel 1230 * @chan: DMA channel
1231 */ 1231 */
1232 static void atc_free_chan_resources(struct dma_chan *chan) 1232 static void atc_free_chan_resources(struct dma_chan *chan)
1233 { 1233 {
1234 struct at_dma_chan *atchan = to_at_dma_chan(chan); 1234 struct at_dma_chan *atchan = to_at_dma_chan(chan);
1235 struct at_dma *atdma = to_at_dma(chan->device); 1235 struct at_dma *atdma = to_at_dma(chan->device);
1236 struct at_desc *desc, *_desc; 1236 struct at_desc *desc, *_desc;
1237 LIST_HEAD(list); 1237 LIST_HEAD(list);
1238 1238
1239 dev_dbg(chan2dev(chan), "free_chan_resources: (descs allocated=%u)\n", 1239 dev_dbg(chan2dev(chan), "free_chan_resources: (descs allocated=%u)\n",
1240 atchan->descs_allocated); 1240 atchan->descs_allocated);
1241 1241
1242 /* ASSERT: channel is idle */ 1242 /* ASSERT: channel is idle */
1243 BUG_ON(!list_empty(&atchan->active_list)); 1243 BUG_ON(!list_empty(&atchan->active_list));
1244 BUG_ON(!list_empty(&atchan->queue)); 1244 BUG_ON(!list_empty(&atchan->queue));
1245 BUG_ON(atc_chan_is_enabled(atchan)); 1245 BUG_ON(atc_chan_is_enabled(atchan));
1246 1246
1247 list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) { 1247 list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
1248 dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc); 1248 dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc);
1249 list_del(&desc->desc_node); 1249 list_del(&desc->desc_node);
1250 /* free link descriptor */ 1250 /* free link descriptor */
1251 dma_pool_free(atdma->dma_desc_pool, desc, desc->txd.phys); 1251 dma_pool_free(atdma->dma_desc_pool, desc, desc->txd.phys);
1252 } 1252 }
1253 list_splice_init(&atchan->free_list, &list); 1253 list_splice_init(&atchan->free_list, &list);
1254 atchan->descs_allocated = 0; 1254 atchan->descs_allocated = 0;
1255 atchan->status = 0; 1255 atchan->status = 0;
1256 atchan->remain_desc = 0; 1256 atchan->remain_desc = 0;
1257 1257
1258 dev_vdbg(chan2dev(chan), "free_chan_resources: done\n"); 1258 dev_vdbg(chan2dev(chan), "free_chan_resources: done\n");
1259 } 1259 }
1260 1260
1261 #ifdef CONFIG_OF 1261 #ifdef CONFIG_OF
1262 static bool at_dma_filter(struct dma_chan *chan, void *slave) 1262 static bool at_dma_filter(struct dma_chan *chan, void *slave)
1263 { 1263 {
1264 struct at_dma_slave *atslave = slave; 1264 struct at_dma_slave *atslave = slave;
1265 1265
1266 if (atslave->dma_dev == chan->device->dev) { 1266 if (atslave->dma_dev == chan->device->dev) {
1267 chan->private = atslave; 1267 chan->private = atslave;
1268 return true; 1268 return true;
1269 } else { 1269 } else {
1270 return false; 1270 return false;
1271 } 1271 }
1272 } 1272 }
1273 1273
1274 static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec, 1274 static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec,
1275 struct of_dma *of_dma) 1275 struct of_dma *of_dma)
1276 { 1276 {
1277 struct dma_chan *chan; 1277 struct dma_chan *chan;
1278 struct at_dma_chan *atchan; 1278 struct at_dma_chan *atchan;
1279 struct at_dma_slave *atslave; 1279 struct at_dma_slave *atslave;
1280 dma_cap_mask_t mask; 1280 dma_cap_mask_t mask;
1281 unsigned int per_id; 1281 unsigned int per_id;
1282 struct platform_device *dmac_pdev; 1282 struct platform_device *dmac_pdev;
1283 1283
1284 if (dma_spec->args_count != 2) 1284 if (dma_spec->args_count != 2)
1285 return NULL; 1285 return NULL;
1286 1286
1287 dmac_pdev = of_find_device_by_node(dma_spec->np); 1287 dmac_pdev = of_find_device_by_node(dma_spec->np);
1288 1288
1289 dma_cap_zero(mask); 1289 dma_cap_zero(mask);
1290 dma_cap_set(DMA_SLAVE, mask); 1290 dma_cap_set(DMA_SLAVE, mask);
1291 1291
1292 atslave = devm_kzalloc(&dmac_pdev->dev, sizeof(*atslave), GFP_KERNEL); 1292 atslave = devm_kzalloc(&dmac_pdev->dev, sizeof(*atslave), GFP_KERNEL);
1293 if (!atslave) 1293 if (!atslave)
1294 return NULL; 1294 return NULL;
1295 1295
1296 atslave->cfg = ATC_DST_H2SEL_HW | ATC_SRC_H2SEL_HW; 1296 atslave->cfg = ATC_DST_H2SEL_HW | ATC_SRC_H2SEL_HW;
1297 /* 1297 /*
1298 * We can fill in both SRC_PER and DST_PER; one of these fields will be 1298 * We can fill in both SRC_PER and DST_PER; one of these fields will be
1299 * ignored depending on the DMA transfer direction. 1299 * ignored depending on the DMA transfer direction.
1300 */ 1300 */
1301 per_id = dma_spec->args[1] & AT91_DMA_CFG_PER_ID_MASK; 1301 per_id = dma_spec->args[1] & AT91_DMA_CFG_PER_ID_MASK;
1302 atslave->cfg |= ATC_DST_PER_MSB(per_id) | ATC_DST_PER(per_id) 1302 atslave->cfg |= ATC_DST_PER_MSB(per_id) | ATC_DST_PER(per_id)
1303 | ATC_SRC_PER_MSB(per_id) | ATC_SRC_PER(per_id); 1303 | ATC_SRC_PER_MSB(per_id) | ATC_SRC_PER(per_id);
1304 /* 1304 /*
1305 * We have to translate the value we get from the device tree since 1305 * We have to translate the value we get from the device tree since
1306 * the half FIFO configuration value had to be 0 to keep backward 1306 * the half FIFO configuration value had to be 0 to keep backward
1307 * compatibility. 1307 * compatibility.
1308 */ 1308 */
1309 switch (dma_spec->args[1] & AT91_DMA_CFG_FIFOCFG_MASK) { 1309 switch (dma_spec->args[1] & AT91_DMA_CFG_FIFOCFG_MASK) {
1310 case AT91_DMA_CFG_FIFOCFG_ALAP: 1310 case AT91_DMA_CFG_FIFOCFG_ALAP:
1311 atslave->cfg |= ATC_FIFOCFG_LARGESTBURST; 1311 atslave->cfg |= ATC_FIFOCFG_LARGESTBURST;
1312 break; 1312 break;
1313 case AT91_DMA_CFG_FIFOCFG_ASAP: 1313 case AT91_DMA_CFG_FIFOCFG_ASAP:
1314 atslave->cfg |= ATC_FIFOCFG_ENOUGHSPACE; 1314 atslave->cfg |= ATC_FIFOCFG_ENOUGHSPACE;
1315 break; 1315 break;
1316 case AT91_DMA_CFG_FIFOCFG_HALF: 1316 case AT91_DMA_CFG_FIFOCFG_HALF:
1317 default: 1317 default:
1318 atslave->cfg |= ATC_FIFOCFG_HALFFIFO; 1318 atslave->cfg |= ATC_FIFOCFG_HALFFIFO;
1319 } 1319 }
1320 atslave->dma_dev = &dmac_pdev->dev; 1320 atslave->dma_dev = &dmac_pdev->dev;
1321 1321
1322 chan = dma_request_channel(mask, at_dma_filter, atslave); 1322 chan = dma_request_channel(mask, at_dma_filter, atslave);
1323 if (!chan) 1323 if (!chan)
1324 return NULL; 1324 return NULL;
1325 1325
1326 atchan = to_at_dma_chan(chan); 1326 atchan = to_at_dma_chan(chan);
1327 atchan->per_if = dma_spec->args[0] & 0xff; 1327 atchan->per_if = dma_spec->args[0] & 0xff;
1328 atchan->mem_if = (dma_spec->args[0] >> 16) & 0xff; 1328 atchan->mem_if = (dma_spec->args[0] >> 16) & 0xff;
1329 1329
1330 return chan; 1330 return chan;
1331 } 1331 }
1332 #else 1332 #else
1333 static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec, 1333 static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec,
1334 struct of_dma *of_dma) 1334 struct of_dma *of_dma)
1335 { 1335 {
1336 return NULL; 1336 return NULL;
1337 } 1337 }
1338 #endif 1338 #endif
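/*
 * An illustrative sketch, not part of this driver: at_dma_xlate() above
 * expects a two-cell DMA specifier. Going by its decoding, the cells are
 * composed as below ("per_id", "mem_if" and "per_if" are hypothetical
 * values; the AT91_DMA_CFG_* macros are the ones already used above):
 *
 *	u32 cell0 = (mem_if << 16) | per_if;		// AHB interfaces, args[0]
 *	u32 cell1 = AT91_DMA_CFG_FIFOCFG_HALF |		// FIFO config, args[1]
 *		    (per_id & AT91_DMA_CFG_PER_ID_MASK);	// peripheral request ID
 */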
1339 1339
1340 /*-- Module Management -----------------------------------------------*/ 1340 /*-- Module Management -----------------------------------------------*/
1341 1341
1342 /* cap_mask is a multi-u32 bitfield, so it can only be filled at run time (see the dma_cap_set() calls in at_dma_probe()). */ 1342 /* cap_mask is a multi-u32 bitfield, so it can only be filled at run time (see the dma_cap_set() calls in at_dma_probe()). */
1343 static struct at_dma_platform_data at91sam9rl_config = { 1343 static struct at_dma_platform_data at91sam9rl_config = {
1344 .nr_channels = 2, 1344 .nr_channels = 2,
1345 }; 1345 };
1346 static struct at_dma_platform_data at91sam9g45_config = { 1346 static struct at_dma_platform_data at91sam9g45_config = {
1347 .nr_channels = 8, 1347 .nr_channels = 8,
1348 }; 1348 };
1349 1349
1350 #if defined(CONFIG_OF) 1350 #if defined(CONFIG_OF)
1351 static const struct of_device_id atmel_dma_dt_ids[] = { 1351 static const struct of_device_id atmel_dma_dt_ids[] = {
1352 { 1352 {
1353 .compatible = "atmel,at91sam9rl-dma", 1353 .compatible = "atmel,at91sam9rl-dma",
1354 .data = &at91sam9rl_config, 1354 .data = &at91sam9rl_config,
1355 }, { 1355 }, {
1356 .compatible = "atmel,at91sam9g45-dma", 1356 .compatible = "atmel,at91sam9g45-dma",
1357 .data = &at91sam9g45_config, 1357 .data = &at91sam9g45_config,
1358 }, { 1358 }, {
1359 /* sentinel */ 1359 /* sentinel */
1360 } 1360 }
1361 }; 1361 };
1362 1362
1363 MODULE_DEVICE_TABLE(of, atmel_dma_dt_ids); 1363 MODULE_DEVICE_TABLE(of, atmel_dma_dt_ids);
1364 #endif 1364 #endif
1365 1365
1366 static const struct platform_device_id atdma_devtypes[] = { 1366 static const struct platform_device_id atdma_devtypes[] = {
1367 { 1367 {
1368 .name = "at91sam9rl_dma", 1368 .name = "at91sam9rl_dma",
1369 .driver_data = (unsigned long) &at91sam9rl_config, 1369 .driver_data = (unsigned long) &at91sam9rl_config,
1370 }, { 1370 }, {
1371 .name = "at91sam9g45_dma", 1371 .name = "at91sam9g45_dma",
1372 .driver_data = (unsigned long) &at91sam9g45_config, 1372 .driver_data = (unsigned long) &at91sam9g45_config,
1373 }, { 1373 }, {
1374 /* sentinel */ 1374 /* sentinel */
1375 } 1375 }
1376 }; 1376 };
1377 1377
1378 static inline const struct at_dma_platform_data * __init at_dma_get_driver_data( 1378 static inline const struct at_dma_platform_data * __init at_dma_get_driver_data(
1379 struct platform_device *pdev) 1379 struct platform_device *pdev)
1380 { 1380 {
1381 if (pdev->dev.of_node) { 1381 if (pdev->dev.of_node) {
1382 const struct of_device_id *match; 1382 const struct of_device_id *match;
1383 match = of_match_node(atmel_dma_dt_ids, pdev->dev.of_node); 1383 match = of_match_node(atmel_dma_dt_ids, pdev->dev.of_node);
1384 if (match == NULL) 1384 if (match == NULL)
1385 return NULL; 1385 return NULL;
1386 return match->data; 1386 return match->data;
1387 } 1387 }
1388 return (struct at_dma_platform_data *) 1388 return (struct at_dma_platform_data *)
1389 platform_get_device_id(pdev)->driver_data; 1389 platform_get_device_id(pdev)->driver_data;
1390 } 1390 }
1391 1391
1392 /** 1392 /**
1393 * at_dma_off - disable DMA controller 1393 * at_dma_off - disable DMA controller
1394 * @atdma: the Atmel HDMAC device 1394 * @atdma: the Atmel HDMAC device
1395 */ 1395 */
1396 static void at_dma_off(struct at_dma *atdma) 1396 static void at_dma_off(struct at_dma *atdma)
1397 { 1397 {
1398 dma_writel(atdma, EN, 0); 1398 dma_writel(atdma, EN, 0);
1399 1399
1400 /* disable all interrupts */ 1400 /* disable all interrupts */
1401 dma_writel(atdma, EBCIDR, -1L); 1401 dma_writel(atdma, EBCIDR, -1L);
1402 1402
1403 /* confirm that all channels are disabled */ 1403 /* confirm that all channels are disabled */
1404 while (dma_readl(atdma, CHSR) & atdma->all_chan_mask) 1404 while (dma_readl(atdma, CHSR) & atdma->all_chan_mask)
1405 cpu_relax(); 1405 cpu_relax();
1406 } 1406 }
1407 1407
1408 static int __init at_dma_probe(struct platform_device *pdev) 1408 static int __init at_dma_probe(struct platform_device *pdev)
1409 { 1409 {
1410 struct resource *io; 1410 struct resource *io;
1411 struct at_dma *atdma; 1411 struct at_dma *atdma;
1412 size_t size; 1412 size_t size;
1413 int irq; 1413 int irq;
1414 int err; 1414 int err;
1415 int i; 1415 int i;
1416 const struct at_dma_platform_data *plat_dat; 1416 const struct at_dma_platform_data *plat_dat;
1417 1417
1418 /* setup platform data for each SoC */ 1418 /* setup platform data for each SoC */
1419 dma_cap_set(DMA_MEMCPY, at91sam9rl_config.cap_mask); 1419 dma_cap_set(DMA_MEMCPY, at91sam9rl_config.cap_mask);
1420 dma_cap_set(DMA_MEMCPY, at91sam9g45_config.cap_mask); 1420 dma_cap_set(DMA_MEMCPY, at91sam9g45_config.cap_mask);
1421 dma_cap_set(DMA_SLAVE, at91sam9g45_config.cap_mask); 1421 dma_cap_set(DMA_SLAVE, at91sam9g45_config.cap_mask);
1422 1422
1423 /* get DMA parameters from controller type */ 1423 /* get DMA parameters from controller type */
1424 plat_dat = at_dma_get_driver_data(pdev); 1424 plat_dat = at_dma_get_driver_data(pdev);
1425 if (!plat_dat) 1425 if (!plat_dat)
1426 return -ENODEV; 1426 return -ENODEV;
1427 1427
1428 io = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1428 io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1429 if (!io) 1429 if (!io)
1430 return -EINVAL; 1430 return -EINVAL;
1431 1431
1432 irq = platform_get_irq(pdev, 0); 1432 irq = platform_get_irq(pdev, 0);
1433 if (irq < 0) 1433 if (irq < 0)
1434 return irq; 1434 return irq;
1435 1435
1436 size = sizeof(struct at_dma); 1436 size = sizeof(struct at_dma);
1437 size += plat_dat->nr_channels * sizeof(struct at_dma_chan); 1437 size += plat_dat->nr_channels * sizeof(struct at_dma_chan);
1438 atdma = kzalloc(size, GFP_KERNEL); 1438 atdma = kzalloc(size, GFP_KERNEL);
1439 if (!atdma) 1439 if (!atdma)
1440 return -ENOMEM; 1440 return -ENOMEM;
1441 1441
1442 /* discover transaction capabilities */ 1442 /* discover transaction capabilities */
1443 atdma->dma_common.cap_mask = plat_dat->cap_mask; 1443 atdma->dma_common.cap_mask = plat_dat->cap_mask;
1444 atdma->all_chan_mask = (1 << plat_dat->nr_channels) - 1; 1444 atdma->all_chan_mask = (1 << plat_dat->nr_channels) - 1;
1445 1445
1446 size = resource_size(io); 1446 size = resource_size(io);
1447 if (!request_mem_region(io->start, size, pdev->dev.driver->name)) { 1447 if (!request_mem_region(io->start, size, pdev->dev.driver->name)) {
1448 err = -EBUSY; 1448 err = -EBUSY;
1449 goto err_kfree; 1449 goto err_kfree;
1450 } 1450 }
1451 1451
1452 atdma->regs = ioremap(io->start, size); 1452 atdma->regs = ioremap(io->start, size);
1453 if (!atdma->regs) { 1453 if (!atdma->regs) {
1454 err = -ENOMEM; 1454 err = -ENOMEM;
1455 goto err_release_r; 1455 goto err_release_r;
1456 } 1456 }
1457 1457
1458 atdma->clk = clk_get(&pdev->dev, "dma_clk"); 1458 atdma->clk = clk_get(&pdev->dev, "dma_clk");
1459 if (IS_ERR(atdma->clk)) { 1459 if (IS_ERR(atdma->clk)) {
1460 err = PTR_ERR(atdma->clk); 1460 err = PTR_ERR(atdma->clk);
1461 goto err_clk; 1461 goto err_clk;
1462 } 1462 }
1463 err = clk_prepare_enable(atdma->clk); 1463 err = clk_prepare_enable(atdma->clk);
1464 if (err) 1464 if (err)
1465 goto err_clk_prepare; 1465 goto err_clk_prepare;
1466 1466
1467 /* force dma off, just in case */ 1467 /* force dma off, just in case */
1468 at_dma_off(atdma); 1468 at_dma_off(atdma);
1469 1469
1470 err = request_irq(irq, at_dma_interrupt, 0, "at_hdmac", atdma); 1470 err = request_irq(irq, at_dma_interrupt, 0, "at_hdmac", atdma);
1471 if (err) 1471 if (err)
1472 goto err_irq; 1472 goto err_irq;
1473 1473
1474 platform_set_drvdata(pdev, atdma); 1474 platform_set_drvdata(pdev, atdma);
1475 1475
1476 /* create a pool of consistent memory blocks for hardware descriptors */ 1476 /* create a pool of consistent memory blocks for hardware descriptors */
1477 atdma->dma_desc_pool = dma_pool_create("at_hdmac_desc_pool", 1477 atdma->dma_desc_pool = dma_pool_create("at_hdmac_desc_pool",
1478 &pdev->dev, sizeof(struct at_desc), 1478 &pdev->dev, sizeof(struct at_desc),
1479 4 /* word alignment */, 0); 1479 4 /* word alignment */, 0);
1480 if (!atdma->dma_desc_pool) { 1480 if (!atdma->dma_desc_pool) {
1481 dev_err(&pdev->dev, "No memory for descriptors dma pool\n"); 1481 dev_err(&pdev->dev, "No memory for descriptors dma pool\n");
1482 err = -ENOMEM; 1482 err = -ENOMEM;
1483 goto err_pool_create; 1483 goto err_pool_create;
1484 } 1484 }
1485 1485
1486 /* clear any pending interrupt */ 1486 /* clear any pending interrupt */
1487 while (dma_readl(atdma, EBCISR)) 1487 while (dma_readl(atdma, EBCISR))
1488 cpu_relax(); 1488 cpu_relax();
1489 1489
1490 /* initialize channel-related values */ 1490 /* initialize channel-related values */
1491 INIT_LIST_HEAD(&atdma->dma_common.channels); 1491 INIT_LIST_HEAD(&atdma->dma_common.channels);
1492 for (i = 0; i < plat_dat->nr_channels; i++) { 1492 for (i = 0; i < plat_dat->nr_channels; i++) {
1493 struct at_dma_chan *atchan = &atdma->chan[i]; 1493 struct at_dma_chan *atchan = &atdma->chan[i];
1494 1494
1495 atchan->mem_if = AT_DMA_MEM_IF; 1495 atchan->mem_if = AT_DMA_MEM_IF;
1496 atchan->per_if = AT_DMA_PER_IF; 1496 atchan->per_if = AT_DMA_PER_IF;
1497 atchan->chan_common.device = &atdma->dma_common; 1497 atchan->chan_common.device = &atdma->dma_common;
1498 dma_cookie_init(&atchan->chan_common); 1498 dma_cookie_init(&atchan->chan_common);
1499 list_add_tail(&atchan->chan_common.device_node, 1499 list_add_tail(&atchan->chan_common.device_node,
1500 &atdma->dma_common.channels); 1500 &atdma->dma_common.channels);
1501 1501
1502 atchan->ch_regs = atdma->regs + ch_regs(i); 1502 atchan->ch_regs = atdma->regs + ch_regs(i);
1503 spin_lock_init(&atchan->lock); 1503 spin_lock_init(&atchan->lock);
1504 atchan->mask = 1 << i; 1504 atchan->mask = 1 << i;
1505 1505
1506 INIT_LIST_HEAD(&atchan->active_list); 1506 INIT_LIST_HEAD(&atchan->active_list);
1507 INIT_LIST_HEAD(&atchan->queue); 1507 INIT_LIST_HEAD(&atchan->queue);
1508 INIT_LIST_HEAD(&atchan->free_list); 1508 INIT_LIST_HEAD(&atchan->free_list);
1509 1509
1510 tasklet_init(&atchan->tasklet, atc_tasklet, 1510 tasklet_init(&atchan->tasklet, atc_tasklet,
1511 (unsigned long)atchan); 1511 (unsigned long)atchan);
1512 atc_enable_chan_irq(atdma, i); 1512 atc_enable_chan_irq(atdma, i);
1513 } 1513 }
1514 1514
1515 /* set base routines */ 1515 /* set base routines */
1516 atdma->dma_common.device_alloc_chan_resources = atc_alloc_chan_resources; 1516 atdma->dma_common.device_alloc_chan_resources = atc_alloc_chan_resources;
1517 atdma->dma_common.device_free_chan_resources = atc_free_chan_resources; 1517 atdma->dma_common.device_free_chan_resources = atc_free_chan_resources;
1518 atdma->dma_common.device_tx_status = atc_tx_status; 1518 atdma->dma_common.device_tx_status = atc_tx_status;
1519 atdma->dma_common.device_issue_pending = atc_issue_pending; 1519 atdma->dma_common.device_issue_pending = atc_issue_pending;
1520 atdma->dma_common.dev = &pdev->dev; 1520 atdma->dma_common.dev = &pdev->dev;
1521 1521
1522 /* set prep routines based on capability */ 1522 /* set prep routines based on capability */
1523 if (dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask)) 1523 if (dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask))
1524 atdma->dma_common.device_prep_dma_memcpy = atc_prep_dma_memcpy; 1524 atdma->dma_common.device_prep_dma_memcpy = atc_prep_dma_memcpy;
1525 1525
1526 if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) { 1526 if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) {
1527 atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg; 1527 atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg;
1528 /* controller can do slave DMA: can trigger cyclic transfers */ 1528 /* controller can do slave DMA: can trigger cyclic transfers */
1529 dma_cap_set(DMA_CYCLIC, atdma->dma_common.cap_mask); 1529 dma_cap_set(DMA_CYCLIC, atdma->dma_common.cap_mask);
1530 atdma->dma_common.device_prep_dma_cyclic = atc_prep_dma_cyclic; 1530 atdma->dma_common.device_prep_dma_cyclic = atc_prep_dma_cyclic;
1531 atdma->dma_common.device_control = atc_control; 1531 atdma->dma_common.device_control = atc_control;
1532 } 1532 }
1533 1533
1534 dma_writel(atdma, EN, AT_DMA_ENABLE); 1534 dma_writel(atdma, EN, AT_DMA_ENABLE);
1535 1535
1536 dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s), %d channels\n", 1536 dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s), %d channels\n",
1537 dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "", 1537 dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "",
1538 dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) ? "slave " : "", 1538 dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) ? "slave " : "",
1539 plat_dat->nr_channels); 1539 plat_dat->nr_channels);
1540 1540
1541 dma_async_device_register(&atdma->dma_common); 1541 dma_async_device_register(&atdma->dma_common);
1542 1542
1543 /* 1543 /*
1544 * Do not return an error if the dmac node is not present, so as not to 1544 * Do not return an error if the dmac node is not present, so as not to
1545 * break the existing way of requesting a channel with 1545 * break the existing way of requesting a channel with
1546 * dma_request_channel(). 1546 * dma_request_channel().
1547 */ 1547 */
1548 if (pdev->dev.of_node) { 1548 if (pdev->dev.of_node) {
1549 err = of_dma_controller_register(pdev->dev.of_node, 1549 err = of_dma_controller_register(pdev->dev.of_node,
1550 at_dma_xlate, atdma); 1550 at_dma_xlate, atdma);
1551 if (err) { 1551 if (err) {
1552 dev_err(&pdev->dev, "could not register of_dma_controller\n"); 1552 dev_err(&pdev->dev, "could not register of_dma_controller\n");
1553 goto err_of_dma_controller_register; 1553 goto err_of_dma_controller_register;
1554 } 1554 }
1555 } 1555 }
1556 1556
1557 return 0; 1557 return 0;
1558 1558
1559 err_of_dma_controller_register: 1559 err_of_dma_controller_register:
1560 dma_async_device_unregister(&atdma->dma_common); 1560 dma_async_device_unregister(&atdma->dma_common);
1561 dma_pool_destroy(atdma->dma_desc_pool); 1561 dma_pool_destroy(atdma->dma_desc_pool);
1562 err_pool_create: 1562 err_pool_create:
1563 free_irq(platform_get_irq(pdev, 0), atdma); 1563 free_irq(platform_get_irq(pdev, 0), atdma);
1564 err_irq: 1564 err_irq:
1565 clk_disable_unprepare(atdma->clk); 1565 clk_disable_unprepare(atdma->clk);
1566 err_clk_prepare: 1566 err_clk_prepare:
1567 clk_put(atdma->clk); 1567 clk_put(atdma->clk);
1568 err_clk: 1568 err_clk:
1569 iounmap(atdma->regs); 1569 iounmap(atdma->regs);
1570 atdma->regs = NULL; 1570 atdma->regs = NULL;
1571 err_release_r: 1571 err_release_r:
1572 release_mem_region(io->start, size); 1572 release_mem_region(io->start, size);
1573 err_kfree: 1573 err_kfree:
1574 kfree(atdma); 1574 kfree(atdma);
1575 return err; 1575 return err;
1576 } 1576 }
1577 1577
1578 static int at_dma_remove(struct platform_device *pdev) 1578 static int at_dma_remove(struct platform_device *pdev)
1579 { 1579 {
1580 struct at_dma *atdma = platform_get_drvdata(pdev); 1580 struct at_dma *atdma = platform_get_drvdata(pdev);
1581 struct dma_chan *chan, *_chan; 1581 struct dma_chan *chan, *_chan;
1582 struct resource *io; 1582 struct resource *io;
1583 1583
1584 at_dma_off(atdma); 1584 at_dma_off(atdma);
1585 dma_async_device_unregister(&atdma->dma_common); 1585 dma_async_device_unregister(&atdma->dma_common);
1586 1586
1587 dma_pool_destroy(atdma->dma_desc_pool); 1587 dma_pool_destroy(atdma->dma_desc_pool);
1588 free_irq(platform_get_irq(pdev, 0), atdma); 1588 free_irq(platform_get_irq(pdev, 0), atdma);
1589 1589
1590 list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels, 1590 list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
1591 device_node) { 1591 device_node) {
1592 struct at_dma_chan *atchan = to_at_dma_chan(chan); 1592 struct at_dma_chan *atchan = to_at_dma_chan(chan);
1593 1593
1594 /* Disable interrupts */ 1594 /* Disable interrupts */
1595 atc_disable_chan_irq(atdma, chan->chan_id); 1595 atc_disable_chan_irq(atdma, chan->chan_id);
1596 tasklet_disable(&atchan->tasklet); 1596 tasklet_disable(&atchan->tasklet);
1597 1597
1598 tasklet_kill(&atchan->tasklet); 1598 tasklet_kill(&atchan->tasklet);
1599 list_del(&chan->device_node); 1599 list_del(&chan->device_node);
1600 } 1600 }
1601 1601
1602 clk_disable_unprepare(atdma->clk); 1602 clk_disable_unprepare(atdma->clk);
1603 clk_put(atdma->clk); 1603 clk_put(atdma->clk);
1604 1604
1605 iounmap(atdma->regs); 1605 iounmap(atdma->regs);
1606 atdma->regs = NULL; 1606 atdma->regs = NULL;
1607 1607
1608 io = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1608 io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1609 release_mem_region(io->start, resource_size(io)); 1609 release_mem_region(io->start, resource_size(io));
1610 1610
1611 kfree(atdma); 1611 kfree(atdma);
1612 1612
1613 return 0; 1613 return 0;
1614 } 1614 }
1615 1615
1616 static void at_dma_shutdown(struct platform_device *pdev) 1616 static void at_dma_shutdown(struct platform_device *pdev)
1617 { 1617 {
1618 struct at_dma *atdma = platform_get_drvdata(pdev); 1618 struct at_dma *atdma = platform_get_drvdata(pdev);
1619 1619
1620 at_dma_off(platform_get_drvdata(pdev)); 1620 at_dma_off(platform_get_drvdata(pdev));
1621 clk_disable_unprepare(atdma->clk); 1621 clk_disable_unprepare(atdma->clk);
1622 } 1622 }
1623 1623
1624 static int at_dma_prepare(struct device *dev) 1624 static int at_dma_prepare(struct device *dev)
1625 { 1625 {
1626 struct platform_device *pdev = to_platform_device(dev); 1626 struct platform_device *pdev = to_platform_device(dev);
1627 struct at_dma *atdma = platform_get_drvdata(pdev); 1627 struct at_dma *atdma = platform_get_drvdata(pdev);
1628 struct dma_chan *chan, *_chan; 1628 struct dma_chan *chan, *_chan;
1629 1629
1630 list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels, 1630 list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
1631 device_node) { 1631 device_node) {
1632 struct at_dma_chan *atchan = to_at_dma_chan(chan); 1632 struct at_dma_chan *atchan = to_at_dma_chan(chan);
1633 /* wait for transaction completion (except in cyclic case) */ 1633 /* wait for transaction completion (except in cyclic case) */
1634 if (atc_chan_is_enabled(atchan) && !atc_chan_is_cyclic(atchan)) 1634 if (atc_chan_is_enabled(atchan) && !atc_chan_is_cyclic(atchan))
1635 return -EAGAIN; 1635 return -EAGAIN;
1636 } 1636 }
1637 return 0; 1637 return 0;
1638 } 1638 }
1639 1639
1640 static void atc_suspend_cyclic(struct at_dma_chan *atchan) 1640 static void atc_suspend_cyclic(struct at_dma_chan *atchan)
1641 { 1641 {
1642 struct dma_chan *chan = &atchan->chan_common; 1642 struct dma_chan *chan = &atchan->chan_common;
1643 1643
1644 /* The channel should already have been paused by its user; 1644 /* The channel should already have been paused by its user;
1645 * pause it here anyway if that has not been done. */ 1645 * pause it here anyway if that has not been done. */
1646 if (!atc_chan_is_paused(atchan)) { 1646 if (!atc_chan_is_paused(atchan)) {
1647 dev_warn(chan2dev(chan), 1647 dev_warn(chan2dev(chan),
1648 "cyclic channel not paused, should be done by channel user\n"); 1648 "cyclic channel not paused, should be done by channel user\n");
1649 atc_control(chan, DMA_PAUSE, 0); 1649 atc_control(chan, DMA_PAUSE, 0);
1650 } 1650 }
1651 1651
1652 /* now preserve additional data for cyclic operations */ 1652 /* now preserve additional data for cyclic operations */
1653 /* next descriptor address in the cyclic list */ 1653 /* next descriptor address in the cyclic list */
1654 atchan->save_dscr = channel_readl(atchan, DSCR); 1654 atchan->save_dscr = channel_readl(atchan, DSCR);
1655 1655
1656 vdbg_dump_regs(atchan); 1656 vdbg_dump_regs(atchan);
1657 } 1657 }
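/*
 * An illustrative sketch, not part of this driver: as the warning above
 * says, the user of a cyclic channel (typically an audio driver) is
 * expected to pause the channel itself around system suspend, e.g. from
 * its own suspend/resume callbacks, using the generic dmaengine helpers:
 *
 *	dmaengine_pause(chan);		// before suspend (DMA_PAUSE control)
 *	...
 *	dmaengine_resume(chan);		// after resume (DMA_RESUME control)
 */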
1658 1658
1659 static int at_dma_suspend_noirq(struct device *dev) 1659 static int at_dma_suspend_noirq(struct device *dev)
1660 { 1660 {
1661 struct platform_device *pdev = to_platform_device(dev); 1661 struct platform_device *pdev = to_platform_device(dev);
1662 struct at_dma *atdma = platform_get_drvdata(pdev); 1662 struct at_dma *atdma = platform_get_drvdata(pdev);
1663 struct dma_chan *chan, *_chan; 1663 struct dma_chan *chan, *_chan;
1664 1664
1665 /* preserve data */ 1665 /* preserve data */
1666 list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels, 1666 list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
1667 device_node) { 1667 device_node) {
1668 struct at_dma_chan *atchan = to_at_dma_chan(chan); 1668 struct at_dma_chan *atchan = to_at_dma_chan(chan);
1669 1669
1670 if (atc_chan_is_cyclic(atchan)) 1670 if (atc_chan_is_cyclic(atchan))
1671 atc_suspend_cyclic(atchan); 1671 atc_suspend_cyclic(atchan);
1672 atchan->save_cfg = channel_readl(atchan, CFG); 1672 atchan->save_cfg = channel_readl(atchan, CFG);
1673 } 1673 }
1674 atdma->save_imr = dma_readl(atdma, EBCIMR); 1674 atdma->save_imr = dma_readl(atdma, EBCIMR);
1675 1675
1676 /* disable DMA controller */ 1676 /* disable DMA controller */
1677 at_dma_off(atdma); 1677 at_dma_off(atdma);
1678 clk_disable_unprepare(atdma->clk); 1678 clk_disable_unprepare(atdma->clk);
1679 return 0; 1679 return 0;
1680 } 1680 }
1681 1681
1682 static void atc_resume_cyclic(struct at_dma_chan *atchan) 1682 static void atc_resume_cyclic(struct at_dma_chan *atchan)
1683 { 1683 {
1684 struct at_dma *atdma = to_at_dma(atchan->chan_common.device); 1684 struct at_dma *atdma = to_at_dma(atchan->chan_common.device);
1685 1685
1686 /* Restore the channel status for the cyclic descriptor list: 1686 /* Restore the channel status for the cyclic descriptor list:
1687 * resume from the next descriptor that was pending at suspend time. */ 1687 * resume from the next descriptor that was pending at suspend time. */
1688 channel_writel(atchan, SADDR, 0); 1688 channel_writel(atchan, SADDR, 0);
1689 channel_writel(atchan, DADDR, 0); 1689 channel_writel(atchan, DADDR, 0);
1690 channel_writel(atchan, CTRLA, 0); 1690 channel_writel(atchan, CTRLA, 0);
1691 channel_writel(atchan, CTRLB, 0); 1691 channel_writel(atchan, CTRLB, 0);
1692 channel_writel(atchan, DSCR, atchan->save_dscr); 1692 channel_writel(atchan, DSCR, atchan->save_dscr);
1693 dma_writel(atdma, CHER, atchan->mask); 1693 dma_writel(atdma, CHER, atchan->mask);
1694 1694
1695 /* The pause status should be cleared by the channel user; 1695 /* The pause status should be cleared by the channel user;
1696 * we cannot take the initiative to do it here. */ 1696 * we cannot take the initiative to do it here. */
1697 1697
1698 vdbg_dump_regs(atchan); 1698 vdbg_dump_regs(atchan);
1699 } 1699 }
1700 1700
1701 static int at_dma_resume_noirq(struct device *dev) 1701 static int at_dma_resume_noirq(struct device *dev)
1702 { 1702 {
1703 struct platform_device *pdev = to_platform_device(dev); 1703 struct platform_device *pdev = to_platform_device(dev);
1704 struct at_dma *atdma = platform_get_drvdata(pdev); 1704 struct at_dma *atdma = platform_get_drvdata(pdev);
1705 struct dma_chan *chan, *_chan; 1705 struct dma_chan *chan, *_chan;
1706 1706
1707 /* bring back DMA controller */ 1707 /* bring back DMA controller */
1708 clk_prepare_enable(atdma->clk); 1708 clk_prepare_enable(atdma->clk);
1709 dma_writel(atdma, EN, AT_DMA_ENABLE); 1709 dma_writel(atdma, EN, AT_DMA_ENABLE);
1710 1710
1711 /* clear any pending interrupt */ 1711 /* clear any pending interrupt */
1712 while (dma_readl(atdma, EBCISR)) 1712 while (dma_readl(atdma, EBCISR))
1713 cpu_relax(); 1713 cpu_relax();
1714 1714
1715 /* restore saved data */ 1715 /* restore saved data */
1716 dma_writel(atdma, EBCIER, atdma->save_imr); 1716 dma_writel(atdma, EBCIER, atdma->save_imr);
1717 list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels, 1717 list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
1718 device_node) { 1718 device_node) {
1719 struct at_dma_chan *atchan = to_at_dma_chan(chan); 1719 struct at_dma_chan *atchan = to_at_dma_chan(chan);
1720 1720
1721 channel_writel(atchan, CFG, atchan->save_cfg); 1721 channel_writel(atchan, CFG, atchan->save_cfg);
1722 if (atc_chan_is_cyclic(atchan)) 1722 if (atc_chan_is_cyclic(atchan))
1723 atc_resume_cyclic(atchan); 1723 atc_resume_cyclic(atchan);
1724 } 1724 }
1725 return 0; 1725 return 0;
1726 } 1726 }
1727 1727
1728 static const struct dev_pm_ops at_dma_dev_pm_ops = { 1728 static const struct dev_pm_ops at_dma_dev_pm_ops = {
1729 .prepare = at_dma_prepare, 1729 .prepare = at_dma_prepare,
1730 .suspend_noirq = at_dma_suspend_noirq, 1730 .suspend_noirq = at_dma_suspend_noirq,
1731 .resume_noirq = at_dma_resume_noirq, 1731 .resume_noirq = at_dma_resume_noirq,
1732 }; 1732 };
1733 1733
1734 static struct platform_driver at_dma_driver = { 1734 static struct platform_driver at_dma_driver = {
1735 .remove = at_dma_remove, 1735 .remove = at_dma_remove,
1736 .shutdown = at_dma_shutdown, 1736 .shutdown = at_dma_shutdown,
1737 .id_table = atdma_devtypes, 1737 .id_table = atdma_devtypes,
1738 .driver = { 1738 .driver = {
1739 .name = "at_hdmac", 1739 .name = "at_hdmac",
1740 .pm = &at_dma_dev_pm_ops, 1740 .pm = &at_dma_dev_pm_ops,
1741 .of_match_table = of_match_ptr(atmel_dma_dt_ids), 1741 .of_match_table = of_match_ptr(atmel_dma_dt_ids),
1742 }, 1742 },
1743 }; 1743 };
1744 1744
1745 static int __init at_dma_init(void) 1745 static int __init at_dma_init(void)
1746 { 1746 {
1747 return platform_driver_probe(&at_dma_driver, at_dma_probe); 1747 return platform_driver_probe(&at_dma_driver, at_dma_probe);
1748 } 1748 }
1749 subsys_initcall(at_dma_init); 1749 subsys_initcall(at_dma_init);
1750 1750
1751 static void __exit at_dma_exit(void) 1751 static void __exit at_dma_exit(void)
1752 { 1752 {
1753 platform_driver_unregister(&at_dma_driver); 1753 platform_driver_unregister(&at_dma_driver);
1754 } 1754 }
1755 module_exit(at_dma_exit); 1755 module_exit(at_dma_exit);
1756 1756
1757 MODULE_DESCRIPTION("Atmel AHB DMA Controller driver"); 1757 MODULE_DESCRIPTION("Atmel AHB DMA Controller driver");
1758 MODULE_AUTHOR("Nicolas Ferre <nicolas.ferre@atmel.com>"); 1758 MODULE_AUTHOR("Nicolas Ferre <nicolas.ferre@atmel.com>");
1759 MODULE_LICENSE("GPL"); 1759 MODULE_LICENSE("GPL");
1760 MODULE_ALIAS("platform:at_hdmac"); 1760 MODULE_ALIAS("platform:at_hdmac");
1761 1761
drivers/dma/coh901318.c
1 /* 1 /*
2 * drivers/dma/coh901318.c 2 * drivers/dma/coh901318.c
3 * 3 *
4 * Copyright (C) 2007-2009 ST-Ericsson 4 * Copyright (C) 2007-2009 ST-Ericsson
5 * License terms: GNU General Public License (GPL) version 2 5 * License terms: GNU General Public License (GPL) version 2
6 * DMA driver for COH 901 318 6 * DMA driver for COH 901 318
7 * Author: Per Friden <per.friden@stericsson.com> 7 * Author: Per Friden <per.friden@stericsson.com>
8 */ 8 */
9 9
10 #include <linux/init.h> 10 #include <linux/init.h>
11 #include <linux/module.h> 11 #include <linux/module.h>
12 #include <linux/kernel.h> /* printk() */ 12 #include <linux/kernel.h> /* printk() */
13 #include <linux/fs.h> /* everything... */ 13 #include <linux/fs.h> /* everything... */
14 #include <linux/scatterlist.h> 14 #include <linux/scatterlist.h>
15 #include <linux/slab.h> /* kmalloc() */ 15 #include <linux/slab.h> /* kmalloc() */
16 #include <linux/dmaengine.h> 16 #include <linux/dmaengine.h>
17 #include <linux/platform_device.h> 17 #include <linux/platform_device.h>
18 #include <linux/device.h> 18 #include <linux/device.h>
19 #include <linux/irqreturn.h> 19 #include <linux/irqreturn.h>
20 #include <linux/interrupt.h> 20 #include <linux/interrupt.h>
21 #include <linux/io.h> 21 #include <linux/io.h>
22 #include <linux/uaccess.h> 22 #include <linux/uaccess.h>
23 #include <linux/debugfs.h> 23 #include <linux/debugfs.h>
24 #include <linux/platform_data/dma-coh901318.h> 24 #include <linux/platform_data/dma-coh901318.h>
25 #include <linux/of_dma.h> 25 #include <linux/of_dma.h>
26 26
27 #include "coh901318.h" 27 #include "coh901318.h"
28 #include "dmaengine.h" 28 #include "dmaengine.h"
29 29
30 #define COH901318_MOD32_MASK (0x1F) 30 #define COH901318_MOD32_MASK (0x1F)
31 #define COH901318_WORD_MASK (0xFFFFFFFF) 31 #define COH901318_WORD_MASK (0xFFFFFFFF)
32 /* INT_STATUS - Interrupt Status Registers 32bit (R/-) */ 32 /* INT_STATUS - Interrupt Status Registers 32bit (R/-) */
33 #define COH901318_INT_STATUS1 (0x0000) 33 #define COH901318_INT_STATUS1 (0x0000)
34 #define COH901318_INT_STATUS2 (0x0004) 34 #define COH901318_INT_STATUS2 (0x0004)
35 /* TC_INT_STATUS - Terminal Count Interrupt Status Registers 32bit (R/-) */ 35 /* TC_INT_STATUS - Terminal Count Interrupt Status Registers 32bit (R/-) */
36 #define COH901318_TC_INT_STATUS1 (0x0008) 36 #define COH901318_TC_INT_STATUS1 (0x0008)
37 #define COH901318_TC_INT_STATUS2 (0x000C) 37 #define COH901318_TC_INT_STATUS2 (0x000C)
38 /* TC_INT_CLEAR - Terminal Count Interrupt Clear Registers 32bit (-/W) */ 38 /* TC_INT_CLEAR - Terminal Count Interrupt Clear Registers 32bit (-/W) */
39 #define COH901318_TC_INT_CLEAR1 (0x0010) 39 #define COH901318_TC_INT_CLEAR1 (0x0010)
40 #define COH901318_TC_INT_CLEAR2 (0x0014) 40 #define COH901318_TC_INT_CLEAR2 (0x0014)
41 /* RAW_TC_INT_STATUS - Raw Term Count Interrupt Status Registers 32bit (R/-) */ 41 /* RAW_TC_INT_STATUS - Raw Term Count Interrupt Status Registers 32bit (R/-) */
42 #define COH901318_RAW_TC_INT_STATUS1 (0x0018) 42 #define COH901318_RAW_TC_INT_STATUS1 (0x0018)
43 #define COH901318_RAW_TC_INT_STATUS2 (0x001C) 43 #define COH901318_RAW_TC_INT_STATUS2 (0x001C)
44 /* BE_INT_STATUS - Bus Error Interrupt Status Registers 32bit (R/-) */ 44 /* BE_INT_STATUS - Bus Error Interrupt Status Registers 32bit (R/-) */
45 #define COH901318_BE_INT_STATUS1 (0x0020) 45 #define COH901318_BE_INT_STATUS1 (0x0020)
46 #define COH901318_BE_INT_STATUS2 (0x0024) 46 #define COH901318_BE_INT_STATUS2 (0x0024)
47 /* BE_INT_CLEAR - Bus Error Interrupt Clear Registers 32bit (-/W) */ 47 /* BE_INT_CLEAR - Bus Error Interrupt Clear Registers 32bit (-/W) */
48 #define COH901318_BE_INT_CLEAR1 (0x0028) 48 #define COH901318_BE_INT_CLEAR1 (0x0028)
49 #define COH901318_BE_INT_CLEAR2 (0x002C) 49 #define COH901318_BE_INT_CLEAR2 (0x002C)
50 /* RAW_BE_INT_STATUS - Raw Bus Error Interrupt Status Registers 32bit (R/-) */ 50 /* RAW_BE_INT_STATUS - Raw Bus Error Interrupt Status Registers 32bit (R/-) */
51 #define COH901318_RAW_BE_INT_STATUS1 (0x0030) 51 #define COH901318_RAW_BE_INT_STATUS1 (0x0030)
52 #define COH901318_RAW_BE_INT_STATUS2 (0x0034) 52 #define COH901318_RAW_BE_INT_STATUS2 (0x0034)
53 53
54 /* 54 /*
55 * CX_CFG - Channel Configuration Registers 32bit (R/W) 55 * CX_CFG - Channel Configuration Registers 32bit (R/W)
56 */ 56 */
57 #define COH901318_CX_CFG (0x0100) 57 #define COH901318_CX_CFG (0x0100)
58 #define COH901318_CX_CFG_SPACING (0x04) 58 #define COH901318_CX_CFG_SPACING (0x04)
59 /* Channel enable activates the dma job */ 59 /* Channel enable activates the dma job */
60 #define COH901318_CX_CFG_CH_ENABLE (0x00000001) 60 #define COH901318_CX_CFG_CH_ENABLE (0x00000001)
61 #define COH901318_CX_CFG_CH_DISABLE (0x00000000) 61 #define COH901318_CX_CFG_CH_DISABLE (0x00000000)
62 /* Request Mode */ 62 /* Request Mode */
63 #define COH901318_CX_CFG_RM_MASK (0x00000006) 63 #define COH901318_CX_CFG_RM_MASK (0x00000006)
64 #define COH901318_CX_CFG_RM_MEMORY_TO_MEMORY (0x0 << 1) 64 #define COH901318_CX_CFG_RM_MEMORY_TO_MEMORY (0x0 << 1)
65 #define COH901318_CX_CFG_RM_PRIMARY_TO_MEMORY (0x1 << 1) 65 #define COH901318_CX_CFG_RM_PRIMARY_TO_MEMORY (0x1 << 1)
66 #define COH901318_CX_CFG_RM_MEMORY_TO_PRIMARY (0x1 << 1) 66 #define COH901318_CX_CFG_RM_MEMORY_TO_PRIMARY (0x1 << 1)
67 #define COH901318_CX_CFG_RM_PRIMARY_TO_SECONDARY (0x3 << 1) 67 #define COH901318_CX_CFG_RM_PRIMARY_TO_SECONDARY (0x3 << 1)
68 #define COH901318_CX_CFG_RM_SECONDARY_TO_PRIMARY (0x3 << 1) 68 #define COH901318_CX_CFG_RM_SECONDARY_TO_PRIMARY (0x3 << 1)
69 /* Linked channel request field. RM must be set to 11 (binary). */ 69 /* Linked channel request field. RM must be set to 11 (binary). */
70 #define COH901318_CX_CFG_LCRF_SHIFT 3 70 #define COH901318_CX_CFG_LCRF_SHIFT 3
71 #define COH901318_CX_CFG_LCRF_MASK (0x000001F8) 71 #define COH901318_CX_CFG_LCRF_MASK (0x000001F8)
72 #define COH901318_CX_CFG_LCR_DISABLE (0x00000000) 72 #define COH901318_CX_CFG_LCR_DISABLE (0x00000000)
73 /* Terminal Counter Interrupt Request Mask */ 73 /* Terminal Counter Interrupt Request Mask */
74 #define COH901318_CX_CFG_TC_IRQ_ENABLE (0x00000200) 74 #define COH901318_CX_CFG_TC_IRQ_ENABLE (0x00000200)
75 #define COH901318_CX_CFG_TC_IRQ_DISABLE (0x00000000) 75 #define COH901318_CX_CFG_TC_IRQ_DISABLE (0x00000000)
76 /* Bus Error interrupt Mask */ 76 /* Bus Error interrupt Mask */
77 #define COH901318_CX_CFG_BE_IRQ_ENABLE (0x00000400) 77 #define COH901318_CX_CFG_BE_IRQ_ENABLE (0x00000400)
78 #define COH901318_CX_CFG_BE_IRQ_DISABLE (0x00000000) 78 #define COH901318_CX_CFG_BE_IRQ_DISABLE (0x00000000)
79 79
80 /* 80 /*
81 * CX_STAT - Channel Status Registers 32bit (R/-) 81 * CX_STAT - Channel Status Registers 32bit (R/-)
82 */ 82 */
83 #define COH901318_CX_STAT (0x0200) 83 #define COH901318_CX_STAT (0x0200)
84 #define COH901318_CX_STAT_SPACING (0x04) 84 #define COH901318_CX_STAT_SPACING (0x04)
85 #define COH901318_CX_STAT_RBE_IRQ_IND (0x00000008) 85 #define COH901318_CX_STAT_RBE_IRQ_IND (0x00000008)
86 #define COH901318_CX_STAT_RTC_IRQ_IND (0x00000004) 86 #define COH901318_CX_STAT_RTC_IRQ_IND (0x00000004)
87 #define COH901318_CX_STAT_ACTIVE (0x00000002) 87 #define COH901318_CX_STAT_ACTIVE (0x00000002)
88 #define COH901318_CX_STAT_ENABLED (0x00000001) 88 #define COH901318_CX_STAT_ENABLED (0x00000001)
89 89
90 /* 90 /*
91 * CX_CTRL - Channel Control Registers 32bit (R/W) 91 * CX_CTRL - Channel Control Registers 32bit (R/W)
92 */ 92 */
93 #define COH901318_CX_CTRL (0x0400) 93 #define COH901318_CX_CTRL (0x0400)
94 #define COH901318_CX_CTRL_SPACING (0x10) 94 #define COH901318_CX_CTRL_SPACING (0x10)
95 /* Transfer Count Enable */ 95 /* Transfer Count Enable */
96 #define COH901318_CX_CTRL_TC_ENABLE (0x00001000) 96 #define COH901318_CX_CTRL_TC_ENABLE (0x00001000)
97 #define COH901318_CX_CTRL_TC_DISABLE (0x00000000) 97 #define COH901318_CX_CTRL_TC_DISABLE (0x00000000)
98 /* Transfer Count Value 0 - 4095 */ 98 /* Transfer Count Value 0 - 4095 */
99 #define COH901318_CX_CTRL_TC_VALUE_MASK (0x00000FFF) 99 #define COH901318_CX_CTRL_TC_VALUE_MASK (0x00000FFF)
100 /* Burst count */ 100 /* Burst count */
101 #define COH901318_CX_CTRL_BURST_COUNT_MASK (0x0000E000) 101 #define COH901318_CX_CTRL_BURST_COUNT_MASK (0x0000E000)
102 #define COH901318_CX_CTRL_BURST_COUNT_64_BYTES (0x7 << 13) 102 #define COH901318_CX_CTRL_BURST_COUNT_64_BYTES (0x7 << 13)
103 #define COH901318_CX_CTRL_BURST_COUNT_48_BYTES (0x6 << 13) 103 #define COH901318_CX_CTRL_BURST_COUNT_48_BYTES (0x6 << 13)
104 #define COH901318_CX_CTRL_BURST_COUNT_32_BYTES (0x5 << 13) 104 #define COH901318_CX_CTRL_BURST_COUNT_32_BYTES (0x5 << 13)
105 #define COH901318_CX_CTRL_BURST_COUNT_16_BYTES (0x4 << 13) 105 #define COH901318_CX_CTRL_BURST_COUNT_16_BYTES (0x4 << 13)
106 #define COH901318_CX_CTRL_BURST_COUNT_8_BYTES (0x3 << 13) 106 #define COH901318_CX_CTRL_BURST_COUNT_8_BYTES (0x3 << 13)
107 #define COH901318_CX_CTRL_BURST_COUNT_4_BYTES (0x2 << 13) 107 #define COH901318_CX_CTRL_BURST_COUNT_4_BYTES (0x2 << 13)
108 #define COH901318_CX_CTRL_BURST_COUNT_2_BYTES (0x1 << 13) 108 #define COH901318_CX_CTRL_BURST_COUNT_2_BYTES (0x1 << 13)
109 #define COH901318_CX_CTRL_BURST_COUNT_1_BYTE (0x0 << 13) 109 #define COH901318_CX_CTRL_BURST_COUNT_1_BYTE (0x0 << 13)
110 /* Source bus size */ 110 /* Source bus size */
111 #define COH901318_CX_CTRL_SRC_BUS_SIZE_MASK (0x00030000) 111 #define COH901318_CX_CTRL_SRC_BUS_SIZE_MASK (0x00030000)
112 #define COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS (0x2 << 16) 112 #define COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS (0x2 << 16)
113 #define COH901318_CX_CTRL_SRC_BUS_SIZE_16_BITS (0x1 << 16) 113 #define COH901318_CX_CTRL_SRC_BUS_SIZE_16_BITS (0x1 << 16)
114 #define COH901318_CX_CTRL_SRC_BUS_SIZE_8_BITS (0x0 << 16) 114 #define COH901318_CX_CTRL_SRC_BUS_SIZE_8_BITS (0x0 << 16)
115 /* Source address increment */ 115 /* Source address increment */
116 #define COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE (0x00040000) 116 #define COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE (0x00040000)
117 #define COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE (0x00000000) 117 #define COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE (0x00000000)
118 /* Destination Bus Size */ 118 /* Destination Bus Size */
119 #define COH901318_CX_CTRL_DST_BUS_SIZE_MASK (0x00180000) 119 #define COH901318_CX_CTRL_DST_BUS_SIZE_MASK (0x00180000)
120 #define COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS (0x2 << 19) 120 #define COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS (0x2 << 19)
121 #define COH901318_CX_CTRL_DST_BUS_SIZE_16_BITS (0x1 << 19) 121 #define COH901318_CX_CTRL_DST_BUS_SIZE_16_BITS (0x1 << 19)
122 #define COH901318_CX_CTRL_DST_BUS_SIZE_8_BITS (0x0 << 19) 122 #define COH901318_CX_CTRL_DST_BUS_SIZE_8_BITS (0x0 << 19)
123 /* Destination address increment */ 123 /* Destination address increment */
124 #define COH901318_CX_CTRL_DST_ADDR_INC_ENABLE (0x00200000) 124 #define COH901318_CX_CTRL_DST_ADDR_INC_ENABLE (0x00200000)
125 #define COH901318_CX_CTRL_DST_ADDR_INC_DISABLE (0x00000000) 125 #define COH901318_CX_CTRL_DST_ADDR_INC_DISABLE (0x00000000)
126 /* Master Mode (Master2 is only connected to MSL) */ 126 /* Master Mode (Master2 is only connected to MSL) */
127 #define COH901318_CX_CTRL_MASTER_MODE_MASK (0x00C00000) 127 #define COH901318_CX_CTRL_MASTER_MODE_MASK (0x00C00000)
128 #define COH901318_CX_CTRL_MASTER_MODE_M2R_M1W (0x3 << 22) 128 #define COH901318_CX_CTRL_MASTER_MODE_M2R_M1W (0x3 << 22)
129 #define COH901318_CX_CTRL_MASTER_MODE_M1R_M2W (0x2 << 22) 129 #define COH901318_CX_CTRL_MASTER_MODE_M1R_M2W (0x2 << 22)
130 #define COH901318_CX_CTRL_MASTER_MODE_M2RW (0x1 << 22) 130 #define COH901318_CX_CTRL_MASTER_MODE_M2RW (0x1 << 22)
131 #define COH901318_CX_CTRL_MASTER_MODE_M1RW (0x0 << 22) 131 #define COH901318_CX_CTRL_MASTER_MODE_M1RW (0x0 << 22)
132 /* Terminal Count flag to PER enable */ 132 /* Terminal Count flag to PER enable */
133 #define COH901318_CX_CTRL_TCP_ENABLE (0x01000000) 133 #define COH901318_CX_CTRL_TCP_ENABLE (0x01000000)
134 #define COH901318_CX_CTRL_TCP_DISABLE (0x00000000) 134 #define COH901318_CX_CTRL_TCP_DISABLE (0x00000000)
135 /* Terminal Count flags to CPU enable */ 135 /* Terminal Count flags to CPU enable */
136 #define COH901318_CX_CTRL_TC_IRQ_ENABLE (0x02000000) 136 #define COH901318_CX_CTRL_TC_IRQ_ENABLE (0x02000000)
137 #define COH901318_CX_CTRL_TC_IRQ_DISABLE (0x00000000) 137 #define COH901318_CX_CTRL_TC_IRQ_DISABLE (0x00000000)
138 /* Handshake to peripheral */ 138 /* Handshake to peripheral */
139 #define COH901318_CX_CTRL_HSP_ENABLE (0x04000000) 139 #define COH901318_CX_CTRL_HSP_ENABLE (0x04000000)
140 #define COH901318_CX_CTRL_HSP_DISABLE (0x00000000) 140 #define COH901318_CX_CTRL_HSP_DISABLE (0x00000000)
141 #define COH901318_CX_CTRL_HSS_ENABLE (0x08000000) 141 #define COH901318_CX_CTRL_HSS_ENABLE (0x08000000)
142 #define COH901318_CX_CTRL_HSS_DISABLE (0x00000000) 142 #define COH901318_CX_CTRL_HSS_DISABLE (0x00000000)
143 /* DMA mode */ 143 /* DMA mode */
144 #define COH901318_CX_CTRL_DDMA_MASK (0x30000000) 144 #define COH901318_CX_CTRL_DDMA_MASK (0x30000000)
145 #define COH901318_CX_CTRL_DDMA_LEGACY (0x0 << 28) 145 #define COH901318_CX_CTRL_DDMA_LEGACY (0x0 << 28)
146 #define COH901318_CX_CTRL_DDMA_DEMAND_DMA1 (0x1 << 28) 146 #define COH901318_CX_CTRL_DDMA_DEMAND_DMA1 (0x1 << 28)
147 #define COH901318_CX_CTRL_DDMA_DEMAND_DMA2 (0x2 << 28) 147 #define COH901318_CX_CTRL_DDMA_DEMAND_DMA2 (0x2 << 28)
148 /* Primary Request Data Destination */ 148 /* Primary Request Data Destination */
149 #define COH901318_CX_CTRL_PRDD_MASK (0x40000000) 149 #define COH901318_CX_CTRL_PRDD_MASK (0x40000000)
150 #define COH901318_CX_CTRL_PRDD_DEST (0x1 << 30) 150 #define COH901318_CX_CTRL_PRDD_DEST (0x1 << 30)
151 #define COH901318_CX_CTRL_PRDD_SOURCE (0x0 << 30) 151 #define COH901318_CX_CTRL_PRDD_SOURCE (0x0 << 30)
152 152
153 /* 153 /*
154 * CX_SRC_ADDR - Channel Source Address Registers 32bit (R/W) 154 * CX_SRC_ADDR - Channel Source Address Registers 32bit (R/W)
155 */ 155 */
156 #define COH901318_CX_SRC_ADDR (0x0404) 156 #define COH901318_CX_SRC_ADDR (0x0404)
157 #define COH901318_CX_SRC_ADDR_SPACING (0x10) 157 #define COH901318_CX_SRC_ADDR_SPACING (0x10)
158 158
159 /* 159 /*
160 * CX_DST_ADDR - Channel Destination Address Registers 32bit R/W 160 * CX_DST_ADDR - Channel Destination Address Registers 32bit R/W
161 */ 161 */
162 #define COH901318_CX_DST_ADDR (0x0408) 162 #define COH901318_CX_DST_ADDR (0x0408)
163 #define COH901318_CX_DST_ADDR_SPACING (0x10) 163 #define COH901318_CX_DST_ADDR_SPACING (0x10)
164 164
165 /* 165 /*
166 * CX_LNK_ADDR - Channel Link Address Registers 32bit (R/W) 166 * CX_LNK_ADDR - Channel Link Address Registers 32bit (R/W)
167 */ 167 */
168 #define COH901318_CX_LNK_ADDR (0x040C) 168 #define COH901318_CX_LNK_ADDR (0x040C)
169 #define COH901318_CX_LNK_ADDR_SPACING (0x10) 169 #define COH901318_CX_LNK_ADDR_SPACING (0x10)
170 #define COH901318_CX_LNK_LINK_IMMEDIATE (0x00000001) 170 #define COH901318_CX_LNK_LINK_IMMEDIATE (0x00000001)
171 171
172 /** 172 /**
173 * struct coh901318_params - parameters for DMAC configuration 173 * struct coh901318_params - parameters for DMAC configuration
174 * @config: DMA config register 174 * @config: DMA config register
175 * @ctrl_lli_last: DMA control register for the last lli in the list 175 * @ctrl_lli_last: DMA control register for the last lli in the list
176 * @ctrl_lli: DMA control register for an lli 176 * @ctrl_lli: DMA control register for an lli
177 * @ctrl_lli_chained: DMA control register for a chained lli 177 * @ctrl_lli_chained: DMA control register for a chained lli
178 */ 178 */
179 struct coh901318_params { 179 struct coh901318_params {
180 u32 config; 180 u32 config;
181 u32 ctrl_lli_last; 181 u32 ctrl_lli_last;
182 u32 ctrl_lli; 182 u32 ctrl_lli;
183 u32 ctrl_lli_chained; 183 u32 ctrl_lli_chained;
184 }; 184 };
185 185
186 /** 186 /**
187 * struct coh_dma_channel - dma channel base 187 * struct coh_dma_channel - dma channel base
188 * @name: ascii name of dma channel 188 * @name: ascii name of dma channel
189 * @number: channel id number 189 * @number: channel id number
190 * @desc_nbr_max: number of preallocated descriptors 190 * @desc_nbr_max: number of preallocated descriptors
191 * @priority_high: channel priority: 0 means low priority, otherwise high. 191 * @priority_high: channel priority: 0 means low priority, otherwise high.
192 * @param: configuration parameters 192 * @param: configuration parameters
193 */ 193 */
194 struct coh_dma_channel { 194 struct coh_dma_channel {
195 const char name[32]; 195 const char name[32];
196 const int number; 196 const int number;
197 const int desc_nbr_max; 197 const int desc_nbr_max;
198 const int priority_high; 198 const int priority_high;
199 const struct coh901318_params param; 199 const struct coh901318_params param;
200 }; 200 };
201 201
202 /** 202 /**
203 * struct powersave - DMA power save structure 203 * struct powersave - DMA power save structure
204 * @lock: lock protecting data in this struct 204 * @lock: lock protecting data in this struct
205 * @started_channels: bit mask indicating active dma channels 205 * @started_channels: bit mask indicating active dma channels
206 */ 206 */
207 struct powersave { 207 struct powersave {
208 spinlock_t lock; 208 spinlock_t lock;
209 u64 started_channels; 209 u64 started_channels;
210 }; 210 };
211 211
212 /* points out all dma slave channels. 212 /* points out all dma slave channels.
213 * Syntax is [A1, B1, A2, B2, ..., -1, -1]: 213 * Syntax is [A1, B1, A2, B2, ..., -1, -1]:
214 * each pair selects all channels from A to B; the end of the list is marked with -1, -1. 214 * each pair selects all channels from A to B; the end of the list is marked with -1, -1.
215 */ 215 */
216 static int dma_slave_channels[] = { 216 static int dma_slave_channels[] = {
217 U300_DMA_MSL_TX_0, U300_DMA_SPI_RX, 217 U300_DMA_MSL_TX_0, U300_DMA_SPI_RX,
218 U300_DMA_UART1_TX, U300_DMA_UART1_RX, -1, -1}; 218 U300_DMA_UART1_TX, U300_DMA_UART1_RX, -1, -1};
219 219
220 /* points out all dma memcpy channels. */ 220 /* points out all dma memcpy channels. */
221 static int dma_memcpy_channels[] = { 221 static int dma_memcpy_channels[] = {
222 U300_DMA_GENERAL_PURPOSE_0, U300_DMA_GENERAL_PURPOSE_8, -1, -1}; 222 U300_DMA_GENERAL_PURPOSE_0, U300_DMA_GENERAL_PURPOSE_8, -1, -1};
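/*
 * An illustrative sketch, not part of this driver: the
 * [A1, B1, A2, B2, ..., -1, -1] syntax described above is a list of
 * inclusive (start, end) pairs. A walker over such a range list, with a
 * hypothetical per-channel hook "mark_channel", would be roughly:
 *
 *	for (i = 0; channels[i] != -1; i += 2)
 *		for (ch = channels[i]; ch <= channels[i + 1]; ch++)
 *			mark_channel(ch);
 */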
223 223
224 #define flags_memcpy_config (COH901318_CX_CFG_CH_DISABLE | \ 224 #define flags_memcpy_config (COH901318_CX_CFG_CH_DISABLE | \
225 COH901318_CX_CFG_RM_MEMORY_TO_MEMORY | \ 225 COH901318_CX_CFG_RM_MEMORY_TO_MEMORY | \
226 COH901318_CX_CFG_LCR_DISABLE | \ 226 COH901318_CX_CFG_LCR_DISABLE | \
227 COH901318_CX_CFG_TC_IRQ_ENABLE | \ 227 COH901318_CX_CFG_TC_IRQ_ENABLE | \
228 COH901318_CX_CFG_BE_IRQ_ENABLE) 228 COH901318_CX_CFG_BE_IRQ_ENABLE)
229 #define flags_memcpy_lli_chained (COH901318_CX_CTRL_TC_ENABLE | \ 229 #define flags_memcpy_lli_chained (COH901318_CX_CTRL_TC_ENABLE | \
230 COH901318_CX_CTRL_BURST_COUNT_32_BYTES | \ 230 COH901318_CX_CTRL_BURST_COUNT_32_BYTES | \
231 COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | \ 231 COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | \
232 COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE | \ 232 COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE | \
233 COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | \ 233 COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | \
234 COH901318_CX_CTRL_DST_ADDR_INC_ENABLE | \ 234 COH901318_CX_CTRL_DST_ADDR_INC_ENABLE | \
235 COH901318_CX_CTRL_MASTER_MODE_M1RW | \ 235 COH901318_CX_CTRL_MASTER_MODE_M1RW | \
236 COH901318_CX_CTRL_TCP_DISABLE | \ 236 COH901318_CX_CTRL_TCP_DISABLE | \
237 COH901318_CX_CTRL_TC_IRQ_DISABLE | \ 237 COH901318_CX_CTRL_TC_IRQ_DISABLE | \
238 COH901318_CX_CTRL_HSP_DISABLE | \ 238 COH901318_CX_CTRL_HSP_DISABLE | \
239 COH901318_CX_CTRL_HSS_DISABLE | \ 239 COH901318_CX_CTRL_HSS_DISABLE | \
240 COH901318_CX_CTRL_DDMA_LEGACY | \ 240 COH901318_CX_CTRL_DDMA_LEGACY | \
241 COH901318_CX_CTRL_PRDD_SOURCE) 241 COH901318_CX_CTRL_PRDD_SOURCE)
242 #define flags_memcpy_lli (COH901318_CX_CTRL_TC_ENABLE | \ 242 #define flags_memcpy_lli (COH901318_CX_CTRL_TC_ENABLE | \
243 COH901318_CX_CTRL_BURST_COUNT_32_BYTES | \ 243 COH901318_CX_CTRL_BURST_COUNT_32_BYTES | \
244 COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | \ 244 COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | \
245 COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE | \ 245 COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE | \
246 COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | \ 246 COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | \
247 COH901318_CX_CTRL_DST_ADDR_INC_ENABLE | \ 247 COH901318_CX_CTRL_DST_ADDR_INC_ENABLE | \
248 COH901318_CX_CTRL_MASTER_MODE_M1RW | \ 248 COH901318_CX_CTRL_MASTER_MODE_M1RW | \
249 COH901318_CX_CTRL_TCP_DISABLE | \ 249 COH901318_CX_CTRL_TCP_DISABLE | \
250 COH901318_CX_CTRL_TC_IRQ_DISABLE | \ 250 COH901318_CX_CTRL_TC_IRQ_DISABLE | \
251 COH901318_CX_CTRL_HSP_DISABLE | \ 251 COH901318_CX_CTRL_HSP_DISABLE | \
252 COH901318_CX_CTRL_HSS_DISABLE | \ 252 COH901318_CX_CTRL_HSS_DISABLE | \
253 COH901318_CX_CTRL_DDMA_LEGACY | \ 253 COH901318_CX_CTRL_DDMA_LEGACY | \
254 COH901318_CX_CTRL_PRDD_SOURCE) 254 COH901318_CX_CTRL_PRDD_SOURCE)
255 #define flags_memcpy_lli_last (COH901318_CX_CTRL_TC_ENABLE | \ 255 #define flags_memcpy_lli_last (COH901318_CX_CTRL_TC_ENABLE | \
256 COH901318_CX_CTRL_BURST_COUNT_32_BYTES | \ 256 COH901318_CX_CTRL_BURST_COUNT_32_BYTES | \
257 COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | \ 257 COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | \
258 COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE | \ 258 COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE | \
259 COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | \ 259 COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | \
260 COH901318_CX_CTRL_DST_ADDR_INC_ENABLE | \ 260 COH901318_CX_CTRL_DST_ADDR_INC_ENABLE | \
261 COH901318_CX_CTRL_MASTER_MODE_M1RW | \ 261 COH901318_CX_CTRL_MASTER_MODE_M1RW | \
262 COH901318_CX_CTRL_TCP_DISABLE | \ 262 COH901318_CX_CTRL_TCP_DISABLE | \
263 COH901318_CX_CTRL_TC_IRQ_ENABLE | \ 263 COH901318_CX_CTRL_TC_IRQ_ENABLE | \
264 COH901318_CX_CTRL_HSP_DISABLE | \ 264 COH901318_CX_CTRL_HSP_DISABLE | \
265 COH901318_CX_CTRL_HSS_DISABLE | \ 265 COH901318_CX_CTRL_HSS_DISABLE | \
266 COH901318_CX_CTRL_DDMA_LEGACY | \ 266 COH901318_CX_CTRL_DDMA_LEGACY | \
267 COH901318_CX_CTRL_PRDD_SOURCE) 267 COH901318_CX_CTRL_PRDD_SOURCE)
268 268
269 const struct coh_dma_channel chan_config[U300_DMA_CHANNELS] = { 269 const struct coh_dma_channel chan_config[U300_DMA_CHANNELS] = {
270 { 270 {
271 .number = U300_DMA_MSL_TX_0, 271 .number = U300_DMA_MSL_TX_0,
272 .name = "MSL TX 0", 272 .name = "MSL TX 0",
273 .priority_high = 0, 273 .priority_high = 0,
274 }, 274 },
275 { 275 {
276 .number = U300_DMA_MSL_TX_1, 276 .number = U300_DMA_MSL_TX_1,
277 .name = "MSL TX 1", 277 .name = "MSL TX 1",
278 .priority_high = 0, 278 .priority_high = 0,
279 .param.config = COH901318_CX_CFG_CH_DISABLE | 279 .param.config = COH901318_CX_CFG_CH_DISABLE |
280 COH901318_CX_CFG_LCR_DISABLE | 280 COH901318_CX_CFG_LCR_DISABLE |
281 COH901318_CX_CFG_TC_IRQ_ENABLE | 281 COH901318_CX_CFG_TC_IRQ_ENABLE |
282 COH901318_CX_CFG_BE_IRQ_ENABLE, 282 COH901318_CX_CFG_BE_IRQ_ENABLE,
283 .param.ctrl_lli_chained = 0 | 283 .param.ctrl_lli_chained = 0 |
284 COH901318_CX_CTRL_TC_ENABLE | 284 COH901318_CX_CTRL_TC_ENABLE |
285 COH901318_CX_CTRL_BURST_COUNT_32_BYTES | 285 COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
286 COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | 286 COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
287 COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE | 287 COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
288 COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | 288 COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
289 COH901318_CX_CTRL_DST_ADDR_INC_DISABLE | 289 COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
290 COH901318_CX_CTRL_MASTER_MODE_M1R_M2W | 290 COH901318_CX_CTRL_MASTER_MODE_M1R_M2W |
291 COH901318_CX_CTRL_TCP_DISABLE | 291 COH901318_CX_CTRL_TCP_DISABLE |
292 COH901318_CX_CTRL_TC_IRQ_DISABLE | 292 COH901318_CX_CTRL_TC_IRQ_DISABLE |
293 COH901318_CX_CTRL_HSP_ENABLE | 293 COH901318_CX_CTRL_HSP_ENABLE |
294 COH901318_CX_CTRL_HSS_DISABLE | 294 COH901318_CX_CTRL_HSS_DISABLE |
295 COH901318_CX_CTRL_DDMA_LEGACY | 295 COH901318_CX_CTRL_DDMA_LEGACY |
296 COH901318_CX_CTRL_PRDD_SOURCE, 296 COH901318_CX_CTRL_PRDD_SOURCE,
297 .param.ctrl_lli = 0 | 297 .param.ctrl_lli = 0 |
298 COH901318_CX_CTRL_TC_ENABLE | 298 COH901318_CX_CTRL_TC_ENABLE |
299 COH901318_CX_CTRL_BURST_COUNT_32_BYTES | 299 COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
300 COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | 300 COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
301 COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE | 301 COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
302 COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | 302 COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
303 COH901318_CX_CTRL_DST_ADDR_INC_DISABLE | 303 COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
304 COH901318_CX_CTRL_MASTER_MODE_M1R_M2W | 304 COH901318_CX_CTRL_MASTER_MODE_M1R_M2W |
305 COH901318_CX_CTRL_TCP_ENABLE | 305 COH901318_CX_CTRL_TCP_ENABLE |
306 COH901318_CX_CTRL_TC_IRQ_DISABLE | 306 COH901318_CX_CTRL_TC_IRQ_DISABLE |
307 COH901318_CX_CTRL_HSP_ENABLE | 307 COH901318_CX_CTRL_HSP_ENABLE |
308 COH901318_CX_CTRL_HSS_DISABLE | 308 COH901318_CX_CTRL_HSS_DISABLE |
309 COH901318_CX_CTRL_DDMA_LEGACY | 309 COH901318_CX_CTRL_DDMA_LEGACY |
310 COH901318_CX_CTRL_PRDD_SOURCE, 310 COH901318_CX_CTRL_PRDD_SOURCE,
311 .param.ctrl_lli_last = 0 | 311 .param.ctrl_lli_last = 0 |
312 COH901318_CX_CTRL_TC_ENABLE | 312 COH901318_CX_CTRL_TC_ENABLE |
313 COH901318_CX_CTRL_BURST_COUNT_32_BYTES | 313 COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
314 COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | 314 COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
315 COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE | 315 COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
316 COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | 316 COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
317 COH901318_CX_CTRL_DST_ADDR_INC_DISABLE | 317 COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
318 COH901318_CX_CTRL_MASTER_MODE_M1R_M2W | 318 COH901318_CX_CTRL_MASTER_MODE_M1R_M2W |
319 COH901318_CX_CTRL_TCP_ENABLE | 319 COH901318_CX_CTRL_TCP_ENABLE |
320 COH901318_CX_CTRL_TC_IRQ_ENABLE | 320 COH901318_CX_CTRL_TC_IRQ_ENABLE |
321 COH901318_CX_CTRL_HSP_ENABLE | 321 COH901318_CX_CTRL_HSP_ENABLE |
322 COH901318_CX_CTRL_HSS_DISABLE | 322 COH901318_CX_CTRL_HSS_DISABLE |
323 COH901318_CX_CTRL_DDMA_LEGACY | 323 COH901318_CX_CTRL_DDMA_LEGACY |
324 COH901318_CX_CTRL_PRDD_SOURCE, 324 COH901318_CX_CTRL_PRDD_SOURCE,
325 }, 325 },
326 { 326 {
327 .number = U300_DMA_MSL_TX_2, 327 .number = U300_DMA_MSL_TX_2,
328 .name = "MSL TX 2", 328 .name = "MSL TX 2",
329 .priority_high = 0, 329 .priority_high = 0,
330 .param.config = COH901318_CX_CFG_CH_DISABLE | 330 .param.config = COH901318_CX_CFG_CH_DISABLE |
331 COH901318_CX_CFG_LCR_DISABLE | 331 COH901318_CX_CFG_LCR_DISABLE |
332 COH901318_CX_CFG_TC_IRQ_ENABLE | 332 COH901318_CX_CFG_TC_IRQ_ENABLE |
333 COH901318_CX_CFG_BE_IRQ_ENABLE, 333 COH901318_CX_CFG_BE_IRQ_ENABLE,
334 .param.ctrl_lli_chained = 0 | 334 .param.ctrl_lli_chained = 0 |
335 COH901318_CX_CTRL_TC_ENABLE | 335 COH901318_CX_CTRL_TC_ENABLE |
336 COH901318_CX_CTRL_BURST_COUNT_32_BYTES | 336 COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
337 COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | 337 COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
338 COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE | 338 COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
339 COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | 339 COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
340 COH901318_CX_CTRL_DST_ADDR_INC_DISABLE | 340 COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
341 COH901318_CX_CTRL_MASTER_MODE_M1R_M2W | 341 COH901318_CX_CTRL_MASTER_MODE_M1R_M2W |
342 COH901318_CX_CTRL_TCP_DISABLE | 342 COH901318_CX_CTRL_TCP_DISABLE |
343 COH901318_CX_CTRL_TC_IRQ_DISABLE | 343 COH901318_CX_CTRL_TC_IRQ_DISABLE |
344 COH901318_CX_CTRL_HSP_ENABLE | 344 COH901318_CX_CTRL_HSP_ENABLE |
345 COH901318_CX_CTRL_HSS_DISABLE | 345 COH901318_CX_CTRL_HSS_DISABLE |
346 COH901318_CX_CTRL_DDMA_LEGACY | 346 COH901318_CX_CTRL_DDMA_LEGACY |
347 COH901318_CX_CTRL_PRDD_SOURCE, 347 COH901318_CX_CTRL_PRDD_SOURCE,
348 .param.ctrl_lli = 0 | 348 .param.ctrl_lli = 0 |
349 COH901318_CX_CTRL_TC_ENABLE | 349 COH901318_CX_CTRL_TC_ENABLE |
350 COH901318_CX_CTRL_BURST_COUNT_32_BYTES | 350 COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
351 COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | 351 COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
352 COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE | 352 COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
353 COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | 353 COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
354 COH901318_CX_CTRL_DST_ADDR_INC_DISABLE | 354 COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
355 COH901318_CX_CTRL_MASTER_MODE_M1R_M2W | 355 COH901318_CX_CTRL_MASTER_MODE_M1R_M2W |
356 COH901318_CX_CTRL_TCP_ENABLE | 356 COH901318_CX_CTRL_TCP_ENABLE |
357 COH901318_CX_CTRL_TC_IRQ_DISABLE | 357 COH901318_CX_CTRL_TC_IRQ_DISABLE |
358 COH901318_CX_CTRL_HSP_ENABLE | 358 COH901318_CX_CTRL_HSP_ENABLE |
359 COH901318_CX_CTRL_HSS_DISABLE | 359 COH901318_CX_CTRL_HSS_DISABLE |
360 COH901318_CX_CTRL_DDMA_LEGACY | 360 COH901318_CX_CTRL_DDMA_LEGACY |
361 COH901318_CX_CTRL_PRDD_SOURCE, 361 COH901318_CX_CTRL_PRDD_SOURCE,
362 .param.ctrl_lli_last = 0 | 362 .param.ctrl_lli_last = 0 |
363 COH901318_CX_CTRL_TC_ENABLE | 363 COH901318_CX_CTRL_TC_ENABLE |
364 COH901318_CX_CTRL_BURST_COUNT_32_BYTES | 364 COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
365 COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | 365 COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
366 COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE | 366 COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
367 COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | 367 COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
368 COH901318_CX_CTRL_DST_ADDR_INC_DISABLE | 368 COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
369 COH901318_CX_CTRL_MASTER_MODE_M1R_M2W | 369 COH901318_CX_CTRL_MASTER_MODE_M1R_M2W |
370 COH901318_CX_CTRL_TCP_ENABLE | 370 COH901318_CX_CTRL_TCP_ENABLE |
371 COH901318_CX_CTRL_TC_IRQ_ENABLE | 371 COH901318_CX_CTRL_TC_IRQ_ENABLE |
372 COH901318_CX_CTRL_HSP_ENABLE | 372 COH901318_CX_CTRL_HSP_ENABLE |
373 COH901318_CX_CTRL_HSS_DISABLE | 373 COH901318_CX_CTRL_HSS_DISABLE |
374 COH901318_CX_CTRL_DDMA_LEGACY | 374 COH901318_CX_CTRL_DDMA_LEGACY |
375 COH901318_CX_CTRL_PRDD_SOURCE, 375 COH901318_CX_CTRL_PRDD_SOURCE,
376 .desc_nbr_max = 10, 376 .desc_nbr_max = 10,
377 }, 377 },
378 { 378 {
379 .number = U300_DMA_MSL_TX_3, 379 .number = U300_DMA_MSL_TX_3,
380 .name = "MSL TX 3", 380 .name = "MSL TX 3",
381 .priority_high = 0, 381 .priority_high = 0,
382 .param.config = COH901318_CX_CFG_CH_DISABLE | 382 .param.config = COH901318_CX_CFG_CH_DISABLE |
383 COH901318_CX_CFG_LCR_DISABLE | 383 COH901318_CX_CFG_LCR_DISABLE |
384 COH901318_CX_CFG_TC_IRQ_ENABLE | 384 COH901318_CX_CFG_TC_IRQ_ENABLE |
385 COH901318_CX_CFG_BE_IRQ_ENABLE, 385 COH901318_CX_CFG_BE_IRQ_ENABLE,
386 .param.ctrl_lli_chained = 0 | 386 .param.ctrl_lli_chained = 0 |
387 COH901318_CX_CTRL_TC_ENABLE | 387 COH901318_CX_CTRL_TC_ENABLE |
388 COH901318_CX_CTRL_BURST_COUNT_32_BYTES | 388 COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
389 COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | 389 COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
390 COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE | 390 COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
391 COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | 391 COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
392 COH901318_CX_CTRL_DST_ADDR_INC_DISABLE | 392 COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
393 COH901318_CX_CTRL_MASTER_MODE_M1R_M2W | 393 COH901318_CX_CTRL_MASTER_MODE_M1R_M2W |
394 COH901318_CX_CTRL_TCP_DISABLE | 394 COH901318_CX_CTRL_TCP_DISABLE |
395 COH901318_CX_CTRL_TC_IRQ_DISABLE | 395 COH901318_CX_CTRL_TC_IRQ_DISABLE |
396 COH901318_CX_CTRL_HSP_ENABLE | 396 COH901318_CX_CTRL_HSP_ENABLE |
397 COH901318_CX_CTRL_HSS_DISABLE | 397 COH901318_CX_CTRL_HSS_DISABLE |
398 COH901318_CX_CTRL_DDMA_LEGACY | 398 COH901318_CX_CTRL_DDMA_LEGACY |
399 COH901318_CX_CTRL_PRDD_SOURCE, 399 COH901318_CX_CTRL_PRDD_SOURCE,
400 .param.ctrl_lli = 0 | 400 .param.ctrl_lli = 0 |
401 COH901318_CX_CTRL_TC_ENABLE | 401 COH901318_CX_CTRL_TC_ENABLE |
402 COH901318_CX_CTRL_BURST_COUNT_32_BYTES | 402 COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
403 COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | 403 COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
404 COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE | 404 COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
405 COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | 405 COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
406 COH901318_CX_CTRL_DST_ADDR_INC_DISABLE | 406 COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
407 COH901318_CX_CTRL_MASTER_MODE_M1R_M2W | 407 COH901318_CX_CTRL_MASTER_MODE_M1R_M2W |
408 COH901318_CX_CTRL_TCP_ENABLE | 408 COH901318_CX_CTRL_TCP_ENABLE |
409 COH901318_CX_CTRL_TC_IRQ_DISABLE | 409 COH901318_CX_CTRL_TC_IRQ_DISABLE |
410 COH901318_CX_CTRL_HSP_ENABLE | 410 COH901318_CX_CTRL_HSP_ENABLE |
411 COH901318_CX_CTRL_HSS_DISABLE | 411 COH901318_CX_CTRL_HSS_DISABLE |
412 COH901318_CX_CTRL_DDMA_LEGACY | 412 COH901318_CX_CTRL_DDMA_LEGACY |
413 COH901318_CX_CTRL_PRDD_SOURCE, 413 COH901318_CX_CTRL_PRDD_SOURCE,
414 .param.ctrl_lli_last = 0 | 414 .param.ctrl_lli_last = 0 |
415 COH901318_CX_CTRL_TC_ENABLE | 415 COH901318_CX_CTRL_TC_ENABLE |
416 COH901318_CX_CTRL_BURST_COUNT_32_BYTES | 416 COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
417 COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | 417 COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
418 COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE | 418 COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
419 COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | 419 COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
420 COH901318_CX_CTRL_DST_ADDR_INC_DISABLE | 420 COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
421 COH901318_CX_CTRL_MASTER_MODE_M1R_M2W | 421 COH901318_CX_CTRL_MASTER_MODE_M1R_M2W |
422 COH901318_CX_CTRL_TCP_ENABLE | 422 COH901318_CX_CTRL_TCP_ENABLE |
423 COH901318_CX_CTRL_TC_IRQ_ENABLE | 423 COH901318_CX_CTRL_TC_IRQ_ENABLE |
424 COH901318_CX_CTRL_HSP_ENABLE | 424 COH901318_CX_CTRL_HSP_ENABLE |
425 COH901318_CX_CTRL_HSS_DISABLE | 425 COH901318_CX_CTRL_HSS_DISABLE |
426 COH901318_CX_CTRL_DDMA_LEGACY | 426 COH901318_CX_CTRL_DDMA_LEGACY |
427 COH901318_CX_CTRL_PRDD_SOURCE, 427 COH901318_CX_CTRL_PRDD_SOURCE,
428 }, 428 },
429 { 429 {
430 .number = U300_DMA_MSL_TX_4, 430 .number = U300_DMA_MSL_TX_4,
431 .name = "MSL TX 4", 431 .name = "MSL TX 4",
432 .priority_high = 0, 432 .priority_high = 0,
433 .param.config = COH901318_CX_CFG_CH_DISABLE | 433 .param.config = COH901318_CX_CFG_CH_DISABLE |
434 COH901318_CX_CFG_LCR_DISABLE | 434 COH901318_CX_CFG_LCR_DISABLE |
435 COH901318_CX_CFG_TC_IRQ_ENABLE | 435 COH901318_CX_CFG_TC_IRQ_ENABLE |
436 COH901318_CX_CFG_BE_IRQ_ENABLE, 436 COH901318_CX_CFG_BE_IRQ_ENABLE,
437 .param.ctrl_lli_chained = 0 | 437 .param.ctrl_lli_chained = 0 |
438 COH901318_CX_CTRL_TC_ENABLE | 438 COH901318_CX_CTRL_TC_ENABLE |
439 COH901318_CX_CTRL_BURST_COUNT_32_BYTES | 439 COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
440 COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | 440 COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
441 COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE | 441 COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
442 COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | 442 COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
443 COH901318_CX_CTRL_DST_ADDR_INC_DISABLE | 443 COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
444 COH901318_CX_CTRL_MASTER_MODE_M1R_M2W | 444 COH901318_CX_CTRL_MASTER_MODE_M1R_M2W |
445 COH901318_CX_CTRL_TCP_DISABLE | 445 COH901318_CX_CTRL_TCP_DISABLE |
446 COH901318_CX_CTRL_TC_IRQ_DISABLE | 446 COH901318_CX_CTRL_TC_IRQ_DISABLE |
447 COH901318_CX_CTRL_HSP_ENABLE | 447 COH901318_CX_CTRL_HSP_ENABLE |
448 COH901318_CX_CTRL_HSS_DISABLE | 448 COH901318_CX_CTRL_HSS_DISABLE |
449 COH901318_CX_CTRL_DDMA_LEGACY | 449 COH901318_CX_CTRL_DDMA_LEGACY |
450 COH901318_CX_CTRL_PRDD_SOURCE, 450 COH901318_CX_CTRL_PRDD_SOURCE,
451 .param.ctrl_lli = 0 | 451 .param.ctrl_lli = 0 |
452 COH901318_CX_CTRL_TC_ENABLE | 452 COH901318_CX_CTRL_TC_ENABLE |
453 COH901318_CX_CTRL_BURST_COUNT_32_BYTES | 453 COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
454 COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | 454 COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
455 COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE | 455 COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
456 COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | 456 COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
457 COH901318_CX_CTRL_DST_ADDR_INC_DISABLE | 457 COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
458 COH901318_CX_CTRL_MASTER_MODE_M1R_M2W | 458 COH901318_CX_CTRL_MASTER_MODE_M1R_M2W |
459 COH901318_CX_CTRL_TCP_ENABLE | 459 COH901318_CX_CTRL_TCP_ENABLE |
460 COH901318_CX_CTRL_TC_IRQ_DISABLE | 460 COH901318_CX_CTRL_TC_IRQ_DISABLE |
461 COH901318_CX_CTRL_HSP_ENABLE | 461 COH901318_CX_CTRL_HSP_ENABLE |
462 COH901318_CX_CTRL_HSS_DISABLE | 462 COH901318_CX_CTRL_HSS_DISABLE |
463 COH901318_CX_CTRL_DDMA_LEGACY | 463 COH901318_CX_CTRL_DDMA_LEGACY |
464 COH901318_CX_CTRL_PRDD_SOURCE, 464 COH901318_CX_CTRL_PRDD_SOURCE,
465 .param.ctrl_lli_last = 0 | 465 .param.ctrl_lli_last = 0 |
466 COH901318_CX_CTRL_TC_ENABLE | 466 COH901318_CX_CTRL_TC_ENABLE |
467 COH901318_CX_CTRL_BURST_COUNT_32_BYTES | 467 COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
468 COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | 468 COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
469 COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE | 469 COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
470 COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | 470 COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
471 COH901318_CX_CTRL_DST_ADDR_INC_DISABLE | 471 COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
472 COH901318_CX_CTRL_MASTER_MODE_M1R_M2W | 472 COH901318_CX_CTRL_MASTER_MODE_M1R_M2W |
473 COH901318_CX_CTRL_TCP_ENABLE | 473 COH901318_CX_CTRL_TCP_ENABLE |
474 COH901318_CX_CTRL_TC_IRQ_ENABLE | 474 COH901318_CX_CTRL_TC_IRQ_ENABLE |
475 COH901318_CX_CTRL_HSP_ENABLE | 475 COH901318_CX_CTRL_HSP_ENABLE |
476 COH901318_CX_CTRL_HSS_DISABLE | 476 COH901318_CX_CTRL_HSS_DISABLE |
477 COH901318_CX_CTRL_DDMA_LEGACY | 477 COH901318_CX_CTRL_DDMA_LEGACY |
478 COH901318_CX_CTRL_PRDD_SOURCE, 478 COH901318_CX_CTRL_PRDD_SOURCE,
479 }, 479 },
480 { 480 {
481 .number = U300_DMA_MSL_TX_5, 481 .number = U300_DMA_MSL_TX_5,
482 .name = "MSL TX 5", 482 .name = "MSL TX 5",
483 .priority_high = 0, 483 .priority_high = 0,
484 }, 484 },
485 { 485 {
486 .number = U300_DMA_MSL_TX_6, 486 .number = U300_DMA_MSL_TX_6,
487 .name = "MSL TX 6", 487 .name = "MSL TX 6",
488 .priority_high = 0, 488 .priority_high = 0,
489 }, 489 },
490 { 490 {
491 .number = U300_DMA_MSL_RX_0, 491 .number = U300_DMA_MSL_RX_0,
492 .name = "MSL RX 0", 492 .name = "MSL RX 0",
493 .priority_high = 0, 493 .priority_high = 0,
494 }, 494 },
495 { 495 {
496 .number = U300_DMA_MSL_RX_1, 496 .number = U300_DMA_MSL_RX_1,
497 .name = "MSL RX 1", 497 .name = "MSL RX 1",
498 .priority_high = 0, 498 .priority_high = 0,
499 .param.config = COH901318_CX_CFG_CH_DISABLE | 499 .param.config = COH901318_CX_CFG_CH_DISABLE |
500 COH901318_CX_CFG_LCR_DISABLE | 500 COH901318_CX_CFG_LCR_DISABLE |
501 COH901318_CX_CFG_TC_IRQ_ENABLE | 501 COH901318_CX_CFG_TC_IRQ_ENABLE |
502 COH901318_CX_CFG_BE_IRQ_ENABLE, 502 COH901318_CX_CFG_BE_IRQ_ENABLE,
503 .param.ctrl_lli_chained = 0 | 503 .param.ctrl_lli_chained = 0 |
504 COH901318_CX_CTRL_TC_ENABLE | 504 COH901318_CX_CTRL_TC_ENABLE |
505 COH901318_CX_CTRL_BURST_COUNT_32_BYTES | 505 COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
506 COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | 506 COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
507 COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE | 507 COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
508 COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | 508 COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
509 COH901318_CX_CTRL_DST_ADDR_INC_ENABLE | 509 COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
510 COH901318_CX_CTRL_MASTER_MODE_M2R_M1W | 510 COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
511 COH901318_CX_CTRL_TCP_DISABLE | 511 COH901318_CX_CTRL_TCP_DISABLE |
512 COH901318_CX_CTRL_TC_IRQ_DISABLE | 512 COH901318_CX_CTRL_TC_IRQ_DISABLE |
513 COH901318_CX_CTRL_HSP_ENABLE | 513 COH901318_CX_CTRL_HSP_ENABLE |
514 COH901318_CX_CTRL_HSS_DISABLE | 514 COH901318_CX_CTRL_HSS_DISABLE |
515 COH901318_CX_CTRL_DDMA_DEMAND_DMA1 | 515 COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
516 COH901318_CX_CTRL_PRDD_DEST, 516 COH901318_CX_CTRL_PRDD_DEST,
517 .param.ctrl_lli = 0, 517 .param.ctrl_lli = 0,
518 .param.ctrl_lli_last = 0 | 518 .param.ctrl_lli_last = 0 |
519 COH901318_CX_CTRL_TC_ENABLE | 519 COH901318_CX_CTRL_TC_ENABLE |
520 COH901318_CX_CTRL_BURST_COUNT_32_BYTES | 520 COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
521 COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | 521 COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
522 COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE | 522 COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
523 COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | 523 COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
524 COH901318_CX_CTRL_DST_ADDR_INC_ENABLE | 524 COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
525 COH901318_CX_CTRL_MASTER_MODE_M2R_M1W | 525 COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
526 COH901318_CX_CTRL_TCP_DISABLE | 526 COH901318_CX_CTRL_TCP_DISABLE |
527 COH901318_CX_CTRL_TC_IRQ_ENABLE | 527 COH901318_CX_CTRL_TC_IRQ_ENABLE |
528 COH901318_CX_CTRL_HSP_ENABLE | 528 COH901318_CX_CTRL_HSP_ENABLE |
529 COH901318_CX_CTRL_HSS_DISABLE | 529 COH901318_CX_CTRL_HSS_DISABLE |
530 COH901318_CX_CTRL_DDMA_DEMAND_DMA1 | 530 COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
531 COH901318_CX_CTRL_PRDD_DEST, 531 COH901318_CX_CTRL_PRDD_DEST,
532 }, 532 },
533 { 533 {
534 .number = U300_DMA_MSL_RX_2, 534 .number = U300_DMA_MSL_RX_2,
535 .name = "MSL RX 2", 535 .name = "MSL RX 2",
536 .priority_high = 0, 536 .priority_high = 0,
537 .param.config = COH901318_CX_CFG_CH_DISABLE | 537 .param.config = COH901318_CX_CFG_CH_DISABLE |
538 COH901318_CX_CFG_LCR_DISABLE | 538 COH901318_CX_CFG_LCR_DISABLE |
539 COH901318_CX_CFG_TC_IRQ_ENABLE | 539 COH901318_CX_CFG_TC_IRQ_ENABLE |
540 COH901318_CX_CFG_BE_IRQ_ENABLE, 540 COH901318_CX_CFG_BE_IRQ_ENABLE,
541 .param.ctrl_lli_chained = 0 | 541 .param.ctrl_lli_chained = 0 |
542 COH901318_CX_CTRL_TC_ENABLE | 542 COH901318_CX_CTRL_TC_ENABLE |
543 COH901318_CX_CTRL_BURST_COUNT_32_BYTES | 543 COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
544 COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | 544 COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
545 COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE | 545 COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
546 COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | 546 COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
547 COH901318_CX_CTRL_DST_ADDR_INC_ENABLE | 547 COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
548 COH901318_CX_CTRL_MASTER_MODE_M2R_M1W | 548 COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
549 COH901318_CX_CTRL_TCP_DISABLE | 549 COH901318_CX_CTRL_TCP_DISABLE |
550 COH901318_CX_CTRL_TC_IRQ_DISABLE | 550 COH901318_CX_CTRL_TC_IRQ_DISABLE |
551 COH901318_CX_CTRL_HSP_ENABLE | 551 COH901318_CX_CTRL_HSP_ENABLE |
552 COH901318_CX_CTRL_HSS_DISABLE | 552 COH901318_CX_CTRL_HSS_DISABLE |
553 COH901318_CX_CTRL_DDMA_DEMAND_DMA1 | 553 COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
554 COH901318_CX_CTRL_PRDD_DEST, 554 COH901318_CX_CTRL_PRDD_DEST,
555 .param.ctrl_lli = 0 | 555 .param.ctrl_lli = 0 |
556 COH901318_CX_CTRL_TC_ENABLE | 556 COH901318_CX_CTRL_TC_ENABLE |
557 COH901318_CX_CTRL_BURST_COUNT_32_BYTES | 557 COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
558 COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | 558 COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
559 COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE | 559 COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
560 COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | 560 COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
561 COH901318_CX_CTRL_DST_ADDR_INC_ENABLE | 561 COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
562 COH901318_CX_CTRL_MASTER_MODE_M2R_M1W | 562 COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
563 COH901318_CX_CTRL_TCP_DISABLE | 563 COH901318_CX_CTRL_TCP_DISABLE |
564 COH901318_CX_CTRL_TC_IRQ_ENABLE | 564 COH901318_CX_CTRL_TC_IRQ_ENABLE |
565 COH901318_CX_CTRL_HSP_ENABLE | 565 COH901318_CX_CTRL_HSP_ENABLE |
566 COH901318_CX_CTRL_HSS_DISABLE | 566 COH901318_CX_CTRL_HSS_DISABLE |
567 COH901318_CX_CTRL_DDMA_DEMAND_DMA1 | 567 COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
568 COH901318_CX_CTRL_PRDD_DEST, 568 COH901318_CX_CTRL_PRDD_DEST,
569 .param.ctrl_lli_last = 0 | 569 .param.ctrl_lli_last = 0 |
570 COH901318_CX_CTRL_TC_ENABLE | 570 COH901318_CX_CTRL_TC_ENABLE |
571 COH901318_CX_CTRL_BURST_COUNT_32_BYTES | 571 COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
572 COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | 572 COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
573 COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE | 573 COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
574 COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | 574 COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
575 COH901318_CX_CTRL_DST_ADDR_INC_ENABLE | 575 COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
576 COH901318_CX_CTRL_MASTER_MODE_M2R_M1W | 576 COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
577 COH901318_CX_CTRL_TCP_DISABLE | 577 COH901318_CX_CTRL_TCP_DISABLE |
578 COH901318_CX_CTRL_TC_IRQ_ENABLE | 578 COH901318_CX_CTRL_TC_IRQ_ENABLE |
579 COH901318_CX_CTRL_HSP_ENABLE | 579 COH901318_CX_CTRL_HSP_ENABLE |
580 COH901318_CX_CTRL_HSS_DISABLE | 580 COH901318_CX_CTRL_HSS_DISABLE |
581 COH901318_CX_CTRL_DDMA_DEMAND_DMA1 | 581 COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
582 COH901318_CX_CTRL_PRDD_DEST, 582 COH901318_CX_CTRL_PRDD_DEST,
583 }, 583 },
584 { 584 {
585 .number = U300_DMA_MSL_RX_3, 585 .number = U300_DMA_MSL_RX_3,
586 .name = "MSL RX 3", 586 .name = "MSL RX 3",
587 .priority_high = 0, 587 .priority_high = 0,
588 .param.config = COH901318_CX_CFG_CH_DISABLE | 588 .param.config = COH901318_CX_CFG_CH_DISABLE |
589 COH901318_CX_CFG_LCR_DISABLE | 589 COH901318_CX_CFG_LCR_DISABLE |
590 COH901318_CX_CFG_TC_IRQ_ENABLE | 590 COH901318_CX_CFG_TC_IRQ_ENABLE |
591 COH901318_CX_CFG_BE_IRQ_ENABLE, 591 COH901318_CX_CFG_BE_IRQ_ENABLE,
592 .param.ctrl_lli_chained = 0 | 592 .param.ctrl_lli_chained = 0 |
593 COH901318_CX_CTRL_TC_ENABLE | 593 COH901318_CX_CTRL_TC_ENABLE |
594 COH901318_CX_CTRL_BURST_COUNT_32_BYTES | 594 COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
595 COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | 595 COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
596 COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE | 596 COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
597 COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | 597 COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
598 COH901318_CX_CTRL_DST_ADDR_INC_ENABLE | 598 COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
599 COH901318_CX_CTRL_MASTER_MODE_M2R_M1W | 599 COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
600 COH901318_CX_CTRL_TCP_DISABLE | 600 COH901318_CX_CTRL_TCP_DISABLE |
601 COH901318_CX_CTRL_TC_IRQ_DISABLE | 601 COH901318_CX_CTRL_TC_IRQ_DISABLE |
602 COH901318_CX_CTRL_HSP_ENABLE | 602 COH901318_CX_CTRL_HSP_ENABLE |
603 COH901318_CX_CTRL_HSS_DISABLE | 603 COH901318_CX_CTRL_HSS_DISABLE |
604 COH901318_CX_CTRL_DDMA_DEMAND_DMA1 | 604 COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
605 COH901318_CX_CTRL_PRDD_DEST, 605 COH901318_CX_CTRL_PRDD_DEST,
606 .param.ctrl_lli = 0 | 606 .param.ctrl_lli = 0 |
607 COH901318_CX_CTRL_TC_ENABLE | 607 COH901318_CX_CTRL_TC_ENABLE |
608 COH901318_CX_CTRL_BURST_COUNT_32_BYTES | 608 COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
609 COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | 609 COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
610 COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE | 610 COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
611 COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | 611 COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
612 COH901318_CX_CTRL_DST_ADDR_INC_ENABLE | 612 COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
613 COH901318_CX_CTRL_MASTER_MODE_M2R_M1W | 613 COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
614 COH901318_CX_CTRL_TCP_DISABLE | 614 COH901318_CX_CTRL_TCP_DISABLE |
615 COH901318_CX_CTRL_TC_IRQ_ENABLE | 615 COH901318_CX_CTRL_TC_IRQ_ENABLE |
616 COH901318_CX_CTRL_HSP_ENABLE | 616 COH901318_CX_CTRL_HSP_ENABLE |
617 COH901318_CX_CTRL_HSS_DISABLE | 617 COH901318_CX_CTRL_HSS_DISABLE |
618 COH901318_CX_CTRL_DDMA_DEMAND_DMA1 | 618 COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
619 COH901318_CX_CTRL_PRDD_DEST, 619 COH901318_CX_CTRL_PRDD_DEST,
620 .param.ctrl_lli_last = 0 | 620 .param.ctrl_lli_last = 0 |
621 COH901318_CX_CTRL_TC_ENABLE | 621 COH901318_CX_CTRL_TC_ENABLE |
622 COH901318_CX_CTRL_BURST_COUNT_32_BYTES | 622 COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
623 COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | 623 COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
624 COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE | 624 COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
625 COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | 625 COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
626 COH901318_CX_CTRL_DST_ADDR_INC_ENABLE | 626 COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
627 COH901318_CX_CTRL_MASTER_MODE_M2R_M1W | 627 COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
628 COH901318_CX_CTRL_TCP_DISABLE | 628 COH901318_CX_CTRL_TCP_DISABLE |
629 COH901318_CX_CTRL_TC_IRQ_ENABLE | 629 COH901318_CX_CTRL_TC_IRQ_ENABLE |
630 COH901318_CX_CTRL_HSP_ENABLE | 630 COH901318_CX_CTRL_HSP_ENABLE |
631 COH901318_CX_CTRL_HSS_DISABLE | 631 COH901318_CX_CTRL_HSS_DISABLE |
632 COH901318_CX_CTRL_DDMA_DEMAND_DMA1 | 632 COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
633 COH901318_CX_CTRL_PRDD_DEST, 633 COH901318_CX_CTRL_PRDD_DEST,
634 }, 634 },
635 { 635 {
636 .number = U300_DMA_MSL_RX_4, 636 .number = U300_DMA_MSL_RX_4,
637 .name = "MSL RX 4", 637 .name = "MSL RX 4",
638 .priority_high = 0, 638 .priority_high = 0,
639 .param.config = COH901318_CX_CFG_CH_DISABLE | 639 .param.config = COH901318_CX_CFG_CH_DISABLE |
640 COH901318_CX_CFG_LCR_DISABLE | 640 COH901318_CX_CFG_LCR_DISABLE |
641 COH901318_CX_CFG_TC_IRQ_ENABLE | 641 COH901318_CX_CFG_TC_IRQ_ENABLE |
642 COH901318_CX_CFG_BE_IRQ_ENABLE, 642 COH901318_CX_CFG_BE_IRQ_ENABLE,
643 .param.ctrl_lli_chained = 0 | 643 .param.ctrl_lli_chained = 0 |
644 COH901318_CX_CTRL_TC_ENABLE | 644 COH901318_CX_CTRL_TC_ENABLE |
645 COH901318_CX_CTRL_BURST_COUNT_32_BYTES | 645 COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
646 COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | 646 COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
647 COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE | 647 COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
648 COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | 648 COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
649 COH901318_CX_CTRL_DST_ADDR_INC_ENABLE | 649 COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
650 COH901318_CX_CTRL_MASTER_MODE_M2R_M1W | 650 COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
651 COH901318_CX_CTRL_TCP_DISABLE | 651 COH901318_CX_CTRL_TCP_DISABLE |
652 COH901318_CX_CTRL_TC_IRQ_DISABLE | 652 COH901318_CX_CTRL_TC_IRQ_DISABLE |
653 COH901318_CX_CTRL_HSP_ENABLE | 653 COH901318_CX_CTRL_HSP_ENABLE |
654 COH901318_CX_CTRL_HSS_DISABLE | 654 COH901318_CX_CTRL_HSS_DISABLE |
655 COH901318_CX_CTRL_DDMA_DEMAND_DMA1 | 655 COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
656 COH901318_CX_CTRL_PRDD_DEST, 656 COH901318_CX_CTRL_PRDD_DEST,
657 .param.ctrl_lli = 0 | 657 .param.ctrl_lli = 0 |
658 COH901318_CX_CTRL_TC_ENABLE | 658 COH901318_CX_CTRL_TC_ENABLE |
659 COH901318_CX_CTRL_BURST_COUNT_32_BYTES | 659 COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
660 COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | 660 COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
661 COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE | 661 COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
662 COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | 662 COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
663 COH901318_CX_CTRL_DST_ADDR_INC_ENABLE | 663 COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
664 COH901318_CX_CTRL_MASTER_MODE_M2R_M1W | 664 COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
665 COH901318_CX_CTRL_TCP_DISABLE | 665 COH901318_CX_CTRL_TCP_DISABLE |
666 COH901318_CX_CTRL_TC_IRQ_ENABLE | 666 COH901318_CX_CTRL_TC_IRQ_ENABLE |
667 COH901318_CX_CTRL_HSP_ENABLE | 667 COH901318_CX_CTRL_HSP_ENABLE |
668 COH901318_CX_CTRL_HSS_DISABLE | 668 COH901318_CX_CTRL_HSS_DISABLE |
669 COH901318_CX_CTRL_DDMA_DEMAND_DMA1 | 669 COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
670 COH901318_CX_CTRL_PRDD_DEST, 670 COH901318_CX_CTRL_PRDD_DEST,
671 .param.ctrl_lli_last = 0 | 671 .param.ctrl_lli_last = 0 |
672 COH901318_CX_CTRL_TC_ENABLE | 672 COH901318_CX_CTRL_TC_ENABLE |
673 COH901318_CX_CTRL_BURST_COUNT_32_BYTES | 673 COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
674 COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | 674 COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
675 COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE | 675 COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
676 COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | 676 COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
677 COH901318_CX_CTRL_DST_ADDR_INC_ENABLE | 677 COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
678 COH901318_CX_CTRL_MASTER_MODE_M2R_M1W | 678 COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
679 COH901318_CX_CTRL_TCP_DISABLE | 679 COH901318_CX_CTRL_TCP_DISABLE |
680 COH901318_CX_CTRL_TC_IRQ_ENABLE | 680 COH901318_CX_CTRL_TC_IRQ_ENABLE |
681 COH901318_CX_CTRL_HSP_ENABLE | 681 COH901318_CX_CTRL_HSP_ENABLE |
682 COH901318_CX_CTRL_HSS_DISABLE | 682 COH901318_CX_CTRL_HSS_DISABLE |
683 COH901318_CX_CTRL_DDMA_DEMAND_DMA1 | 683 COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
684 COH901318_CX_CTRL_PRDD_DEST, 684 COH901318_CX_CTRL_PRDD_DEST,
685 }, 685 },
686 { 686 {
687 .number = U300_DMA_MSL_RX_5, 687 .number = U300_DMA_MSL_RX_5,
688 .name = "MSL RX 5", 688 .name = "MSL RX 5",
689 .priority_high = 0, 689 .priority_high = 0,
690 .param.config = COH901318_CX_CFG_CH_DISABLE | 690 .param.config = COH901318_CX_CFG_CH_DISABLE |
691 COH901318_CX_CFG_LCR_DISABLE | 691 COH901318_CX_CFG_LCR_DISABLE |
692 COH901318_CX_CFG_TC_IRQ_ENABLE | 692 COH901318_CX_CFG_TC_IRQ_ENABLE |
693 COH901318_CX_CFG_BE_IRQ_ENABLE, 693 COH901318_CX_CFG_BE_IRQ_ENABLE,
694 .param.ctrl_lli_chained = 0 | 694 .param.ctrl_lli_chained = 0 |
695 COH901318_CX_CTRL_TC_ENABLE | 695 COH901318_CX_CTRL_TC_ENABLE |
696 COH901318_CX_CTRL_BURST_COUNT_32_BYTES | 696 COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
697 COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | 697 COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
698 COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE | 698 COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
699 COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | 699 COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
700 COH901318_CX_CTRL_DST_ADDR_INC_ENABLE | 700 COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
701 COH901318_CX_CTRL_MASTER_MODE_M2R_M1W | 701 COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
702 COH901318_CX_CTRL_TCP_DISABLE | 702 COH901318_CX_CTRL_TCP_DISABLE |
703 COH901318_CX_CTRL_TC_IRQ_DISABLE | 703 COH901318_CX_CTRL_TC_IRQ_DISABLE |
704 COH901318_CX_CTRL_HSP_ENABLE | 704 COH901318_CX_CTRL_HSP_ENABLE |
705 COH901318_CX_CTRL_HSS_DISABLE | 705 COH901318_CX_CTRL_HSS_DISABLE |
706 COH901318_CX_CTRL_DDMA_DEMAND_DMA1 | 706 COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
707 COH901318_CX_CTRL_PRDD_DEST, 707 COH901318_CX_CTRL_PRDD_DEST,
708 .param.ctrl_lli = 0 | 708 .param.ctrl_lli = 0 |
709 COH901318_CX_CTRL_TC_ENABLE | 709 COH901318_CX_CTRL_TC_ENABLE |
710 COH901318_CX_CTRL_BURST_COUNT_32_BYTES | 710 COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
711 COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | 711 COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
712 COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE | 712 COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
713 COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | 713 COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
714 COH901318_CX_CTRL_DST_ADDR_INC_ENABLE | 714 COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
715 COH901318_CX_CTRL_MASTER_MODE_M2R_M1W | 715 COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
716 COH901318_CX_CTRL_TCP_DISABLE | 716 COH901318_CX_CTRL_TCP_DISABLE |
717 COH901318_CX_CTRL_TC_IRQ_ENABLE | 717 COH901318_CX_CTRL_TC_IRQ_ENABLE |
718 COH901318_CX_CTRL_HSP_ENABLE | 718 COH901318_CX_CTRL_HSP_ENABLE |
719 COH901318_CX_CTRL_HSS_DISABLE | 719 COH901318_CX_CTRL_HSS_DISABLE |
720 COH901318_CX_CTRL_DDMA_DEMAND_DMA1 | 720 COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
721 COH901318_CX_CTRL_PRDD_DEST, 721 COH901318_CX_CTRL_PRDD_DEST,
722 .param.ctrl_lli_last = 0 | 722 .param.ctrl_lli_last = 0 |
723 COH901318_CX_CTRL_TC_ENABLE | 723 COH901318_CX_CTRL_TC_ENABLE |
724 COH901318_CX_CTRL_BURST_COUNT_32_BYTES | 724 COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
725 COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | 725 COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
726 COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE | 726 COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
727 COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | 727 COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
728 COH901318_CX_CTRL_DST_ADDR_INC_ENABLE | 728 COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
729 COH901318_CX_CTRL_MASTER_MODE_M2R_M1W | 729 COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
730 COH901318_CX_CTRL_TCP_DISABLE | 730 COH901318_CX_CTRL_TCP_DISABLE |
731 COH901318_CX_CTRL_TC_IRQ_ENABLE | 731 COH901318_CX_CTRL_TC_IRQ_ENABLE |
732 COH901318_CX_CTRL_HSP_ENABLE | 732 COH901318_CX_CTRL_HSP_ENABLE |
733 COH901318_CX_CTRL_HSS_DISABLE | 733 COH901318_CX_CTRL_HSS_DISABLE |
734 COH901318_CX_CTRL_DDMA_DEMAND_DMA1 | 734 COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
735 COH901318_CX_CTRL_PRDD_DEST, 735 COH901318_CX_CTRL_PRDD_DEST,
736 }, 736 },
737 { 737 {
738 .number = U300_DMA_MSL_RX_6, 738 .number = U300_DMA_MSL_RX_6,
739 .name = "MSL RX 6", 739 .name = "MSL RX 6",
740 .priority_high = 0, 740 .priority_high = 0,
741 }, 741 },
742 /* 742 /*
743 * Don't set up device address, burst count or size of src 743 * Don't set up device address, burst count or size of src
744 * or dst bus for this peripheral - handled by PrimeCell 744 * or dst bus for this peripheral - handled by PrimeCell
745 * DMA extension. 745 * DMA extension.
746 */ 746 */
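/*
 * Illustrative sketch (assumed usage, not taken from this file): for
 * these PrimeCell-managed channels the pieces this table omits - the
 * device FIFO address, burst count and bus width - are expected to be
 * supplied at runtime by the client driver through the dmaengine slave
 * API, along the lines of:
 *
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_MEM_TO_DEV,
 *		.dst_addr	= fifo_addr,
 *		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst	= 8,
 *	};
 *	dmaengine_slave_config(chan, &cfg);
 *
 * "fifo_addr", "chan" and the burst value are placeholders; the real
 * numbers come from the PrimeCell client drivers themselves.
 */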
747 { 747 {
748 .number = U300_DMA_MMCSD_RX_TX, 748 .number = U300_DMA_MMCSD_RX_TX,
749 .name = "MMCSD RX TX", 749 .name = "MMCSD RX TX",
750 .priority_high = 0, 750 .priority_high = 0,
751 .param.config = COH901318_CX_CFG_CH_DISABLE | 751 .param.config = COH901318_CX_CFG_CH_DISABLE |
752 COH901318_CX_CFG_LCR_DISABLE | 752 COH901318_CX_CFG_LCR_DISABLE |
753 COH901318_CX_CFG_TC_IRQ_ENABLE | 753 COH901318_CX_CFG_TC_IRQ_ENABLE |
754 COH901318_CX_CFG_BE_IRQ_ENABLE, 754 COH901318_CX_CFG_BE_IRQ_ENABLE,
755 .param.ctrl_lli_chained = 0 | 755 .param.ctrl_lli_chained = 0 |
756 COH901318_CX_CTRL_TC_ENABLE | 756 COH901318_CX_CTRL_TC_ENABLE |
757 COH901318_CX_CTRL_MASTER_MODE_M1RW | 757 COH901318_CX_CTRL_MASTER_MODE_M1RW |
758 COH901318_CX_CTRL_TCP_ENABLE | 758 COH901318_CX_CTRL_TCP_ENABLE |
759 COH901318_CX_CTRL_TC_IRQ_DISABLE | 759 COH901318_CX_CTRL_TC_IRQ_DISABLE |
760 COH901318_CX_CTRL_HSP_ENABLE | 760 COH901318_CX_CTRL_HSP_ENABLE |
761 COH901318_CX_CTRL_HSS_DISABLE | 761 COH901318_CX_CTRL_HSS_DISABLE |
762 COH901318_CX_CTRL_DDMA_LEGACY, 762 COH901318_CX_CTRL_DDMA_LEGACY,
763 .param.ctrl_lli = 0 | 763 .param.ctrl_lli = 0 |
764 COH901318_CX_CTRL_TC_ENABLE | 764 COH901318_CX_CTRL_TC_ENABLE |
765 COH901318_CX_CTRL_MASTER_MODE_M1RW | 765 COH901318_CX_CTRL_MASTER_MODE_M1RW |
766 COH901318_CX_CTRL_TCP_ENABLE | 766 COH901318_CX_CTRL_TCP_ENABLE |
767 COH901318_CX_CTRL_TC_IRQ_DISABLE | 767 COH901318_CX_CTRL_TC_IRQ_DISABLE |
768 COH901318_CX_CTRL_HSP_ENABLE | 768 COH901318_CX_CTRL_HSP_ENABLE |
769 COH901318_CX_CTRL_HSS_DISABLE | 769 COH901318_CX_CTRL_HSS_DISABLE |
770 COH901318_CX_CTRL_DDMA_LEGACY, 770 COH901318_CX_CTRL_DDMA_LEGACY,
771 .param.ctrl_lli_last = 0 | 771 .param.ctrl_lli_last = 0 |
772 COH901318_CX_CTRL_TC_ENABLE | 772 COH901318_CX_CTRL_TC_ENABLE |
773 COH901318_CX_CTRL_MASTER_MODE_M1RW | 773 COH901318_CX_CTRL_MASTER_MODE_M1RW |
774 COH901318_CX_CTRL_TCP_DISABLE | 774 COH901318_CX_CTRL_TCP_DISABLE |
775 COH901318_CX_CTRL_TC_IRQ_ENABLE | 775 COH901318_CX_CTRL_TC_IRQ_ENABLE |
776 COH901318_CX_CTRL_HSP_ENABLE | 776 COH901318_CX_CTRL_HSP_ENABLE |
777 COH901318_CX_CTRL_HSS_DISABLE | 777 COH901318_CX_CTRL_HSS_DISABLE |
778 COH901318_CX_CTRL_DDMA_LEGACY, 778 COH901318_CX_CTRL_DDMA_LEGACY,
779 779
780 }, 780 },
781 { 781 {
782 .number = U300_DMA_MSPRO_TX, 782 .number = U300_DMA_MSPRO_TX,
783 .name = "MSPRO TX", 783 .name = "MSPRO TX",
784 .priority_high = 0, 784 .priority_high = 0,
785 }, 785 },
786 { 786 {
787 .number = U300_DMA_MSPRO_RX, 787 .number = U300_DMA_MSPRO_RX,
788 .name = "MSPRO RX", 788 .name = "MSPRO RX",
789 .priority_high = 0, 789 .priority_high = 0,
790 }, 790 },
791 /* 791 /*
792 * Don't set up device address, burst count or size of src 792 * Don't set up device address, burst count or size of src
793 * or dst bus for this peripheral - handled by PrimeCell 793 * or dst bus for this peripheral - handled by PrimeCell
794 * DMA extension. 794 * DMA extension.
795 */ 795 */
796 { 796 {
797 .number = U300_DMA_UART0_TX, 797 .number = U300_DMA_UART0_TX,
798 .name = "UART0 TX", 798 .name = "UART0 TX",
799 .priority_high = 0, 799 .priority_high = 0,
800 .param.config = COH901318_CX_CFG_CH_DISABLE | 800 .param.config = COH901318_CX_CFG_CH_DISABLE |
801 COH901318_CX_CFG_LCR_DISABLE | 801 COH901318_CX_CFG_LCR_DISABLE |
802 COH901318_CX_CFG_TC_IRQ_ENABLE | 802 COH901318_CX_CFG_TC_IRQ_ENABLE |
803 COH901318_CX_CFG_BE_IRQ_ENABLE, 803 COH901318_CX_CFG_BE_IRQ_ENABLE,
804 .param.ctrl_lli_chained = 0 | 804 .param.ctrl_lli_chained = 0 |
805 COH901318_CX_CTRL_TC_ENABLE | 805 COH901318_CX_CTRL_TC_ENABLE |
806 COH901318_CX_CTRL_MASTER_MODE_M1RW | 806 COH901318_CX_CTRL_MASTER_MODE_M1RW |
807 COH901318_CX_CTRL_TCP_ENABLE | 807 COH901318_CX_CTRL_TCP_ENABLE |
808 COH901318_CX_CTRL_TC_IRQ_DISABLE | 808 COH901318_CX_CTRL_TC_IRQ_DISABLE |
809 COH901318_CX_CTRL_HSP_ENABLE | 809 COH901318_CX_CTRL_HSP_ENABLE |
810 COH901318_CX_CTRL_HSS_DISABLE | 810 COH901318_CX_CTRL_HSS_DISABLE |
811 COH901318_CX_CTRL_DDMA_LEGACY, 811 COH901318_CX_CTRL_DDMA_LEGACY,
812 .param.ctrl_lli = 0 | 812 .param.ctrl_lli = 0 |
813 COH901318_CX_CTRL_TC_ENABLE | 813 COH901318_CX_CTRL_TC_ENABLE |
814 COH901318_CX_CTRL_MASTER_MODE_M1RW | 814 COH901318_CX_CTRL_MASTER_MODE_M1RW |
815 COH901318_CX_CTRL_TCP_ENABLE | 815 COH901318_CX_CTRL_TCP_ENABLE |
816 COH901318_CX_CTRL_TC_IRQ_ENABLE | 816 COH901318_CX_CTRL_TC_IRQ_ENABLE |
817 COH901318_CX_CTRL_HSP_ENABLE | 817 COH901318_CX_CTRL_HSP_ENABLE |
818 COH901318_CX_CTRL_HSS_DISABLE | 818 COH901318_CX_CTRL_HSS_DISABLE |
819 COH901318_CX_CTRL_DDMA_LEGACY, 819 COH901318_CX_CTRL_DDMA_LEGACY,
820 .param.ctrl_lli_last = 0 | 820 .param.ctrl_lli_last = 0 |
821 COH901318_CX_CTRL_TC_ENABLE | 821 COH901318_CX_CTRL_TC_ENABLE |
822 COH901318_CX_CTRL_MASTER_MODE_M1RW | 822 COH901318_CX_CTRL_MASTER_MODE_M1RW |
823 COH901318_CX_CTRL_TCP_ENABLE | 823 COH901318_CX_CTRL_TCP_ENABLE |
824 COH901318_CX_CTRL_TC_IRQ_ENABLE | 824 COH901318_CX_CTRL_TC_IRQ_ENABLE |
825 COH901318_CX_CTRL_HSP_ENABLE | 825 COH901318_CX_CTRL_HSP_ENABLE |
826 COH901318_CX_CTRL_HSS_DISABLE | 826 COH901318_CX_CTRL_HSS_DISABLE |
827 COH901318_CX_CTRL_DDMA_LEGACY, 827 COH901318_CX_CTRL_DDMA_LEGACY,
828 }, 828 },
829 { 829 {
830 .number = U300_DMA_UART0_RX, 830 .number = U300_DMA_UART0_RX,
831 .name = "UART0 RX", 831 .name = "UART0 RX",
832 .priority_high = 0, 832 .priority_high = 0,
833 .param.config = COH901318_CX_CFG_CH_DISABLE | 833 .param.config = COH901318_CX_CFG_CH_DISABLE |
834 COH901318_CX_CFG_LCR_DISABLE | 834 COH901318_CX_CFG_LCR_DISABLE |
835 COH901318_CX_CFG_TC_IRQ_ENABLE | 835 COH901318_CX_CFG_TC_IRQ_ENABLE |
836 COH901318_CX_CFG_BE_IRQ_ENABLE, 836 COH901318_CX_CFG_BE_IRQ_ENABLE,
837 .param.ctrl_lli_chained = 0 | 837 .param.ctrl_lli_chained = 0 |
838 COH901318_CX_CTRL_TC_ENABLE | 838 COH901318_CX_CTRL_TC_ENABLE |
839 COH901318_CX_CTRL_MASTER_MODE_M1RW | 839 COH901318_CX_CTRL_MASTER_MODE_M1RW |
840 COH901318_CX_CTRL_TCP_ENABLE | 840 COH901318_CX_CTRL_TCP_ENABLE |
841 COH901318_CX_CTRL_TC_IRQ_DISABLE | 841 COH901318_CX_CTRL_TC_IRQ_DISABLE |
842 COH901318_CX_CTRL_HSP_ENABLE | 842 COH901318_CX_CTRL_HSP_ENABLE |
843 COH901318_CX_CTRL_HSS_DISABLE | 843 COH901318_CX_CTRL_HSS_DISABLE |
844 COH901318_CX_CTRL_DDMA_LEGACY, 844 COH901318_CX_CTRL_DDMA_LEGACY,
845 .param.ctrl_lli = 0 | 845 .param.ctrl_lli = 0 |
846 COH901318_CX_CTRL_TC_ENABLE | 846 COH901318_CX_CTRL_TC_ENABLE |
847 COH901318_CX_CTRL_MASTER_MODE_M1RW | 847 COH901318_CX_CTRL_MASTER_MODE_M1RW |
848 COH901318_CX_CTRL_TCP_ENABLE | 848 COH901318_CX_CTRL_TCP_ENABLE |
849 COH901318_CX_CTRL_TC_IRQ_ENABLE | 849 COH901318_CX_CTRL_TC_IRQ_ENABLE |
850 COH901318_CX_CTRL_HSP_ENABLE | 850 COH901318_CX_CTRL_HSP_ENABLE |
851 COH901318_CX_CTRL_HSS_DISABLE | 851 COH901318_CX_CTRL_HSS_DISABLE |
852 COH901318_CX_CTRL_DDMA_LEGACY, 852 COH901318_CX_CTRL_DDMA_LEGACY,
853 .param.ctrl_lli_last = 0 | 853 .param.ctrl_lli_last = 0 |
854 COH901318_CX_CTRL_TC_ENABLE | 854 COH901318_CX_CTRL_TC_ENABLE |
855 COH901318_CX_CTRL_MASTER_MODE_M1RW | 855 COH901318_CX_CTRL_MASTER_MODE_M1RW |
856 COH901318_CX_CTRL_TCP_ENABLE | 856 COH901318_CX_CTRL_TCP_ENABLE |
857 COH901318_CX_CTRL_TC_IRQ_ENABLE | 857 COH901318_CX_CTRL_TC_IRQ_ENABLE |
858 COH901318_CX_CTRL_HSP_ENABLE | 858 COH901318_CX_CTRL_HSP_ENABLE |
859 COH901318_CX_CTRL_HSS_DISABLE | 859 COH901318_CX_CTRL_HSS_DISABLE |
860 COH901318_CX_CTRL_DDMA_LEGACY, 860 COH901318_CX_CTRL_DDMA_LEGACY,
861 }, 861 },
862 { 862 {
863 .number = U300_DMA_APEX_TX, 863 .number = U300_DMA_APEX_TX,
864 .name = "APEX TX", 864 .name = "APEX TX",
865 .priority_high = 0, 865 .priority_high = 0,
866 }, 866 },
867 { 867 {
868 .number = U300_DMA_APEX_RX, 868 .number = U300_DMA_APEX_RX,
869 .name = "APEX RX", 869 .name = "APEX RX",
870 .priority_high = 0, 870 .priority_high = 0,
871 }, 871 },
872 { 872 {
873 .number = U300_DMA_PCM_I2S0_TX, 873 .number = U300_DMA_PCM_I2S0_TX,
874 .name = "PCM I2S0 TX", 874 .name = "PCM I2S0 TX",
875 .priority_high = 1, 875 .priority_high = 1,
876 .param.config = COH901318_CX_CFG_CH_DISABLE | 876 .param.config = COH901318_CX_CFG_CH_DISABLE |
877 COH901318_CX_CFG_LCR_DISABLE | 877 COH901318_CX_CFG_LCR_DISABLE |
878 COH901318_CX_CFG_TC_IRQ_ENABLE | 878 COH901318_CX_CFG_TC_IRQ_ENABLE |
879 COH901318_CX_CFG_BE_IRQ_ENABLE, 879 COH901318_CX_CFG_BE_IRQ_ENABLE,
880 .param.ctrl_lli_chained = 0 | 880 .param.ctrl_lli_chained = 0 |
881 COH901318_CX_CTRL_TC_ENABLE | 881 COH901318_CX_CTRL_TC_ENABLE |
882 COH901318_CX_CTRL_BURST_COUNT_16_BYTES | 882 COH901318_CX_CTRL_BURST_COUNT_16_BYTES |
883 COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | 883 COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
884 COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE | 884 COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
885 COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | 885 COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
886 COH901318_CX_CTRL_DST_ADDR_INC_DISABLE | 886 COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
887 COH901318_CX_CTRL_MASTER_MODE_M1RW | 887 COH901318_CX_CTRL_MASTER_MODE_M1RW |
888 COH901318_CX_CTRL_TCP_DISABLE | 888 COH901318_CX_CTRL_TCP_DISABLE |
889 COH901318_CX_CTRL_TC_IRQ_DISABLE | 889 COH901318_CX_CTRL_TC_IRQ_DISABLE |
890 COH901318_CX_CTRL_HSP_ENABLE | 890 COH901318_CX_CTRL_HSP_ENABLE |
891 COH901318_CX_CTRL_HSS_DISABLE | 891 COH901318_CX_CTRL_HSS_DISABLE |
892 COH901318_CX_CTRL_DDMA_LEGACY | 892 COH901318_CX_CTRL_DDMA_LEGACY |
893 COH901318_CX_CTRL_PRDD_SOURCE, 893 COH901318_CX_CTRL_PRDD_SOURCE,
894 .param.ctrl_lli = 0 | 894 .param.ctrl_lli = 0 |
895 COH901318_CX_CTRL_TC_ENABLE | 895 COH901318_CX_CTRL_TC_ENABLE |
896 COH901318_CX_CTRL_BURST_COUNT_16_BYTES | 896 COH901318_CX_CTRL_BURST_COUNT_16_BYTES |
897 COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | 897 COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
898 COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE | 898 COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
899 COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | 899 COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
900 COH901318_CX_CTRL_DST_ADDR_INC_DISABLE | 900 COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
901 COH901318_CX_CTRL_MASTER_MODE_M1RW | 901 COH901318_CX_CTRL_MASTER_MODE_M1RW |
902 COH901318_CX_CTRL_TCP_ENABLE | 902 COH901318_CX_CTRL_TCP_ENABLE |
903 COH901318_CX_CTRL_TC_IRQ_DISABLE | 903 COH901318_CX_CTRL_TC_IRQ_DISABLE |
904 COH901318_CX_CTRL_HSP_ENABLE | 904 COH901318_CX_CTRL_HSP_ENABLE |
905 COH901318_CX_CTRL_HSS_DISABLE | 905 COH901318_CX_CTRL_HSS_DISABLE |
906 COH901318_CX_CTRL_DDMA_LEGACY | 906 COH901318_CX_CTRL_DDMA_LEGACY |
907 COH901318_CX_CTRL_PRDD_SOURCE, 907 COH901318_CX_CTRL_PRDD_SOURCE,
908 .param.ctrl_lli_last = 0 | 908 .param.ctrl_lli_last = 0 |
909 COH901318_CX_CTRL_TC_ENABLE | 909 COH901318_CX_CTRL_TC_ENABLE |
910 COH901318_CX_CTRL_BURST_COUNT_16_BYTES | 910 COH901318_CX_CTRL_BURST_COUNT_16_BYTES |
911 COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | 911 COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
912 COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE | 912 COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
913 COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | 913 COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
914 COH901318_CX_CTRL_DST_ADDR_INC_DISABLE | 914 COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
915 COH901318_CX_CTRL_MASTER_MODE_M1RW | 915 COH901318_CX_CTRL_MASTER_MODE_M1RW |
916 COH901318_CX_CTRL_TCP_ENABLE | 916 COH901318_CX_CTRL_TCP_ENABLE |
917 COH901318_CX_CTRL_TC_IRQ_DISABLE | 917 COH901318_CX_CTRL_TC_IRQ_DISABLE |
918 COH901318_CX_CTRL_HSP_ENABLE | 918 COH901318_CX_CTRL_HSP_ENABLE |
919 COH901318_CX_CTRL_HSS_DISABLE | 919 COH901318_CX_CTRL_HSS_DISABLE |
920 COH901318_CX_CTRL_DDMA_LEGACY | 920 COH901318_CX_CTRL_DDMA_LEGACY |
921 COH901318_CX_CTRL_PRDD_SOURCE, 921 COH901318_CX_CTRL_PRDD_SOURCE,
922 }, 922 },
923 { 923 {
924 .number = U300_DMA_PCM_I2S0_RX, 924 .number = U300_DMA_PCM_I2S0_RX,
925 .name = "PCM I2S0 RX", 925 .name = "PCM I2S0 RX",
926 .priority_high = 1, 926 .priority_high = 1,
927 .param.config = COH901318_CX_CFG_CH_DISABLE | 927 .param.config = COH901318_CX_CFG_CH_DISABLE |
928 COH901318_CX_CFG_LCR_DISABLE | 928 COH901318_CX_CFG_LCR_DISABLE |
929 COH901318_CX_CFG_TC_IRQ_ENABLE | 929 COH901318_CX_CFG_TC_IRQ_ENABLE |
930 COH901318_CX_CFG_BE_IRQ_ENABLE, 930 COH901318_CX_CFG_BE_IRQ_ENABLE,
931 .param.ctrl_lli_chained = 0 | 931 .param.ctrl_lli_chained = 0 |
932 COH901318_CX_CTRL_TC_ENABLE | 932 COH901318_CX_CTRL_TC_ENABLE |
933 COH901318_CX_CTRL_BURST_COUNT_16_BYTES | 933 COH901318_CX_CTRL_BURST_COUNT_16_BYTES |
934 COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | 934 COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
935 COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE | 935 COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
936 COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | 936 COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
937 COH901318_CX_CTRL_DST_ADDR_INC_ENABLE | 937 COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
938 COH901318_CX_CTRL_MASTER_MODE_M1RW | 938 COH901318_CX_CTRL_MASTER_MODE_M1RW |
939 COH901318_CX_CTRL_TCP_DISABLE | 939 COH901318_CX_CTRL_TCP_DISABLE |
940 COH901318_CX_CTRL_TC_IRQ_DISABLE | 940 COH901318_CX_CTRL_TC_IRQ_DISABLE |
941 COH901318_CX_CTRL_HSP_ENABLE | 941 COH901318_CX_CTRL_HSP_ENABLE |
942 COH901318_CX_CTRL_HSS_DISABLE | 942 COH901318_CX_CTRL_HSS_DISABLE |
943 COH901318_CX_CTRL_DDMA_LEGACY | 943 COH901318_CX_CTRL_DDMA_LEGACY |
944 COH901318_CX_CTRL_PRDD_DEST, 944 COH901318_CX_CTRL_PRDD_DEST,
945 .param.ctrl_lli = 0 | 945 .param.ctrl_lli = 0 |
946 COH901318_CX_CTRL_TC_ENABLE | 946 COH901318_CX_CTRL_TC_ENABLE |
947 COH901318_CX_CTRL_BURST_COUNT_16_BYTES | 947 COH901318_CX_CTRL_BURST_COUNT_16_BYTES |
948 COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | 948 COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
949 COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE | 949 COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
950 COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | 950 COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
951 COH901318_CX_CTRL_DST_ADDR_INC_ENABLE | 951 COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
952 COH901318_CX_CTRL_MASTER_MODE_M1RW | 952 COH901318_CX_CTRL_MASTER_MODE_M1RW |
953 COH901318_CX_CTRL_TCP_ENABLE | 953 COH901318_CX_CTRL_TCP_ENABLE |
954 COH901318_CX_CTRL_TC_IRQ_DISABLE | 954 COH901318_CX_CTRL_TC_IRQ_DISABLE |
955 COH901318_CX_CTRL_HSP_ENABLE | 955 COH901318_CX_CTRL_HSP_ENABLE |
956 COH901318_CX_CTRL_HSS_DISABLE | 956 COH901318_CX_CTRL_HSS_DISABLE |
957 COH901318_CX_CTRL_DDMA_LEGACY | 957 COH901318_CX_CTRL_DDMA_LEGACY |
958 COH901318_CX_CTRL_PRDD_DEST, 958 COH901318_CX_CTRL_PRDD_DEST,
959 .param.ctrl_lli_last = 0 | 959 .param.ctrl_lli_last = 0 |
960 COH901318_CX_CTRL_TC_ENABLE | 960 COH901318_CX_CTRL_TC_ENABLE |
961 COH901318_CX_CTRL_BURST_COUNT_16_BYTES | 961 COH901318_CX_CTRL_BURST_COUNT_16_BYTES |
962 COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | 962 COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
963 COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE | 963 COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
964 COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | 964 COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
965 COH901318_CX_CTRL_DST_ADDR_INC_ENABLE | 965 COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
966 COH901318_CX_CTRL_MASTER_MODE_M1RW | 966 COH901318_CX_CTRL_MASTER_MODE_M1RW |
967 COH901318_CX_CTRL_TCP_ENABLE | 967 COH901318_CX_CTRL_TCP_ENABLE |
968 COH901318_CX_CTRL_TC_IRQ_ENABLE | 968 COH901318_CX_CTRL_TC_IRQ_ENABLE |
969 COH901318_CX_CTRL_HSP_ENABLE | 969 COH901318_CX_CTRL_HSP_ENABLE |
970 COH901318_CX_CTRL_HSS_DISABLE | 970 COH901318_CX_CTRL_HSS_DISABLE |
971 COH901318_CX_CTRL_DDMA_LEGACY | 971 COH901318_CX_CTRL_DDMA_LEGACY |
972 COH901318_CX_CTRL_PRDD_DEST, 972 COH901318_CX_CTRL_PRDD_DEST,
973 }, 973 },
974 { 974 {
975 .number = U300_DMA_PCM_I2S1_TX, 975 .number = U300_DMA_PCM_I2S1_TX,
976 .name = "PCM I2S1 TX", 976 .name = "PCM I2S1 TX",
977 .priority_high = 1, 977 .priority_high = 1,
978 .param.config = COH901318_CX_CFG_CH_DISABLE | 978 .param.config = COH901318_CX_CFG_CH_DISABLE |
979 COH901318_CX_CFG_LCR_DISABLE | 979 COH901318_CX_CFG_LCR_DISABLE |
980 COH901318_CX_CFG_TC_IRQ_ENABLE | 980 COH901318_CX_CFG_TC_IRQ_ENABLE |
981 COH901318_CX_CFG_BE_IRQ_ENABLE, 981 COH901318_CX_CFG_BE_IRQ_ENABLE,
982 .param.ctrl_lli_chained = 0 | 982 .param.ctrl_lli_chained = 0 |
983 COH901318_CX_CTRL_TC_ENABLE | 983 COH901318_CX_CTRL_TC_ENABLE |
984 COH901318_CX_CTRL_BURST_COUNT_16_BYTES | 984 COH901318_CX_CTRL_BURST_COUNT_16_BYTES |
985 COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | 985 COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
986 COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE | 986 COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
987 COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | 987 COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
988 COH901318_CX_CTRL_DST_ADDR_INC_DISABLE | 988 COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
989 COH901318_CX_CTRL_MASTER_MODE_M1RW | 989 COH901318_CX_CTRL_MASTER_MODE_M1RW |
990 COH901318_CX_CTRL_TCP_DISABLE | 990 COH901318_CX_CTRL_TCP_DISABLE |
991 COH901318_CX_CTRL_TC_IRQ_DISABLE | 991 COH901318_CX_CTRL_TC_IRQ_DISABLE |
992 COH901318_CX_CTRL_HSP_ENABLE | 992 COH901318_CX_CTRL_HSP_ENABLE |
993 COH901318_CX_CTRL_HSS_DISABLE | 993 COH901318_CX_CTRL_HSS_DISABLE |
994 COH901318_CX_CTRL_DDMA_LEGACY | 994 COH901318_CX_CTRL_DDMA_LEGACY |
995 COH901318_CX_CTRL_PRDD_SOURCE, 995 COH901318_CX_CTRL_PRDD_SOURCE,
996 .param.ctrl_lli = 0 | 996 .param.ctrl_lli = 0 |
997 COH901318_CX_CTRL_TC_ENABLE | 997 COH901318_CX_CTRL_TC_ENABLE |
998 COH901318_CX_CTRL_BURST_COUNT_16_BYTES | 998 COH901318_CX_CTRL_BURST_COUNT_16_BYTES |
999 COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | 999 COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
1000 COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE | 1000 COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
1001 COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | 1001 COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
1002 COH901318_CX_CTRL_DST_ADDR_INC_DISABLE | 1002 COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
1003 COH901318_CX_CTRL_MASTER_MODE_M1RW | 1003 COH901318_CX_CTRL_MASTER_MODE_M1RW |
1004 COH901318_CX_CTRL_TCP_ENABLE | 1004 COH901318_CX_CTRL_TCP_ENABLE |
1005 COH901318_CX_CTRL_TC_IRQ_DISABLE | 1005 COH901318_CX_CTRL_TC_IRQ_DISABLE |
1006 COH901318_CX_CTRL_HSP_ENABLE | 1006 COH901318_CX_CTRL_HSP_ENABLE |
1007 COH901318_CX_CTRL_HSS_DISABLE | 1007 COH901318_CX_CTRL_HSS_DISABLE |
1008 COH901318_CX_CTRL_DDMA_LEGACY | 1008 COH901318_CX_CTRL_DDMA_LEGACY |
1009 COH901318_CX_CTRL_PRDD_SOURCE, 1009 COH901318_CX_CTRL_PRDD_SOURCE,
1010 .param.ctrl_lli_last = 0 | 1010 .param.ctrl_lli_last = 0 |
1011 COH901318_CX_CTRL_TC_ENABLE | 1011 COH901318_CX_CTRL_TC_ENABLE |
1012 COH901318_CX_CTRL_BURST_COUNT_16_BYTES | 1012 COH901318_CX_CTRL_BURST_COUNT_16_BYTES |
1013 COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | 1013 COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
1014 COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE | 1014 COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
1015 COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | 1015 COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
1016 COH901318_CX_CTRL_DST_ADDR_INC_DISABLE | 1016 COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
1017 COH901318_CX_CTRL_MASTER_MODE_M1RW | 1017 COH901318_CX_CTRL_MASTER_MODE_M1RW |
1018 COH901318_CX_CTRL_TCP_ENABLE | 1018 COH901318_CX_CTRL_TCP_ENABLE |
1019 COH901318_CX_CTRL_TC_IRQ_ENABLE | 1019 COH901318_CX_CTRL_TC_IRQ_ENABLE |
1020 COH901318_CX_CTRL_HSP_ENABLE | 1020 COH901318_CX_CTRL_HSP_ENABLE |
1021 COH901318_CX_CTRL_HSS_DISABLE | 1021 COH901318_CX_CTRL_HSS_DISABLE |
1022 COH901318_CX_CTRL_DDMA_LEGACY | 1022 COH901318_CX_CTRL_DDMA_LEGACY |
1023 COH901318_CX_CTRL_PRDD_SOURCE, 1023 COH901318_CX_CTRL_PRDD_SOURCE,
1024 }, 1024 },
1025 { 1025 {
1026 .number = U300_DMA_PCM_I2S1_RX, 1026 .number = U300_DMA_PCM_I2S1_RX,
1027 .name = "PCM I2S1 RX", 1027 .name = "PCM I2S1 RX",
1028 .priority_high = 1, 1028 .priority_high = 1,
1029 .param.config = COH901318_CX_CFG_CH_DISABLE | 1029 .param.config = COH901318_CX_CFG_CH_DISABLE |
1030 COH901318_CX_CFG_LCR_DISABLE | 1030 COH901318_CX_CFG_LCR_DISABLE |
1031 COH901318_CX_CFG_TC_IRQ_ENABLE | 1031 COH901318_CX_CFG_TC_IRQ_ENABLE |
1032 COH901318_CX_CFG_BE_IRQ_ENABLE, 1032 COH901318_CX_CFG_BE_IRQ_ENABLE,
1033 .param.ctrl_lli_chained = 0 | 1033 .param.ctrl_lli_chained = 0 |
1034 COH901318_CX_CTRL_TC_ENABLE | 1034 COH901318_CX_CTRL_TC_ENABLE |
1035 COH901318_CX_CTRL_BURST_COUNT_16_BYTES | 1035 COH901318_CX_CTRL_BURST_COUNT_16_BYTES |
1036 COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | 1036 COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
1037 COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE | 1037 COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
1038 COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | 1038 COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
1039 COH901318_CX_CTRL_DST_ADDR_INC_ENABLE | 1039 COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
1040 COH901318_CX_CTRL_MASTER_MODE_M1RW | 1040 COH901318_CX_CTRL_MASTER_MODE_M1RW |
1041 COH901318_CX_CTRL_TCP_DISABLE | 1041 COH901318_CX_CTRL_TCP_DISABLE |
1042 COH901318_CX_CTRL_TC_IRQ_DISABLE | 1042 COH901318_CX_CTRL_TC_IRQ_DISABLE |
1043 COH901318_CX_CTRL_HSP_ENABLE | 1043 COH901318_CX_CTRL_HSP_ENABLE |
1044 COH901318_CX_CTRL_HSS_DISABLE | 1044 COH901318_CX_CTRL_HSS_DISABLE |
1045 COH901318_CX_CTRL_DDMA_LEGACY | 1045 COH901318_CX_CTRL_DDMA_LEGACY |
1046 COH901318_CX_CTRL_PRDD_DEST, 1046 COH901318_CX_CTRL_PRDD_DEST,
1047 .param.ctrl_lli = 0 | 1047 .param.ctrl_lli = 0 |
1048 COH901318_CX_CTRL_TC_ENABLE | 1048 COH901318_CX_CTRL_TC_ENABLE |
1049 COH901318_CX_CTRL_BURST_COUNT_16_BYTES | 1049 COH901318_CX_CTRL_BURST_COUNT_16_BYTES |
1050 COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | 1050 COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
1051 COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE | 1051 COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
1052 COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | 1052 COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
1053 COH901318_CX_CTRL_DST_ADDR_INC_ENABLE | 1053 COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
1054 COH901318_CX_CTRL_MASTER_MODE_M1RW | 1054 COH901318_CX_CTRL_MASTER_MODE_M1RW |
1055 COH901318_CX_CTRL_TCP_ENABLE | 1055 COH901318_CX_CTRL_TCP_ENABLE |
1056 COH901318_CX_CTRL_TC_IRQ_DISABLE | 1056 COH901318_CX_CTRL_TC_IRQ_DISABLE |
1057 COH901318_CX_CTRL_HSP_ENABLE | 1057 COH901318_CX_CTRL_HSP_ENABLE |
1058 COH901318_CX_CTRL_HSS_DISABLE | 1058 COH901318_CX_CTRL_HSS_DISABLE |
1059 COH901318_CX_CTRL_DDMA_LEGACY | 1059 COH901318_CX_CTRL_DDMA_LEGACY |
1060 COH901318_CX_CTRL_PRDD_DEST, 1060 COH901318_CX_CTRL_PRDD_DEST,
1061 .param.ctrl_lli_last = 0 | 1061 .param.ctrl_lli_last = 0 |
1062 COH901318_CX_CTRL_TC_ENABLE | 1062 COH901318_CX_CTRL_TC_ENABLE |
1063 COH901318_CX_CTRL_BURST_COUNT_16_BYTES | 1063 COH901318_CX_CTRL_BURST_COUNT_16_BYTES |
1064 COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | 1064 COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
1065 COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE | 1065 COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
1066 COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | 1066 COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
1067 COH901318_CX_CTRL_DST_ADDR_INC_ENABLE | 1067 COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
1068 COH901318_CX_CTRL_MASTER_MODE_M1RW | 1068 COH901318_CX_CTRL_MASTER_MODE_M1RW |
1069 COH901318_CX_CTRL_TCP_ENABLE | 1069 COH901318_CX_CTRL_TCP_ENABLE |
1070 COH901318_CX_CTRL_TC_IRQ_ENABLE | 1070 COH901318_CX_CTRL_TC_IRQ_ENABLE |
1071 COH901318_CX_CTRL_HSP_ENABLE | 1071 COH901318_CX_CTRL_HSP_ENABLE |
1072 COH901318_CX_CTRL_HSS_DISABLE | 1072 COH901318_CX_CTRL_HSS_DISABLE |
1073 COH901318_CX_CTRL_DDMA_LEGACY | 1073 COH901318_CX_CTRL_DDMA_LEGACY |
1074 COH901318_CX_CTRL_PRDD_DEST, 1074 COH901318_CX_CTRL_PRDD_DEST,
1075 }, 1075 },
1076 { 1076 {
1077 .number = U300_DMA_XGAM_CDI, 1077 .number = U300_DMA_XGAM_CDI,
1078 .name = "XGAM CDI", 1078 .name = "XGAM CDI",
1079 .priority_high = 0, 1079 .priority_high = 0,
1080 }, 1080 },
1081 { 1081 {
1082 .number = U300_DMA_XGAM_PDI, 1082 .number = U300_DMA_XGAM_PDI,
1083 .name = "XGAM PDI", 1083 .name = "XGAM PDI",
1084 .priority_high = 0, 1084 .priority_high = 0,
1085 }, 1085 },
1086 /* 1086 /*
1087 * Don't set up device address, burst count or size of src 1087 * Don't set up device address, burst count or size of src
1088 * or dst bus for this peripheral - handled by PrimeCell 1088 * or dst bus for this peripheral - handled by PrimeCell
1089 * DMA extension. 1089 * DMA extension.
1090 */ 1090 */
1091 { 1091 {
1092 .number = U300_DMA_SPI_TX, 1092 .number = U300_DMA_SPI_TX,
1093 .name = "SPI TX", 1093 .name = "SPI TX",
1094 .priority_high = 0, 1094 .priority_high = 0,
1095 .param.config = COH901318_CX_CFG_CH_DISABLE | 1095 .param.config = COH901318_CX_CFG_CH_DISABLE |
1096 COH901318_CX_CFG_LCR_DISABLE | 1096 COH901318_CX_CFG_LCR_DISABLE |
1097 COH901318_CX_CFG_TC_IRQ_ENABLE | 1097 COH901318_CX_CFG_TC_IRQ_ENABLE |
1098 COH901318_CX_CFG_BE_IRQ_ENABLE, 1098 COH901318_CX_CFG_BE_IRQ_ENABLE,
1099 .param.ctrl_lli_chained = 0 | 1099 .param.ctrl_lli_chained = 0 |
1100 COH901318_CX_CTRL_TC_ENABLE | 1100 COH901318_CX_CTRL_TC_ENABLE |
1101 COH901318_CX_CTRL_MASTER_MODE_M1RW | 1101 COH901318_CX_CTRL_MASTER_MODE_M1RW |
1102 COH901318_CX_CTRL_TCP_DISABLE | 1102 COH901318_CX_CTRL_TCP_DISABLE |
1103 COH901318_CX_CTRL_TC_IRQ_DISABLE | 1103 COH901318_CX_CTRL_TC_IRQ_DISABLE |
1104 COH901318_CX_CTRL_HSP_ENABLE | 1104 COH901318_CX_CTRL_HSP_ENABLE |
1105 COH901318_CX_CTRL_HSS_DISABLE | 1105 COH901318_CX_CTRL_HSS_DISABLE |
1106 COH901318_CX_CTRL_DDMA_LEGACY, 1106 COH901318_CX_CTRL_DDMA_LEGACY,
1107 .param.ctrl_lli = 0 | 1107 .param.ctrl_lli = 0 |
1108 COH901318_CX_CTRL_TC_ENABLE | 1108 COH901318_CX_CTRL_TC_ENABLE |
1109 COH901318_CX_CTRL_MASTER_MODE_M1RW | 1109 COH901318_CX_CTRL_MASTER_MODE_M1RW |
1110 COH901318_CX_CTRL_TCP_DISABLE | 1110 COH901318_CX_CTRL_TCP_DISABLE |
1111 COH901318_CX_CTRL_TC_IRQ_ENABLE | 1111 COH901318_CX_CTRL_TC_IRQ_ENABLE |
1112 COH901318_CX_CTRL_HSP_ENABLE | 1112 COH901318_CX_CTRL_HSP_ENABLE |
1113 COH901318_CX_CTRL_HSS_DISABLE | 1113 COH901318_CX_CTRL_HSS_DISABLE |
1114 COH901318_CX_CTRL_DDMA_LEGACY, 1114 COH901318_CX_CTRL_DDMA_LEGACY,
1115 .param.ctrl_lli_last = 0 | 1115 .param.ctrl_lli_last = 0 |
1116 COH901318_CX_CTRL_TC_ENABLE | 1116 COH901318_CX_CTRL_TC_ENABLE |
1117 COH901318_CX_CTRL_MASTER_MODE_M1RW | 1117 COH901318_CX_CTRL_MASTER_MODE_M1RW |
1118 COH901318_CX_CTRL_TCP_DISABLE | 1118 COH901318_CX_CTRL_TCP_DISABLE |
1119 COH901318_CX_CTRL_TC_IRQ_ENABLE | 1119 COH901318_CX_CTRL_TC_IRQ_ENABLE |
1120 COH901318_CX_CTRL_HSP_ENABLE | 1120 COH901318_CX_CTRL_HSP_ENABLE |
1121 COH901318_CX_CTRL_HSS_DISABLE | 1121 COH901318_CX_CTRL_HSS_DISABLE |
1122 COH901318_CX_CTRL_DDMA_LEGACY, 1122 COH901318_CX_CTRL_DDMA_LEGACY,
1123 }, 1123 },
1124 { 1124 {
1125 .number = U300_DMA_SPI_RX, 1125 .number = U300_DMA_SPI_RX,
1126 .name = "SPI RX", 1126 .name = "SPI RX",
1127 .priority_high = 0, 1127 .priority_high = 0,
1128 .param.config = COH901318_CX_CFG_CH_DISABLE | 1128 .param.config = COH901318_CX_CFG_CH_DISABLE |
1129 COH901318_CX_CFG_LCR_DISABLE | 1129 COH901318_CX_CFG_LCR_DISABLE |
1130 COH901318_CX_CFG_TC_IRQ_ENABLE | 1130 COH901318_CX_CFG_TC_IRQ_ENABLE |
1131 COH901318_CX_CFG_BE_IRQ_ENABLE, 1131 COH901318_CX_CFG_BE_IRQ_ENABLE,
1132 .param.ctrl_lli_chained = 0 | 1132 .param.ctrl_lli_chained = 0 |
1133 COH901318_CX_CTRL_TC_ENABLE | 1133 COH901318_CX_CTRL_TC_ENABLE |
1134 COH901318_CX_CTRL_MASTER_MODE_M1RW | 1134 COH901318_CX_CTRL_MASTER_MODE_M1RW |
1135 COH901318_CX_CTRL_TCP_DISABLE | 1135 COH901318_CX_CTRL_TCP_DISABLE |
1136 COH901318_CX_CTRL_TC_IRQ_DISABLE | 1136 COH901318_CX_CTRL_TC_IRQ_DISABLE |
1137 COH901318_CX_CTRL_HSP_ENABLE | 1137 COH901318_CX_CTRL_HSP_ENABLE |
1138 COH901318_CX_CTRL_HSS_DISABLE | 1138 COH901318_CX_CTRL_HSS_DISABLE |
1139 COH901318_CX_CTRL_DDMA_LEGACY, 1139 COH901318_CX_CTRL_DDMA_LEGACY,
1140 .param.ctrl_lli = 0 | 1140 .param.ctrl_lli = 0 |
1141 COH901318_CX_CTRL_TC_ENABLE | 1141 COH901318_CX_CTRL_TC_ENABLE |
1142 COH901318_CX_CTRL_MASTER_MODE_M1RW | 1142 COH901318_CX_CTRL_MASTER_MODE_M1RW |
1143 COH901318_CX_CTRL_TCP_DISABLE | 1143 COH901318_CX_CTRL_TCP_DISABLE |
1144 COH901318_CX_CTRL_TC_IRQ_ENABLE | 1144 COH901318_CX_CTRL_TC_IRQ_ENABLE |
1145 COH901318_CX_CTRL_HSP_ENABLE | 1145 COH901318_CX_CTRL_HSP_ENABLE |
1146 COH901318_CX_CTRL_HSS_DISABLE | 1146 COH901318_CX_CTRL_HSS_DISABLE |
1147 COH901318_CX_CTRL_DDMA_LEGACY, 1147 COH901318_CX_CTRL_DDMA_LEGACY,
1148 .param.ctrl_lli_last = 0 | 1148 .param.ctrl_lli_last = 0 |
1149 COH901318_CX_CTRL_TC_ENABLE | 1149 COH901318_CX_CTRL_TC_ENABLE |
1150 COH901318_CX_CTRL_MASTER_MODE_M1RW | 1150 COH901318_CX_CTRL_MASTER_MODE_M1RW |
1151 COH901318_CX_CTRL_TCP_DISABLE | 1151 COH901318_CX_CTRL_TCP_DISABLE |
1152 COH901318_CX_CTRL_TC_IRQ_ENABLE | 1152 COH901318_CX_CTRL_TC_IRQ_ENABLE |
1153 COH901318_CX_CTRL_HSP_ENABLE | 1153 COH901318_CX_CTRL_HSP_ENABLE |
1154 COH901318_CX_CTRL_HSS_DISABLE | 1154 COH901318_CX_CTRL_HSS_DISABLE |
1155 COH901318_CX_CTRL_DDMA_LEGACY, 1155 COH901318_CX_CTRL_DDMA_LEGACY,
1156 1156
1157 }, 1157 },
1158 { 1158 {
1159 .number = U300_DMA_GENERAL_PURPOSE_0, 1159 .number = U300_DMA_GENERAL_PURPOSE_0,
1160 .name = "GENERAL 00", 1160 .name = "GENERAL 00",
1161 .priority_high = 0, 1161 .priority_high = 0,
1162 1162
1163 .param.config = flags_memcpy_config, 1163 .param.config = flags_memcpy_config,
1164 .param.ctrl_lli_chained = flags_memcpy_lli_chained, 1164 .param.ctrl_lli_chained = flags_memcpy_lli_chained,
1165 .param.ctrl_lli = flags_memcpy_lli, 1165 .param.ctrl_lli = flags_memcpy_lli,
1166 .param.ctrl_lli_last = flags_memcpy_lli_last, 1166 .param.ctrl_lli_last = flags_memcpy_lli_last,
1167 }, 1167 },
1168 { 1168 {
1169 .number = U300_DMA_GENERAL_PURPOSE_1, 1169 .number = U300_DMA_GENERAL_PURPOSE_1,
1170 .name = "GENERAL 01", 1170 .name = "GENERAL 01",
1171 .priority_high = 0, 1171 .priority_high = 0,
1172 1172
1173 .param.config = flags_memcpy_config, 1173 .param.config = flags_memcpy_config,
1174 .param.ctrl_lli_chained = flags_memcpy_lli_chained, 1174 .param.ctrl_lli_chained = flags_memcpy_lli_chained,
1175 .param.ctrl_lli = flags_memcpy_lli, 1175 .param.ctrl_lli = flags_memcpy_lli,
1176 .param.ctrl_lli_last = flags_memcpy_lli_last, 1176 .param.ctrl_lli_last = flags_memcpy_lli_last,
1177 }, 1177 },
1178 { 1178 {
1179 .number = U300_DMA_GENERAL_PURPOSE_2, 1179 .number = U300_DMA_GENERAL_PURPOSE_2,
1180 .name = "GENERAL 02", 1180 .name = "GENERAL 02",
1181 .priority_high = 0, 1181 .priority_high = 0,
1182 1182
1183 .param.config = flags_memcpy_config, 1183 .param.config = flags_memcpy_config,
1184 .param.ctrl_lli_chained = flags_memcpy_lli_chained, 1184 .param.ctrl_lli_chained = flags_memcpy_lli_chained,
1185 .param.ctrl_lli = flags_memcpy_lli, 1185 .param.ctrl_lli = flags_memcpy_lli,
1186 .param.ctrl_lli_last = flags_memcpy_lli_last, 1186 .param.ctrl_lli_last = flags_memcpy_lli_last,
1187 }, 1187 },
1188 { 1188 {
1189 .number = U300_DMA_GENERAL_PURPOSE_3, 1189 .number = U300_DMA_GENERAL_PURPOSE_3,
1190 .name = "GENERAL 03", 1190 .name = "GENERAL 03",
1191 .priority_high = 0, 1191 .priority_high = 0,
1192 1192
1193 .param.config = flags_memcpy_config, 1193 .param.config = flags_memcpy_config,
1194 .param.ctrl_lli_chained = flags_memcpy_lli_chained, 1194 .param.ctrl_lli_chained = flags_memcpy_lli_chained,
1195 .param.ctrl_lli = flags_memcpy_lli, 1195 .param.ctrl_lli = flags_memcpy_lli,
1196 .param.ctrl_lli_last = flags_memcpy_lli_last, 1196 .param.ctrl_lli_last = flags_memcpy_lli_last,
1197 }, 1197 },
1198 { 1198 {
1199 .number = U300_DMA_GENERAL_PURPOSE_4, 1199 .number = U300_DMA_GENERAL_PURPOSE_4,
1200 .name = "GENERAL 04", 1200 .name = "GENERAL 04",
1201 .priority_high = 0, 1201 .priority_high = 0,
1202 1202
1203 .param.config = flags_memcpy_config, 1203 .param.config = flags_memcpy_config,
1204 .param.ctrl_lli_chained = flags_memcpy_lli_chained, 1204 .param.ctrl_lli_chained = flags_memcpy_lli_chained,
1205 .param.ctrl_lli = flags_memcpy_lli, 1205 .param.ctrl_lli = flags_memcpy_lli,
1206 .param.ctrl_lli_last = flags_memcpy_lli_last, 1206 .param.ctrl_lli_last = flags_memcpy_lli_last,
1207 }, 1207 },
1208 { 1208 {
1209 .number = U300_DMA_GENERAL_PURPOSE_5, 1209 .number = U300_DMA_GENERAL_PURPOSE_5,
1210 .name = "GENERAL 05", 1210 .name = "GENERAL 05",
1211 .priority_high = 0, 1211 .priority_high = 0,
1212 1212
1213 .param.config = flags_memcpy_config, 1213 .param.config = flags_memcpy_config,
1214 .param.ctrl_lli_chained = flags_memcpy_lli_chained, 1214 .param.ctrl_lli_chained = flags_memcpy_lli_chained,
1215 .param.ctrl_lli = flags_memcpy_lli, 1215 .param.ctrl_lli = flags_memcpy_lli,
1216 .param.ctrl_lli_last = flags_memcpy_lli_last, 1216 .param.ctrl_lli_last = flags_memcpy_lli_last,
1217 }, 1217 },
1218 { 1218 {
1219 .number = U300_DMA_GENERAL_PURPOSE_6, 1219 .number = U300_DMA_GENERAL_PURPOSE_6,
1220 .name = "GENERAL 06", 1220 .name = "GENERAL 06",
1221 .priority_high = 0, 1221 .priority_high = 0,
1222 1222
1223 .param.config = flags_memcpy_config, 1223 .param.config = flags_memcpy_config,
1224 .param.ctrl_lli_chained = flags_memcpy_lli_chained, 1224 .param.ctrl_lli_chained = flags_memcpy_lli_chained,
1225 .param.ctrl_lli = flags_memcpy_lli, 1225 .param.ctrl_lli = flags_memcpy_lli,
1226 .param.ctrl_lli_last = flags_memcpy_lli_last, 1226 .param.ctrl_lli_last = flags_memcpy_lli_last,
1227 }, 1227 },
1228 { 1228 {
1229 .number = U300_DMA_GENERAL_PURPOSE_7, 1229 .number = U300_DMA_GENERAL_PURPOSE_7,
1230 .name = "GENERAL 07", 1230 .name = "GENERAL 07",
1231 .priority_high = 0, 1231 .priority_high = 0,
1232 1232
1233 .param.config = flags_memcpy_config, 1233 .param.config = flags_memcpy_config,
1234 .param.ctrl_lli_chained = flags_memcpy_lli_chained, 1234 .param.ctrl_lli_chained = flags_memcpy_lli_chained,
1235 .param.ctrl_lli = flags_memcpy_lli, 1235 .param.ctrl_lli = flags_memcpy_lli,
1236 .param.ctrl_lli_last = flags_memcpy_lli_last, 1236 .param.ctrl_lli_last = flags_memcpy_lli_last,
1237 }, 1237 },
1238 { 1238 {
1239 .number = U300_DMA_GENERAL_PURPOSE_8, 1239 .number = U300_DMA_GENERAL_PURPOSE_8,
1240 .name = "GENERAL 08", 1240 .name = "GENERAL 08",
1241 .priority_high = 0, 1241 .priority_high = 0,
1242 1242
1243 .param.config = flags_memcpy_config, 1243 .param.config = flags_memcpy_config,
1244 .param.ctrl_lli_chained = flags_memcpy_lli_chained, 1244 .param.ctrl_lli_chained = flags_memcpy_lli_chained,
1245 .param.ctrl_lli = flags_memcpy_lli, 1245 .param.ctrl_lli = flags_memcpy_lli,
1246 .param.ctrl_lli_last = flags_memcpy_lli_last, 1246 .param.ctrl_lli_last = flags_memcpy_lli_last,
1247 }, 1247 },
1248 { 1248 {
1249 .number = U300_DMA_UART1_TX, 1249 .number = U300_DMA_UART1_TX,
1250 .name = "UART1 TX", 1250 .name = "UART1 TX",
1251 .priority_high = 0, 1251 .priority_high = 0,
1252 }, 1252 },
1253 { 1253 {
1254 .number = U300_DMA_UART1_RX, 1254 .number = U300_DMA_UART1_RX,
1255 .name = "UART1 RX", 1255 .name = "UART1 RX",
1256 .priority_high = 0, 1256 .priority_high = 0,
1257 } 1257 }
1258 }; 1258 };
1259 1259
1260 #define COHC_2_DEV(cohc) (&cohc->chan.dev->device) 1260 #define COHC_2_DEV(cohc) (&cohc->chan.dev->device)
1261 1261
1262 #ifdef VERBOSE_DEBUG 1262 #ifdef VERBOSE_DEBUG
1263 #define COH_DBG(x) ({ if (1) x; 0; }) 1263 #define COH_DBG(x) ({ if (1) x; 0; })
1264 #else 1264 #else
1265 #define COH_DBG(x) ({ if (0) x; 0; }) 1265 #define COH_DBG(x) ({ if (0) x; 0; })
1266 #endif 1266 #endif
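/*
 * COH_DBG() uses an "if (0)"/"if (1)" wrapper so that the enclosed
 * statement is always seen (and type-checked) by the compiler, but is
 * only actually executed when VERBOSE_DEBUG is defined.
 */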
1267 1267
1268 struct coh901318_desc { 1268 struct coh901318_desc {
1269 struct dma_async_tx_descriptor desc; 1269 struct dma_async_tx_descriptor desc;
1270 struct list_head node; 1270 struct list_head node;
1271 struct scatterlist *sg; 1271 struct scatterlist *sg;
1272 unsigned int sg_len; 1272 unsigned int sg_len;
1273 struct coh901318_lli *lli; 1273 struct coh901318_lli *lli;
1274 enum dma_transfer_direction dir; 1274 enum dma_transfer_direction dir;
1275 unsigned long flags; 1275 unsigned long flags;
1276 u32 head_config; 1276 u32 head_config;
1277 u32 head_ctrl; 1277 u32 head_ctrl;
1278 }; 1278 };
1279 1279
1280 struct coh901318_base { 1280 struct coh901318_base {
1281 struct device *dev; 1281 struct device *dev;
1282 void __iomem *virtbase; 1282 void __iomem *virtbase;
1283 struct coh901318_pool pool; 1283 struct coh901318_pool pool;
1284 struct powersave pm; 1284 struct powersave pm;
1285 struct dma_device dma_slave; 1285 struct dma_device dma_slave;
1286 struct dma_device dma_memcpy; 1286 struct dma_device dma_memcpy;
1287 struct coh901318_chan *chans; 1287 struct coh901318_chan *chans;
1288 }; 1288 };
1289 1289
1290 struct coh901318_chan { 1290 struct coh901318_chan {
1291 spinlock_t lock; 1291 spinlock_t lock;
1292 int allocated; 1292 int allocated;
1293 int id; 1293 int id;
1294 int stopped; 1294 int stopped;
1295 1295
1296 struct work_struct free_work; 1296 struct work_struct free_work;
1297 struct dma_chan chan; 1297 struct dma_chan chan;
1298 1298
1299 struct tasklet_struct tasklet; 1299 struct tasklet_struct tasklet;
1300 1300
1301 struct list_head active; 1301 struct list_head active;
1302 struct list_head queue; 1302 struct list_head queue;
1303 struct list_head free; 1303 struct list_head free;
1304 1304
1305 unsigned long nbr_active_done; 1305 unsigned long nbr_active_done;
1306 unsigned long busy; 1306 unsigned long busy;
1307 1307
1308 u32 addr; 1308 u32 addr;
1309 u32 ctrl; 1309 u32 ctrl;
1310 1310
1311 struct coh901318_base *base; 1311 struct coh901318_base *base;
1312 }; 1312 };
1313 1313
1314 static void coh901318_list_print(struct coh901318_chan *cohc, 1314 static void coh901318_list_print(struct coh901318_chan *cohc,
1315 struct coh901318_lli *lli) 1315 struct coh901318_lli *lli)
1316 { 1316 {
1317 struct coh901318_lli *l = lli; 1317 struct coh901318_lli *l = lli;
1318 int i = 0; 1318 int i = 0;
1319 1319
1320 while (l) { 1320 while (l) {
1321 dev_vdbg(COHC_2_DEV(cohc), "i %d, lli %p, ctrl 0x%x, src 0x%x" 1321 dev_vdbg(COHC_2_DEV(cohc), "i %d, lli %p, ctrl 0x%x, src 0x%x"
1322 ", dst 0x%x, link 0x%x virt_link_addr 0x%p\n", 1322 ", dst 0x%x, link 0x%x virt_link_addr 0x%p\n",
1323 i, l, l->control, l->src_addr, l->dst_addr, 1323 i, l, l->control, l->src_addr, l->dst_addr,
1324 l->link_addr, l->virt_link_addr); 1324 l->link_addr, l->virt_link_addr);
1325 i++; 1325 i++;
1326 l = l->virt_link_addr; 1326 l = l->virt_link_addr;
1327 } 1327 }
1328 } 1328 }
1329 1329
1330 #ifdef CONFIG_DEBUG_FS 1330 #ifdef CONFIG_DEBUG_FS
1331 1331
1332 #define COH901318_DEBUGFS_ASSIGN(x, y) (x = y) 1332 #define COH901318_DEBUGFS_ASSIGN(x, y) (x = y)
1333 1333
1334 static struct coh901318_base *debugfs_dma_base; 1334 static struct coh901318_base *debugfs_dma_base;
1335 static struct dentry *dma_dentry; 1335 static struct dentry *dma_dentry;
1336 1336
1337 static int coh901318_debugfs_read(struct file *file, char __user *buf, 1337 static int coh901318_debugfs_read(struct file *file, char __user *buf,
1338 size_t count, loff_t *f_pos) 1338 size_t count, loff_t *f_pos)
1339 { 1339 {
1340 u64 started_channels = debugfs_dma_base->pm.started_channels; 1340 u64 started_channels = debugfs_dma_base->pm.started_channels;
1341 int pool_count = debugfs_dma_base->pool.debugfs_pool_counter; 1341 int pool_count = debugfs_dma_base->pool.debugfs_pool_counter;
1342 char *dev_buf; 1342 char *dev_buf;
1343 char *tmp; 1343 char *tmp;
1344 int ret; 1344 int ret;
1345 int i; 1345 int i;
1346 1346
1347 dev_buf = kmalloc(4*1024, GFP_KERNEL); 1347 dev_buf = kmalloc(4*1024, GFP_KERNEL);
1348 if (dev_buf == NULL) 1348 if (dev_buf == NULL)
1349 return -ENOMEM; 1349 return -ENOMEM;
1350 tmp = dev_buf; 1350 tmp = dev_buf;
1351 1351
1352 tmp += sprintf(tmp, "DMA -- enabled dma channels\n"); 1352 tmp += sprintf(tmp, "DMA -- enabled dma channels\n");
1353 1353
1354 for (i = 0; i < U300_DMA_CHANNELS; i++) 1354 for (i = 0; i < U300_DMA_CHANNELS; i++)
		if (started_channels & (1ULL << i))
1356 tmp += sprintf(tmp, "channel %d\n", i); 1356 tmp += sprintf(tmp, "channel %d\n", i);
1357 1357
1358 tmp += sprintf(tmp, "Pool alloc nbr %d\n", pool_count); 1358 tmp += sprintf(tmp, "Pool alloc nbr %d\n", pool_count);
1359 1359
1360 ret = simple_read_from_buffer(buf, count, f_pos, dev_buf, 1360 ret = simple_read_from_buffer(buf, count, f_pos, dev_buf,
1361 tmp - dev_buf); 1361 tmp - dev_buf);
1362 kfree(dev_buf); 1362 kfree(dev_buf);
1363 return ret; 1363 return ret;
1364 } 1364 }
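/*
 * Reading the resulting debugfs file ("/sys/kernel/debug/dma/status",
 * assuming debugfs is mounted in the usual place) gives output along
 * the lines of:
 *
 *   DMA -- enabled dma channels
 *   channel 14
 *   channel 26
 *   Pool alloc nbr 2
 *
 * where the channel numbers shown are purely illustrative.
 */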
1365 1365
1366 static const struct file_operations coh901318_debugfs_status_operations = { 1366 static const struct file_operations coh901318_debugfs_status_operations = {
1367 .owner = THIS_MODULE, 1367 .owner = THIS_MODULE,
1368 .open = simple_open, 1368 .open = simple_open,
1369 .read = coh901318_debugfs_read, 1369 .read = coh901318_debugfs_read,
1370 .llseek = default_llseek, 1370 .llseek = default_llseek,
1371 }; 1371 };
1372 1372
1373 1373
1374 static int __init init_coh901318_debugfs(void) 1374 static int __init init_coh901318_debugfs(void)
1375 { 1375 {
1376 1376
1377 dma_dentry = debugfs_create_dir("dma", NULL); 1377 dma_dentry = debugfs_create_dir("dma", NULL);
1378 1378
1379 (void) debugfs_create_file("status", 1379 (void) debugfs_create_file("status",
1380 S_IFREG | S_IRUGO, 1380 S_IFREG | S_IRUGO,
1381 dma_dentry, NULL, 1381 dma_dentry, NULL,
1382 &coh901318_debugfs_status_operations); 1382 &coh901318_debugfs_status_operations);
1383 return 0; 1383 return 0;
1384 } 1384 }
1385 1385
1386 static void __exit exit_coh901318_debugfs(void) 1386 static void __exit exit_coh901318_debugfs(void)
1387 { 1387 {
1388 debugfs_remove_recursive(dma_dentry); 1388 debugfs_remove_recursive(dma_dentry);
1389 } 1389 }
1390 1390
1391 module_init(init_coh901318_debugfs); 1391 module_init(init_coh901318_debugfs);
1392 module_exit(exit_coh901318_debugfs); 1392 module_exit(exit_coh901318_debugfs);
1393 #else 1393 #else
1394 1394
1395 #define COH901318_DEBUGFS_ASSIGN(x, y) 1395 #define COH901318_DEBUGFS_ASSIGN(x, y)
1396 1396
1397 #endif /* CONFIG_DEBUG_FS */ 1397 #endif /* CONFIG_DEBUG_FS */
1398 1398
1399 static inline struct coh901318_chan *to_coh901318_chan(struct dma_chan *chan) 1399 static inline struct coh901318_chan *to_coh901318_chan(struct dma_chan *chan)
1400 { 1400 {
1401 return container_of(chan, struct coh901318_chan, chan); 1401 return container_of(chan, struct coh901318_chan, chan);
1402 } 1402 }
1403 1403
1404 static inline const struct coh901318_params * 1404 static inline const struct coh901318_params *
1405 cohc_chan_param(struct coh901318_chan *cohc) 1405 cohc_chan_param(struct coh901318_chan *cohc)
1406 { 1406 {
1407 return &chan_config[cohc->id].param; 1407 return &chan_config[cohc->id].param;
1408 } 1408 }
1409 1409
1410 static inline const struct coh_dma_channel * 1410 static inline const struct coh_dma_channel *
1411 cohc_chan_conf(struct coh901318_chan *cohc) 1411 cohc_chan_conf(struct coh901318_chan *cohc)
1412 { 1412 {
1413 return &chan_config[cohc->id]; 1413 return &chan_config[cohc->id];
1414 } 1414 }
1415 1415
1416 static void enable_powersave(struct coh901318_chan *cohc) 1416 static void enable_powersave(struct coh901318_chan *cohc)
1417 { 1417 {
1418 unsigned long flags; 1418 unsigned long flags;
1419 struct powersave *pm = &cohc->base->pm; 1419 struct powersave *pm = &cohc->base->pm;
1420 1420
1421 spin_lock_irqsave(&pm->lock, flags); 1421 spin_lock_irqsave(&pm->lock, flags);
1422 1422
1423 pm->started_channels &= ~(1ULL << cohc->id); 1423 pm->started_channels &= ~(1ULL << cohc->id);
1424 1424
1425 spin_unlock_irqrestore(&pm->lock, flags); 1425 spin_unlock_irqrestore(&pm->lock, flags);
1426 } 1426 }
1427 static void disable_powersave(struct coh901318_chan *cohc) 1427 static void disable_powersave(struct coh901318_chan *cohc)
1428 { 1428 {
1429 unsigned long flags; 1429 unsigned long flags;
1430 struct powersave *pm = &cohc->base->pm; 1430 struct powersave *pm = &cohc->base->pm;
1431 1431
1432 spin_lock_irqsave(&pm->lock, flags); 1432 spin_lock_irqsave(&pm->lock, flags);
1433 1433
1434 pm->started_channels |= (1ULL << cohc->id); 1434 pm->started_channels |= (1ULL << cohc->id);
1435 1435
1436 spin_unlock_irqrestore(&pm->lock, flags); 1436 spin_unlock_irqrestore(&pm->lock, flags);
1437 } 1437 }
1438 1438
1439 static inline int coh901318_set_ctrl(struct coh901318_chan *cohc, u32 control) 1439 static inline int coh901318_set_ctrl(struct coh901318_chan *cohc, u32 control)
1440 { 1440 {
1441 int channel = cohc->id; 1441 int channel = cohc->id;
1442 void __iomem *virtbase = cohc->base->virtbase; 1442 void __iomem *virtbase = cohc->base->virtbase;
1443 1443
1444 writel(control, 1444 writel(control,
1445 virtbase + COH901318_CX_CTRL + 1445 virtbase + COH901318_CX_CTRL +
1446 COH901318_CX_CTRL_SPACING * channel); 1446 COH901318_CX_CTRL_SPACING * channel);
1447 return 0; 1447 return 0;
1448 } 1448 }
1449 1449
1450 static inline int coh901318_set_conf(struct coh901318_chan *cohc, u32 conf) 1450 static inline int coh901318_set_conf(struct coh901318_chan *cohc, u32 conf)
1451 { 1451 {
1452 int channel = cohc->id; 1452 int channel = cohc->id;
1453 void __iomem *virtbase = cohc->base->virtbase; 1453 void __iomem *virtbase = cohc->base->virtbase;
1454 1454
1455 writel(conf, 1455 writel(conf,
1456 virtbase + COH901318_CX_CFG + 1456 virtbase + COH901318_CX_CFG +
1457 COH901318_CX_CFG_SPACING*channel); 1457 COH901318_CX_CFG_SPACING*channel);
1458 return 0; 1458 return 0;
1459 } 1459 }
1460 1460
1461 1461
1462 static int coh901318_start(struct coh901318_chan *cohc) 1462 static int coh901318_start(struct coh901318_chan *cohc)
1463 { 1463 {
1464 u32 val; 1464 u32 val;
1465 int channel = cohc->id; 1465 int channel = cohc->id;
1466 void __iomem *virtbase = cohc->base->virtbase; 1466 void __iomem *virtbase = cohc->base->virtbase;
1467 1467
1468 disable_powersave(cohc); 1468 disable_powersave(cohc);
1469 1469
1470 val = readl(virtbase + COH901318_CX_CFG + 1470 val = readl(virtbase + COH901318_CX_CFG +
1471 COH901318_CX_CFG_SPACING * channel); 1471 COH901318_CX_CFG_SPACING * channel);
1472 1472
1473 /* Enable channel */ 1473 /* Enable channel */
1474 val |= COH901318_CX_CFG_CH_ENABLE; 1474 val |= COH901318_CX_CFG_CH_ENABLE;
1475 writel(val, virtbase + COH901318_CX_CFG + 1475 writel(val, virtbase + COH901318_CX_CFG +
1476 COH901318_CX_CFG_SPACING * channel); 1476 COH901318_CX_CFG_SPACING * channel);
1477 1477
1478 return 0; 1478 return 0;
1479 } 1479 }
1480 1480
1481 static int coh901318_prep_linked_list(struct coh901318_chan *cohc, 1481 static int coh901318_prep_linked_list(struct coh901318_chan *cohc,
1482 struct coh901318_lli *lli) 1482 struct coh901318_lli *lli)
1483 { 1483 {
1484 int channel = cohc->id; 1484 int channel = cohc->id;
1485 void __iomem *virtbase = cohc->base->virtbase; 1485 void __iomem *virtbase = cohc->base->virtbase;
1486 1486
1487 BUG_ON(readl(virtbase + COH901318_CX_STAT + 1487 BUG_ON(readl(virtbase + COH901318_CX_STAT +
1488 COH901318_CX_STAT_SPACING*channel) & 1488 COH901318_CX_STAT_SPACING*channel) &
1489 COH901318_CX_STAT_ACTIVE); 1489 COH901318_CX_STAT_ACTIVE);
1490 1490
1491 writel(lli->src_addr, 1491 writel(lli->src_addr,
1492 virtbase + COH901318_CX_SRC_ADDR + 1492 virtbase + COH901318_CX_SRC_ADDR +
1493 COH901318_CX_SRC_ADDR_SPACING * channel); 1493 COH901318_CX_SRC_ADDR_SPACING * channel);
1494 1494
1495 writel(lli->dst_addr, virtbase + 1495 writel(lli->dst_addr, virtbase +
1496 COH901318_CX_DST_ADDR + 1496 COH901318_CX_DST_ADDR +
1497 COH901318_CX_DST_ADDR_SPACING * channel); 1497 COH901318_CX_DST_ADDR_SPACING * channel);
1498 1498
1499 writel(lli->link_addr, virtbase + COH901318_CX_LNK_ADDR + 1499 writel(lli->link_addr, virtbase + COH901318_CX_LNK_ADDR +
1500 COH901318_CX_LNK_ADDR_SPACING * channel); 1500 COH901318_CX_LNK_ADDR_SPACING * channel);
1501 1501
1502 writel(lli->control, virtbase + COH901318_CX_CTRL + 1502 writel(lli->control, virtbase + COH901318_CX_CTRL +
1503 COH901318_CX_CTRL_SPACING * channel); 1503 COH901318_CX_CTRL_SPACING * channel);
1504 1504
1505 return 0; 1505 return 0;
1506 } 1506 }
1507 1507
1508 static struct coh901318_desc * 1508 static struct coh901318_desc *
1509 coh901318_desc_get(struct coh901318_chan *cohc) 1509 coh901318_desc_get(struct coh901318_chan *cohc)
1510 { 1510 {
1511 struct coh901318_desc *desc; 1511 struct coh901318_desc *desc;
1512 1512
1513 if (list_empty(&cohc->free)) { 1513 if (list_empty(&cohc->free)) {
		/*
		 * Allocate a new desc because the free list is empty.
		 * TODO: allocate a pile of descs instead of just one,
		 * to avoid many small allocations.
		 */
1518 desc = kzalloc(sizeof(struct coh901318_desc), GFP_NOWAIT); 1518 desc = kzalloc(sizeof(struct coh901318_desc), GFP_NOWAIT);
1519 if (desc == NULL) 1519 if (desc == NULL)
1520 goto out; 1520 goto out;
1521 INIT_LIST_HEAD(&desc->node); 1521 INIT_LIST_HEAD(&desc->node);
1522 dma_async_tx_descriptor_init(&desc->desc, &cohc->chan); 1522 dma_async_tx_descriptor_init(&desc->desc, &cohc->chan);
1523 } else { 1523 } else {
1524 /* Reuse an old desc. */ 1524 /* Reuse an old desc. */
1525 desc = list_first_entry(&cohc->free, 1525 desc = list_first_entry(&cohc->free,
1526 struct coh901318_desc, 1526 struct coh901318_desc,
1527 node); 1527 node);
1528 list_del(&desc->node); 1528 list_del(&desc->node);
1529 /* Initialize it a bit so it's not insane */ 1529 /* Initialize it a bit so it's not insane */
1530 desc->sg = NULL; 1530 desc->sg = NULL;
1531 desc->sg_len = 0; 1531 desc->sg_len = 0;
1532 desc->desc.callback = NULL; 1532 desc->desc.callback = NULL;
1533 desc->desc.callback_param = NULL; 1533 desc->desc.callback_param = NULL;
1534 } 1534 }
1535 1535
1536 out: 1536 out:
1537 return desc; 1537 return desc;
1538 } 1538 }
1539 1539
1540 static void 1540 static void
1541 coh901318_desc_free(struct coh901318_chan *cohc, struct coh901318_desc *cohd) 1541 coh901318_desc_free(struct coh901318_chan *cohc, struct coh901318_desc *cohd)
1542 { 1542 {
1543 list_add_tail(&cohd->node, &cohc->free); 1543 list_add_tail(&cohd->node, &cohc->free);
1544 } 1544 }
1545 1545
1546 /* call with irq lock held */ 1546 /* call with irq lock held */
1547 static void 1547 static void
1548 coh901318_desc_submit(struct coh901318_chan *cohc, struct coh901318_desc *desc) 1548 coh901318_desc_submit(struct coh901318_chan *cohc, struct coh901318_desc *desc)
1549 { 1549 {
1550 list_add_tail(&desc->node, &cohc->active); 1550 list_add_tail(&desc->node, &cohc->active);
1551 } 1551 }
1552 1552
1553 static struct coh901318_desc * 1553 static struct coh901318_desc *
1554 coh901318_first_active_get(struct coh901318_chan *cohc) 1554 coh901318_first_active_get(struct coh901318_chan *cohc)
1555 { 1555 {
1556 struct coh901318_desc *d; 1556 struct coh901318_desc *d;
1557 1557
1558 if (list_empty(&cohc->active)) 1558 if (list_empty(&cohc->active))
1559 return NULL; 1559 return NULL;
1560 1560
1561 d = list_first_entry(&cohc->active, 1561 d = list_first_entry(&cohc->active,
1562 struct coh901318_desc, 1562 struct coh901318_desc,
1563 node); 1563 node);
1564 return d; 1564 return d;
1565 } 1565 }
1566 1566
1567 static void 1567 static void
1568 coh901318_desc_remove(struct coh901318_desc *cohd) 1568 coh901318_desc_remove(struct coh901318_desc *cohd)
1569 { 1569 {
1570 list_del(&cohd->node); 1570 list_del(&cohd->node);
1571 } 1571 }
1572 1572
1573 static void 1573 static void
1574 coh901318_desc_queue(struct coh901318_chan *cohc, struct coh901318_desc *desc) 1574 coh901318_desc_queue(struct coh901318_chan *cohc, struct coh901318_desc *desc)
1575 { 1575 {
1576 list_add_tail(&desc->node, &cohc->queue); 1576 list_add_tail(&desc->node, &cohc->queue);
1577 } 1577 }
1578 1578
1579 static struct coh901318_desc * 1579 static struct coh901318_desc *
1580 coh901318_first_queued(struct coh901318_chan *cohc) 1580 coh901318_first_queued(struct coh901318_chan *cohc)
1581 { 1581 {
1582 struct coh901318_desc *d; 1582 struct coh901318_desc *d;
1583 1583
1584 if (list_empty(&cohc->queue)) 1584 if (list_empty(&cohc->queue))
1585 return NULL; 1585 return NULL;
1586 1586
1587 d = list_first_entry(&cohc->queue, 1587 d = list_first_entry(&cohc->queue,
1588 struct coh901318_desc, 1588 struct coh901318_desc,
1589 node); 1589 node);
1590 return d; 1590 return d;
1591 } 1591 }
1592 1592
1593 static inline u32 coh901318_get_bytes_in_lli(struct coh901318_lli *in_lli) 1593 static inline u32 coh901318_get_bytes_in_lli(struct coh901318_lli *in_lli)
1594 { 1594 {
1595 struct coh901318_lli *lli = in_lli; 1595 struct coh901318_lli *lli = in_lli;
1596 u32 bytes = 0; 1596 u32 bytes = 0;
1597 1597
1598 while (lli) { 1598 while (lli) {
1599 bytes += lli->control & COH901318_CX_CTRL_TC_VALUE_MASK; 1599 bytes += lli->control & COH901318_CX_CTRL_TC_VALUE_MASK;
1600 lli = lli->virt_link_addr; 1600 lli = lli->virt_link_addr;
1601 } 1601 }
1602 return bytes; 1602 return bytes;
1603 } 1603 }
1604 1604
/*
 * Get the number of bytes left to transfer on this channel. For an
 * exact figure the channel should be stopped first, but it can also
 * be called on a running channel to get a rough estimate.
 */
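/*
 * The residue is computed as the hardware transfer counter of the lli
 * currently executing, plus the bytes in any remaining llis of the
 * active descriptor, plus the bytes in all still-queued descriptors.
 */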
1611 static u32 coh901318_get_bytes_left(struct dma_chan *chan) 1611 static u32 coh901318_get_bytes_left(struct dma_chan *chan)
1612 { 1612 {
1613 struct coh901318_chan *cohc = to_coh901318_chan(chan); 1613 struct coh901318_chan *cohc = to_coh901318_chan(chan);
1614 struct coh901318_desc *cohd; 1614 struct coh901318_desc *cohd;
1615 struct list_head *pos; 1615 struct list_head *pos;
1616 unsigned long flags; 1616 unsigned long flags;
1617 u32 left = 0; 1617 u32 left = 0;
1618 int i = 0; 1618 int i = 0;
1619 1619
1620 spin_lock_irqsave(&cohc->lock, flags); 1620 spin_lock_irqsave(&cohc->lock, flags);
1621 1621
	/*
	 * If there are several queued jobs, we iterate over them all and
	 * add up their sizes. The first job gets special treatment, since
	 * it is probably the one currently active on the hardware.
	 */
1627 list_for_each(pos, &cohc->active) { 1627 list_for_each(pos, &cohc->active) {
1628 /* 1628 /*
1629 * The first job in the list will be working on the 1629 * The first job in the list will be working on the
1630 * hardware. The job can be stopped but still active, 1630 * hardware. The job can be stopped but still active,
1631 * so that the transfer counter is somewhere inside 1631 * so that the transfer counter is somewhere inside
1632 * the buffer. 1632 * the buffer.
1633 */ 1633 */
1634 cohd = list_entry(pos, struct coh901318_desc, node); 1634 cohd = list_entry(pos, struct coh901318_desc, node);
1635 1635
1636 if (i == 0) { 1636 if (i == 0) {
1637 struct coh901318_lli *lli; 1637 struct coh901318_lli *lli;
1638 dma_addr_t ladd; 1638 dma_addr_t ladd;
1639 1639
1640 /* Read current transfer count value */ 1640 /* Read current transfer count value */
1641 left = readl(cohc->base->virtbase + 1641 left = readl(cohc->base->virtbase +
1642 COH901318_CX_CTRL + 1642 COH901318_CX_CTRL +
1643 COH901318_CX_CTRL_SPACING * cohc->id) & 1643 COH901318_CX_CTRL_SPACING * cohc->id) &
1644 COH901318_CX_CTRL_TC_VALUE_MASK; 1644 COH901318_CX_CTRL_TC_VALUE_MASK;
1645 1645
1646 /* See if the transfer is linked... */ 1646 /* See if the transfer is linked... */
1647 ladd = readl(cohc->base->virtbase + 1647 ladd = readl(cohc->base->virtbase +
1648 COH901318_CX_LNK_ADDR + 1648 COH901318_CX_LNK_ADDR +
1649 COH901318_CX_LNK_ADDR_SPACING * 1649 COH901318_CX_LNK_ADDR_SPACING *
1650 cohc->id) & 1650 cohc->id) &
1651 ~COH901318_CX_LNK_LINK_IMMEDIATE; 1651 ~COH901318_CX_LNK_LINK_IMMEDIATE;
1652 /* Single transaction */ 1652 /* Single transaction */
1653 if (!ladd) 1653 if (!ladd)
1654 continue; 1654 continue;
1655 1655
1656 /* 1656 /*
1657 * Linked transaction, follow the lli, find the 1657 * Linked transaction, follow the lli, find the
1658 * currently processing lli, and proceed to the next 1658 * currently processing lli, and proceed to the next
1659 */ 1659 */
1660 lli = cohd->lli; 1660 lli = cohd->lli;
1661 while (lli && lli->link_addr != ladd) 1661 while (lli && lli->link_addr != ladd)
1662 lli = lli->virt_link_addr; 1662 lli = lli->virt_link_addr;
1663 1663
1664 if (lli) 1664 if (lli)
1665 lli = lli->virt_link_addr; 1665 lli = lli->virt_link_addr;
1666 1666
1667 /* 1667 /*
1668 * Follow remaining lli links around to count the total 1668 * Follow remaining lli links around to count the total
1669 * number of bytes left 1669 * number of bytes left
1670 */ 1670 */
1671 left += coh901318_get_bytes_in_lli(lli); 1671 left += coh901318_get_bytes_in_lli(lli);
1672 } else { 1672 } else {
1673 left += coh901318_get_bytes_in_lli(cohd->lli); 1673 left += coh901318_get_bytes_in_lli(cohd->lli);
1674 } 1674 }
1675 i++; 1675 i++;
1676 } 1676 }
1677 1677
1678 /* Also count bytes in the queued jobs */ 1678 /* Also count bytes in the queued jobs */
1679 list_for_each(pos, &cohc->queue) { 1679 list_for_each(pos, &cohc->queue) {
1680 cohd = list_entry(pos, struct coh901318_desc, node); 1680 cohd = list_entry(pos, struct coh901318_desc, node);
1681 left += coh901318_get_bytes_in_lli(cohd->lli); 1681 left += coh901318_get_bytes_in_lli(cohd->lli);
1682 } 1682 }
1683 1683
1684 spin_unlock_irqrestore(&cohc->lock, flags); 1684 spin_unlock_irqrestore(&cohc->lock, flags);
1685 1685
1686 return left; 1686 return left;
1687 } 1687 }
1688 1688
1689 /* 1689 /*
1690 * Pauses a transfer without losing data. Enables power save. 1690 * Pauses a transfer without losing data. Enables power save.
1691 * Use this function in conjunction with coh901318_resume. 1691 * Use this function in conjunction with coh901318_resume.
1692 */ 1692 */
1693 static void coh901318_pause(struct dma_chan *chan) 1693 static void coh901318_pause(struct dma_chan *chan)
1694 { 1694 {
1695 u32 val; 1695 u32 val;
1696 unsigned long flags; 1696 unsigned long flags;
1697 struct coh901318_chan *cohc = to_coh901318_chan(chan); 1697 struct coh901318_chan *cohc = to_coh901318_chan(chan);
1698 int channel = cohc->id; 1698 int channel = cohc->id;
1699 void __iomem *virtbase = cohc->base->virtbase; 1699 void __iomem *virtbase = cohc->base->virtbase;
1700 1700
1701 spin_lock_irqsave(&cohc->lock, flags); 1701 spin_lock_irqsave(&cohc->lock, flags);
1702 1702
1703 /* Disable channel in HW */ 1703 /* Disable channel in HW */
1704 val = readl(virtbase + COH901318_CX_CFG + 1704 val = readl(virtbase + COH901318_CX_CFG +
1705 COH901318_CX_CFG_SPACING * channel); 1705 COH901318_CX_CFG_SPACING * channel);
1706 1706
1707 /* Stopping infinite transfer */ 1707 /* Stopping infinite transfer */
1708 if ((val & COH901318_CX_CTRL_TC_ENABLE) == 0 && 1708 if ((val & COH901318_CX_CTRL_TC_ENABLE) == 0 &&
1709 (val & COH901318_CX_CFG_CH_ENABLE)) 1709 (val & COH901318_CX_CFG_CH_ENABLE))
1710 cohc->stopped = 1; 1710 cohc->stopped = 1;
1711 1711
1712 1712
1713 val &= ~COH901318_CX_CFG_CH_ENABLE; 1713 val &= ~COH901318_CX_CFG_CH_ENABLE;
	/* Write the disable twice, HW bug workaround */
1715 writel(val, virtbase + COH901318_CX_CFG + 1715 writel(val, virtbase + COH901318_CX_CFG +
1716 COH901318_CX_CFG_SPACING * channel); 1716 COH901318_CX_CFG_SPACING * channel);
1717 writel(val, virtbase + COH901318_CX_CFG + 1717 writel(val, virtbase + COH901318_CX_CFG +
1718 COH901318_CX_CFG_SPACING * channel); 1718 COH901318_CX_CFG_SPACING * channel);
1719 1719
1720 /* Spin-wait for it to actually go inactive */ 1720 /* Spin-wait for it to actually go inactive */
1721 while (readl(virtbase + COH901318_CX_STAT+COH901318_CX_STAT_SPACING * 1721 while (readl(virtbase + COH901318_CX_STAT+COH901318_CX_STAT_SPACING *
1722 channel) & COH901318_CX_STAT_ACTIVE) 1722 channel) & COH901318_CX_STAT_ACTIVE)
1723 cpu_relax(); 1723 cpu_relax();
1724 1724
1725 /* Check if we stopped an active job */ 1725 /* Check if we stopped an active job */
1726 if ((readl(virtbase + COH901318_CX_CTRL+COH901318_CX_CTRL_SPACING * 1726 if ((readl(virtbase + COH901318_CX_CTRL+COH901318_CX_CTRL_SPACING *
1727 channel) & COH901318_CX_CTRL_TC_VALUE_MASK) > 0) 1727 channel) & COH901318_CX_CTRL_TC_VALUE_MASK) > 0)
1728 cohc->stopped = 1; 1728 cohc->stopped = 1;
1729 1729
1730 enable_powersave(cohc); 1730 enable_powersave(cohc);
1731 1731
1732 spin_unlock_irqrestore(&cohc->lock, flags); 1732 spin_unlock_irqrestore(&cohc->lock, flags);
1733 } 1733 }
1734 1734
/*
 * Resumes a transfer that has been paused via coh901318_pause().
 * Power save is handled.
 */
1738 static void coh901318_resume(struct dma_chan *chan) 1738 static void coh901318_resume(struct dma_chan *chan)
1739 { 1739 {
1740 u32 val; 1740 u32 val;
1741 unsigned long flags; 1741 unsigned long flags;
1742 struct coh901318_chan *cohc = to_coh901318_chan(chan); 1742 struct coh901318_chan *cohc = to_coh901318_chan(chan);
1743 int channel = cohc->id; 1743 int channel = cohc->id;
1744 1744
1745 spin_lock_irqsave(&cohc->lock, flags); 1745 spin_lock_irqsave(&cohc->lock, flags);
1746 1746
1747 disable_powersave(cohc); 1747 disable_powersave(cohc);
1748 1748
1749 if (cohc->stopped) { 1749 if (cohc->stopped) {
1750 /* Enable channel in HW */ 1750 /* Enable channel in HW */
1751 val = readl(cohc->base->virtbase + COH901318_CX_CFG + 1751 val = readl(cohc->base->virtbase + COH901318_CX_CFG +
1752 COH901318_CX_CFG_SPACING * channel); 1752 COH901318_CX_CFG_SPACING * channel);
1753 1753
1754 val |= COH901318_CX_CFG_CH_ENABLE; 1754 val |= COH901318_CX_CFG_CH_ENABLE;
1755 1755
1756 writel(val, cohc->base->virtbase + COH901318_CX_CFG + 1756 writel(val, cohc->base->virtbase + COH901318_CX_CFG +
1757 COH901318_CX_CFG_SPACING*channel); 1757 COH901318_CX_CFG_SPACING*channel);
1758 1758
1759 cohc->stopped = 0; 1759 cohc->stopped = 0;
1760 } 1760 }
1761 1761
1762 spin_unlock_irqrestore(&cohc->lock, flags); 1762 spin_unlock_irqrestore(&cohc->lock, flags);
1763 } 1763 }
1764 1764
1765 bool coh901318_filter_id(struct dma_chan *chan, void *chan_id) 1765 bool coh901318_filter_id(struct dma_chan *chan, void *chan_id)
1766 { 1766 {
1767 unsigned int ch_nr = (unsigned int) chan_id; 1767 unsigned int ch_nr = (unsigned int) chan_id;
1768 1768
1769 if (ch_nr == to_coh901318_chan(chan)->id) 1769 if (ch_nr == to_coh901318_chan(chan)->id)
1770 return true; 1770 return true;
1771 1771
1772 return false; 1772 return false;
1773 } 1773 }
1774 EXPORT_SYMBOL(coh901318_filter_id); 1774 EXPORT_SYMBOL(coh901318_filter_id);
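/*
 * A minimal sketch of how a non-DT client might use the exported filter
 * to grab a specific physical channel (the channel define is one of the
 * U300_DMA_* numbers from the table above):
 *
 *	struct dma_chan *chan;
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, coh901318_filter_id,
 *				   (void *) U300_DMA_SPI_TX);
 */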
1775 1775
1776 struct coh901318_filter_args { 1776 struct coh901318_filter_args {
1777 struct coh901318_base *base; 1777 struct coh901318_base *base;
1778 unsigned int ch_nr; 1778 unsigned int ch_nr;
1779 }; 1779 };
1780 1780
1781 static bool coh901318_filter_base_and_id(struct dma_chan *chan, void *data) 1781 static bool coh901318_filter_base_and_id(struct dma_chan *chan, void *data)
1782 { 1782 {
1783 struct coh901318_filter_args *args = data; 1783 struct coh901318_filter_args *args = data;
1784 1784
1785 if (&args->base->dma_slave == chan->device && 1785 if (&args->base->dma_slave == chan->device &&
1786 args->ch_nr == to_coh901318_chan(chan)->id) 1786 args->ch_nr == to_coh901318_chan(chan)->id)
1787 return true; 1787 return true;
1788 1788
1789 return false; 1789 return false;
1790 } 1790 }
1791 1791
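/*
 * Translate a device tree DMA specifier into a channel: the single cell
 * following the phandle is taken as the physical channel number and
 * matched against this controller's slave channels. A consumer node
 * would reference it roughly like this (sketch, node label assumed):
 *
 *	dmas = <&dmac U300_DMA_SPI_TX>;
 *	dma-names = "tx";
 */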
1792 static struct dma_chan *coh901318_xlate(struct of_phandle_args *dma_spec, 1792 static struct dma_chan *coh901318_xlate(struct of_phandle_args *dma_spec,
1793 struct of_dma *ofdma) 1793 struct of_dma *ofdma)
1794 { 1794 {
1795 struct coh901318_filter_args args = { 1795 struct coh901318_filter_args args = {
1796 .base = ofdma->of_dma_data, 1796 .base = ofdma->of_dma_data,
1797 .ch_nr = dma_spec->args[0], 1797 .ch_nr = dma_spec->args[0],
1798 }; 1798 };
1799 dma_cap_mask_t cap; 1799 dma_cap_mask_t cap;
1800 dma_cap_zero(cap); 1800 dma_cap_zero(cap);
1801 dma_cap_set(DMA_SLAVE, cap); 1801 dma_cap_set(DMA_SLAVE, cap);
1802 1802
1803 return dma_request_channel(cap, coh901318_filter_base_and_id, &args); 1803 return dma_request_channel(cap, coh901318_filter_base_and_id, &args);
1804 } 1804 }
1805 /* 1805 /*
1806 * DMA channel allocation 1806 * DMA channel allocation
1807 */ 1807 */
1808 static int coh901318_config(struct coh901318_chan *cohc, 1808 static int coh901318_config(struct coh901318_chan *cohc,
1809 struct coh901318_params *param) 1809 struct coh901318_params *param)
1810 { 1810 {
1811 unsigned long flags; 1811 unsigned long flags;
1812 const struct coh901318_params *p; 1812 const struct coh901318_params *p;
1813 int channel = cohc->id; 1813 int channel = cohc->id;
1814 void __iomem *virtbase = cohc->base->virtbase; 1814 void __iomem *virtbase = cohc->base->virtbase;
1815 1815
1816 spin_lock_irqsave(&cohc->lock, flags); 1816 spin_lock_irqsave(&cohc->lock, flags);
1817 1817
1818 if (param) 1818 if (param)
1819 p = param; 1819 p = param;
1820 else 1820 else
1821 p = cohc_chan_param(cohc); 1821 p = cohc_chan_param(cohc);
1822 1822
1823 /* Clear any pending BE or TC interrupt */ 1823 /* Clear any pending BE or TC interrupt */
1824 if (channel < 32) { 1824 if (channel < 32) {
1825 writel(1 << channel, virtbase + COH901318_BE_INT_CLEAR1); 1825 writel(1 << channel, virtbase + COH901318_BE_INT_CLEAR1);
1826 writel(1 << channel, virtbase + COH901318_TC_INT_CLEAR1); 1826 writel(1 << channel, virtbase + COH901318_TC_INT_CLEAR1);
1827 } else { 1827 } else {
1828 writel(1 << (channel - 32), virtbase + 1828 writel(1 << (channel - 32), virtbase +
1829 COH901318_BE_INT_CLEAR2); 1829 COH901318_BE_INT_CLEAR2);
1830 writel(1 << (channel - 32), virtbase + 1830 writel(1 << (channel - 32), virtbase +
1831 COH901318_TC_INT_CLEAR2); 1831 COH901318_TC_INT_CLEAR2);
1832 } 1832 }
1833 1833
1834 coh901318_set_conf(cohc, p->config); 1834 coh901318_set_conf(cohc, p->config);
1835 coh901318_set_ctrl(cohc, p->ctrl_lli_last); 1835 coh901318_set_ctrl(cohc, p->ctrl_lli_last);
1836 1836
1837 spin_unlock_irqrestore(&cohc->lock, flags); 1837 spin_unlock_irqrestore(&cohc->lock, flags);
1838 1838
1839 return 0; 1839 return 0;
1840 } 1840 }
1841 1841
/*
 * Caller must hold the channel lock.
 * Starts the first queued job, if any.
 * TODO: start all queued jobs in one go
 *
 * Returns the descriptor of the started job, or NULL if the queue
 * is empty.
 */
1849 static struct coh901318_desc *coh901318_queue_start(struct coh901318_chan *cohc) 1849 static struct coh901318_desc *coh901318_queue_start(struct coh901318_chan *cohc)
1850 { 1850 {
1851 struct coh901318_desc *cohd; 1851 struct coh901318_desc *cohd;
1852 1852
1853 /* 1853 /*
1854 * start queued jobs, if any 1854 * start queued jobs, if any
1855 * TODO: transmit all queued jobs in one go 1855 * TODO: transmit all queued jobs in one go
1856 */ 1856 */
1857 cohd = coh901318_first_queued(cohc); 1857 cohd = coh901318_first_queued(cohc);
1858 1858
1859 if (cohd != NULL) { 1859 if (cohd != NULL) {
1860 /* Remove from queue */ 1860 /* Remove from queue */
1861 coh901318_desc_remove(cohd); 1861 coh901318_desc_remove(cohd);
1862 /* initiate DMA job */ 1862 /* initiate DMA job */
1863 cohc->busy = 1; 1863 cohc->busy = 1;
1864 1864
1865 coh901318_desc_submit(cohc, cohd); 1865 coh901318_desc_submit(cohc, cohd);
1866 1866
1867 /* Program the transaction head */ 1867 /* Program the transaction head */
1868 coh901318_set_conf(cohc, cohd->head_config); 1868 coh901318_set_conf(cohc, cohd->head_config);
1869 coh901318_set_ctrl(cohc, cohd->head_ctrl); 1869 coh901318_set_ctrl(cohc, cohd->head_ctrl);
1870 coh901318_prep_linked_list(cohc, cohd->lli); 1870 coh901318_prep_linked_list(cohc, cohd->lli);
1871 1871
1872 /* start dma job on this channel */ 1872 /* start dma job on this channel */
1873 coh901318_start(cohc); 1873 coh901318_start(cohc);
1874 1874
1875 } 1875 }
1876 1876
1877 return cohd; 1877 return cohd;
1878 } 1878 }
1879 1879
1880 /* 1880 /*
1881 * This tasklet is called from the interrupt handler to 1881 * This tasklet is called from the interrupt handler to
1882 * handle each descriptor (DMA job) that is sent to a channel. 1882 * handle each descriptor (DMA job) that is sent to a channel.
1883 */ 1883 */
1884 static void dma_tasklet(unsigned long data) 1884 static void dma_tasklet(unsigned long data)
1885 { 1885 {
1886 struct coh901318_chan *cohc = (struct coh901318_chan *) data; 1886 struct coh901318_chan *cohc = (struct coh901318_chan *) data;
1887 struct coh901318_desc *cohd_fin; 1887 struct coh901318_desc *cohd_fin;
1888 unsigned long flags; 1888 unsigned long flags;
1889 dma_async_tx_callback callback; 1889 dma_async_tx_callback callback;
1890 void *callback_param; 1890 void *callback_param;
1891 1891
1892 dev_vdbg(COHC_2_DEV(cohc), "[%s] chan_id %d" 1892 dev_vdbg(COHC_2_DEV(cohc), "[%s] chan_id %d"
1893 " nbr_active_done %ld\n", __func__, 1893 " nbr_active_done %ld\n", __func__,
1894 cohc->id, cohc->nbr_active_done); 1894 cohc->id, cohc->nbr_active_done);
1895 1895
1896 spin_lock_irqsave(&cohc->lock, flags); 1896 spin_lock_irqsave(&cohc->lock, flags);
1897 1897
1898 /* get first active descriptor entry from list */ 1898 /* get first active descriptor entry from list */
1899 cohd_fin = coh901318_first_active_get(cohc); 1899 cohd_fin = coh901318_first_active_get(cohc);
1900 1900
1901 if (cohd_fin == NULL) 1901 if (cohd_fin == NULL)
1902 goto err; 1902 goto err;
1903 1903
1904 /* locate callback to client */ 1904 /* locate callback to client */
1905 callback = cohd_fin->desc.callback; 1905 callback = cohd_fin->desc.callback;
1906 callback_param = cohd_fin->desc.callback_param; 1906 callback_param = cohd_fin->desc.callback_param;
1907 1907
1908 /* sign this job as completed on the channel */ 1908 /* sign this job as completed on the channel */
1909 dma_cookie_complete(&cohd_fin->desc); 1909 dma_cookie_complete(&cohd_fin->desc);
1910 1910
1911 /* release the lli allocation and remove the descriptor */ 1911 /* release the lli allocation and remove the descriptor */
1912 coh901318_lli_free(&cohc->base->pool, &cohd_fin->lli); 1912 coh901318_lli_free(&cohc->base->pool, &cohd_fin->lli);
1913 1913
1914 /* return desc to free-list */ 1914 /* return desc to free-list */
1915 coh901318_desc_remove(cohd_fin); 1915 coh901318_desc_remove(cohd_fin);
1916 coh901318_desc_free(cohc, cohd_fin); 1916 coh901318_desc_free(cohc, cohd_fin);
1917 1917
1918 spin_unlock_irqrestore(&cohc->lock, flags); 1918 spin_unlock_irqrestore(&cohc->lock, flags);
1919 1919
1920 /* Call the callback when we're done */ 1920 /* Call the callback when we're done */
1921 if (callback) 1921 if (callback)
1922 callback(callback_param); 1922 callback(callback_param);
1923 1923
1924 spin_lock_irqsave(&cohc->lock, flags); 1924 spin_lock_irqsave(&cohc->lock, flags);
1925 1925
	/*
	 * If another interrupt fired while this tasklet was being
	 * scheduled, the tasklet does not get called twice. Therefore
	 * nbr_active_done keeps track of how many IRQs are still expected
	 * to be handled for this channel; if more than one remains to be
	 * acked, we simply schedule this tasklet again.
	 */
1933 cohc->nbr_active_done--; 1933 cohc->nbr_active_done--;
1934 if (cohc->nbr_active_done) { 1934 if (cohc->nbr_active_done) {
1935 dev_dbg(COHC_2_DEV(cohc), "scheduling tasklet again, new IRQs " 1935 dev_dbg(COHC_2_DEV(cohc), "scheduling tasklet again, new IRQs "
1936 "came in while we were scheduling this tasklet\n"); 1936 "came in while we were scheduling this tasklet\n");
1937 if (cohc_chan_conf(cohc)->priority_high) 1937 if (cohc_chan_conf(cohc)->priority_high)
1938 tasklet_hi_schedule(&cohc->tasklet); 1938 tasklet_hi_schedule(&cohc->tasklet);
1939 else 1939 else
1940 tasklet_schedule(&cohc->tasklet); 1940 tasklet_schedule(&cohc->tasklet);
1941 } 1941 }
1942 1942
1943 spin_unlock_irqrestore(&cohc->lock, flags); 1943 spin_unlock_irqrestore(&cohc->lock, flags);
1944 1944
1945 return; 1945 return;
1946 1946
1947 err: 1947 err:
1948 spin_unlock_irqrestore(&cohc->lock, flags); 1948 spin_unlock_irqrestore(&cohc->lock, flags);
1949 dev_err(COHC_2_DEV(cohc), "[%s] No active dma desc\n", __func__); 1949 dev_err(COHC_2_DEV(cohc), "[%s] No active dma desc\n", __func__);
1950 } 1950 }
1951 1951
1952 1952
1953 /* called from interrupt context */ 1953 /* called from interrupt context */
1954 static void dma_tc_handle(struct coh901318_chan *cohc) 1954 static void dma_tc_handle(struct coh901318_chan *cohc)
1955 { 1955 {
1956 /* 1956 /*
1957 * If the channel is not allocated, then we shouldn't have 1957 * If the channel is not allocated, then we shouldn't have
1958 * any TC interrupts on it. 1958 * any TC interrupts on it.
1959 */ 1959 */
1960 if (!cohc->allocated) { 1960 if (!cohc->allocated) {
1961 dev_err(COHC_2_DEV(cohc), "spurious interrupt from " 1961 dev_err(COHC_2_DEV(cohc), "spurious interrupt from "
1962 "unallocated channel\n"); 1962 "unallocated channel\n");
1963 return; 1963 return;
1964 } 1964 }
1965 1965
1966 spin_lock(&cohc->lock); 1966 spin_lock(&cohc->lock);
1967 1967
	/*
	 * When we reach this point, at least one queue item should have
	 * been moved over from cohc->queue to cohc->active and run to
	 * completion; that is why we are getting a terminal count
	 * interrupt. If this BUG() triggers, the most probable cause is
	 * that the individual nodes in the lli chain have IRQ enabled,
	 * so check your platform config for lli chain ctrl.
	 */
1977 BUG_ON(list_empty(&cohc->active)); 1977 BUG_ON(list_empty(&cohc->active));
1978 1978
1979 cohc->nbr_active_done++; 1979 cohc->nbr_active_done++;
1980 1980
	/*
	 * This attempts to take a job from cohc->queue, put it
	 * into cohc->active and start it.
	 */
1985 if (coh901318_queue_start(cohc) == NULL) 1985 if (coh901318_queue_start(cohc) == NULL)
1986 cohc->busy = 0; 1986 cohc->busy = 0;
1987 1987
1988 spin_unlock(&cohc->lock); 1988 spin_unlock(&cohc->lock);
1989 1989
	/*
	 * This tasklet will remove items from cohc->active
	 * and thus terminate them.
	 */
1994 if (cohc_chan_conf(cohc)->priority_high) 1994 if (cohc_chan_conf(cohc)->priority_high)
1995 tasklet_hi_schedule(&cohc->tasklet); 1995 tasklet_hi_schedule(&cohc->tasklet);
1996 else 1996 else
1997 tasklet_schedule(&cohc->tasklet); 1997 tasklet_schedule(&cohc->tasklet);
1998 } 1998 }
1999 1999
2000 2000
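/*
 * Main interrupt handler: reads the two interrupt status registers
 * (channels 0-31 and 32-63), and for every pending channel bit checks
 * whether it signals a bus error or a terminal count, clears the
 * source, and for terminal count hands the channel to dma_tc_handle(),
 * which schedules the per-channel tasklet.
 */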
2001 static irqreturn_t dma_irq_handler(int irq, void *dev_id) 2001 static irqreturn_t dma_irq_handler(int irq, void *dev_id)
2002 { 2002 {
2003 u32 status1; 2003 u32 status1;
2004 u32 status2; 2004 u32 status2;
2005 int i; 2005 int i;
2006 int ch; 2006 int ch;
2007 struct coh901318_base *base = dev_id; 2007 struct coh901318_base *base = dev_id;
2008 struct coh901318_chan *cohc; 2008 struct coh901318_chan *cohc;
2009 void __iomem *virtbase = base->virtbase; 2009 void __iomem *virtbase = base->virtbase;
2010 2010
2011 status1 = readl(virtbase + COH901318_INT_STATUS1); 2011 status1 = readl(virtbase + COH901318_INT_STATUS1);
2012 status2 = readl(virtbase + COH901318_INT_STATUS2); 2012 status2 = readl(virtbase + COH901318_INT_STATUS2);
2013 2013
2014 if (unlikely(status1 == 0 && status2 == 0)) { 2014 if (unlikely(status1 == 0 && status2 == 0)) {
2015 dev_warn(base->dev, "spurious DMA IRQ from no channel!\n"); 2015 dev_warn(base->dev, "spurious DMA IRQ from no channel!\n");
2016 return IRQ_HANDLED; 2016 return IRQ_HANDLED;
2017 } 2017 }
2018 2018
	/* TODO: consider handling the IRQ in a tasklet here to
	 * minimize interrupt latency */
2021 2021
2022 /* Check the first 32 DMA channels for IRQ */ 2022 /* Check the first 32 DMA channels for IRQ */
2023 while (status1) { 2023 while (status1) {
2024 /* Find first bit set, return as a number. */ 2024 /* Find first bit set, return as a number. */
2025 i = ffs(status1) - 1; 2025 i = ffs(status1) - 1;
2026 ch = i; 2026 ch = i;
2027 2027
2028 cohc = &base->chans[ch]; 2028 cohc = &base->chans[ch];
2029 spin_lock(&cohc->lock); 2029 spin_lock(&cohc->lock);
2030 2030
2031 /* Mask off this bit */ 2031 /* Mask off this bit */
2032 status1 &= ~(1 << i); 2032 status1 &= ~(1 << i);
2033 /* Check the individual channel bits */ 2033 /* Check the individual channel bits */
2034 if (test_bit(i, virtbase + COH901318_BE_INT_STATUS1)) { 2034 if (test_bit(i, virtbase + COH901318_BE_INT_STATUS1)) {
2035 dev_crit(COHC_2_DEV(cohc), 2035 dev_crit(COHC_2_DEV(cohc),
2036 "DMA bus error on channel %d!\n", ch); 2036 "DMA bus error on channel %d!\n", ch);
2037 BUG_ON(1); 2037 BUG_ON(1);
2038 /* Clear BE interrupt */ 2038 /* Clear BE interrupt */
2039 __set_bit(i, virtbase + COH901318_BE_INT_CLEAR1); 2039 __set_bit(i, virtbase + COH901318_BE_INT_CLEAR1);
2040 } else { 2040 } else {
2041 /* Caused by TC, really? */ 2041 /* Caused by TC, really? */
2042 if (unlikely(!test_bit(i, virtbase + 2042 if (unlikely(!test_bit(i, virtbase +
2043 COH901318_TC_INT_STATUS1))) { 2043 COH901318_TC_INT_STATUS1))) {
2044 dev_warn(COHC_2_DEV(cohc), 2044 dev_warn(COHC_2_DEV(cohc),
2045 "ignoring interrupt not caused by terminal count on channel %d\n", ch); 2045 "ignoring interrupt not caused by terminal count on channel %d\n", ch);
2046 /* Clear TC interrupt */ 2046 /* Clear TC interrupt */
2047 BUG_ON(1); 2047 BUG_ON(1);
2048 __set_bit(i, virtbase + COH901318_TC_INT_CLEAR1); 2048 __set_bit(i, virtbase + COH901318_TC_INT_CLEAR1);
2049 } else { 2049 } else {
2050 /* Enable powersave if transfer has finished */ 2050 /* Enable powersave if transfer has finished */
2051 if (!(readl(virtbase + COH901318_CX_STAT + 2051 if (!(readl(virtbase + COH901318_CX_STAT +
2052 COH901318_CX_STAT_SPACING*ch) & 2052 COH901318_CX_STAT_SPACING*ch) &
2053 COH901318_CX_STAT_ENABLED)) { 2053 COH901318_CX_STAT_ENABLED)) {
2054 enable_powersave(cohc); 2054 enable_powersave(cohc);
2055 } 2055 }
2056 2056
				/* Must clear the TC interrupt before calling
				 * dma_tc_handle, in case tc_handle initiates
				 * a new dma job
				 */
2061 __set_bit(i, virtbase + COH901318_TC_INT_CLEAR1); 2061 __set_bit(i, virtbase + COH901318_TC_INT_CLEAR1);
2062 2062
2063 dma_tc_handle(cohc); 2063 dma_tc_handle(cohc);
2064 } 2064 }
2065 } 2065 }
2066 spin_unlock(&cohc->lock); 2066 spin_unlock(&cohc->lock);
2067 } 2067 }
2068 2068
2069 /* Check the remaining 32 DMA channels for IRQ */ 2069 /* Check the remaining 32 DMA channels for IRQ */
2070 while (status2) { 2070 while (status2) {
2071 /* Find first bit set, return as a number. */ 2071 /* Find first bit set, return as a number. */
2072 i = ffs(status2) - 1; 2072 i = ffs(status2) - 1;
2073 ch = i + 32; 2073 ch = i + 32;
2074 cohc = &base->chans[ch]; 2074 cohc = &base->chans[ch];
2075 spin_lock(&cohc->lock); 2075 spin_lock(&cohc->lock);
2076 2076
2077 /* Mask off this bit */ 2077 /* Mask off this bit */
2078 status2 &= ~(1 << i); 2078 status2 &= ~(1 << i);
2079 /* Check the individual channel bits */ 2079 /* Check the individual channel bits */
2080 if (test_bit(i, virtbase + COH901318_BE_INT_STATUS2)) { 2080 if (test_bit(i, virtbase + COH901318_BE_INT_STATUS2)) {
2081 dev_crit(COHC_2_DEV(cohc), 2081 dev_crit(COHC_2_DEV(cohc),
2082 "DMA bus error on channel %d!\n", ch); 2082 "DMA bus error on channel %d!\n", ch);
2083 /* Clear BE interrupt */ 2083 /* Clear BE interrupt */
2084 BUG_ON(1); 2084 BUG_ON(1);
2085 __set_bit(i, virtbase + COH901318_BE_INT_CLEAR2); 2085 __set_bit(i, virtbase + COH901318_BE_INT_CLEAR2);
2086 } else { 2086 } else {
2087 /* Caused by TC, really? */ 2087 /* Caused by TC, really? */
2088 if (unlikely(!test_bit(i, virtbase + 2088 if (unlikely(!test_bit(i, virtbase +
2089 COH901318_TC_INT_STATUS2))) { 2089 COH901318_TC_INT_STATUS2))) {
2090 dev_warn(COHC_2_DEV(cohc), 2090 dev_warn(COHC_2_DEV(cohc),
2091 "ignoring interrupt not caused by terminal count on channel %d\n", ch); 2091 "ignoring interrupt not caused by terminal count on channel %d\n", ch);
2092 /* Clear TC interrupt */ 2092 /* Clear TC interrupt */
2093 __set_bit(i, virtbase + COH901318_TC_INT_CLEAR2); 2093 __set_bit(i, virtbase + COH901318_TC_INT_CLEAR2);
2094 BUG_ON(1); 2094 BUG_ON(1);
2095 } else { 2095 } else {
2096 /* Enable powersave if transfer has finished */ 2096 /* Enable powersave if transfer has finished */
2097 if (!(readl(virtbase + COH901318_CX_STAT + 2097 if (!(readl(virtbase + COH901318_CX_STAT +
2098 COH901318_CX_STAT_SPACING*ch) & 2098 COH901318_CX_STAT_SPACING*ch) &
2099 COH901318_CX_STAT_ENABLED)) { 2099 COH901318_CX_STAT_ENABLED)) {
2100 enable_powersave(cohc); 2100 enable_powersave(cohc);
2101 } 2101 }
2102 /* Must clear TC interrupt before calling 2102 /* Must clear TC interrupt before calling
2103 * dma_tc_handle 2103 * dma_tc_handle
2104 * in case tc_handle initiates a new dma job 2104 * in case tc_handle initiates a new dma job
2105 */ 2105 */
2106 __set_bit(i, virtbase + COH901318_TC_INT_CLEAR2); 2106 __set_bit(i, virtbase + COH901318_TC_INT_CLEAR2);
2107 2107
2108 dma_tc_handle(cohc); 2108 dma_tc_handle(cohc);
2109 } 2109 }
2110 } 2110 }
2111 spin_unlock(&cohc->lock); 2111 spin_unlock(&cohc->lock);
2112 } 2112 }
2113 2113
2114 return IRQ_HANDLED; 2114 return IRQ_HANDLED;
2115 } 2115 }
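
The interrupt handler above walks each 32-bit status word with ffs(): find the lowest pending bit, service that channel, clear the bit, repeat. A minimal standalone sketch of just that pattern (plain user-space C, not kernel code; handle_channel() is a stand-in for the real per-channel work):

	#include <stdio.h>
	#include <strings.h>	/* ffs() */

	static void handle_channel(int ch)
	{
		printf("servicing DMA channel %d\n", ch);
	}

	static void walk_status(unsigned int status, int base_channel)
	{
		while (status) {
			int i = ffs((int)status) - 1;	/* lowest pending bit */

			status &= ~(1u << i);		/* mask it off */
			handle_channel(base_channel + i);
		}
	}

	int main(void)
	{
		walk_status(0x00000005, 0);	/* channels 0 and 2 pending */
		walk_status(0x80000000, 32);	/* channel 63 pending */
		return 0;
	}

The second status word starts at channel 32, which is why the driver passes `i + 32` for it.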
2116 2116
2117 static int coh901318_alloc_chan_resources(struct dma_chan *chan) 2117 static int coh901318_alloc_chan_resources(struct dma_chan *chan)
2118 { 2118 {
2119 struct coh901318_chan *cohc = to_coh901318_chan(chan); 2119 struct coh901318_chan *cohc = to_coh901318_chan(chan);
2120 unsigned long flags; 2120 unsigned long flags;
2121 2121
2122 dev_vdbg(COHC_2_DEV(cohc), "[%s] DMA channel %d\n", 2122 dev_vdbg(COHC_2_DEV(cohc), "[%s] DMA channel %d\n",
2123 __func__, cohc->id); 2123 __func__, cohc->id);
2124 2124
2125 if (chan->client_count > 1) 2125 if (chan->client_count > 1)
2126 return -EBUSY; 2126 return -EBUSY;
2127 2127
2128 spin_lock_irqsave(&cohc->lock, flags); 2128 spin_lock_irqsave(&cohc->lock, flags);
2129 2129
2130 coh901318_config(cohc, NULL); 2130 coh901318_config(cohc, NULL);
2131 2131
2132 cohc->allocated = 1; 2132 cohc->allocated = 1;
2133 dma_cookie_init(chan); 2133 dma_cookie_init(chan);
2134 2134
2135 spin_unlock_irqrestore(&cohc->lock, flags); 2135 spin_unlock_irqrestore(&cohc->lock, flags);
2136 2136
2137 return 1; 2137 return 1;
2138 } 2138 }
2139 2139
2140 static void 2140 static void
2141 coh901318_free_chan_resources(struct dma_chan *chan) 2141 coh901318_free_chan_resources(struct dma_chan *chan)
2142 { 2142 {
2143 struct coh901318_chan *cohc = to_coh901318_chan(chan); 2143 struct coh901318_chan *cohc = to_coh901318_chan(chan);
2144 int channel = cohc->id; 2144 int channel = cohc->id;
2145 unsigned long flags; 2145 unsigned long flags;
2146 2146
2147 spin_lock_irqsave(&cohc->lock, flags); 2147 spin_lock_irqsave(&cohc->lock, flags);
2148 2148
2149 /* Disable HW */ 2149 /* Disable HW */
2150 writel(0x00000000U, cohc->base->virtbase + COH901318_CX_CFG + 2150 writel(0x00000000U, cohc->base->virtbase + COH901318_CX_CFG +
2151 COH901318_CX_CFG_SPACING*channel); 2151 COH901318_CX_CFG_SPACING*channel);
2152 writel(0x00000000U, cohc->base->virtbase + COH901318_CX_CTRL + 2152 writel(0x00000000U, cohc->base->virtbase + COH901318_CX_CTRL +
2153 COH901318_CX_CTRL_SPACING*channel); 2153 COH901318_CX_CTRL_SPACING*channel);
2154 2154
2155 cohc->allocated = 0; 2155 cohc->allocated = 0;
2156 2156
2157 spin_unlock_irqrestore(&cohc->lock, flags); 2157 spin_unlock_irqrestore(&cohc->lock, flags);
2158 2158
2159 chan->device->device_control(chan, DMA_TERMINATE_ALL, 0); 2159 chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
2160 } 2160 }
2161 2161
2162 2162
2163 static dma_cookie_t 2163 static dma_cookie_t
2164 coh901318_tx_submit(struct dma_async_tx_descriptor *tx) 2164 coh901318_tx_submit(struct dma_async_tx_descriptor *tx)
2165 { 2165 {
2166 struct coh901318_desc *cohd = container_of(tx, struct coh901318_desc, 2166 struct coh901318_desc *cohd = container_of(tx, struct coh901318_desc,
2167 desc); 2167 desc);
2168 struct coh901318_chan *cohc = to_coh901318_chan(tx->chan); 2168 struct coh901318_chan *cohc = to_coh901318_chan(tx->chan);
2169 unsigned long flags; 2169 unsigned long flags;
2170 dma_cookie_t cookie; 2170 dma_cookie_t cookie;
2171 2171
2172 spin_lock_irqsave(&cohc->lock, flags); 2172 spin_lock_irqsave(&cohc->lock, flags);
2173 cookie = dma_cookie_assign(tx); 2173 cookie = dma_cookie_assign(tx);
2174 2174
2175 coh901318_desc_queue(cohc, cohd); 2175 coh901318_desc_queue(cohc, cohd);
2176 2176
2177 spin_unlock_irqrestore(&cohc->lock, flags); 2177 spin_unlock_irqrestore(&cohc->lock, flags);
2178 2178
2179 return cookie; 2179 return cookie;
2180 } 2180 }
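
coh901318_tx_submit() above takes the channel lock and calls dma_cookie_assign() before queueing the descriptor. A rough standalone sketch of what that cookie assignment relies on (simplified, not the dmaengine implementation): each submission gets a monotonically increasing, strictly positive cookie, so completion can later be compared against it.

	#include <stdio.h>

	typedef int dma_cookie_t;		/* > 0 means a valid cookie */

	struct chan_sketch {
		dma_cookie_t last_cookie;	/* last cookie handed out */
	};

	static dma_cookie_t assign_cookie(struct chan_sketch *c)
	{
		dma_cookie_t cookie = c->last_cookie + 1;

		if (cookie <= 0)		/* skip 0/negative values on wrap */
			cookie = 1;
		c->last_cookie = cookie;
		return cookie;
	}

	int main(void)
	{
		struct chan_sketch c = { .last_cookie = 0 };
		int i;

		for (i = 0; i < 3; i++)
			printf("cookie %d\n", assign_cookie(&c));	/* 1, 2, 3 */
		return 0;
	}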
2181 2181
2182 static struct dma_async_tx_descriptor * 2182 static struct dma_async_tx_descriptor *
2183 coh901318_prep_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, 2183 coh901318_prep_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
2184 size_t size, unsigned long flags) 2184 size_t size, unsigned long flags)
2185 { 2185 {
2186 struct coh901318_lli *lli; 2186 struct coh901318_lli *lli;
2187 struct coh901318_desc *cohd; 2187 struct coh901318_desc *cohd;
2188 unsigned long flg; 2188 unsigned long flg;
2189 struct coh901318_chan *cohc = to_coh901318_chan(chan); 2189 struct coh901318_chan *cohc = to_coh901318_chan(chan);
2190 int lli_len; 2190 int lli_len;
2191 u32 ctrl_last = cohc_chan_param(cohc)->ctrl_lli_last; 2191 u32 ctrl_last = cohc_chan_param(cohc)->ctrl_lli_last;
2192 int ret; 2192 int ret;
2193 2193
2194 spin_lock_irqsave(&cohc->lock, flg); 2194 spin_lock_irqsave(&cohc->lock, flg);
2195 2195
2196 dev_vdbg(COHC_2_DEV(cohc), 2196 dev_vdbg(COHC_2_DEV(cohc),
2197 "[%s] channel %d src 0x%x dest 0x%x size %d\n", 2197 "[%s] channel %d src 0x%x dest 0x%x size %d\n",
2198 __func__, cohc->id, src, dest, size); 2198 __func__, cohc->id, src, dest, size);
2199 2199
2200 if (flags & DMA_PREP_INTERRUPT) 2200 if (flags & DMA_PREP_INTERRUPT)
2201 /* Trigger interrupt after last lli */ 2201 /* Trigger interrupt after last lli */
2202 ctrl_last |= COH901318_CX_CTRL_TC_IRQ_ENABLE; 2202 ctrl_last |= COH901318_CX_CTRL_TC_IRQ_ENABLE;
2203 2203
2204 lli_len = size >> MAX_DMA_PACKET_SIZE_SHIFT; 2204 lli_len = size >> MAX_DMA_PACKET_SIZE_SHIFT;
2205 if ((lli_len << MAX_DMA_PACKET_SIZE_SHIFT) < size) 2205 if ((lli_len << MAX_DMA_PACKET_SIZE_SHIFT) < size)
2206 lli_len++; 2206 lli_len++;
2207 2207
2208 lli = coh901318_lli_alloc(&cohc->base->pool, lli_len); 2208 lli = coh901318_lli_alloc(&cohc->base->pool, lli_len);
2209 2209
2210 if (lli == NULL) 2210 if (lli == NULL)
2211 goto err; 2211 goto err;
2212 2212
2213 ret = coh901318_lli_fill_memcpy( 2213 ret = coh901318_lli_fill_memcpy(
2214 &cohc->base->pool, lli, src, size, dest, 2214 &cohc->base->pool, lli, src, size, dest,
2215 cohc_chan_param(cohc)->ctrl_lli_chained, 2215 cohc_chan_param(cohc)->ctrl_lli_chained,
2216 ctrl_last); 2216 ctrl_last);
2217 if (ret) 2217 if (ret)
2218 goto err; 2218 goto err;
2219 2219
2220 COH_DBG(coh901318_list_print(cohc, lli)); 2220 COH_DBG(coh901318_list_print(cohc, lli));
2221 2221
2222 /* Pick a descriptor to handle this transfer */ 2222 /* Pick a descriptor to handle this transfer */
2223 cohd = coh901318_desc_get(cohc); 2223 cohd = coh901318_desc_get(cohc);
2224 cohd->lli = lli; 2224 cohd->lli = lli;
2225 cohd->flags = flags; 2225 cohd->flags = flags;
2226 cohd->desc.tx_submit = coh901318_tx_submit; 2226 cohd->desc.tx_submit = coh901318_tx_submit;
2227 2227
2228 spin_unlock_irqrestore(&cohc->lock, flg); 2228 spin_unlock_irqrestore(&cohc->lock, flg);
2229 2229
2230 return &cohd->desc; 2230 return &cohd->desc;
2231 err: 2231 err:
2232 spin_unlock_irqrestore(&cohc->lock, flg); 2232 spin_unlock_irqrestore(&cohc->lock, flg);
2233 return NULL; 2233 return NULL;
2234 } 2234 }
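
The lli_len computation in coh901318_prep_memcpy() above is a shift-based ceiling division: one linked-list item per hardware packet, plus one for any partial trailing packet. A standalone sketch of that calculation (the shift value below is illustrative only, not the driver's real constant):

	#include <stdio.h>
	#include <stddef.h>

	#define PACKET_SIZE_SHIFT 11		/* hypothetical: 2 KiB packets */

	static int lli_count(size_t size)
	{
		int n = size >> PACKET_SIZE_SHIFT;

		if (((size_t)n << PACKET_SIZE_SHIFT) < size)
			n++;			/* round up the partial packet */
		return n;
	}

	int main(void)
	{
		printf("%d\n", lli_count(4096));	/* exactly two packets -> 2 */
		printf("%d\n", lli_count(4097));	/* one extra byte -> 3 */
		return 0;
	}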
2235 2235
2236 static struct dma_async_tx_descriptor * 2236 static struct dma_async_tx_descriptor *
2237 coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, 2237 coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
2238 unsigned int sg_len, enum dma_transfer_direction direction, 2238 unsigned int sg_len, enum dma_transfer_direction direction,
2239 unsigned long flags, void *context) 2239 unsigned long flags, void *context)
2240 { 2240 {
2241 struct coh901318_chan *cohc = to_coh901318_chan(chan); 2241 struct coh901318_chan *cohc = to_coh901318_chan(chan);
2242 struct coh901318_lli *lli; 2242 struct coh901318_lli *lli;
2243 struct coh901318_desc *cohd; 2243 struct coh901318_desc *cohd;
2244 const struct coh901318_params *params; 2244 const struct coh901318_params *params;
2245 struct scatterlist *sg; 2245 struct scatterlist *sg;
2246 int len = 0; 2246 int len = 0;
2247 int size; 2247 int size;
2248 int i; 2248 int i;
2249 u32 ctrl_chained = cohc_chan_param(cohc)->ctrl_lli_chained; 2249 u32 ctrl_chained = cohc_chan_param(cohc)->ctrl_lli_chained;
2250 u32 ctrl = cohc_chan_param(cohc)->ctrl_lli; 2250 u32 ctrl = cohc_chan_param(cohc)->ctrl_lli;
2251 u32 ctrl_last = cohc_chan_param(cohc)->ctrl_lli_last; 2251 u32 ctrl_last = cohc_chan_param(cohc)->ctrl_lli_last;
2252 u32 config; 2252 u32 config;
2253 unsigned long flg; 2253 unsigned long flg;
2254 int ret; 2254 int ret;
2255 2255
2256 if (!sgl) 2256 if (!sgl)
2257 goto out; 2257 goto out;
2258 if (sg_dma_len(sgl) == 0) 2258 if (sg_dma_len(sgl) == 0)
2259 goto out; 2259 goto out;
2260 2260
2261 spin_lock_irqsave(&cohc->lock, flg); 2261 spin_lock_irqsave(&cohc->lock, flg);
2262 2262
2263 dev_vdbg(COHC_2_DEV(cohc), "[%s] sg_len %d dir %d\n", 2263 dev_vdbg(COHC_2_DEV(cohc), "[%s] sg_len %d dir %d\n",
2264 __func__, sg_len, direction); 2264 __func__, sg_len, direction);
2265 2265
2266 if (flags & DMA_PREP_INTERRUPT) 2266 if (flags & DMA_PREP_INTERRUPT)
2267 /* Trigger interrupt after last lli */ 2267 /* Trigger interrupt after last lli */
2268 ctrl_last |= COH901318_CX_CTRL_TC_IRQ_ENABLE; 2268 ctrl_last |= COH901318_CX_CTRL_TC_IRQ_ENABLE;
2269 2269
2270 params = cohc_chan_param(cohc); 2270 params = cohc_chan_param(cohc);
2271 config = params->config; 2271 config = params->config;
2272 /* 2272 /*
2273 * Add runtime-specific control on top, make 2273 * Add runtime-specific control on top, make
2274 * sure the bits you set per peripheral channel are 2274 * sure the bits you set per peripheral channel are
2275 * cleared in the default config from the platform. 2275 * cleared in the default config from the platform.
2276 */ 2276 */
2277 ctrl_chained |= cohc->ctrl; 2277 ctrl_chained |= cohc->ctrl;
2278 ctrl_last |= cohc->ctrl; 2278 ctrl_last |= cohc->ctrl;
2279 ctrl |= cohc->ctrl; 2279 ctrl |= cohc->ctrl;
2280 2280
2281 if (direction == DMA_MEM_TO_DEV) { 2281 if (direction == DMA_MEM_TO_DEV) {
2282 u32 tx_flags = COH901318_CX_CTRL_PRDD_SOURCE | 2282 u32 tx_flags = COH901318_CX_CTRL_PRDD_SOURCE |
2283 COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE; 2283 COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE;
2284 2284
2285 config |= COH901318_CX_CFG_RM_MEMORY_TO_PRIMARY; 2285 config |= COH901318_CX_CFG_RM_MEMORY_TO_PRIMARY;
2286 ctrl_chained |= tx_flags; 2286 ctrl_chained |= tx_flags;
2287 ctrl_last |= tx_flags; 2287 ctrl_last |= tx_flags;
2288 ctrl |= tx_flags; 2288 ctrl |= tx_flags;
2289 } else if (direction == DMA_DEV_TO_MEM) { 2289 } else if (direction == DMA_DEV_TO_MEM) {
2290 u32 rx_flags = COH901318_CX_CTRL_PRDD_DEST | 2290 u32 rx_flags = COH901318_CX_CTRL_PRDD_DEST |
2291 COH901318_CX_CTRL_DST_ADDR_INC_ENABLE; 2291 COH901318_CX_CTRL_DST_ADDR_INC_ENABLE;
2292 2292
2293 config |= COH901318_CX_CFG_RM_PRIMARY_TO_MEMORY; 2293 config |= COH901318_CX_CFG_RM_PRIMARY_TO_MEMORY;
2294 ctrl_chained |= rx_flags; 2294 ctrl_chained |= rx_flags;
2295 ctrl_last |= rx_flags; 2295 ctrl_last |= rx_flags;
2296 ctrl |= rx_flags; 2296 ctrl |= rx_flags;
2297 } else 2297 } else
2298 goto err_direction; 2298 goto err_direction;
2299 2299
2300 /* The dma only supports transmitting packages up to 2300 /* The dma only supports transmitting packages up to
2301 * MAX_DMA_PACKET_SIZE. Calculate the total number of 2301 * MAX_DMA_PACKET_SIZE. Calculate the total number of
2302 * dma elements required to send the entire sg list 2302 * dma elements required to send the entire sg list
2303 */ 2303 */
2304 for_each_sg(sgl, sg, sg_len, i) { 2304 for_each_sg(sgl, sg, sg_len, i) {
2305 unsigned int factor; 2305 unsigned int factor;
2306 size = sg_dma_len(sg); 2306 size = sg_dma_len(sg);
2307 2307
2308 if (size <= MAX_DMA_PACKET_SIZE) { 2308 if (size <= MAX_DMA_PACKET_SIZE) {
2309 len++; 2309 len++;
2310 continue; 2310 continue;
2311 } 2311 }
2312 2312
2313 factor = size >> MAX_DMA_PACKET_SIZE_SHIFT; 2313 factor = size >> MAX_DMA_PACKET_SIZE_SHIFT;
2314 if ((factor << MAX_DMA_PACKET_SIZE_SHIFT) < size) 2314 if ((factor << MAX_DMA_PACKET_SIZE_SHIFT) < size)
2315 factor++; 2315 factor++;
2316 2316
2317 len += factor; 2317 len += factor;
2318 } 2318 }
2319 2319
2320 pr_debug("Allocate %d lli:s for this transfer\n", len); 2320 pr_debug("Allocate %d lli:s for this transfer\n", len);
2321 lli = coh901318_lli_alloc(&cohc->base->pool, len); 2321 lli = coh901318_lli_alloc(&cohc->base->pool, len);
2322 2322
2323 if (lli == NULL) 2323 if (lli == NULL)
2324 goto err_dma_alloc; 2324 goto err_dma_alloc;
2325 2325
2326 /* initialize the allocated lli list */ 2326 /* initialize the allocated lli list */
2327 ret = coh901318_lli_fill_sg(&cohc->base->pool, lli, sgl, sg_len, 2327 ret = coh901318_lli_fill_sg(&cohc->base->pool, lli, sgl, sg_len,
2328 cohc->addr, 2328 cohc->addr,
2329 ctrl_chained, 2329 ctrl_chained,
2330 ctrl, 2330 ctrl,
2331 ctrl_last, 2331 ctrl_last,
2332 direction, COH901318_CX_CTRL_TC_IRQ_ENABLE); 2332 direction, COH901318_CX_CTRL_TC_IRQ_ENABLE);
2333 if (ret) 2333 if (ret)
2334 goto err_lli_fill; 2334 goto err_lli_fill;
2335 2335
2336 2336
2337 COH_DBG(coh901318_list_print(cohc, lli)); 2337 COH_DBG(coh901318_list_print(cohc, lli));
2338 2338
2339 /* Pick a descriptor to handle this transfer */ 2339 /* Pick a descriptor to handle this transfer */
2340 cohd = coh901318_desc_get(cohc); 2340 cohd = coh901318_desc_get(cohc);
2341 cohd->head_config = config; 2341 cohd->head_config = config;
2342 /* 2342 /*
2343 * Set the default head ctrl for the channel to the one from the 2343 * Set the default head ctrl for the channel to the one from the
2344 * lli, things may have changed due to odd buffer alignment 2344 * lli, things may have changed due to odd buffer alignment
2345 * etc. 2345 * etc.
2346 */ 2346 */
2347 cohd->head_ctrl = lli->control; 2347 cohd->head_ctrl = lli->control;
2348 cohd->dir = direction; 2348 cohd->dir = direction;
2349 cohd->flags = flags; 2349 cohd->flags = flags;
2350 cohd->desc.tx_submit = coh901318_tx_submit; 2350 cohd->desc.tx_submit = coh901318_tx_submit;
2351 cohd->lli = lli; 2351 cohd->lli = lli;
2352 2352
2353 spin_unlock_irqrestore(&cohc->lock, flg); 2353 spin_unlock_irqrestore(&cohc->lock, flg);
2354 2354
2355 return &cohd->desc; 2355 return &cohd->desc;
2356 err_lli_fill: 2356 err_lli_fill:
2357 err_dma_alloc: 2357 err_dma_alloc:
2358 err_direction: 2358 err_direction:
2359 spin_unlock_irqrestore(&cohc->lock, flg); 2359 spin_unlock_irqrestore(&cohc->lock, flg);
2360 out: 2360 out:
2361 return NULL; 2361 return NULL;
2362 } 2362 }
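
In coh901318_prep_slave_sg() above, the transfer direction selects the routing mode for the channel config and which side (source or destination) gets address-increment enabled, on top of the platform defaults. A standalone sketch of just that selection; all flag values here are invented for illustration, only the decision logic mirrors the driver:

	#include <stdio.h>

	enum dir { MEM_TO_DEV, DEV_TO_MEM };

	#define CFG_MEM_TO_PRIMARY	(1u << 0)	/* hypothetical bits */
	#define CFG_PRIMARY_TO_MEM	(1u << 1)
	#define CTRL_SRC_ADDR_INC	(1u << 2)
	#define CTRL_DST_ADDR_INC	(1u << 3)

	static int build_words(enum dir d, unsigned int *config, unsigned int *ctrl)
	{
		if (d == MEM_TO_DEV) {
			*config |= CFG_MEM_TO_PRIMARY;
			*ctrl   |= CTRL_SRC_ADDR_INC;	/* walk memory on the source side */
		} else if (d == DEV_TO_MEM) {
			*config |= CFG_PRIMARY_TO_MEM;
			*ctrl   |= CTRL_DST_ADDR_INC;	/* walk memory on the destination side */
		} else {
			return -1;			/* unsupported direction */
		}
		return 0;
	}

	int main(void)
	{
		unsigned int config = 0, ctrl = 0;

		if (!build_words(MEM_TO_DEV, &config, &ctrl))
			printf("config 0x%x ctrl 0x%x\n", config, ctrl);
		return 0;
	}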
2363 2363
2364 static enum dma_status 2364 static enum dma_status
2365 coh901318_tx_status(struct dma_chan *chan, dma_cookie_t cookie, 2365 coh901318_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
2366 struct dma_tx_state *txstate) 2366 struct dma_tx_state *txstate)
2367 { 2367 {
2368 struct coh901318_chan *cohc = to_coh901318_chan(chan); 2368 struct coh901318_chan *cohc = to_coh901318_chan(chan);
2369 enum dma_status ret; 2369 enum dma_status ret;
2370 2370
2371 ret = dma_cookie_status(chan, cookie, txstate); 2371 ret = dma_cookie_status(chan, cookie, txstate);
2372 if (ret == DMA_SUCCESS) 2372 if (ret == DMA_COMPLETE)
2373 return ret; 2373 return ret;
2374 2374
2375 dma_set_residue(txstate, coh901318_get_bytes_left(chan)); 2375 dma_set_residue(txstate, coh901318_get_bytes_left(chan));
2376 2376
2377 if (ret == DMA_IN_PROGRESS && cohc->stopped) 2377 if (ret == DMA_IN_PROGRESS && cohc->stopped)
2378 ret = DMA_PAUSED; 2378 ret = DMA_PAUSED;
2379 2379
2380 return ret; 2380 return ret;
2381 } 2381 }
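
coh901318_tx_status() above layers three pieces of information: the generic cookie status, the hardware residue, and the channel's paused flag. A standalone sketch of that decision order (simplified types, not the dmaengine API): report a completed cookie as-is, otherwise attach the remaining byte count and downgrade "in progress" to "paused" when the channel is stopped.

	#include <stdio.h>

	enum status { ST_COMPLETE, ST_IN_PROGRESS, ST_PAUSED };

	struct state {
		enum status cookie_status;	/* what the cookie bookkeeping says */
		int stopped;			/* channel has been paused */
		unsigned int bytes_left;	/* hardware residue */
	};

	static enum status query(const struct state *s, unsigned int *residue)
	{
		if (s->cookie_status == ST_COMPLETE)
			return ST_COMPLETE;	/* nothing left to report */

		*residue = s->bytes_left;
		if (s->cookie_status == ST_IN_PROGRESS && s->stopped)
			return ST_PAUSED;
		return s->cookie_status;
	}

	int main(void)
	{
		struct state s = { ST_IN_PROGRESS, 1, 128 };
		unsigned int residue = 0;
		enum status st = query(&s, &residue);

		printf("status %d, %u bytes left\n", st, residue);	/* paused, 128 */
		return 0;
	}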
2382 2382
2383 static void 2383 static void
2384 coh901318_issue_pending(struct dma_chan *chan) 2384 coh901318_issue_pending(struct dma_chan *chan)
2385 { 2385 {
2386 struct coh901318_chan *cohc = to_coh901318_chan(chan); 2386 struct coh901318_chan *cohc = to_coh901318_chan(chan);
2387 unsigned long flags; 2387 unsigned long flags;
2388 2388
2389 spin_lock_irqsave(&cohc->lock, flags); 2389 spin_lock_irqsave(&cohc->lock, flags);
2390 2390
2391 /* 2391 /*
2392 * Busy means that pending jobs are already being processed, 2392 * Busy means that pending jobs are already being processed,
2393 * and then there is no point in starting the queue: the 2393 * and then there is no point in starting the queue: the
2394 * terminal count interrupt on the channel will take the next 2394 * terminal count interrupt on the channel will take the next
2395 * job on the queue and execute it anyway. 2395 * job on the queue and execute it anyway.
2396 */ 2396 */
2397 if (!cohc->busy) 2397 if (!cohc->busy)
2398 coh901318_queue_start(cohc); 2398 coh901318_queue_start(cohc);
2399 2399
2400 spin_unlock_irqrestore(&cohc->lock, flags); 2400 spin_unlock_irqrestore(&cohc->lock, flags);
2401 } 2401 }
2402 2402
2403 /* 2403 /*
2404 * Here we wrap in the runtime dma control interface 2404 * Here we wrap in the runtime dma control interface
2405 */ 2405 */
2406 struct burst_table { 2406 struct burst_table {
2407 int burst_8bit; 2407 int burst_8bit;
2408 int burst_16bit; 2408 int burst_16bit;
2409 int burst_32bit; 2409 int burst_32bit;
2410 u32 reg; 2410 u32 reg;
2411 }; 2411 };
2412 2412
2413 static const struct burst_table burst_sizes[] = { 2413 static const struct burst_table burst_sizes[] = {
2414 { 2414 {
2415 .burst_8bit = 64, 2415 .burst_8bit = 64,
2416 .burst_16bit = 32, 2416 .burst_16bit = 32,
2417 .burst_32bit = 16, 2417 .burst_32bit = 16,
2418 .reg = COH901318_CX_CTRL_BURST_COUNT_64_BYTES, 2418 .reg = COH901318_CX_CTRL_BURST_COUNT_64_BYTES,
2419 }, 2419 },
2420 { 2420 {
2421 .burst_8bit = 48, 2421 .burst_8bit = 48,
2422 .burst_16bit = 24, 2422 .burst_16bit = 24,
2423 .burst_32bit = 12, 2423 .burst_32bit = 12,
2424 .reg = COH901318_CX_CTRL_BURST_COUNT_48_BYTES, 2424 .reg = COH901318_CX_CTRL_BURST_COUNT_48_BYTES,
2425 }, 2425 },
2426 { 2426 {
2427 .burst_8bit = 32, 2427 .burst_8bit = 32,
2428 .burst_16bit = 16, 2428 .burst_16bit = 16,
2429 .burst_32bit = 8, 2429 .burst_32bit = 8,
2430 .reg = COH901318_CX_CTRL_BURST_COUNT_32_BYTES, 2430 .reg = COH901318_CX_CTRL_BURST_COUNT_32_BYTES,
2431 }, 2431 },
2432 { 2432 {
2433 .burst_8bit = 16, 2433 .burst_8bit = 16,
2434 .burst_16bit = 8, 2434 .burst_16bit = 8,
2435 .burst_32bit = 4, 2435 .burst_32bit = 4,
2436 .reg = COH901318_CX_CTRL_BURST_COUNT_16_BYTES, 2436 .reg = COH901318_CX_CTRL_BURST_COUNT_16_BYTES,
2437 }, 2437 },
2438 { 2438 {
2439 .burst_8bit = 8, 2439 .burst_8bit = 8,
2440 .burst_16bit = 4, 2440 .burst_16bit = 4,
2441 .burst_32bit = 2, 2441 .burst_32bit = 2,
2442 .reg = COH901318_CX_CTRL_BURST_COUNT_8_BYTES, 2442 .reg = COH901318_CX_CTRL_BURST_COUNT_8_BYTES,
2443 }, 2443 },
2444 { 2444 {
2445 .burst_8bit = 4, 2445 .burst_8bit = 4,
2446 .burst_16bit = 2, 2446 .burst_16bit = 2,
2447 .burst_32bit = 1, 2447 .burst_32bit = 1,
2448 .reg = COH901318_CX_CTRL_BURST_COUNT_4_BYTES, 2448 .reg = COH901318_CX_CTRL_BURST_COUNT_4_BYTES,
2449 }, 2449 },
2450 { 2450 {
2451 .burst_8bit = 2, 2451 .burst_8bit = 2,
2452 .burst_16bit = 1, 2452 .burst_16bit = 1,
2453 .burst_32bit = 0, 2453 .burst_32bit = 0,
2454 .reg = COH901318_CX_CTRL_BURST_COUNT_2_BYTES, 2454 .reg = COH901318_CX_CTRL_BURST_COUNT_2_BYTES,
2455 }, 2455 },
2456 { 2456 {
2457 .burst_8bit = 1, 2457 .burst_8bit = 1,
2458 .burst_16bit = 0, 2458 .burst_16bit = 0,
2459 .burst_32bit = 0, 2459 .burst_32bit = 0,
2460 .reg = COH901318_CX_CTRL_BURST_COUNT_1_BYTE, 2460 .reg = COH901318_CX_CTRL_BURST_COUNT_1_BYTE,
2461 }, 2461 },
2462 }; 2462 };
2463 2463
2464 static void coh901318_dma_set_runtimeconfig(struct dma_chan *chan, 2464 static void coh901318_dma_set_runtimeconfig(struct dma_chan *chan,
2465 struct dma_slave_config *config) 2465 struct dma_slave_config *config)
2466 { 2466 {
2467 struct coh901318_chan *cohc = to_coh901318_chan(chan); 2467 struct coh901318_chan *cohc = to_coh901318_chan(chan);
2468 dma_addr_t addr; 2468 dma_addr_t addr;
2469 enum dma_slave_buswidth addr_width; 2469 enum dma_slave_buswidth addr_width;
2470 u32 maxburst; 2470 u32 maxburst;
2471 u32 ctrl = 0; 2471 u32 ctrl = 0;
2472 int i = 0; 2472 int i = 0;
2473 2473
2474 /* We only support mem to per or per to mem transfers */ 2474 /* We only support mem to per or per to mem transfers */
2475 if (config->direction == DMA_DEV_TO_MEM) { 2475 if (config->direction == DMA_DEV_TO_MEM) {
2476 addr = config->src_addr; 2476 addr = config->src_addr;
2477 addr_width = config->src_addr_width; 2477 addr_width = config->src_addr_width;
2478 maxburst = config->src_maxburst; 2478 maxburst = config->src_maxburst;
2479 } else if (config->direction == DMA_MEM_TO_DEV) { 2479 } else if (config->direction == DMA_MEM_TO_DEV) {
2480 addr = config->dst_addr; 2480 addr = config->dst_addr;
2481 addr_width = config->dst_addr_width; 2481 addr_width = config->dst_addr_width;
2482 maxburst = config->dst_maxburst; 2482 maxburst = config->dst_maxburst;
2483 } else { 2483 } else {
2484 dev_err(COHC_2_DEV(cohc), "illegal channel mode\n"); 2484 dev_err(COHC_2_DEV(cohc), "illegal channel mode\n");
2485 return; 2485 return;
2486 } 2486 }
2487 2487
2488 dev_dbg(COHC_2_DEV(cohc), "configure channel for %d byte transfers\n", 2488 dev_dbg(COHC_2_DEV(cohc), "configure channel for %d byte transfers\n",
2489 addr_width); 2489 addr_width);
2490 switch (addr_width) { 2490 switch (addr_width) {
2491 case DMA_SLAVE_BUSWIDTH_1_BYTE: 2491 case DMA_SLAVE_BUSWIDTH_1_BYTE:
2492 ctrl |= 2492 ctrl |=
2493 COH901318_CX_CTRL_SRC_BUS_SIZE_8_BITS | 2493 COH901318_CX_CTRL_SRC_BUS_SIZE_8_BITS |
2494 COH901318_CX_CTRL_DST_BUS_SIZE_8_BITS; 2494 COH901318_CX_CTRL_DST_BUS_SIZE_8_BITS;
2495 2495
2496 while (i < ARRAY_SIZE(burst_sizes)) { 2496 while (i < ARRAY_SIZE(burst_sizes)) {
2497 if (burst_sizes[i].burst_8bit <= maxburst) 2497 if (burst_sizes[i].burst_8bit <= maxburst)
2498 break; 2498 break;
2499 i++; 2499 i++;
2500 } 2500 }
2501 2501
2502 break; 2502 break;
2503 case DMA_SLAVE_BUSWIDTH_2_BYTES: 2503 case DMA_SLAVE_BUSWIDTH_2_BYTES:
2504 ctrl |= 2504 ctrl |=
2505 COH901318_CX_CTRL_SRC_BUS_SIZE_16_BITS | 2505 COH901318_CX_CTRL_SRC_BUS_SIZE_16_BITS |
2506 COH901318_CX_CTRL_DST_BUS_SIZE_16_BITS; 2506 COH901318_CX_CTRL_DST_BUS_SIZE_16_BITS;
2507 2507
2508 while (i < ARRAY_SIZE(burst_sizes)) { 2508 while (i < ARRAY_SIZE(burst_sizes)) {
2509 if (burst_sizes[i].burst_16bit <= maxburst) 2509 if (burst_sizes[i].burst_16bit <= maxburst)
2510 break; 2510 break;
2511 i++; 2511 i++;
2512 } 2512 }
2513 2513
2514 break; 2514 break;
2515 case DMA_SLAVE_BUSWIDTH_4_BYTES: 2515 case DMA_SLAVE_BUSWIDTH_4_BYTES:
2516 /* Direction doesn't matter here, it's 32/32 bits */ 2516 /* Direction doesn't matter here, it's 32/32 bits */
2517 ctrl |= 2517 ctrl |=
2518 COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | 2518 COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
2519 COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS; 2519 COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS;
2520 2520
2521 while (i < ARRAY_SIZE(burst_sizes)) { 2521 while (i < ARRAY_SIZE(burst_sizes)) {
2522 if (burst_sizes[i].burst_32bit <= maxburst) 2522 if (burst_sizes[i].burst_32bit <= maxburst)
2523 break; 2523 break;
2524 i++; 2524 i++;
2525 } 2525 }
2526 2526
2527 break; 2527 break;
2528 default: 2528 default:
2529 dev_err(COHC_2_DEV(cohc), 2529 dev_err(COHC_2_DEV(cohc),
2530 "bad runtimeconfig: alien address width\n"); 2530 "bad runtimeconfig: alien address width\n");
2531 return; 2531 return;
2532 } 2532 }
2533 2533
2534 ctrl |= burst_sizes[i].reg; 2534 ctrl |= burst_sizes[i].reg;
2535 dev_dbg(COHC_2_DEV(cohc), 2535 dev_dbg(COHC_2_DEV(cohc),
2536 "selected burst size %d bytes for address width %d bytes, maxburst %d\n", 2536 "selected burst size %d bytes for address width %d bytes, maxburst %d\n",
2537 burst_sizes[i].burst_8bit, addr_width, maxburst); 2537 burst_sizes[i].burst_8bit, addr_width, maxburst);
2538 2538
2539 cohc->addr = addr; 2539 cohc->addr = addr;
2540 cohc->ctrl = ctrl; 2540 cohc->ctrl = ctrl;
2541 } 2541 }
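
The burst_sizes[] table above is ordered from largest to smallest burst, so coh901318_dma_set_runtimeconfig() simply scans it and stops at the first entry whose burst count does not exceed the requested maxburst. A standalone sketch of that lookup; the values are a reduced, illustrative subset, not the driver's real table or register encodings:

	#include <stdio.h>

	struct burst_entry {
		int burst_8bit;		/* beats per burst at 8-bit width */
		unsigned int reg;	/* hypothetical register encoding */
	};

	static const struct burst_entry table[] = {
		{ 64, 0x6 }, { 32, 0x5 }, { 16, 0x4 }, { 8, 0x3 }, { 4, 0x2 }, { 1, 0x0 },
	};

	static unsigned int pick_burst(int maxburst)
	{
		unsigned int i = 0;

		/* stop at the first burst that fits; clamp to the last entry */
		while (i < sizeof(table) / sizeof(table[0]) - 1) {
			if (table[i].burst_8bit <= maxburst)
				break;
			i++;
		}
		return table[i].reg;
	}

	int main(void)
	{
		printf("maxburst 20 -> reg 0x%x\n", pick_burst(20));	/* picks 16 */
		printf("maxburst 0  -> reg 0x%x\n", pick_burst(0));	/* falls back to 1 */
		return 0;
	}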
2542 2542
2543 static int 2543 static int
2544 coh901318_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, 2544 coh901318_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
2545 unsigned long arg) 2545 unsigned long arg)
2546 { 2546 {
2547 unsigned long flags; 2547 unsigned long flags;
2548 struct coh901318_chan *cohc = to_coh901318_chan(chan); 2548 struct coh901318_chan *cohc = to_coh901318_chan(chan);
2549 struct coh901318_desc *cohd; 2549 struct coh901318_desc *cohd;
2550 void __iomem *virtbase = cohc->base->virtbase; 2550 void __iomem *virtbase = cohc->base->virtbase;
2551 2551
2552 if (cmd == DMA_SLAVE_CONFIG) { 2552 if (cmd == DMA_SLAVE_CONFIG) {
2553 struct dma_slave_config *config = 2553 struct dma_slave_config *config =
2554 (struct dma_slave_config *) arg; 2554 (struct dma_slave_config *) arg;
2555 2555
2556 coh901318_dma_set_runtimeconfig(chan, config); 2556 coh901318_dma_set_runtimeconfig(chan, config);
2557 return 0; 2557 return 0;
2558 } 2558 }
2559 2559
2560 if (cmd == DMA_PAUSE) { 2560 if (cmd == DMA_PAUSE) {
2561 coh901318_pause(chan); 2561 coh901318_pause(chan);
2562 return 0; 2562 return 0;
2563 } 2563 }
2564 2564
2565 if (cmd == DMA_RESUME) { 2565 if (cmd == DMA_RESUME) {
2566 coh901318_resume(chan); 2566 coh901318_resume(chan);
2567 return 0; 2567 return 0;
2568 } 2568 }
2569 2569
2570 if (cmd != DMA_TERMINATE_ALL) 2570 if (cmd != DMA_TERMINATE_ALL)
2571 return -ENXIO; 2571 return -ENXIO;
2572 2572
2573 /* The remainder of this function terminates the transfer */ 2573 /* The remainder of this function terminates the transfer */
2574 coh901318_pause(chan); 2574 coh901318_pause(chan);
2575 spin_lock_irqsave(&cohc->lock, flags); 2575 spin_lock_irqsave(&cohc->lock, flags);
2576 2576
2577 /* Clear any pending BE or TC interrupt */ 2577 /* Clear any pending BE or TC interrupt */
2578 if (cohc->id < 32) { 2578 if (cohc->id < 32) {
2579 writel(1 << cohc->id, virtbase + COH901318_BE_INT_CLEAR1); 2579 writel(1 << cohc->id, virtbase + COH901318_BE_INT_CLEAR1);
2580 writel(1 << cohc->id, virtbase + COH901318_TC_INT_CLEAR1); 2580 writel(1 << cohc->id, virtbase + COH901318_TC_INT_CLEAR1);
2581 } else { 2581 } else {
2582 writel(1 << (cohc->id - 32), virtbase + 2582 writel(1 << (cohc->id - 32), virtbase +
2583 COH901318_BE_INT_CLEAR2); 2583 COH901318_BE_INT_CLEAR2);
2584 writel(1 << (cohc->id - 32), virtbase + 2584 writel(1 << (cohc->id - 32), virtbase +
2585 COH901318_TC_INT_CLEAR2); 2585 COH901318_TC_INT_CLEAR2);
2586 } 2586 }
2587 2587
2588 enable_powersave(cohc); 2588 enable_powersave(cohc);
2589 2589
2590 while ((cohd = coh901318_first_active_get(cohc))) { 2590 while ((cohd = coh901318_first_active_get(cohc))) {
2591 /* release the lli allocation*/ 2591 /* release the lli allocation*/
2592 coh901318_lli_free(&cohc->base->pool, &cohd->lli); 2592 coh901318_lli_free(&cohc->base->pool, &cohd->lli);
2593 2593
2594 /* return desc to free-list */ 2594 /* return desc to free-list */
2595 coh901318_desc_remove(cohd); 2595 coh901318_desc_remove(cohd);
2596 coh901318_desc_free(cohc, cohd); 2596 coh901318_desc_free(cohc, cohd);
2597 } 2597 }
2598 2598
2599 while ((cohd = coh901318_first_queued(cohc))) { 2599 while ((cohd = coh901318_first_queued(cohc))) {
2600 /* release the lli allocation*/ 2600 /* release the lli allocation*/
2601 coh901318_lli_free(&cohc->base->pool, &cohd->lli); 2601 coh901318_lli_free(&cohc->base->pool, &cohd->lli);
2602 2602
2603 /* return desc to free-list */ 2603 /* return desc to free-list */
2604 coh901318_desc_remove(cohd); 2604 coh901318_desc_remove(cohd);
2605 coh901318_desc_free(cohc, cohd); 2605 coh901318_desc_free(cohc, cohd);
2606 } 2606 }
2607 2607
2608 2608
2609 cohc->nbr_active_done = 0; 2609 cohc->nbr_active_done = 0;
2610 cohc->busy = 0; 2610 cohc->busy = 0;
2611 2611
2612 spin_unlock_irqrestore(&cohc->lock, flags); 2612 spin_unlock_irqrestore(&cohc->lock, flags);
2613 2613
2614 return 0; 2614 return 0;
2615 } 2615 }
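
The terminate path in coh901318_control() above clears pending interrupts by channel id: with 64 channels and 32-bit clear registers, channels 0..31 map to the first register and 32..63 to the second, with the bit position taken modulo 32. A standalone sketch of just that addressing:

	#include <stdio.h>

	struct clear_write {
		int reg;		/* 0 = CLEAR1, 1 = CLEAR2 */
		unsigned int bits;	/* value to write */
	};

	static struct clear_write clear_for_channel(int id)
	{
		struct clear_write w;

		if (id < 32) {
			w.reg = 0;
			w.bits = 1u << id;
		} else {
			w.reg = 1;
			w.bits = 1u << (id - 32);
		}
		return w;
	}

	int main(void)
	{
		struct clear_write w = clear_for_channel(40);

		printf("register %d, bits 0x%08x\n", w.reg, w.bits);	/* reg 1, bit 8 */
		return 0;
	}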
2616 2616
2617 void coh901318_base_init(struct dma_device *dma, const int *pick_chans, 2617 void coh901318_base_init(struct dma_device *dma, const int *pick_chans,
2618 struct coh901318_base *base) 2618 struct coh901318_base *base)
2619 { 2619 {
2620 int chans_i; 2620 int chans_i;
2621 int i = 0; 2621 int i = 0;
2622 struct coh901318_chan *cohc; 2622 struct coh901318_chan *cohc;
2623 2623
2624 INIT_LIST_HEAD(&dma->channels); 2624 INIT_LIST_HEAD(&dma->channels);
2625 2625
2626 for (chans_i = 0; pick_chans[chans_i] != -1; chans_i += 2) { 2626 for (chans_i = 0; pick_chans[chans_i] != -1; chans_i += 2) {
2627 for (i = pick_chans[chans_i]; i <= pick_chans[chans_i+1]; i++) { 2627 for (i = pick_chans[chans_i]; i <= pick_chans[chans_i+1]; i++) {
2628 cohc = &base->chans[i]; 2628 cohc = &base->chans[i];
2629 2629
2630 cohc->base = base; 2630 cohc->base = base;
2631 cohc->chan.device = dma; 2631 cohc->chan.device = dma;
2632 cohc->id = i; 2632 cohc->id = i;
2633 2633
2634 /* TODO: do we really need this lock if only one 2634 /* TODO: do we really need this lock if only one
2635 * client is connected to each channel? 2635 * client is connected to each channel?
2636 */ 2636 */
2637 2637
2638 spin_lock_init(&cohc->lock); 2638 spin_lock_init(&cohc->lock);
2639 2639
2640 cohc->nbr_active_done = 0; 2640 cohc->nbr_active_done = 0;
2641 cohc->busy = 0; 2641 cohc->busy = 0;
2642 INIT_LIST_HEAD(&cohc->free); 2642 INIT_LIST_HEAD(&cohc->free);
2643 INIT_LIST_HEAD(&cohc->active); 2643 INIT_LIST_HEAD(&cohc->active);
2644 INIT_LIST_HEAD(&cohc->queue); 2644 INIT_LIST_HEAD(&cohc->queue);
2645 2645
2646 tasklet_init(&cohc->tasklet, dma_tasklet, 2646 tasklet_init(&cohc->tasklet, dma_tasklet,
2647 (unsigned long) cohc); 2647 (unsigned long) cohc);
2648 2648
2649 list_add_tail(&cohc->chan.device_node, 2649 list_add_tail(&cohc->chan.device_node,
2650 &dma->channels); 2650 &dma->channels);
2651 } 2651 }
2652 } 2652 }
2653 } 2653 }
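
coh901318_base_init() above consumes the channel list as pairs of (first, last) indices terminated by -1, initializing every channel in each inclusive range. A standalone sketch of that walk (init_channel() stands in for the real per-channel setup):

	#include <stdio.h>

	static void init_channel(int id)
	{
		printf("init channel %d\n", id);
	}

	static void walk_ranges(const int *pick_chans)
	{
		int chans_i;
		int i;

		for (chans_i = 0; pick_chans[chans_i] != -1; chans_i += 2)
			for (i = pick_chans[chans_i]; i <= pick_chans[chans_i + 1]; i++)
				init_channel(i);
	}

	int main(void)
	{
		/* two ranges: channels 0..2 and 40..41 */
		static const int example[] = { 0, 2, 40, 41, -1 };

		walk_ranges(example);
		return 0;
	}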
2654 2654
2655 static int __init coh901318_probe(struct platform_device *pdev) 2655 static int __init coh901318_probe(struct platform_device *pdev)
2656 { 2656 {
2657 int err = 0; 2657 int err = 0;
2658 struct coh901318_base *base; 2658 struct coh901318_base *base;
2659 int irq; 2659 int irq;
2660 struct resource *io; 2660 struct resource *io;
2661 2661
2662 io = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2662 io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2663 if (!io) 2663 if (!io)
2664 return -ENODEV; 2664 return -ENODEV;
2665 2665
2666 /* Map DMA controller registers to virtual memory */ 2666 /* Map DMA controller registers to virtual memory */
2667 if (devm_request_mem_region(&pdev->dev, 2667 if (devm_request_mem_region(&pdev->dev,
2668 io->start, 2668 io->start,
2669 resource_size(io), 2669 resource_size(io),
2670 pdev->dev.driver->name) == NULL) 2670 pdev->dev.driver->name) == NULL)
2671 return -ENOMEM; 2671 return -ENOMEM;
2672 2672
2673 base = devm_kzalloc(&pdev->dev, 2673 base = devm_kzalloc(&pdev->dev,
2674 ALIGN(sizeof(struct coh901318_base), 4) + 2674 ALIGN(sizeof(struct coh901318_base), 4) +
2675 U300_DMA_CHANNELS * 2675 U300_DMA_CHANNELS *
2676 sizeof(struct coh901318_chan), 2676 sizeof(struct coh901318_chan),
2677 GFP_KERNEL); 2677 GFP_KERNEL);
2678 if (!base) 2678 if (!base)
2679 return -ENOMEM; 2679 return -ENOMEM;
2680 2680
2681 base->chans = ((void *)base) + ALIGN(sizeof(struct coh901318_base), 4); 2681 base->chans = ((void *)base) + ALIGN(sizeof(struct coh901318_base), 4);
2682 2682
2683 base->virtbase = devm_ioremap(&pdev->dev, io->start, resource_size(io)); 2683 base->virtbase = devm_ioremap(&pdev->dev, io->start, resource_size(io));
2684 if (!base->virtbase) 2684 if (!base->virtbase)
2685 return -ENOMEM; 2685 return -ENOMEM;
2686 2686
2687 base->dev = &pdev->dev; 2687 base->dev = &pdev->dev;
2688 spin_lock_init(&base->pm.lock); 2688 spin_lock_init(&base->pm.lock);
2689 base->pm.started_channels = 0; 2689 base->pm.started_channels = 0;
2690 2690
2691 COH901318_DEBUGFS_ASSIGN(debugfs_dma_base, base); 2691 COH901318_DEBUGFS_ASSIGN(debugfs_dma_base, base);
2692 2692
2693 irq = platform_get_irq(pdev, 0); 2693 irq = platform_get_irq(pdev, 0);
2694 if (irq < 0) 2694 if (irq < 0)
2695 return irq; 2695 return irq;
2696 2696
2697 err = devm_request_irq(&pdev->dev, irq, dma_irq_handler, 0, 2697 err = devm_request_irq(&pdev->dev, irq, dma_irq_handler, 0,
2698 "coh901318", base); 2698 "coh901318", base);
2699 if (err) 2699 if (err)
2700 return err; 2700 return err;
2701 2701
2702 err = coh901318_pool_create(&base->pool, &pdev->dev, 2702 err = coh901318_pool_create(&base->pool, &pdev->dev,
2703 sizeof(struct coh901318_lli), 2703 sizeof(struct coh901318_lli),
2704 32); 2704 32);
2705 if (err) 2705 if (err)
2706 return err; 2706 return err;
2707 2707
2708 /* init channels for device transfers */ 2708 /* init channels for device transfers */
2709 coh901318_base_init(&base->dma_slave, dma_slave_channels, 2709 coh901318_base_init(&base->dma_slave, dma_slave_channels,
2710 base); 2710 base);
2711 2711
2712 dma_cap_zero(base->dma_slave.cap_mask); 2712 dma_cap_zero(base->dma_slave.cap_mask);
2713 dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask); 2713 dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask);
2714 2714
2715 base->dma_slave.device_alloc_chan_resources = coh901318_alloc_chan_resources; 2715 base->dma_slave.device_alloc_chan_resources = coh901318_alloc_chan_resources;
2716 base->dma_slave.device_free_chan_resources = coh901318_free_chan_resources; 2716 base->dma_slave.device_free_chan_resources = coh901318_free_chan_resources;
2717 base->dma_slave.device_prep_slave_sg = coh901318_prep_slave_sg; 2717 base->dma_slave.device_prep_slave_sg = coh901318_prep_slave_sg;
2718 base->dma_slave.device_tx_status = coh901318_tx_status; 2718 base->dma_slave.device_tx_status = coh901318_tx_status;
2719 base->dma_slave.device_issue_pending = coh901318_issue_pending; 2719 base->dma_slave.device_issue_pending = coh901318_issue_pending;
2720 base->dma_slave.device_control = coh901318_control; 2720 base->dma_slave.device_control = coh901318_control;
2721 base->dma_slave.dev = &pdev->dev; 2721 base->dma_slave.dev = &pdev->dev;
2722 2722
2723 err = dma_async_device_register(&base->dma_slave); 2723 err = dma_async_device_register(&base->dma_slave);
2724 2724
2725 if (err) 2725 if (err)
2726 goto err_register_slave; 2726 goto err_register_slave;
2727 2727
2728 /* init channels for memcpy */ 2728 /* init channels for memcpy */
2729 coh901318_base_init(&base->dma_memcpy, dma_memcpy_channels, 2729 coh901318_base_init(&base->dma_memcpy, dma_memcpy_channels,
2730 base); 2730 base);
2731 2731
2732 dma_cap_zero(base->dma_memcpy.cap_mask); 2732 dma_cap_zero(base->dma_memcpy.cap_mask);
2733 dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask); 2733 dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask);
2734 2734
2735 base->dma_memcpy.device_alloc_chan_resources = coh901318_alloc_chan_resources; 2735 base->dma_memcpy.device_alloc_chan_resources = coh901318_alloc_chan_resources;
2736 base->dma_memcpy.device_free_chan_resources = coh901318_free_chan_resources; 2736 base->dma_memcpy.device_free_chan_resources = coh901318_free_chan_resources;
2737 base->dma_memcpy.device_prep_dma_memcpy = coh901318_prep_memcpy; 2737 base->dma_memcpy.device_prep_dma_memcpy = coh901318_prep_memcpy;
2738 base->dma_memcpy.device_tx_status = coh901318_tx_status; 2738 base->dma_memcpy.device_tx_status = coh901318_tx_status;
2739 base->dma_memcpy.device_issue_pending = coh901318_issue_pending; 2739 base->dma_memcpy.device_issue_pending = coh901318_issue_pending;
2740 base->dma_memcpy.device_control = coh901318_control; 2740 base->dma_memcpy.device_control = coh901318_control;
2741 base->dma_memcpy.dev = &pdev->dev; 2741 base->dma_memcpy.dev = &pdev->dev;
2742 /* 2742 /*
2743 * This controller can only access addresses at even 32bit boundaries, 2743 * This controller can only access addresses at even 32bit boundaries,
2744 * i.e. 2^2 2744 * i.e. 2^2
2745 */ 2745 */
2746 base->dma_memcpy.copy_align = 2; 2746 base->dma_memcpy.copy_align = 2;
2747 err = dma_async_device_register(&base->dma_memcpy); 2747 err = dma_async_device_register(&base->dma_memcpy);
2748 2748
2749 if (err) 2749 if (err)
2750 goto err_register_memcpy; 2750 goto err_register_memcpy;
2751 2751
2752 err = of_dma_controller_register(pdev->dev.of_node, coh901318_xlate, 2752 err = of_dma_controller_register(pdev->dev.of_node, coh901318_xlate,
2753 base); 2753 base);
2754 if (err) 2754 if (err)
2755 goto err_register_of_dma; 2755 goto err_register_of_dma;
2756 2756
2757 platform_set_drvdata(pdev, base); 2757 platform_set_drvdata(pdev, base);
2758 dev_info(&pdev->dev, "Initialized COH901318 DMA on virtual base 0x%08x\n", 2758 dev_info(&pdev->dev, "Initialized COH901318 DMA on virtual base 0x%08x\n",
2759 (u32) base->virtbase); 2759 (u32) base->virtbase);
2760 2760
2761 return err; 2761 return err;
2762 2762
2763 err_register_of_dma: 2763 err_register_of_dma:
2764 dma_async_device_unregister(&base->dma_memcpy); 2764 dma_async_device_unregister(&base->dma_memcpy);
2765 err_register_memcpy: 2765 err_register_memcpy:
2766 dma_async_device_unregister(&base->dma_slave); 2766 dma_async_device_unregister(&base->dma_slave);
2767 err_register_slave: 2767 err_register_slave:
2768 coh901318_pool_destroy(&base->pool); 2768 coh901318_pool_destroy(&base->pool);
2769 return err; 2769 return err;
2770 } 2770 }
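
The error handling in coh901318_probe() above follows the usual goto-unwind pattern: resources are registered in order and, on failure, released via labels in reverse order so each label only undoes what was already set up. A standalone sketch of the pattern with made-up step names:

	#include <stdio.h>

	static int step(const char *name, int fail)
	{
		printf("register %s\n", name);
		return fail ? -1 : 0;
	}

	static void undo(const char *name)
	{
		printf("unregister %s\n", name);
	}

	static int probe_sketch(int fail_at_third)
	{
		int err;

		err = step("slave engine", 0);
		if (err)
			goto err_slave;
		err = step("memcpy engine", 0);
		if (err)
			goto err_memcpy;
		err = step("of-dma lookup", fail_at_third);
		if (err)
			goto err_of_dma;
		return 0;

	err_of_dma:
		undo("memcpy engine");
	err_memcpy:
		undo("slave engine");
	err_slave:
		return err;
	}

	int main(void)
	{
		return probe_sketch(1) ? 1 : 0;
	}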
2771 2771
2772 static int coh901318_remove(struct platform_device *pdev) 2772 static int coh901318_remove(struct platform_device *pdev)
2773 { 2773 {
2774 struct coh901318_base *base = platform_get_drvdata(pdev); 2774 struct coh901318_base *base = platform_get_drvdata(pdev);
2775 2775
2776 of_dma_controller_free(pdev->dev.of_node); 2776 of_dma_controller_free(pdev->dev.of_node);
2777 dma_async_device_unregister(&base->dma_memcpy); 2777 dma_async_device_unregister(&base->dma_memcpy);
2778 dma_async_device_unregister(&base->dma_slave); 2778 dma_async_device_unregister(&base->dma_slave);
2779 coh901318_pool_destroy(&base->pool); 2779 coh901318_pool_destroy(&base->pool);
2780 return 0; 2780 return 0;
2781 } 2781 }
2782 2782
2783 static const struct of_device_id coh901318_dt_match[] = { 2783 static const struct of_device_id coh901318_dt_match[] = {
2784 { .compatible = "stericsson,coh901318" }, 2784 { .compatible = "stericsson,coh901318" },
2785 {}, 2785 {},
2786 }; 2786 };
2787 2787
2788 static struct platform_driver coh901318_driver = { 2788 static struct platform_driver coh901318_driver = {
2789 .remove = coh901318_remove, 2789 .remove = coh901318_remove,
2790 .driver = { 2790 .driver = {
2791 .name = "coh901318", 2791 .name = "coh901318",
2792 .of_match_table = coh901318_dt_match, 2792 .of_match_table = coh901318_dt_match,
2793 }, 2793 },
2794 }; 2794 };
2795 2795
2796 int __init coh901318_init(void) 2796 int __init coh901318_init(void)
2797 { 2797 {
2798 return platform_driver_probe(&coh901318_driver, coh901318_probe); 2798 return platform_driver_probe(&coh901318_driver, coh901318_probe);
2799 } 2799 }
2800 subsys_initcall(coh901318_init); 2800 subsys_initcall(coh901318_init);
2801 2801
2802 void __exit coh901318_exit(void) 2802 void __exit coh901318_exit(void)
2803 { 2803 {
2804 platform_driver_unregister(&coh901318_driver); 2804 platform_driver_unregister(&coh901318_driver);
2805 } 2805 }
2806 module_exit(coh901318_exit); 2806 module_exit(coh901318_exit);
2807 2807
2808 MODULE_LICENSE("GPL"); 2808 MODULE_LICENSE("GPL");
2809 MODULE_AUTHOR("Per Friden"); 2809 MODULE_AUTHOR("Per Friden");
2810 2810
drivers/dma/cppi41.c
1 #include <linux/dmaengine.h> 1 #include <linux/dmaengine.h>
2 #include <linux/dma-mapping.h> 2 #include <linux/dma-mapping.h>
3 #include <linux/platform_device.h> 3 #include <linux/platform_device.h>
4 #include <linux/module.h> 4 #include <linux/module.h>
5 #include <linux/of.h> 5 #include <linux/of.h>
6 #include <linux/slab.h> 6 #include <linux/slab.h>
7 #include <linux/of_dma.h> 7 #include <linux/of_dma.h>
8 #include <linux/of_irq.h> 8 #include <linux/of_irq.h>
9 #include <linux/dmapool.h> 9 #include <linux/dmapool.h>
10 #include <linux/interrupt.h> 10 #include <linux/interrupt.h>
11 #include <linux/of_address.h> 11 #include <linux/of_address.h>
12 #include <linux/pm_runtime.h> 12 #include <linux/pm_runtime.h>
13 #include "dmaengine.h" 13 #include "dmaengine.h"
14 14
15 #define DESC_TYPE 27 15 #define DESC_TYPE 27
16 #define DESC_TYPE_HOST 0x10 16 #define DESC_TYPE_HOST 0x10
17 #define DESC_TYPE_TEARD 0x13 17 #define DESC_TYPE_TEARD 0x13
18 18
19 #define TD_DESC_IS_RX (1 << 16) 19 #define TD_DESC_IS_RX (1 << 16)
20 #define TD_DESC_DMA_NUM 10 20 #define TD_DESC_DMA_NUM 10
21 21
22 #define DESC_LENGTH_BITS_NUM 21 22 #define DESC_LENGTH_BITS_NUM 21
23 23
24 #define DESC_TYPE_USB (5 << 26) 24 #define DESC_TYPE_USB (5 << 26)
25 #define DESC_PD_COMPLETE (1 << 31) 25 #define DESC_PD_COMPLETE (1 << 31)
26 26
27 /* DMA engine */ 27 /* DMA engine */
28 #define DMA_TDFDQ 4 28 #define DMA_TDFDQ 4
29 #define DMA_TXGCR(x) (0x800 + (x) * 0x20) 29 #define DMA_TXGCR(x) (0x800 + (x) * 0x20)
30 #define DMA_RXGCR(x) (0x808 + (x) * 0x20) 30 #define DMA_RXGCR(x) (0x808 + (x) * 0x20)
31 #define RXHPCRA0 4 31 #define RXHPCRA0 4
32 32
33 #define GCR_CHAN_ENABLE (1 << 31) 33 #define GCR_CHAN_ENABLE (1 << 31)
34 #define GCR_TEARDOWN (1 << 30) 34 #define GCR_TEARDOWN (1 << 30)
35 #define GCR_STARV_RETRY (1 << 24) 35 #define GCR_STARV_RETRY (1 << 24)
36 #define GCR_DESC_TYPE_HOST (1 << 14) 36 #define GCR_DESC_TYPE_HOST (1 << 14)
37 37
38 /* DMA scheduler */ 38 /* DMA scheduler */
39 #define DMA_SCHED_CTRL 0 39 #define DMA_SCHED_CTRL 0
40 #define DMA_SCHED_CTRL_EN (1 << 31) 40 #define DMA_SCHED_CTRL_EN (1 << 31)
41 #define DMA_SCHED_WORD(x) ((x) * 4 + 0x800) 41 #define DMA_SCHED_WORD(x) ((x) * 4 + 0x800)
42 42
43 #define SCHED_ENTRY0_CHAN(x) ((x) << 0) 43 #define SCHED_ENTRY0_CHAN(x) ((x) << 0)
44 #define SCHED_ENTRY0_IS_RX (1 << 7) 44 #define SCHED_ENTRY0_IS_RX (1 << 7)
45 45
46 #define SCHED_ENTRY1_CHAN(x) ((x) << 8) 46 #define SCHED_ENTRY1_CHAN(x) ((x) << 8)
47 #define SCHED_ENTRY1_IS_RX (1 << 15) 47 #define SCHED_ENTRY1_IS_RX (1 << 15)
48 48
49 #define SCHED_ENTRY2_CHAN(x) ((x) << 16) 49 #define SCHED_ENTRY2_CHAN(x) ((x) << 16)
50 #define SCHED_ENTRY2_IS_RX (1 << 23) 50 #define SCHED_ENTRY2_IS_RX (1 << 23)
51 51
52 #define SCHED_ENTRY3_CHAN(x) ((x) << 24) 52 #define SCHED_ENTRY3_CHAN(x) ((x) << 24)
53 #define SCHED_ENTRY3_IS_RX (1 << 31) 53 #define SCHED_ENTRY3_IS_RX (1 << 31)
54 54
55 /* Queue manager */ 55 /* Queue manager */
56 /* 4 KiB of memory for descriptors, 2 for each endpoint */ 56 /* 4 KiB of memory for descriptors, 2 for each endpoint */
57 #define ALLOC_DECS_NUM 128 57 #define ALLOC_DECS_NUM 128
58 #define DESCS_AREAS 1 58 #define DESCS_AREAS 1
59 #define TOTAL_DESCS_NUM (ALLOC_DECS_NUM * DESCS_AREAS) 59 #define TOTAL_DESCS_NUM (ALLOC_DECS_NUM * DESCS_AREAS)
60 #define QMGR_SCRATCH_SIZE (TOTAL_DESCS_NUM * 4) 60 #define QMGR_SCRATCH_SIZE (TOTAL_DESCS_NUM * 4)
61 61
62 #define QMGR_LRAM0_BASE 0x80 62 #define QMGR_LRAM0_BASE 0x80
63 #define QMGR_LRAM_SIZE 0x84 63 #define QMGR_LRAM_SIZE 0x84
64 #define QMGR_LRAM1_BASE 0x88 64 #define QMGR_LRAM1_BASE 0x88
65 #define QMGR_MEMBASE(x) (0x1000 + (x) * 0x10) 65 #define QMGR_MEMBASE(x) (0x1000 + (x) * 0x10)
66 #define QMGR_MEMCTRL(x) (0x1004 + (x) * 0x10) 66 #define QMGR_MEMCTRL(x) (0x1004 + (x) * 0x10)
67 #define QMGR_MEMCTRL_IDX_SH 16 67 #define QMGR_MEMCTRL_IDX_SH 16
68 #define QMGR_MEMCTRL_DESC_SH 8 68 #define QMGR_MEMCTRL_DESC_SH 8
69 69
70 #define QMGR_NUM_PEND 5 70 #define QMGR_NUM_PEND 5
71 #define QMGR_PEND(x) (0x90 + (x) * 4) 71 #define QMGR_PEND(x) (0x90 + (x) * 4)
72 72
73 #define QMGR_PENDING_SLOT_Q(x) (x / 32) 73 #define QMGR_PENDING_SLOT_Q(x) (x / 32)
74 #define QMGR_PENDING_BIT_Q(x) (x % 32) 74 #define QMGR_PENDING_BIT_Q(x) (x % 32)
75 75
76 #define QMGR_QUEUE_A(n) (0x2000 + (n) * 0x10) 76 #define QMGR_QUEUE_A(n) (0x2000 + (n) * 0x10)
77 #define QMGR_QUEUE_B(n) (0x2004 + (n) * 0x10) 77 #define QMGR_QUEUE_B(n) (0x2004 + (n) * 0x10)
78 #define QMGR_QUEUE_C(n) (0x2008 + (n) * 0x10) 78 #define QMGR_QUEUE_C(n) (0x2008 + (n) * 0x10)
79 #define QMGR_QUEUE_D(n) (0x200c + (n) * 0x10) 79 #define QMGR_QUEUE_D(n) (0x200c + (n) * 0x10)
80 80
81 /* Glue layer specific */ 81 /* Glue layer specific */
82 /* USBSS / USB AM335x */ 82 /* USBSS / USB AM335x */
83 #define USBSS_IRQ_STATUS 0x28 83 #define USBSS_IRQ_STATUS 0x28
84 #define USBSS_IRQ_ENABLER 0x2c 84 #define USBSS_IRQ_ENABLER 0x2c
85 #define USBSS_IRQ_CLEARR 0x30 85 #define USBSS_IRQ_CLEARR 0x30
86 86
87 #define USBSS_IRQ_PD_COMP (1 << 2) 87 #define USBSS_IRQ_PD_COMP (1 << 2)
88 88
89 struct cppi41_channel { 89 struct cppi41_channel {
90 struct dma_chan chan; 90 struct dma_chan chan;
91 struct dma_async_tx_descriptor txd; 91 struct dma_async_tx_descriptor txd;
92 struct cppi41_dd *cdd; 92 struct cppi41_dd *cdd;
93 struct cppi41_desc *desc; 93 struct cppi41_desc *desc;
94 dma_addr_t desc_phys; 94 dma_addr_t desc_phys;
95 void __iomem *gcr_reg; 95 void __iomem *gcr_reg;
96 int is_tx; 96 int is_tx;
97 u32 residue; 97 u32 residue;
98 98
99 unsigned int q_num; 99 unsigned int q_num;
100 unsigned int q_comp_num; 100 unsigned int q_comp_num;
101 unsigned int port_num; 101 unsigned int port_num;
102 102
103 unsigned td_retry; 103 unsigned td_retry;
104 unsigned td_queued:1; 104 unsigned td_queued:1;
105 unsigned td_seen:1; 105 unsigned td_seen:1;
106 unsigned td_desc_seen:1; 106 unsigned td_desc_seen:1;
107 }; 107 };
108 108
109 struct cppi41_desc { 109 struct cppi41_desc {
110 u32 pd0; 110 u32 pd0;
111 u32 pd1; 111 u32 pd1;
112 u32 pd2; 112 u32 pd2;
113 u32 pd3; 113 u32 pd3;
114 u32 pd4; 114 u32 pd4;
115 u32 pd5; 115 u32 pd5;
116 u32 pd6; 116 u32 pd6;
117 u32 pd7; 117 u32 pd7;
118 } __aligned(32); 118 } __aligned(32);
119 119
120 struct chan_queues { 120 struct chan_queues {
121 u16 submit; 121 u16 submit;
122 u16 complete; 122 u16 complete;
123 }; 123 };
124 124
125 struct cppi41_dd { 125 struct cppi41_dd {
126 struct dma_device ddev; 126 struct dma_device ddev;
127 127
128 void *qmgr_scratch; 128 void *qmgr_scratch;
129 dma_addr_t scratch_phys; 129 dma_addr_t scratch_phys;
130 130
131 struct cppi41_desc *cd; 131 struct cppi41_desc *cd;
132 dma_addr_t descs_phys; 132 dma_addr_t descs_phys;
133 u32 first_td_desc; 133 u32 first_td_desc;
134 struct cppi41_channel *chan_busy[ALLOC_DECS_NUM]; 134 struct cppi41_channel *chan_busy[ALLOC_DECS_NUM];
135 135
136 void __iomem *usbss_mem; 136 void __iomem *usbss_mem;
137 void __iomem *ctrl_mem; 137 void __iomem *ctrl_mem;
138 void __iomem *sched_mem; 138 void __iomem *sched_mem;
139 void __iomem *qmgr_mem; 139 void __iomem *qmgr_mem;
140 unsigned int irq; 140 unsigned int irq;
141 const struct chan_queues *queues_rx; 141 const struct chan_queues *queues_rx;
142 const struct chan_queues *queues_tx; 142 const struct chan_queues *queues_tx;
143 struct chan_queues td_queue; 143 struct chan_queues td_queue;
144 }; 144 };
145 145
146 #define FIST_COMPLETION_QUEUE 93 146 #define FIST_COMPLETION_QUEUE 93
147 static struct chan_queues usb_queues_tx[] = { 147 static struct chan_queues usb_queues_tx[] = {
148 /* USB0 ENDP 1 */ 148 /* USB0 ENDP 1 */
149 [ 0] = { .submit = 32, .complete = 93}, 149 [ 0] = { .submit = 32, .complete = 93},
150 [ 1] = { .submit = 34, .complete = 94}, 150 [ 1] = { .submit = 34, .complete = 94},
151 [ 2] = { .submit = 36, .complete = 95}, 151 [ 2] = { .submit = 36, .complete = 95},
152 [ 3] = { .submit = 38, .complete = 96}, 152 [ 3] = { .submit = 38, .complete = 96},
153 [ 4] = { .submit = 40, .complete = 97}, 153 [ 4] = { .submit = 40, .complete = 97},
154 [ 5] = { .submit = 42, .complete = 98}, 154 [ 5] = { .submit = 42, .complete = 98},
155 [ 6] = { .submit = 44, .complete = 99}, 155 [ 6] = { .submit = 44, .complete = 99},
156 [ 7] = { .submit = 46, .complete = 100}, 156 [ 7] = { .submit = 46, .complete = 100},
157 [ 8] = { .submit = 48, .complete = 101}, 157 [ 8] = { .submit = 48, .complete = 101},
158 [ 9] = { .submit = 50, .complete = 102}, 158 [ 9] = { .submit = 50, .complete = 102},
159 [10] = { .submit = 52, .complete = 103}, 159 [10] = { .submit = 52, .complete = 103},
160 [11] = { .submit = 54, .complete = 104}, 160 [11] = { .submit = 54, .complete = 104},
161 [12] = { .submit = 56, .complete = 105}, 161 [12] = { .submit = 56, .complete = 105},
162 [13] = { .submit = 58, .complete = 106}, 162 [13] = { .submit = 58, .complete = 106},
163 [14] = { .submit = 60, .complete = 107}, 163 [14] = { .submit = 60, .complete = 107},
164 164
165 /* USB1 ENDP1 */ 165 /* USB1 ENDP1 */
166 [15] = { .submit = 62, .complete = 125}, 166 [15] = { .submit = 62, .complete = 125},
167 [16] = { .submit = 64, .complete = 126}, 167 [16] = { .submit = 64, .complete = 126},
168 [17] = { .submit = 66, .complete = 127}, 168 [17] = { .submit = 66, .complete = 127},
169 [18] = { .submit = 68, .complete = 128}, 169 [18] = { .submit = 68, .complete = 128},
170 [19] = { .submit = 70, .complete = 129}, 170 [19] = { .submit = 70, .complete = 129},
171 [20] = { .submit = 72, .complete = 130}, 171 [20] = { .submit = 72, .complete = 130},
172 [21] = { .submit = 74, .complete = 131}, 172 [21] = { .submit = 74, .complete = 131},
173 [22] = { .submit = 76, .complete = 132}, 173 [22] = { .submit = 76, .complete = 132},
174 [23] = { .submit = 78, .complete = 133}, 174 [23] = { .submit = 78, .complete = 133},
175 [24] = { .submit = 80, .complete = 134}, 175 [24] = { .submit = 80, .complete = 134},
176 [25] = { .submit = 82, .complete = 135}, 176 [25] = { .submit = 82, .complete = 135},
177 [26] = { .submit = 84, .complete = 136}, 177 [26] = { .submit = 84, .complete = 136},
178 [27] = { .submit = 86, .complete = 137}, 178 [27] = { .submit = 86, .complete = 137},
179 [28] = { .submit = 88, .complete = 138}, 179 [28] = { .submit = 88, .complete = 138},
180 [29] = { .submit = 90, .complete = 139}, 180 [29] = { .submit = 90, .complete = 139},
181 }; 181 };
182 182
183 static const struct chan_queues usb_queues_rx[] = { 183 static const struct chan_queues usb_queues_rx[] = {
184 /* USB0 ENDP 1 */ 184 /* USB0 ENDP 1 */
185 [ 0] = { .submit = 1, .complete = 109}, 185 [ 0] = { .submit = 1, .complete = 109},
186 [ 1] = { .submit = 2, .complete = 110}, 186 [ 1] = { .submit = 2, .complete = 110},
187 [ 2] = { .submit = 3, .complete = 111}, 187 [ 2] = { .submit = 3, .complete = 111},
188 [ 3] = { .submit = 4, .complete = 112}, 188 [ 3] = { .submit = 4, .complete = 112},
189 [ 4] = { .submit = 5, .complete = 113}, 189 [ 4] = { .submit = 5, .complete = 113},
190 [ 5] = { .submit = 6, .complete = 114}, 190 [ 5] = { .submit = 6, .complete = 114},
191 [ 6] = { .submit = 7, .complete = 115}, 191 [ 6] = { .submit = 7, .complete = 115},
192 [ 7] = { .submit = 8, .complete = 116}, 192 [ 7] = { .submit = 8, .complete = 116},
193 [ 8] = { .submit = 9, .complete = 117}, 193 [ 8] = { .submit = 9, .complete = 117},
194 [ 9] = { .submit = 10, .complete = 118}, 194 [ 9] = { .submit = 10, .complete = 118},
195 [10] = { .submit = 11, .complete = 119}, 195 [10] = { .submit = 11, .complete = 119},
196 [11] = { .submit = 12, .complete = 120}, 196 [11] = { .submit = 12, .complete = 120},
197 [12] = { .submit = 13, .complete = 121}, 197 [12] = { .submit = 13, .complete = 121},
198 [13] = { .submit = 14, .complete = 122}, 198 [13] = { .submit = 14, .complete = 122},
199 [14] = { .submit = 15, .complete = 123}, 199 [14] = { .submit = 15, .complete = 123},
200 200
201 /* USB1 ENDP 1 */ 201 /* USB1 ENDP 1 */
202 [15] = { .submit = 16, .complete = 141}, 202 [15] = { .submit = 16, .complete = 141},
203 [16] = { .submit = 17, .complete = 142}, 203 [16] = { .submit = 17, .complete = 142},
204 [17] = { .submit = 18, .complete = 143}, 204 [17] = { .submit = 18, .complete = 143},
205 [18] = { .submit = 19, .complete = 144}, 205 [18] = { .submit = 19, .complete = 144},
206 [19] = { .submit = 20, .complete = 145}, 206 [19] = { .submit = 20, .complete = 145},
207 [20] = { .submit = 21, .complete = 146}, 207 [20] = { .submit = 21, .complete = 146},
208 [21] = { .submit = 22, .complete = 147}, 208 [21] = { .submit = 22, .complete = 147},
209 [22] = { .submit = 23, .complete = 148}, 209 [22] = { .submit = 23, .complete = 148},
210 [23] = { .submit = 24, .complete = 149}, 210 [23] = { .submit = 24, .complete = 149},
211 [24] = { .submit = 25, .complete = 150}, 211 [24] = { .submit = 25, .complete = 150},
212 [25] = { .submit = 26, .complete = 151}, 212 [25] = { .submit = 26, .complete = 151},
213 [26] = { .submit = 27, .complete = 152}, 213 [26] = { .submit = 27, .complete = 152},
214 [27] = { .submit = 28, .complete = 153}, 214 [27] = { .submit = 28, .complete = 153},
215 [28] = { .submit = 29, .complete = 154}, 215 [28] = { .submit = 29, .complete = 154},
216 [29] = { .submit = 30, .complete = 155}, 216 [29] = { .submit = 30, .complete = 155},
217 }; 217 };
218 218
219 struct cppi_glue_infos { 219 struct cppi_glue_infos {
220 irqreturn_t (*isr)(int irq, void *data); 220 irqreturn_t (*isr)(int irq, void *data);
221 const struct chan_queues *queues_rx; 221 const struct chan_queues *queues_rx;
222 const struct chan_queues *queues_tx; 222 const struct chan_queues *queues_tx;
223 struct chan_queues td_queue; 223 struct chan_queues td_queue;
224 }; 224 };
225 225
226 static struct cppi41_channel *to_cpp41_chan(struct dma_chan *c) 226 static struct cppi41_channel *to_cpp41_chan(struct dma_chan *c)
227 { 227 {
228 return container_of(c, struct cppi41_channel, chan); 228 return container_of(c, struct cppi41_channel, chan);
229 } 229 }
230 230
231 static struct cppi41_channel *desc_to_chan(struct cppi41_dd *cdd, u32 desc) 231 static struct cppi41_channel *desc_to_chan(struct cppi41_dd *cdd, u32 desc)
232 { 232 {
233 struct cppi41_channel *c; 233 struct cppi41_channel *c;
234 u32 descs_size; 234 u32 descs_size;
235 u32 desc_num; 235 u32 desc_num;
236 236
237 descs_size = sizeof(struct cppi41_desc) * ALLOC_DECS_NUM; 237 descs_size = sizeof(struct cppi41_desc) * ALLOC_DECS_NUM;
238 238
239 if (!((desc >= cdd->descs_phys) && 239 if (!((desc >= cdd->descs_phys) &&
240 (desc < (cdd->descs_phys + descs_size)))) { 240 (desc < (cdd->descs_phys + descs_size)))) {
241 return NULL; 241 return NULL;
242 } 242 }
243 243
244 desc_num = (desc - cdd->descs_phys) / sizeof(struct cppi41_desc); 244 desc_num = (desc - cdd->descs_phys) / sizeof(struct cppi41_desc);
245 BUG_ON(desc_num >= ALLOC_DECS_NUM); 245 BUG_ON(desc_num >= ALLOC_DECS_NUM);
246 c = cdd->chan_busy[desc_num]; 246 c = cdd->chan_busy[desc_num];
247 cdd->chan_busy[desc_num] = NULL; 247 cdd->chan_busy[desc_num] = NULL;
248 return c; 248 return c;
249 } 249 }
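
desc_to_chan() above maps a completed descriptor's bus address back to a channel: range-check it against the descriptor pool, then divide out the descriptor size to recover the index into chan_busy[]. A standalone sketch of that mapping, using the 32-byte descriptor size and 128-descriptor pool from the definitions above:

	#include <stdio.h>
	#include <stdint.h>

	#define DESC_SIZE	32u	/* matches the __aligned(32) descriptor */
	#define NUM_DESCS	128u

	static int desc_to_index(uint32_t pool_base, uint32_t desc)
	{
		uint32_t pool_size = DESC_SIZE * NUM_DESCS;

		if (desc < pool_base || desc >= pool_base + pool_size)
			return -1;			/* not one of ours */

		return (desc - pool_base) / DESC_SIZE;	/* index into chan_busy[] */
	}

	int main(void)
	{
		uint32_t base = 0x80000000u;

		printf("%d\n", desc_to_index(base, base + 3 * DESC_SIZE));	/* 3 */
		printf("%d\n", desc_to_index(base, 0x1000));			/* -1 */
		return 0;
	}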
250 250
251 static void cppi_writel(u32 val, void *__iomem *mem) 251 static void cppi_writel(u32 val, void *__iomem *mem)
252 { 252 {
253 __raw_writel(val, mem); 253 __raw_writel(val, mem);
254 } 254 }
255 255
256 static u32 cppi_readl(void *__iomem *mem) 256 static u32 cppi_readl(void *__iomem *mem)
257 { 257 {
258 return __raw_readl(mem); 258 return __raw_readl(mem);
259 } 259 }
260 260
261 static u32 pd_trans_len(u32 val) 261 static u32 pd_trans_len(u32 val)
262 { 262 {
263 return val & ((1 << (DESC_LENGTH_BITS_NUM + 1)) - 1); 263 return val & ((1 << (DESC_LENGTH_BITS_NUM + 1)) - 1);
264 } 264 }
265 265
266 static irqreturn_t cppi41_irq(int irq, void *data) 266 static irqreturn_t cppi41_irq(int irq, void *data)
267 { 267 {
268 struct cppi41_dd *cdd = data; 268 struct cppi41_dd *cdd = data;
269 struct cppi41_channel *c; 269 struct cppi41_channel *c;
270 u32 status; 270 u32 status;
271 int i; 271 int i;
272 272
273 status = cppi_readl(cdd->usbss_mem + USBSS_IRQ_STATUS); 273 status = cppi_readl(cdd->usbss_mem + USBSS_IRQ_STATUS);
274 if (!(status & USBSS_IRQ_PD_COMP)) 274 if (!(status & USBSS_IRQ_PD_COMP))
275 return IRQ_NONE; 275 return IRQ_NONE;
276 cppi_writel(status, cdd->usbss_mem + USBSS_IRQ_STATUS); 276 cppi_writel(status, cdd->usbss_mem + USBSS_IRQ_STATUS);
277 277
278 for (i = QMGR_PENDING_SLOT_Q(FIST_COMPLETION_QUEUE); i < QMGR_NUM_PEND; 278 for (i = QMGR_PENDING_SLOT_Q(FIST_COMPLETION_QUEUE); i < QMGR_NUM_PEND;
279 i++) { 279 i++) {
280 u32 val; 280 u32 val;
281 u32 q_num; 281 u32 q_num;
282 282
283 val = cppi_readl(cdd->qmgr_mem + QMGR_PEND(i)); 283 val = cppi_readl(cdd->qmgr_mem + QMGR_PEND(i));
284 if (i == QMGR_PENDING_SLOT_Q(FIST_COMPLETION_QUEUE) && val) { 284 if (i == QMGR_PENDING_SLOT_Q(FIST_COMPLETION_QUEUE) && val) {
285 u32 mask; 285 u32 mask;
286 /* set corresponding bit for completion Q 93 */ 286 /* set corresponding bit for completion Q 93 */
287 mask = 1 << QMGR_PENDING_BIT_Q(FIST_COMPLETION_QUEUE); 287 mask = 1 << QMGR_PENDING_BIT_Q(FIST_COMPLETION_QUEUE);
288 /* now set all bits for queues less than Q 93 */ 288 /* now set all bits for queues less than Q 93 */
289 mask--; 289 mask--;
290 /* now invert and keep only Q 93+ set */ 290 /* now invert and keep only Q 93+ set */
291 val &= ~mask; 291 val &= ~mask;
292 } 292 }
293 293
294 if (val) 294 if (val)
295 __iormb(); 295 __iormb();
296 296
297 while (val) { 297 while (val) {
298 u32 desc; 298 u32 desc;
299 299
300 q_num = __fls(val); 300 q_num = __fls(val);
301 val &= ~(1 << q_num); 301 val &= ~(1 << q_num);
302 q_num += 32 * i; 302 q_num += 32 * i;
303 desc = cppi_readl(cdd->qmgr_mem + QMGR_QUEUE_D(q_num)); 303 desc = cppi_readl(cdd->qmgr_mem + QMGR_QUEUE_D(q_num));
304 desc &= ~0x1f; 304 desc &= ~0x1f;
305 c = desc_to_chan(cdd, desc); 305 c = desc_to_chan(cdd, desc);
306 if (WARN_ON(!c)) { 306 if (WARN_ON(!c)) {
307 pr_err("%s() q %d desc %08x\n", __func__, 307 pr_err("%s() q %d desc %08x\n", __func__,
308 q_num, desc); 308 q_num, desc);
309 continue; 309 continue;
310 } 310 }
311 c->residue = pd_trans_len(c->desc->pd6) - 311 c->residue = pd_trans_len(c->desc->pd6) -
312 pd_trans_len(c->desc->pd0); 312 pd_trans_len(c->desc->pd0);
313 313
314 dma_cookie_complete(&c->txd); 314 dma_cookie_complete(&c->txd);
315 c->txd.callback(c->txd.callback_param); 315 c->txd.callback(c->txd.callback_param);
316 } 316 }
317 } 317 }
318 return IRQ_HANDLED; 318 return IRQ_HANDLED;
319 } 319 }
320 320
321 static dma_cookie_t cppi41_tx_submit(struct dma_async_tx_descriptor *tx) 321 static dma_cookie_t cppi41_tx_submit(struct dma_async_tx_descriptor *tx)
322 { 322 {
323 dma_cookie_t cookie; 323 dma_cookie_t cookie;
324 324
325 cookie = dma_cookie_assign(tx); 325 cookie = dma_cookie_assign(tx);
326 326
327 return cookie; 327 return cookie;
328 } 328 }
329 329
330 static int cppi41_dma_alloc_chan_resources(struct dma_chan *chan) 330 static int cppi41_dma_alloc_chan_resources(struct dma_chan *chan)
331 { 331 {
332 struct cppi41_channel *c = to_cpp41_chan(chan); 332 struct cppi41_channel *c = to_cpp41_chan(chan);
333 333
334 dma_cookie_init(chan); 334 dma_cookie_init(chan);
335 dma_async_tx_descriptor_init(&c->txd, chan); 335 dma_async_tx_descriptor_init(&c->txd, chan);
336 c->txd.tx_submit = cppi41_tx_submit; 336 c->txd.tx_submit = cppi41_tx_submit;
337 337
338 if (!c->is_tx) 338 if (!c->is_tx)
339 cppi_writel(c->q_num, c->gcr_reg + RXHPCRA0); 339 cppi_writel(c->q_num, c->gcr_reg + RXHPCRA0);
340 340
341 return 0; 341 return 0;
342 } 342 }
343 343
344 static void cppi41_dma_free_chan_resources(struct dma_chan *chan) 344 static void cppi41_dma_free_chan_resources(struct dma_chan *chan)
345 { 345 {
346 } 346 }
347 347
348 static enum dma_status cppi41_dma_tx_status(struct dma_chan *chan, 348 static enum dma_status cppi41_dma_tx_status(struct dma_chan *chan,
349 dma_cookie_t cookie, struct dma_tx_state *txstate) 349 dma_cookie_t cookie, struct dma_tx_state *txstate)
350 { 350 {
351 struct cppi41_channel *c = to_cpp41_chan(chan); 351 struct cppi41_channel *c = to_cpp41_chan(chan);
352 enum dma_status ret; 352 enum dma_status ret;
353 353
354 /* lock */ 354 /* lock */
355 ret = dma_cookie_status(chan, cookie, txstate); 355 ret = dma_cookie_status(chan, cookie, txstate);
356 if (txstate && ret == DMA_SUCCESS) 356 if (txstate && ret == DMA_COMPLETE)
357 txstate->residue = c->residue; 357 txstate->residue = c->residue;
358 /* unlock */ 358 /* unlock */
359 359
360 return ret; 360 return ret;
361 } 361 }
362 362
363 static void push_desc_queue(struct cppi41_channel *c) 363 static void push_desc_queue(struct cppi41_channel *c)
364 { 364 {
365 struct cppi41_dd *cdd = c->cdd; 365 struct cppi41_dd *cdd = c->cdd;
366 u32 desc_num; 366 u32 desc_num;
367 u32 desc_phys; 367 u32 desc_phys;
368 u32 reg; 368 u32 reg;
369 369
370 desc_phys = lower_32_bits(c->desc_phys); 370 desc_phys = lower_32_bits(c->desc_phys);
371 desc_num = (desc_phys - cdd->descs_phys) / sizeof(struct cppi41_desc); 371 desc_num = (desc_phys - cdd->descs_phys) / sizeof(struct cppi41_desc);
372 WARN_ON(cdd->chan_busy[desc_num]); 372 WARN_ON(cdd->chan_busy[desc_num]);
373 cdd->chan_busy[desc_num] = c; 373 cdd->chan_busy[desc_num] = c;
374 374
375 reg = (sizeof(struct cppi41_desc) - 24) / 4; 375 reg = (sizeof(struct cppi41_desc) - 24) / 4;
376 reg |= desc_phys; 376 reg |= desc_phys;
377 cppi_writel(reg, cdd->qmgr_mem + QMGR_QUEUE_D(c->q_num)); 377 cppi_writel(reg, cdd->qmgr_mem + QMGR_QUEUE_D(c->q_num));
378 } 378 }
379 379
380 static void cppi41_dma_issue_pending(struct dma_chan *chan) 380 static void cppi41_dma_issue_pending(struct dma_chan *chan)
381 { 381 {
382 struct cppi41_channel *c = to_cpp41_chan(chan); 382 struct cppi41_channel *c = to_cpp41_chan(chan);
383 u32 reg; 383 u32 reg;
384 384
385 c->residue = 0; 385 c->residue = 0;
386 386
387 reg = GCR_CHAN_ENABLE; 387 reg = GCR_CHAN_ENABLE;
388 if (!c->is_tx) { 388 if (!c->is_tx) {
389 reg |= GCR_STARV_RETRY; 389 reg |= GCR_STARV_RETRY;
390 reg |= GCR_DESC_TYPE_HOST; 390 reg |= GCR_DESC_TYPE_HOST;
391 reg |= c->q_comp_num; 391 reg |= c->q_comp_num;
392 } 392 }
393 393
394 cppi_writel(reg, c->gcr_reg); 394 cppi_writel(reg, c->gcr_reg);
395 395
396 /* 396 /*
397 * We don't use writel() but __raw_writel() so we have to make sure 397 * We don't use writel() but __raw_writel() so we have to make sure
398 * that the DMA descriptor in coherent memory has made it to main memory 398 * that the DMA descriptor in coherent memory has made it to main memory
399 * before starting the DMA engine. 399 * before starting the DMA engine.
400 */ 400 */
401 __iowmb(); 401 __iowmb();
402 push_desc_queue(c); 402 push_desc_queue(c);
403 } 403 }
404 404
405 static u32 get_host_pd0(u32 length) 405 static u32 get_host_pd0(u32 length)
406 { 406 {
407 u32 reg; 407 u32 reg;
408 408
409 reg = DESC_TYPE_HOST << DESC_TYPE; 409 reg = DESC_TYPE_HOST << DESC_TYPE;
410 reg |= length; 410 reg |= length;
411 411
412 return reg; 412 return reg;
413 } 413 }
414 414
415 static u32 get_host_pd1(struct cppi41_channel *c) 415 static u32 get_host_pd1(struct cppi41_channel *c)
416 { 416 {
417 u32 reg; 417 u32 reg;
418 418
419 reg = 0; 419 reg = 0;
420 420
421 return reg; 421 return reg;
422 } 422 }
423 423
424 static u32 get_host_pd2(struct cppi41_channel *c) 424 static u32 get_host_pd2(struct cppi41_channel *c)
425 { 425 {
426 u32 reg; 426 u32 reg;
427 427
428 reg = DESC_TYPE_USB; 428 reg = DESC_TYPE_USB;
429 reg |= c->q_comp_num; 429 reg |= c->q_comp_num;
430 430
431 return reg; 431 return reg;
432 } 432 }
433 433
434 static u32 get_host_pd3(u32 length) 434 static u32 get_host_pd3(u32 length)
435 { 435 {
436 u32 reg; 436 u32 reg;
437 437
438 /* PD3 = packet size */ 438 /* PD3 = packet size */
439 reg = length; 439 reg = length;
440 440
441 return reg; 441 return reg;
442 } 442 }
443 443
444 static u32 get_host_pd6(u32 length) 444 static u32 get_host_pd6(u32 length)
445 { 445 {
446 u32 reg; 446 u32 reg;
447 447
448 /* PD6 buffer size */ 448 /* PD6 buffer size */
449 reg = DESC_PD_COMPLETE; 449 reg = DESC_PD_COMPLETE;
450 reg |= length; 450 reg |= length;
451 451
452 return reg; 452 return reg;
453 } 453 }
454 454
455 static u32 get_host_pd4_or_7(u32 addr) 455 static u32 get_host_pd4_or_7(u32 addr)
456 { 456 {
457 u32 reg; 457 u32 reg;
458 458
459 reg = addr; 459 reg = addr;
460 460
461 return reg; 461 return reg;
462 } 462 }
463 463
464 static u32 get_host_pd5(void) 464 static u32 get_host_pd5(void)
465 { 465 {
466 u32 reg; 466 u32 reg;
467 467
468 reg = 0; 468 reg = 0;
469 469
470 return reg; 470 return reg;
471 } 471 }
472 472
473 static struct dma_async_tx_descriptor *cppi41_dma_prep_slave_sg( 473 static struct dma_async_tx_descriptor *cppi41_dma_prep_slave_sg(
474 struct dma_chan *chan, struct scatterlist *sgl, unsigned sg_len, 474 struct dma_chan *chan, struct scatterlist *sgl, unsigned sg_len,
475 enum dma_transfer_direction dir, unsigned long tx_flags, void *context) 475 enum dma_transfer_direction dir, unsigned long tx_flags, void *context)
476 { 476 {
477 struct cppi41_channel *c = to_cpp41_chan(chan); 477 struct cppi41_channel *c = to_cpp41_chan(chan);
478 struct cppi41_desc *d; 478 struct cppi41_desc *d;
479 struct scatterlist *sg; 479 struct scatterlist *sg;
480 unsigned int i; 480 unsigned int i;
481 unsigned int num; 481 unsigned int num;
482 482
483 num = 0; 483 num = 0;
484 d = c->desc; 484 d = c->desc;
485 for_each_sg(sgl, sg, sg_len, i) { 485 for_each_sg(sgl, sg, sg_len, i) {
486 u32 addr; 486 u32 addr;
487 u32 len; 487 u32 len;
488 488
489 /* We need to use more than one desc once musb supports sg */ 489 /* We need to use more than one desc once musb supports sg */
490 BUG_ON(num > 0); 490 BUG_ON(num > 0);
491 addr = lower_32_bits(sg_dma_address(sg)); 491 addr = lower_32_bits(sg_dma_address(sg));
492 len = sg_dma_len(sg); 492 len = sg_dma_len(sg);
493 493
494 d->pd0 = get_host_pd0(len); 494 d->pd0 = get_host_pd0(len);
495 d->pd1 = get_host_pd1(c); 495 d->pd1 = get_host_pd1(c);
496 d->pd2 = get_host_pd2(c); 496 d->pd2 = get_host_pd2(c);
497 d->pd3 = get_host_pd3(len); 497 d->pd3 = get_host_pd3(len);
498 d->pd4 = get_host_pd4_or_7(addr); 498 d->pd4 = get_host_pd4_or_7(addr);
499 d->pd5 = get_host_pd5(); 499 d->pd5 = get_host_pd5();
500 d->pd6 = get_host_pd6(len); 500 d->pd6 = get_host_pd6(len);
501 d->pd7 = get_host_pd4_or_7(addr); 501 d->pd7 = get_host_pd4_or_7(addr);
502 502
503 d++; 503 d++;
504 } 504 }
505 505
506 return &c->txd; 506 return &c->txd;
507 } 507 }
508 508
509 static int cpp41_cfg_chan(struct cppi41_channel *c, 509 static int cpp41_cfg_chan(struct cppi41_channel *c,
510 struct dma_slave_config *cfg) 510 struct dma_slave_config *cfg)
511 { 511 {
512 return 0; 512 return 0;
513 } 513 }
514 514
515 static void cppi41_compute_td_desc(struct cppi41_desc *d) 515 static void cppi41_compute_td_desc(struct cppi41_desc *d)
516 { 516 {
517 d->pd0 = DESC_TYPE_TEARD << DESC_TYPE; 517 d->pd0 = DESC_TYPE_TEARD << DESC_TYPE;
518 } 518 }
519 519
520 static u32 cppi41_pop_desc(struct cppi41_dd *cdd, unsigned queue_num) 520 static u32 cppi41_pop_desc(struct cppi41_dd *cdd, unsigned queue_num)
521 { 521 {
522 u32 desc; 522 u32 desc;
523 523
524 desc = cppi_readl(cdd->qmgr_mem + QMGR_QUEUE_D(queue_num)); 524 desc = cppi_readl(cdd->qmgr_mem + QMGR_QUEUE_D(queue_num));
525 desc &= ~0x1f; 525 desc &= ~0x1f;
526 return desc; 526 return desc;
527 } 527 }
528 528
529 static int cppi41_tear_down_chan(struct cppi41_channel *c) 529 static int cppi41_tear_down_chan(struct cppi41_channel *c)
530 { 530 {
531 struct cppi41_dd *cdd = c->cdd; 531 struct cppi41_dd *cdd = c->cdd;
532 struct cppi41_desc *td; 532 struct cppi41_desc *td;
533 u32 reg; 533 u32 reg;
534 u32 desc_phys; 534 u32 desc_phys;
535 u32 td_desc_phys; 535 u32 td_desc_phys;
536 536
537 td = cdd->cd; 537 td = cdd->cd;
538 td += cdd->first_td_desc; 538 td += cdd->first_td_desc;
539 539
540 td_desc_phys = cdd->descs_phys; 540 td_desc_phys = cdd->descs_phys;
541 td_desc_phys += cdd->first_td_desc * sizeof(struct cppi41_desc); 541 td_desc_phys += cdd->first_td_desc * sizeof(struct cppi41_desc);
542 542
543 if (!c->td_queued) { 543 if (!c->td_queued) {
544 cppi41_compute_td_desc(td); 544 cppi41_compute_td_desc(td);
545 __iowmb(); 545 __iowmb();
546 546
547 reg = (sizeof(struct cppi41_desc) - 24) / 4; 547 reg = (sizeof(struct cppi41_desc) - 24) / 4;
548 reg |= td_desc_phys; 548 reg |= td_desc_phys;
549 cppi_writel(reg, cdd->qmgr_mem + 549 cppi_writel(reg, cdd->qmgr_mem +
550 QMGR_QUEUE_D(cdd->td_queue.submit)); 550 QMGR_QUEUE_D(cdd->td_queue.submit));
551 551
552 reg = GCR_CHAN_ENABLE; 552 reg = GCR_CHAN_ENABLE;
553 if (!c->is_tx) { 553 if (!c->is_tx) {
554 reg |= GCR_STARV_RETRY; 554 reg |= GCR_STARV_RETRY;
555 reg |= GCR_DESC_TYPE_HOST; 555 reg |= GCR_DESC_TYPE_HOST;
556 reg |= c->q_comp_num; 556 reg |= c->q_comp_num;
557 } 557 }
558 reg |= GCR_TEARDOWN; 558 reg |= GCR_TEARDOWN;
559 cppi_writel(reg, c->gcr_reg); 559 cppi_writel(reg, c->gcr_reg);
560 c->td_queued = 1; 560 c->td_queued = 1;
561 c->td_retry = 100; 561 c->td_retry = 100;
562 } 562 }
563 563
564 if (!c->td_seen) { 564 if (!c->td_seen) {
565 unsigned td_comp_queue; 565 unsigned td_comp_queue;
566 566
567 if (c->is_tx) 567 if (c->is_tx)
568 td_comp_queue = cdd->td_queue.complete; 568 td_comp_queue = cdd->td_queue.complete;
569 else 569 else
570 td_comp_queue = c->q_comp_num; 570 td_comp_queue = c->q_comp_num;
571 571
572 desc_phys = cppi41_pop_desc(cdd, td_comp_queue); 572 desc_phys = cppi41_pop_desc(cdd, td_comp_queue);
573 if (desc_phys) { 573 if (desc_phys) {
574 __iormb(); 574 __iormb();
575 575
576 if (desc_phys == td_desc_phys) { 576 if (desc_phys == td_desc_phys) {
577 u32 pd0; 577 u32 pd0;
578 pd0 = td->pd0; 578 pd0 = td->pd0;
579 WARN_ON((pd0 >> DESC_TYPE) != DESC_TYPE_TEARD); 579 WARN_ON((pd0 >> DESC_TYPE) != DESC_TYPE_TEARD);
580 WARN_ON(!c->is_tx && !(pd0 & TD_DESC_IS_RX)); 580 WARN_ON(!c->is_tx && !(pd0 & TD_DESC_IS_RX));
581 WARN_ON((pd0 & 0x1f) != c->port_num); 581 WARN_ON((pd0 & 0x1f) != c->port_num);
582 } else { 582 } else {
583 WARN_ON_ONCE(1); 583 WARN_ON_ONCE(1);
584 } 584 }
585 c->td_seen = 1; 585 c->td_seen = 1;
586 } 586 }
587 } 587 }
588 if (!c->td_desc_seen) { 588 if (!c->td_desc_seen) {
589 desc_phys = cppi41_pop_desc(cdd, c->q_comp_num); 589 desc_phys = cppi41_pop_desc(cdd, c->q_comp_num);
590 if (desc_phys) { 590 if (desc_phys) {
591 __iormb(); 591 __iormb();
592 WARN_ON(c->desc_phys != desc_phys); 592 WARN_ON(c->desc_phys != desc_phys);
593 c->td_desc_seen = 1; 593 c->td_desc_seen = 1;
594 } 594 }
595 } 595 }
596 c->td_retry--; 596 c->td_retry--;
597 /* 597 /*
598 * If the TX descriptor / channel is in use, the caller needs to poke 598 * If the TX descriptor / channel is in use, the caller needs to poke
599 * its TD bit multiple times. After that the hardware releases the 599 * its TD bit multiple times. After that the hardware releases the
600 * transfer descriptor followed by the TD descriptor. Waiting does not 600 * transfer descriptor followed by the TD descriptor. Waiting does not
601 * seem to make any difference. 601 * seem to make any difference.
602 * RX seems to be thrown out right away. However, once the TearDown 602 * RX seems to be thrown out right away. However, once the TearDown
603 * descriptor gets through, we are done. If we have seen the transfer 603 * descriptor gets through, we are done. If we have seen the transfer
604 * descriptor before the TD, we fetch it from the enqueue queue; it has 604 * descriptor before the TD, we fetch it from the enqueue queue; it has
605 * to be there waiting for us. 605 * to be there waiting for us.
606 */ 606 */
607 if (!c->td_seen && c->td_retry) 607 if (!c->td_seen && c->td_retry)
608 return -EAGAIN; 608 return -EAGAIN;
609 609
610 WARN_ON(!c->td_retry); 610 WARN_ON(!c->td_retry);
611 if (!c->td_desc_seen) { 611 if (!c->td_desc_seen) {
612 desc_phys = cppi_readl(cdd->qmgr_mem + QMGR_QUEUE_D(c->q_num)); 612 desc_phys = cppi_readl(cdd->qmgr_mem + QMGR_QUEUE_D(c->q_num));
613 WARN_ON(!desc_phys); 613 WARN_ON(!desc_phys);
614 } 614 }
615 615
616 c->td_queued = 0; 616 c->td_queued = 0;
617 c->td_seen = 0; 617 c->td_seen = 0;
618 c->td_desc_seen = 0; 618 c->td_desc_seen = 0;
619 cppi_writel(0, c->gcr_reg); 619 cppi_writel(0, c->gcr_reg);
620 return 0; 620 return 0;
621 } 621 }
622 622
623 static int cppi41_stop_chan(struct dma_chan *chan) 623 static int cppi41_stop_chan(struct dma_chan *chan)
624 { 624 {
625 struct cppi41_channel *c = to_cpp41_chan(chan); 625 struct cppi41_channel *c = to_cpp41_chan(chan);
626 struct cppi41_dd *cdd = c->cdd; 626 struct cppi41_dd *cdd = c->cdd;
627 u32 desc_num; 627 u32 desc_num;
628 u32 desc_phys; 628 u32 desc_phys;
629 int ret; 629 int ret;
630 630
631 ret = cppi41_tear_down_chan(c); 631 ret = cppi41_tear_down_chan(c);
632 if (ret) 632 if (ret)
633 return ret; 633 return ret;
634 634
635 desc_phys = lower_32_bits(c->desc_phys); 635 desc_phys = lower_32_bits(c->desc_phys);
636 desc_num = (desc_phys - cdd->descs_phys) / sizeof(struct cppi41_desc); 636 desc_num = (desc_phys - cdd->descs_phys) / sizeof(struct cppi41_desc);
637 WARN_ON(!cdd->chan_busy[desc_num]); 637 WARN_ON(!cdd->chan_busy[desc_num]);
638 cdd->chan_busy[desc_num] = NULL; 638 cdd->chan_busy[desc_num] = NULL;
639 639
640 return 0; 640 return 0;
641 } 641 }
642 642
643 static int cppi41_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, 643 static int cppi41_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
644 unsigned long arg) 644 unsigned long arg)
645 { 645 {
646 struct cppi41_channel *c = to_cpp41_chan(chan); 646 struct cppi41_channel *c = to_cpp41_chan(chan);
647 int ret; 647 int ret;
648 648
649 switch (cmd) { 649 switch (cmd) {
650 case DMA_SLAVE_CONFIG: 650 case DMA_SLAVE_CONFIG:
651 ret = cpp41_cfg_chan(c, (struct dma_slave_config *) arg); 651 ret = cpp41_cfg_chan(c, (struct dma_slave_config *) arg);
652 break; 652 break;
653 653
654 case DMA_TERMINATE_ALL: 654 case DMA_TERMINATE_ALL:
655 ret = cppi41_stop_chan(chan); 655 ret = cppi41_stop_chan(chan);
656 break; 656 break;
657 657
658 default: 658 default:
659 ret = -ENXIO; 659 ret = -ENXIO;
660 break; 660 break;
661 } 661 }
662 return ret; 662 return ret;
663 } 663 }
664 664
665 static void cleanup_chans(struct cppi41_dd *cdd) 665 static void cleanup_chans(struct cppi41_dd *cdd)
666 { 666 {
667 while (!list_empty(&cdd->ddev.channels)) { 667 while (!list_empty(&cdd->ddev.channels)) {
668 struct cppi41_channel *cchan; 668 struct cppi41_channel *cchan;
669 669
670 cchan = list_first_entry(&cdd->ddev.channels, 670 cchan = list_first_entry(&cdd->ddev.channels,
671 struct cppi41_channel, chan.device_node); 671 struct cppi41_channel, chan.device_node);
672 list_del(&cchan->chan.device_node); 672 list_del(&cchan->chan.device_node);
673 kfree(cchan); 673 kfree(cchan);
674 } 674 }
675 } 675 }
676 676
677 static int cppi41_add_chans(struct device *dev, struct cppi41_dd *cdd) 677 static int cppi41_add_chans(struct device *dev, struct cppi41_dd *cdd)
678 { 678 {
679 struct cppi41_channel *cchan; 679 struct cppi41_channel *cchan;
680 int i; 680 int i;
681 int ret; 681 int ret;
682 u32 n_chans; 682 u32 n_chans;
683 683
684 ret = of_property_read_u32(dev->of_node, "#dma-channels", 684 ret = of_property_read_u32(dev->of_node, "#dma-channels",
685 &n_chans); 685 &n_chans);
686 if (ret) 686 if (ret)
687 return ret; 687 return ret;
688 /* 688 /*
689 * Each channel can only be used as TX or as RX. So we add twice as 689 * Each channel can only be used as TX or as RX. So we add twice as
690 * many DMA channels because USB can only do RX or TX. 690 * many DMA channels because USB can only do RX or TX.
691 */ 691 */
692 n_chans *= 2; 692 n_chans *= 2;
693 693
694 for (i = 0; i < n_chans; i++) { 694 for (i = 0; i < n_chans; i++) {
695 cchan = kzalloc(sizeof(*cchan), GFP_KERNEL); 695 cchan = kzalloc(sizeof(*cchan), GFP_KERNEL);
696 if (!cchan) 696 if (!cchan)
697 goto err; 697 goto err;
698 698
699 cchan->cdd = cdd; 699 cchan->cdd = cdd;
700 if (i & 1) { 700 if (i & 1) {
701 cchan->gcr_reg = cdd->ctrl_mem + DMA_TXGCR(i >> 1); 701 cchan->gcr_reg = cdd->ctrl_mem + DMA_TXGCR(i >> 1);
702 cchan->is_tx = 1; 702 cchan->is_tx = 1;
703 } else { 703 } else {
704 cchan->gcr_reg = cdd->ctrl_mem + DMA_RXGCR(i >> 1); 704 cchan->gcr_reg = cdd->ctrl_mem + DMA_RXGCR(i >> 1);
705 cchan->is_tx = 0; 705 cchan->is_tx = 0;
706 } 706 }
707 cchan->port_num = i >> 1; 707 cchan->port_num = i >> 1;
708 cchan->desc = &cdd->cd[i]; 708 cchan->desc = &cdd->cd[i];
709 cchan->desc_phys = cdd->descs_phys; 709 cchan->desc_phys = cdd->descs_phys;
710 cchan->desc_phys += i * sizeof(struct cppi41_desc); 710 cchan->desc_phys += i * sizeof(struct cppi41_desc);
711 cchan->chan.device = &cdd->ddev; 711 cchan->chan.device = &cdd->ddev;
712 list_add_tail(&cchan->chan.device_node, &cdd->ddev.channels); 712 list_add_tail(&cchan->chan.device_node, &cdd->ddev.channels);
713 } 713 }
714 cdd->first_td_desc = n_chans; 714 cdd->first_td_desc = n_chans;
715 715
716 return 0; 716 return 0;
717 err: 717 err:
718 cleanup_chans(cdd); 718 cleanup_chans(cdd);
719 return -ENOMEM; 719 return -ENOMEM;
720 } 720 }
721 721
722 static void purge_descs(struct device *dev, struct cppi41_dd *cdd) 722 static void purge_descs(struct device *dev, struct cppi41_dd *cdd)
723 { 723 {
724 unsigned int mem_decs; 724 unsigned int mem_decs;
725 int i; 725 int i;
726 726
727 mem_decs = ALLOC_DECS_NUM * sizeof(struct cppi41_desc); 727 mem_decs = ALLOC_DECS_NUM * sizeof(struct cppi41_desc);
728 728
729 for (i = 0; i < DESCS_AREAS; i++) { 729 for (i = 0; i < DESCS_AREAS; i++) {
730 730
731 cppi_writel(0, cdd->qmgr_mem + QMGR_MEMBASE(i)); 731 cppi_writel(0, cdd->qmgr_mem + QMGR_MEMBASE(i));
732 cppi_writel(0, cdd->qmgr_mem + QMGR_MEMCTRL(i)); 732 cppi_writel(0, cdd->qmgr_mem + QMGR_MEMCTRL(i));
733 733
734 dma_free_coherent(dev, mem_decs, cdd->cd, 734 dma_free_coherent(dev, mem_decs, cdd->cd,
735 cdd->descs_phys); 735 cdd->descs_phys);
736 } 736 }
737 } 737 }
738 738
739 static void disable_sched(struct cppi41_dd *cdd) 739 static void disable_sched(struct cppi41_dd *cdd)
740 { 740 {
741 cppi_writel(0, cdd->sched_mem + DMA_SCHED_CTRL); 741 cppi_writel(0, cdd->sched_mem + DMA_SCHED_CTRL);
742 } 742 }
743 743
744 static void deinit_cppi41(struct device *dev, struct cppi41_dd *cdd) 744 static void deinit_cppi41(struct device *dev, struct cppi41_dd *cdd)
745 { 745 {
746 disable_sched(cdd); 746 disable_sched(cdd);
747 747
748 purge_descs(dev, cdd); 748 purge_descs(dev, cdd);
749 749
750 cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM0_BASE); 750 cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM0_BASE);
751 cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM0_BASE); 751 cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM0_BASE);
752 dma_free_coherent(dev, QMGR_SCRATCH_SIZE, cdd->qmgr_scratch, 752 dma_free_coherent(dev, QMGR_SCRATCH_SIZE, cdd->qmgr_scratch,
753 cdd->scratch_phys); 753 cdd->scratch_phys);
754 } 754 }
755 755
756 static int init_descs(struct device *dev, struct cppi41_dd *cdd) 756 static int init_descs(struct device *dev, struct cppi41_dd *cdd)
757 { 757 {
758 unsigned int desc_size; 758 unsigned int desc_size;
759 unsigned int mem_decs; 759 unsigned int mem_decs;
760 int i; 760 int i;
761 u32 reg; 761 u32 reg;
762 u32 idx; 762 u32 idx;
763 763
764 BUILD_BUG_ON(sizeof(struct cppi41_desc) & 764 BUILD_BUG_ON(sizeof(struct cppi41_desc) &
765 (sizeof(struct cppi41_desc) - 1)); 765 (sizeof(struct cppi41_desc) - 1));
766 BUILD_BUG_ON(sizeof(struct cppi41_desc) < 32); 766 BUILD_BUG_ON(sizeof(struct cppi41_desc) < 32);
767 BUILD_BUG_ON(ALLOC_DECS_NUM < 32); 767 BUILD_BUG_ON(ALLOC_DECS_NUM < 32);
768 768
769 desc_size = sizeof(struct cppi41_desc); 769 desc_size = sizeof(struct cppi41_desc);
770 mem_decs = ALLOC_DECS_NUM * desc_size; 770 mem_decs = ALLOC_DECS_NUM * desc_size;
771 771
772 idx = 0; 772 idx = 0;
773 for (i = 0; i < DESCS_AREAS; i++) { 773 for (i = 0; i < DESCS_AREAS; i++) {
774 774
775 reg = idx << QMGR_MEMCTRL_IDX_SH; 775 reg = idx << QMGR_MEMCTRL_IDX_SH;
776 reg |= (ilog2(desc_size) - 5) << QMGR_MEMCTRL_DESC_SH; 776 reg |= (ilog2(desc_size) - 5) << QMGR_MEMCTRL_DESC_SH;
777 reg |= ilog2(ALLOC_DECS_NUM) - 5; 777 reg |= ilog2(ALLOC_DECS_NUM) - 5;
778 778
779 BUILD_BUG_ON(DESCS_AREAS != 1); 779 BUILD_BUG_ON(DESCS_AREAS != 1);
780 cdd->cd = dma_alloc_coherent(dev, mem_decs, 780 cdd->cd = dma_alloc_coherent(dev, mem_decs,
781 &cdd->descs_phys, GFP_KERNEL); 781 &cdd->descs_phys, GFP_KERNEL);
782 if (!cdd->cd) 782 if (!cdd->cd)
783 return -ENOMEM; 783 return -ENOMEM;
784 784
785 cppi_writel(cdd->descs_phys, cdd->qmgr_mem + QMGR_MEMBASE(i)); 785 cppi_writel(cdd->descs_phys, cdd->qmgr_mem + QMGR_MEMBASE(i));
786 cppi_writel(reg, cdd->qmgr_mem + QMGR_MEMCTRL(i)); 786 cppi_writel(reg, cdd->qmgr_mem + QMGR_MEMCTRL(i));
787 787
788 idx += ALLOC_DECS_NUM; 788 idx += ALLOC_DECS_NUM;
789 } 789 }
790 return 0; 790 return 0;
791 } 791 }
792 792
793 static void init_sched(struct cppi41_dd *cdd) 793 static void init_sched(struct cppi41_dd *cdd)
794 { 794 {
795 unsigned ch; 795 unsigned ch;
796 unsigned word; 796 unsigned word;
797 u32 reg; 797 u32 reg;
798 798
799 word = 0; 799 word = 0;
800 cppi_writel(0, cdd->sched_mem + DMA_SCHED_CTRL); 800 cppi_writel(0, cdd->sched_mem + DMA_SCHED_CTRL);
801 for (ch = 0; ch < 15 * 2; ch += 2) { 801 for (ch = 0; ch < 15 * 2; ch += 2) {
802 802
803 reg = SCHED_ENTRY0_CHAN(ch); 803 reg = SCHED_ENTRY0_CHAN(ch);
804 reg |= SCHED_ENTRY1_CHAN(ch) | SCHED_ENTRY1_IS_RX; 804 reg |= SCHED_ENTRY1_CHAN(ch) | SCHED_ENTRY1_IS_RX;
805 805
806 reg |= SCHED_ENTRY2_CHAN(ch + 1); 806 reg |= SCHED_ENTRY2_CHAN(ch + 1);
807 reg |= SCHED_ENTRY3_CHAN(ch + 1) | SCHED_ENTRY3_IS_RX; 807 reg |= SCHED_ENTRY3_CHAN(ch + 1) | SCHED_ENTRY3_IS_RX;
808 cppi_writel(reg, cdd->sched_mem + DMA_SCHED_WORD(word)); 808 cppi_writel(reg, cdd->sched_mem + DMA_SCHED_WORD(word));
809 word++; 809 word++;
810 } 810 }
811 reg = 15 * 2 * 2 - 1; 811 reg = 15 * 2 * 2 - 1;
812 reg |= DMA_SCHED_CTRL_EN; 812 reg |= DMA_SCHED_CTRL_EN;
813 cppi_writel(reg, cdd->sched_mem + DMA_SCHED_CTRL); 813 cppi_writel(reg, cdd->sched_mem + DMA_SCHED_CTRL);
814 } 814 }
815 815
816 static int init_cppi41(struct device *dev, struct cppi41_dd *cdd) 816 static int init_cppi41(struct device *dev, struct cppi41_dd *cdd)
817 { 817 {
818 int ret; 818 int ret;
819 819
820 BUILD_BUG_ON(QMGR_SCRATCH_SIZE > ((1 << 14) - 1)); 820 BUILD_BUG_ON(QMGR_SCRATCH_SIZE > ((1 << 14) - 1));
821 cdd->qmgr_scratch = dma_alloc_coherent(dev, QMGR_SCRATCH_SIZE, 821 cdd->qmgr_scratch = dma_alloc_coherent(dev, QMGR_SCRATCH_SIZE,
822 &cdd->scratch_phys, GFP_KERNEL); 822 &cdd->scratch_phys, GFP_KERNEL);
823 if (!cdd->qmgr_scratch) 823 if (!cdd->qmgr_scratch)
824 return -ENOMEM; 824 return -ENOMEM;
825 825
826 cppi_writel(cdd->scratch_phys, cdd->qmgr_mem + QMGR_LRAM0_BASE); 826 cppi_writel(cdd->scratch_phys, cdd->qmgr_mem + QMGR_LRAM0_BASE);
827 cppi_writel(QMGR_SCRATCH_SIZE, cdd->qmgr_mem + QMGR_LRAM_SIZE); 827 cppi_writel(QMGR_SCRATCH_SIZE, cdd->qmgr_mem + QMGR_LRAM_SIZE);
828 cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM1_BASE); 828 cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM1_BASE);
829 829
830 ret = init_descs(dev, cdd); 830 ret = init_descs(dev, cdd);
831 if (ret) 831 if (ret)
832 goto err_td; 832 goto err_td;
833 833
834 cppi_writel(cdd->td_queue.submit, cdd->ctrl_mem + DMA_TDFDQ); 834 cppi_writel(cdd->td_queue.submit, cdd->ctrl_mem + DMA_TDFDQ);
835 init_sched(cdd); 835 init_sched(cdd);
836 return 0; 836 return 0;
837 err_td: 837 err_td:
838 deinit_cppi41(dev, cdd); 838 deinit_cppi41(dev, cdd);
839 return ret; 839 return ret;
840 } 840 }
841 841
842 static struct platform_driver cpp41_dma_driver; 842 static struct platform_driver cpp41_dma_driver;
843 /* 843 /*
844 * The param format is: 844 * The param format is:
845 * X Y 845 * X Y
846 * X: Port 846 * X: Port
847 * Y: 0 = RX else TX 847 * Y: 0 = RX else TX
848 */ 848 */
849 #define INFO_PORT 0 849 #define INFO_PORT 0
850 #define INFO_IS_TX 1 850 #define INFO_IS_TX 1
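/*
 * Hypothetical devicetree usage of this two-cell spec (the labels and
 * cell values below are placeholders, not taken from this commit). The
 * controller node would need #dma-cells = <2> to match the args_count
 * check in cppi41_dma_xlate() below, with the cells ordered as
 * described above (port, then RX/TX flag):
 *
 *	dmas = <&cppi41dma 0 1>;	(port 0, TX)
 *	dma-names = "tx0";
 */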
851 851
852 static bool cpp41_dma_filter_fn(struct dma_chan *chan, void *param) 852 static bool cpp41_dma_filter_fn(struct dma_chan *chan, void *param)
853 { 853 {
854 struct cppi41_channel *cchan; 854 struct cppi41_channel *cchan;
855 struct cppi41_dd *cdd; 855 struct cppi41_dd *cdd;
856 const struct chan_queues *queues; 856 const struct chan_queues *queues;
857 u32 *num = param; 857 u32 *num = param;
858 858
859 if (chan->device->dev->driver != &cpp41_dma_driver.driver) 859 if (chan->device->dev->driver != &cpp41_dma_driver.driver)
860 return false; 860 return false;
861 861
862 cchan = to_cpp41_chan(chan); 862 cchan = to_cpp41_chan(chan);
863 863
864 if (cchan->port_num != num[INFO_PORT]) 864 if (cchan->port_num != num[INFO_PORT])
865 return false; 865 return false;
866 866
867 if (cchan->is_tx && !num[INFO_IS_TX]) 867 if (cchan->is_tx && !num[INFO_IS_TX])
868 return false; 868 return false;
869 cdd = cchan->cdd; 869 cdd = cchan->cdd;
870 if (cchan->is_tx) 870 if (cchan->is_tx)
871 queues = cdd->queues_tx; 871 queues = cdd->queues_tx;
872 else 872 else
873 queues = cdd->queues_rx; 873 queues = cdd->queues_rx;
874 874
875 BUILD_BUG_ON(ARRAY_SIZE(usb_queues_rx) != ARRAY_SIZE(usb_queues_tx)); 875 BUILD_BUG_ON(ARRAY_SIZE(usb_queues_rx) != ARRAY_SIZE(usb_queues_tx));
876 if (WARN_ON(cchan->port_num > ARRAY_SIZE(usb_queues_rx))) 876 if (WARN_ON(cchan->port_num > ARRAY_SIZE(usb_queues_rx)))
877 return false; 877 return false;
878 878
879 cchan->q_num = queues[cchan->port_num].submit; 879 cchan->q_num = queues[cchan->port_num].submit;
880 cchan->q_comp_num = queues[cchan->port_num].complete; 880 cchan->q_comp_num = queues[cchan->port_num].complete;
881 return true; 881 return true;
882 } 882 }
883 883
884 static struct of_dma_filter_info cpp41_dma_info = { 884 static struct of_dma_filter_info cpp41_dma_info = {
885 .filter_fn = cpp41_dma_filter_fn, 885 .filter_fn = cpp41_dma_filter_fn,
886 }; 886 };
887 887
888 static struct dma_chan *cppi41_dma_xlate(struct of_phandle_args *dma_spec, 888 static struct dma_chan *cppi41_dma_xlate(struct of_phandle_args *dma_spec,
889 struct of_dma *ofdma) 889 struct of_dma *ofdma)
890 { 890 {
891 int count = dma_spec->args_count; 891 int count = dma_spec->args_count;
892 struct of_dma_filter_info *info = ofdma->of_dma_data; 892 struct of_dma_filter_info *info = ofdma->of_dma_data;
893 893
894 if (!info || !info->filter_fn) 894 if (!info || !info->filter_fn)
895 return NULL; 895 return NULL;
896 896
897 if (count != 2) 897 if (count != 2)
898 return NULL; 898 return NULL;
899 899
900 return dma_request_channel(info->dma_cap, info->filter_fn, 900 return dma_request_channel(info->dma_cap, info->filter_fn,
901 &dma_spec->args[0]); 901 &dma_spec->args[0]);
902 } 902 }
903 903
904 static const struct cppi_glue_infos usb_infos = { 904 static const struct cppi_glue_infos usb_infos = {
905 .isr = cppi41_irq, 905 .isr = cppi41_irq,
906 .queues_rx = usb_queues_rx, 906 .queues_rx = usb_queues_rx,
907 .queues_tx = usb_queues_tx, 907 .queues_tx = usb_queues_tx,
908 .td_queue = { .submit = 31, .complete = 0 }, 908 .td_queue = { .submit = 31, .complete = 0 },
909 }; 909 };
910 910
911 static const struct of_device_id cppi41_dma_ids[] = { 911 static const struct of_device_id cppi41_dma_ids[] = {
912 { .compatible = "ti,am3359-cppi41", .data = &usb_infos}, 912 { .compatible = "ti,am3359-cppi41", .data = &usb_infos},
913 {}, 913 {},
914 }; 914 };
915 MODULE_DEVICE_TABLE(of, cppi41_dma_ids); 915 MODULE_DEVICE_TABLE(of, cppi41_dma_ids);
916 916
917 static const struct cppi_glue_infos *get_glue_info(struct device *dev) 917 static const struct cppi_glue_infos *get_glue_info(struct device *dev)
918 { 918 {
919 const struct of_device_id *of_id; 919 const struct of_device_id *of_id;
920 920
921 of_id = of_match_node(cppi41_dma_ids, dev->of_node); 921 of_id = of_match_node(cppi41_dma_ids, dev->of_node);
922 if (!of_id) 922 if (!of_id)
923 return NULL; 923 return NULL;
924 return of_id->data; 924 return of_id->data;
925 } 925 }
926 926
927 static int cppi41_dma_probe(struct platform_device *pdev) 927 static int cppi41_dma_probe(struct platform_device *pdev)
928 { 928 {
929 struct cppi41_dd *cdd; 929 struct cppi41_dd *cdd;
930 struct device *dev = &pdev->dev; 930 struct device *dev = &pdev->dev;
931 const struct cppi_glue_infos *glue_info; 931 const struct cppi_glue_infos *glue_info;
932 int irq; 932 int irq;
933 int ret; 933 int ret;
934 934
935 glue_info = get_glue_info(dev); 935 glue_info = get_glue_info(dev);
936 if (!glue_info) 936 if (!glue_info)
937 return -EINVAL; 937 return -EINVAL;
938 938
939 cdd = kzalloc(sizeof(*cdd), GFP_KERNEL); 939 cdd = kzalloc(sizeof(*cdd), GFP_KERNEL);
940 if (!cdd) 940 if (!cdd)
941 return -ENOMEM; 941 return -ENOMEM;
942 942
943 dma_cap_set(DMA_SLAVE, cdd->ddev.cap_mask); 943 dma_cap_set(DMA_SLAVE, cdd->ddev.cap_mask);
944 cdd->ddev.device_alloc_chan_resources = cppi41_dma_alloc_chan_resources; 944 cdd->ddev.device_alloc_chan_resources = cppi41_dma_alloc_chan_resources;
945 cdd->ddev.device_free_chan_resources = cppi41_dma_free_chan_resources; 945 cdd->ddev.device_free_chan_resources = cppi41_dma_free_chan_resources;
946 cdd->ddev.device_tx_status = cppi41_dma_tx_status; 946 cdd->ddev.device_tx_status = cppi41_dma_tx_status;
947 cdd->ddev.device_issue_pending = cppi41_dma_issue_pending; 947 cdd->ddev.device_issue_pending = cppi41_dma_issue_pending;
948 cdd->ddev.device_prep_slave_sg = cppi41_dma_prep_slave_sg; 948 cdd->ddev.device_prep_slave_sg = cppi41_dma_prep_slave_sg;
949 cdd->ddev.device_control = cppi41_dma_control; 949 cdd->ddev.device_control = cppi41_dma_control;
950 cdd->ddev.dev = dev; 950 cdd->ddev.dev = dev;
951 INIT_LIST_HEAD(&cdd->ddev.channels); 951 INIT_LIST_HEAD(&cdd->ddev.channels);
952 cpp41_dma_info.dma_cap = cdd->ddev.cap_mask; 952 cpp41_dma_info.dma_cap = cdd->ddev.cap_mask;
953 953
954 cdd->usbss_mem = of_iomap(dev->of_node, 0); 954 cdd->usbss_mem = of_iomap(dev->of_node, 0);
955 cdd->ctrl_mem = of_iomap(dev->of_node, 1); 955 cdd->ctrl_mem = of_iomap(dev->of_node, 1);
956 cdd->sched_mem = of_iomap(dev->of_node, 2); 956 cdd->sched_mem = of_iomap(dev->of_node, 2);
957 cdd->qmgr_mem = of_iomap(dev->of_node, 3); 957 cdd->qmgr_mem = of_iomap(dev->of_node, 3);
958 958
959 if (!cdd->usbss_mem || !cdd->ctrl_mem || !cdd->sched_mem || 959 if (!cdd->usbss_mem || !cdd->ctrl_mem || !cdd->sched_mem ||
960 !cdd->qmgr_mem) { 960 !cdd->qmgr_mem) {
961 ret = -ENXIO; 961 ret = -ENXIO;
962 goto err_remap; 962 goto err_remap;
963 } 963 }
964 964
965 pm_runtime_enable(dev); 965 pm_runtime_enable(dev);
966 ret = pm_runtime_get_sync(dev); 966 ret = pm_runtime_get_sync(dev);
967 if (ret) 967 if (ret)
968 goto err_get_sync; 968 goto err_get_sync;
969 969
970 cdd->queues_rx = glue_info->queues_rx; 970 cdd->queues_rx = glue_info->queues_rx;
971 cdd->queues_tx = glue_info->queues_tx; 971 cdd->queues_tx = glue_info->queues_tx;
972 cdd->td_queue = glue_info->td_queue; 972 cdd->td_queue = glue_info->td_queue;
973 973
974 ret = init_cppi41(dev, cdd); 974 ret = init_cppi41(dev, cdd);
975 if (ret) 975 if (ret)
976 goto err_init_cppi; 976 goto err_init_cppi;
977 977
978 ret = cppi41_add_chans(dev, cdd); 978 ret = cppi41_add_chans(dev, cdd);
979 if (ret) 979 if (ret)
980 goto err_chans; 980 goto err_chans;
981 981
982 irq = irq_of_parse_and_map(dev->of_node, 0); 982 irq = irq_of_parse_and_map(dev->of_node, 0);
983 if (!irq) 983 if (!irq)
984 goto err_irq; 984 goto err_irq;
985 985
986 cppi_writel(USBSS_IRQ_PD_COMP, cdd->usbss_mem + USBSS_IRQ_ENABLER); 986 cppi_writel(USBSS_IRQ_PD_COMP, cdd->usbss_mem + USBSS_IRQ_ENABLER);
987 987
988 ret = request_irq(irq, glue_info->isr, IRQF_SHARED, 988 ret = request_irq(irq, glue_info->isr, IRQF_SHARED,
989 dev_name(dev), cdd); 989 dev_name(dev), cdd);
990 if (ret) 990 if (ret)
991 goto err_irq; 991 goto err_irq;
992 cdd->irq = irq; 992 cdd->irq = irq;
993 993
994 ret = dma_async_device_register(&cdd->ddev); 994 ret = dma_async_device_register(&cdd->ddev);
995 if (ret) 995 if (ret)
996 goto err_dma_reg; 996 goto err_dma_reg;
997 997
998 ret = of_dma_controller_register(dev->of_node, 998 ret = of_dma_controller_register(dev->of_node,
999 cppi41_dma_xlate, &cpp41_dma_info); 999 cppi41_dma_xlate, &cpp41_dma_info);
1000 if (ret) 1000 if (ret)
1001 goto err_of; 1001 goto err_of;
1002 1002
1003 platform_set_drvdata(pdev, cdd); 1003 platform_set_drvdata(pdev, cdd);
1004 return 0; 1004 return 0;
1005 err_of: 1005 err_of:
1006 dma_async_device_unregister(&cdd->ddev); 1006 dma_async_device_unregister(&cdd->ddev);
1007 err_dma_reg: 1007 err_dma_reg:
1008 free_irq(irq, cdd); 1008 free_irq(irq, cdd);
1009 err_irq: 1009 err_irq:
1010 cppi_writel(0, cdd->usbss_mem + USBSS_IRQ_CLEARR); 1010 cppi_writel(0, cdd->usbss_mem + USBSS_IRQ_CLEARR);
1011 cleanup_chans(cdd); 1011 cleanup_chans(cdd);
1012 err_chans: 1012 err_chans:
1013 deinit_cppi41(dev, cdd); 1013 deinit_cppi41(dev, cdd);
1014 err_init_cppi: 1014 err_init_cppi:
1015 pm_runtime_put(dev); 1015 pm_runtime_put(dev);
1016 err_get_sync: 1016 err_get_sync:
1017 pm_runtime_disable(dev); 1017 pm_runtime_disable(dev);
1018 iounmap(cdd->usbss_mem); 1018 iounmap(cdd->usbss_mem);
1019 iounmap(cdd->ctrl_mem); 1019 iounmap(cdd->ctrl_mem);
1020 iounmap(cdd->sched_mem); 1020 iounmap(cdd->sched_mem);
1021 iounmap(cdd->qmgr_mem); 1021 iounmap(cdd->qmgr_mem);
1022 err_remap: 1022 err_remap:
1023 kfree(cdd); 1023 kfree(cdd);
1024 return ret; 1024 return ret;
1025 } 1025 }
1026 1026
1027 static int cppi41_dma_remove(struct platform_device *pdev) 1027 static int cppi41_dma_remove(struct platform_device *pdev)
1028 { 1028 {
1029 struct cppi41_dd *cdd = platform_get_drvdata(pdev); 1029 struct cppi41_dd *cdd = platform_get_drvdata(pdev);
1030 1030
1031 of_dma_controller_free(pdev->dev.of_node); 1031 of_dma_controller_free(pdev->dev.of_node);
1032 dma_async_device_unregister(&cdd->ddev); 1032 dma_async_device_unregister(&cdd->ddev);
1033 1033
1034 cppi_writel(0, cdd->usbss_mem + USBSS_IRQ_CLEARR); 1034 cppi_writel(0, cdd->usbss_mem + USBSS_IRQ_CLEARR);
1035 free_irq(cdd->irq, cdd); 1035 free_irq(cdd->irq, cdd);
1036 cleanup_chans(cdd); 1036 cleanup_chans(cdd);
1037 deinit_cppi41(&pdev->dev, cdd); 1037 deinit_cppi41(&pdev->dev, cdd);
1038 iounmap(cdd->usbss_mem); 1038 iounmap(cdd->usbss_mem);
1039 iounmap(cdd->ctrl_mem); 1039 iounmap(cdd->ctrl_mem);
1040 iounmap(cdd->sched_mem); 1040 iounmap(cdd->sched_mem);
1041 iounmap(cdd->qmgr_mem); 1041 iounmap(cdd->qmgr_mem);
1042 pm_runtime_put(&pdev->dev); 1042 pm_runtime_put(&pdev->dev);
1043 pm_runtime_disable(&pdev->dev); 1043 pm_runtime_disable(&pdev->dev);
1044 kfree(cdd); 1044 kfree(cdd);
1045 return 0; 1045 return 0;
1046 } 1046 }
1047 1047
1048 #ifdef CONFIG_PM_SLEEP 1048 #ifdef CONFIG_PM_SLEEP
1049 static int cppi41_suspend(struct device *dev) 1049 static int cppi41_suspend(struct device *dev)
1050 { 1050 {
1051 struct cppi41_dd *cdd = dev_get_drvdata(dev); 1051 struct cppi41_dd *cdd = dev_get_drvdata(dev);
1052 1052
1053 cppi_writel(0, cdd->usbss_mem + USBSS_IRQ_CLEARR); 1053 cppi_writel(0, cdd->usbss_mem + USBSS_IRQ_CLEARR);
1054 disable_sched(cdd); 1054 disable_sched(cdd);
1055 1055
1056 return 0; 1056 return 0;
1057 } 1057 }
1058 1058
1059 static int cppi41_resume(struct device *dev) 1059 static int cppi41_resume(struct device *dev)
1060 { 1060 {
1061 struct cppi41_dd *cdd = dev_get_drvdata(dev); 1061 struct cppi41_dd *cdd = dev_get_drvdata(dev);
1062 int i; 1062 int i;
1063 1063
1064 for (i = 0; i < DESCS_AREAS; i++) 1064 for (i = 0; i < DESCS_AREAS; i++)
1065 cppi_writel(cdd->descs_phys, cdd->qmgr_mem + QMGR_MEMBASE(i)); 1065 cppi_writel(cdd->descs_phys, cdd->qmgr_mem + QMGR_MEMBASE(i));
1066 1066
1067 init_sched(cdd); 1067 init_sched(cdd);
1068 cppi_writel(USBSS_IRQ_PD_COMP, cdd->usbss_mem + USBSS_IRQ_ENABLER); 1068 cppi_writel(USBSS_IRQ_PD_COMP, cdd->usbss_mem + USBSS_IRQ_ENABLER);
1069 1069
1070 return 0; 1070 return 0;
1071 } 1071 }
1072 #endif 1072 #endif
1073 1073
1074 static SIMPLE_DEV_PM_OPS(cppi41_pm_ops, cppi41_suspend, cppi41_resume); 1074 static SIMPLE_DEV_PM_OPS(cppi41_pm_ops, cppi41_suspend, cppi41_resume);
1075 1075
1076 static struct platform_driver cpp41_dma_driver = { 1076 static struct platform_driver cpp41_dma_driver = {
1077 .probe = cppi41_dma_probe, 1077 .probe = cppi41_dma_probe,
1078 .remove = cppi41_dma_remove, 1078 .remove = cppi41_dma_remove,
1079 .driver = { 1079 .driver = {
1080 .name = "cppi41-dma-engine", 1080 .name = "cppi41-dma-engine",
1081 .owner = THIS_MODULE, 1081 .owner = THIS_MODULE,
1082 .pm = &cppi41_pm_ops, 1082 .pm = &cppi41_pm_ops,
1083 .of_match_table = of_match_ptr(cppi41_dma_ids), 1083 .of_match_table = of_match_ptr(cppi41_dma_ids),
1084 }, 1084 },
1085 }; 1085 };
1086 1086
1087 module_platform_driver(cpp41_dma_driver); 1087 module_platform_driver(cpp41_dma_driver);
1088 MODULE_LICENSE("GPL"); 1088 MODULE_LICENSE("GPL");
1089 MODULE_AUTHOR("Sebastian Andrzej Siewior <bigeasy@linutronix.de>"); 1089 MODULE_AUTHOR("Sebastian Andrzej Siewior <bigeasy@linutronix.de>");
1090 1090
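For context on how the callbacks above get exercised: a peripheral driver (the MUSB glue, in this case) does not call into cppi41.c directly but goes through the generic dmaengine slave API. The following is a minimal, hypothetical consumer sketch; the channel name "tx", the device pointer and the already-mapped buffer are assumptions supplied by the caller, and only helpers declared in <linux/dmaengine.h> are used.

/* Illustrative only: drive one TX transfer on a cppi41 slave channel. */
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>

static int example_cppi41_tx(struct device *dev, dma_addr_t buf, size_t len)
{
	struct dma_slave_config cfg = { .direction = DMA_MEM_TO_DEV };
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *chan;
	dma_cookie_t cookie;

	/* Resolved from DT via cppi41_dma_xlate() + cpp41_dma_filter_fn() */
	chan = dma_request_slave_channel(dev, "tx");
	if (!chan)
		return -ENODEV;

	/* cpp41_cfg_chan() currently accepts any configuration */
	dmaengine_slave_config(chan, &cfg);

	/* Wraps buf in a one-entry sg list -> cppi41_dma_prep_slave_sg() */
	desc = dmaengine_prep_slave_single(chan, buf, len, DMA_MEM_TO_DEV,
					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		dma_release_channel(chan);
		return -EINVAL;
	}

	cookie = dmaengine_submit(desc);	/* cppi41_tx_submit() */
	dma_async_issue_pending(chan);		/* cppi41_dma_issue_pending() */

	return dma_submit_error(cookie);
}

Note that dmaengine_prep_slave_single() builds a one-entry scatterlist before calling the driver's prep_slave_sg hook, which is consistent with the single-segment assumption (the BUG_ON(num > 0)) in cppi41_dma_prep_slave_sg() above.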
drivers/dma/dma-jz4740.c
1 /* 1 /*
2 * Copyright (C) 2013, Lars-Peter Clausen <lars@metafoo.de> 2 * Copyright (C) 2013, Lars-Peter Clausen <lars@metafoo.de>
3 * JZ4740 DMAC support 3 * JZ4740 DMAC support
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License as published by the 6 * under the terms of the GNU General Public License as published by the
7 * Free Software Foundation; either version 2 of the License, or (at your 7 * Free Software Foundation; either version 2 of the License, or (at your
8 * option) any later version. 8 * option) any later version.
9 * 9 *
10 * You should have received a copy of the GNU General Public License along 10 * You should have received a copy of the GNU General Public License along
11 * with this program; if not, write to the Free Software Foundation, Inc., 11 * with this program; if not, write to the Free Software Foundation, Inc.,
12 * 675 Mass Ave, Cambridge, MA 02139, USA. 12 * 675 Mass Ave, Cambridge, MA 02139, USA.
13 * 13 *
14 */ 14 */
15 15
16 #include <linux/dmaengine.h> 16 #include <linux/dmaengine.h>
17 #include <linux/dma-mapping.h> 17 #include <linux/dma-mapping.h>
18 #include <linux/err.h> 18 #include <linux/err.h>
19 #include <linux/init.h> 19 #include <linux/init.h>
20 #include <linux/list.h> 20 #include <linux/list.h>
21 #include <linux/module.h> 21 #include <linux/module.h>
22 #include <linux/platform_device.h> 22 #include <linux/platform_device.h>
23 #include <linux/slab.h> 23 #include <linux/slab.h>
24 #include <linux/spinlock.h> 24 #include <linux/spinlock.h>
25 #include <linux/irq.h> 25 #include <linux/irq.h>
26 #include <linux/clk.h> 26 #include <linux/clk.h>
27 27
28 #include <asm/mach-jz4740/dma.h> 28 #include <asm/mach-jz4740/dma.h>
29 29
30 #include "virt-dma.h" 30 #include "virt-dma.h"
31 31
32 #define JZ_DMA_NR_CHANS 6 32 #define JZ_DMA_NR_CHANS 6
33 33
34 #define JZ_REG_DMA_SRC_ADDR(x) (0x00 + (x) * 0x20) 34 #define JZ_REG_DMA_SRC_ADDR(x) (0x00 + (x) * 0x20)
35 #define JZ_REG_DMA_DST_ADDR(x) (0x04 + (x) * 0x20) 35 #define JZ_REG_DMA_DST_ADDR(x) (0x04 + (x) * 0x20)
36 #define JZ_REG_DMA_TRANSFER_COUNT(x) (0x08 + (x) * 0x20) 36 #define JZ_REG_DMA_TRANSFER_COUNT(x) (0x08 + (x) * 0x20)
37 #define JZ_REG_DMA_REQ_TYPE(x) (0x0C + (x) * 0x20) 37 #define JZ_REG_DMA_REQ_TYPE(x) (0x0C + (x) * 0x20)
38 #define JZ_REG_DMA_STATUS_CTRL(x) (0x10 + (x) * 0x20) 38 #define JZ_REG_DMA_STATUS_CTRL(x) (0x10 + (x) * 0x20)
39 #define JZ_REG_DMA_CMD(x) (0x14 + (x) * 0x20) 39 #define JZ_REG_DMA_CMD(x) (0x14 + (x) * 0x20)
40 #define JZ_REG_DMA_DESC_ADDR(x) (0x18 + (x) * 0x20) 40 #define JZ_REG_DMA_DESC_ADDR(x) (0x18 + (x) * 0x20)
41 41
42 #define JZ_REG_DMA_CTRL 0x300 42 #define JZ_REG_DMA_CTRL 0x300
43 #define JZ_REG_DMA_IRQ 0x304 43 #define JZ_REG_DMA_IRQ 0x304
44 #define JZ_REG_DMA_DOORBELL 0x308 44 #define JZ_REG_DMA_DOORBELL 0x308
45 #define JZ_REG_DMA_DOORBELL_SET 0x30C 45 #define JZ_REG_DMA_DOORBELL_SET 0x30C
46 46
47 #define JZ_DMA_STATUS_CTRL_NO_DESC BIT(31) 47 #define JZ_DMA_STATUS_CTRL_NO_DESC BIT(31)
48 #define JZ_DMA_STATUS_CTRL_DESC_INV BIT(6) 48 #define JZ_DMA_STATUS_CTRL_DESC_INV BIT(6)
49 #define JZ_DMA_STATUS_CTRL_ADDR_ERR BIT(4) 49 #define JZ_DMA_STATUS_CTRL_ADDR_ERR BIT(4)
50 #define JZ_DMA_STATUS_CTRL_TRANSFER_DONE BIT(3) 50 #define JZ_DMA_STATUS_CTRL_TRANSFER_DONE BIT(3)
51 #define JZ_DMA_STATUS_CTRL_HALT BIT(2) 51 #define JZ_DMA_STATUS_CTRL_HALT BIT(2)
52 #define JZ_DMA_STATUS_CTRL_COUNT_TERMINATE BIT(1) 52 #define JZ_DMA_STATUS_CTRL_COUNT_TERMINATE BIT(1)
53 #define JZ_DMA_STATUS_CTRL_ENABLE BIT(0) 53 #define JZ_DMA_STATUS_CTRL_ENABLE BIT(0)
54 54
55 #define JZ_DMA_CMD_SRC_INC BIT(23) 55 #define JZ_DMA_CMD_SRC_INC BIT(23)
56 #define JZ_DMA_CMD_DST_INC BIT(22) 56 #define JZ_DMA_CMD_DST_INC BIT(22)
57 #define JZ_DMA_CMD_RDIL_MASK (0xf << 16) 57 #define JZ_DMA_CMD_RDIL_MASK (0xf << 16)
58 #define JZ_DMA_CMD_SRC_WIDTH_MASK (0x3 << 14) 58 #define JZ_DMA_CMD_SRC_WIDTH_MASK (0x3 << 14)
59 #define JZ_DMA_CMD_DST_WIDTH_MASK (0x3 << 12) 59 #define JZ_DMA_CMD_DST_WIDTH_MASK (0x3 << 12)
60 #define JZ_DMA_CMD_INTERVAL_LENGTH_MASK (0x7 << 8) 60 #define JZ_DMA_CMD_INTERVAL_LENGTH_MASK (0x7 << 8)
61 #define JZ_DMA_CMD_BLOCK_MODE BIT(7) 61 #define JZ_DMA_CMD_BLOCK_MODE BIT(7)
62 #define JZ_DMA_CMD_DESC_VALID BIT(4) 62 #define JZ_DMA_CMD_DESC_VALID BIT(4)
63 #define JZ_DMA_CMD_DESC_VALID_MODE BIT(3) 63 #define JZ_DMA_CMD_DESC_VALID_MODE BIT(3)
64 #define JZ_DMA_CMD_VALID_IRQ_ENABLE BIT(2) 64 #define JZ_DMA_CMD_VALID_IRQ_ENABLE BIT(2)
65 #define JZ_DMA_CMD_TRANSFER_IRQ_ENABLE BIT(1) 65 #define JZ_DMA_CMD_TRANSFER_IRQ_ENABLE BIT(1)
66 #define JZ_DMA_CMD_LINK_ENABLE BIT(0) 66 #define JZ_DMA_CMD_LINK_ENABLE BIT(0)
67 67
68 #define JZ_DMA_CMD_FLAGS_OFFSET 22 68 #define JZ_DMA_CMD_FLAGS_OFFSET 22
69 #define JZ_DMA_CMD_RDIL_OFFSET 16 69 #define JZ_DMA_CMD_RDIL_OFFSET 16
70 #define JZ_DMA_CMD_SRC_WIDTH_OFFSET 14 70 #define JZ_DMA_CMD_SRC_WIDTH_OFFSET 14
71 #define JZ_DMA_CMD_DST_WIDTH_OFFSET 12 71 #define JZ_DMA_CMD_DST_WIDTH_OFFSET 12
72 #define JZ_DMA_CMD_TRANSFER_SIZE_OFFSET 8 72 #define JZ_DMA_CMD_TRANSFER_SIZE_OFFSET 8
73 #define JZ_DMA_CMD_MODE_OFFSET 7 73 #define JZ_DMA_CMD_MODE_OFFSET 7
74 74
75 #define JZ_DMA_CTRL_PRIORITY_MASK (0x3 << 8) 75 #define JZ_DMA_CTRL_PRIORITY_MASK (0x3 << 8)
76 #define JZ_DMA_CTRL_HALT BIT(3) 76 #define JZ_DMA_CTRL_HALT BIT(3)
77 #define JZ_DMA_CTRL_ADDRESS_ERROR BIT(2) 77 #define JZ_DMA_CTRL_ADDRESS_ERROR BIT(2)
78 #define JZ_DMA_CTRL_ENABLE BIT(0) 78 #define JZ_DMA_CTRL_ENABLE BIT(0)
79 79
80 enum jz4740_dma_width { 80 enum jz4740_dma_width {
81 JZ4740_DMA_WIDTH_32BIT = 0, 81 JZ4740_DMA_WIDTH_32BIT = 0,
82 JZ4740_DMA_WIDTH_8BIT = 1, 82 JZ4740_DMA_WIDTH_8BIT = 1,
83 JZ4740_DMA_WIDTH_16BIT = 2, 83 JZ4740_DMA_WIDTH_16BIT = 2,
84 }; 84 };
85 85
86 enum jz4740_dma_transfer_size { 86 enum jz4740_dma_transfer_size {
87 JZ4740_DMA_TRANSFER_SIZE_4BYTE = 0, 87 JZ4740_DMA_TRANSFER_SIZE_4BYTE = 0,
88 JZ4740_DMA_TRANSFER_SIZE_1BYTE = 1, 88 JZ4740_DMA_TRANSFER_SIZE_1BYTE = 1,
89 JZ4740_DMA_TRANSFER_SIZE_2BYTE = 2, 89 JZ4740_DMA_TRANSFER_SIZE_2BYTE = 2,
90 JZ4740_DMA_TRANSFER_SIZE_16BYTE = 3, 90 JZ4740_DMA_TRANSFER_SIZE_16BYTE = 3,
91 JZ4740_DMA_TRANSFER_SIZE_32BYTE = 4, 91 JZ4740_DMA_TRANSFER_SIZE_32BYTE = 4,
92 }; 92 };
93 93
94 enum jz4740_dma_flags { 94 enum jz4740_dma_flags {
95 JZ4740_DMA_SRC_AUTOINC = 0x2, 95 JZ4740_DMA_SRC_AUTOINC = 0x2,
96 JZ4740_DMA_DST_AUTOINC = 0x1, 96 JZ4740_DMA_DST_AUTOINC = 0x1,
97 }; 97 };
98 98
99 enum jz4740_dma_mode { 99 enum jz4740_dma_mode {
100 JZ4740_DMA_MODE_SINGLE = 0, 100 JZ4740_DMA_MODE_SINGLE = 0,
101 JZ4740_DMA_MODE_BLOCK = 1, 101 JZ4740_DMA_MODE_BLOCK = 1,
102 }; 102 };
103 103
104 struct jz4740_dma_sg { 104 struct jz4740_dma_sg {
105 dma_addr_t addr; 105 dma_addr_t addr;
106 unsigned int len; 106 unsigned int len;
107 }; 107 };
108 108
109 struct jz4740_dma_desc { 109 struct jz4740_dma_desc {
110 struct virt_dma_desc vdesc; 110 struct virt_dma_desc vdesc;
111 111
112 enum dma_transfer_direction direction; 112 enum dma_transfer_direction direction;
113 bool cyclic; 113 bool cyclic;
114 114
115 unsigned int num_sgs; 115 unsigned int num_sgs;
116 struct jz4740_dma_sg sg[]; 116 struct jz4740_dma_sg sg[];
117 }; 117 };
118 118
119 struct jz4740_dmaengine_chan { 119 struct jz4740_dmaengine_chan {
120 struct virt_dma_chan vchan; 120 struct virt_dma_chan vchan;
121 unsigned int id; 121 unsigned int id;
122 122
123 dma_addr_t fifo_addr; 123 dma_addr_t fifo_addr;
124 unsigned int transfer_shift; 124 unsigned int transfer_shift;
125 125
126 struct jz4740_dma_desc *desc; 126 struct jz4740_dma_desc *desc;
127 unsigned int next_sg; 127 unsigned int next_sg;
128 }; 128 };
129 129
130 struct jz4740_dma_dev { 130 struct jz4740_dma_dev {
131 struct dma_device ddev; 131 struct dma_device ddev;
132 void __iomem *base; 132 void __iomem *base;
133 struct clk *clk; 133 struct clk *clk;
134 134
135 struct jz4740_dmaengine_chan chan[JZ_DMA_NR_CHANS]; 135 struct jz4740_dmaengine_chan chan[JZ_DMA_NR_CHANS];
136 }; 136 };
137 137
138 static struct jz4740_dma_dev *jz4740_dma_chan_get_dev( 138 static struct jz4740_dma_dev *jz4740_dma_chan_get_dev(
139 struct jz4740_dmaengine_chan *chan) 139 struct jz4740_dmaengine_chan *chan)
140 { 140 {
141 return container_of(chan->vchan.chan.device, struct jz4740_dma_dev, 141 return container_of(chan->vchan.chan.device, struct jz4740_dma_dev,
142 ddev); 142 ddev);
143 } 143 }
144 144
145 static struct jz4740_dmaengine_chan *to_jz4740_dma_chan(struct dma_chan *c) 145 static struct jz4740_dmaengine_chan *to_jz4740_dma_chan(struct dma_chan *c)
146 { 146 {
147 return container_of(c, struct jz4740_dmaengine_chan, vchan.chan); 147 return container_of(c, struct jz4740_dmaengine_chan, vchan.chan);
148 } 148 }
149 149
150 static struct jz4740_dma_desc *to_jz4740_dma_desc(struct virt_dma_desc *vdesc) 150 static struct jz4740_dma_desc *to_jz4740_dma_desc(struct virt_dma_desc *vdesc)
151 { 151 {
152 return container_of(vdesc, struct jz4740_dma_desc, vdesc); 152 return container_of(vdesc, struct jz4740_dma_desc, vdesc);
153 } 153 }
154 154
155 static inline uint32_t jz4740_dma_read(struct jz4740_dma_dev *dmadev, 155 static inline uint32_t jz4740_dma_read(struct jz4740_dma_dev *dmadev,
156 unsigned int reg) 156 unsigned int reg)
157 { 157 {
158 return readl(dmadev->base + reg); 158 return readl(dmadev->base + reg);
159 } 159 }
160 160
161 static inline void jz4740_dma_write(struct jz4740_dma_dev *dmadev, 161 static inline void jz4740_dma_write(struct jz4740_dma_dev *dmadev,
162 unsigned reg, uint32_t val) 162 unsigned reg, uint32_t val)
163 { 163 {
164 writel(val, dmadev->base + reg); 164 writel(val, dmadev->base + reg);
165 } 165 }
166 166
167 static inline void jz4740_dma_write_mask(struct jz4740_dma_dev *dmadev, 167 static inline void jz4740_dma_write_mask(struct jz4740_dma_dev *dmadev,
168 unsigned int reg, uint32_t val, uint32_t mask) 168 unsigned int reg, uint32_t val, uint32_t mask)
169 { 169 {
170 uint32_t tmp; 170 uint32_t tmp;
171 171
172 tmp = jz4740_dma_read(dmadev, reg); 172 tmp = jz4740_dma_read(dmadev, reg);
173 tmp &= ~mask; 173 tmp &= ~mask;
174 tmp |= val; 174 tmp |= val;
175 jz4740_dma_write(dmadev, reg, tmp); 175 jz4740_dma_write(dmadev, reg, tmp);
176 } 176 }
177 177
178 static struct jz4740_dma_desc *jz4740_dma_alloc_desc(unsigned int num_sgs) 178 static struct jz4740_dma_desc *jz4740_dma_alloc_desc(unsigned int num_sgs)
179 { 179 {
180 return kzalloc(sizeof(struct jz4740_dma_desc) + 180 return kzalloc(sizeof(struct jz4740_dma_desc) +
181 sizeof(struct jz4740_dma_sg) * num_sgs, GFP_ATOMIC); 181 sizeof(struct jz4740_dma_sg) * num_sgs, GFP_ATOMIC);
182 } 182 }
183 183
184 static enum jz4740_dma_width jz4740_dma_width(enum dma_slave_buswidth width) 184 static enum jz4740_dma_width jz4740_dma_width(enum dma_slave_buswidth width)
185 { 185 {
186 switch (width) { 186 switch (width) {
187 case DMA_SLAVE_BUSWIDTH_1_BYTE: 187 case DMA_SLAVE_BUSWIDTH_1_BYTE:
188 return JZ4740_DMA_WIDTH_8BIT; 188 return JZ4740_DMA_WIDTH_8BIT;
189 case DMA_SLAVE_BUSWIDTH_2_BYTES: 189 case DMA_SLAVE_BUSWIDTH_2_BYTES:
190 return JZ4740_DMA_WIDTH_16BIT; 190 return JZ4740_DMA_WIDTH_16BIT;
191 case DMA_SLAVE_BUSWIDTH_4_BYTES: 191 case DMA_SLAVE_BUSWIDTH_4_BYTES:
192 return JZ4740_DMA_WIDTH_32BIT; 192 return JZ4740_DMA_WIDTH_32BIT;
193 default: 193 default:
194 return JZ4740_DMA_WIDTH_32BIT; 194 return JZ4740_DMA_WIDTH_32BIT;
195 } 195 }
196 } 196 }
197 197
198 static enum jz4740_dma_transfer_size jz4740_dma_maxburst(u32 maxburst) 198 static enum jz4740_dma_transfer_size jz4740_dma_maxburst(u32 maxburst)
199 { 199 {
200 if (maxburst <= 1) 200 if (maxburst <= 1)
201 return JZ4740_DMA_TRANSFER_SIZE_1BYTE; 201 return JZ4740_DMA_TRANSFER_SIZE_1BYTE;
202 else if (maxburst <= 3) 202 else if (maxburst <= 3)
203 return JZ4740_DMA_TRANSFER_SIZE_2BYTE; 203 return JZ4740_DMA_TRANSFER_SIZE_2BYTE;
204 else if (maxburst <= 15) 204 else if (maxburst <= 15)
205 return JZ4740_DMA_TRANSFER_SIZE_4BYTE; 205 return JZ4740_DMA_TRANSFER_SIZE_4BYTE;
206 else if (maxburst <= 31) 206 else if (maxburst <= 31)
207 return JZ4740_DMA_TRANSFER_SIZE_16BYTE; 207 return JZ4740_DMA_TRANSFER_SIZE_16BYTE;
208 208
209 return JZ4740_DMA_TRANSFER_SIZE_32BYTE; 209 return JZ4740_DMA_TRANSFER_SIZE_32BYTE;
210 } 210 }
211 211
212 static int jz4740_dma_slave_config(struct dma_chan *c, 212 static int jz4740_dma_slave_config(struct dma_chan *c,
213 const struct dma_slave_config *config) 213 const struct dma_slave_config *config)
214 { 214 {
215 struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c); 215 struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c);
216 struct jz4740_dma_dev *dmadev = jz4740_dma_chan_get_dev(chan); 216 struct jz4740_dma_dev *dmadev = jz4740_dma_chan_get_dev(chan);
217 enum jz4740_dma_width src_width; 217 enum jz4740_dma_width src_width;
218 enum jz4740_dma_width dst_width; 218 enum jz4740_dma_width dst_width;
219 enum jz4740_dma_transfer_size transfer_size; 219 enum jz4740_dma_transfer_size transfer_size;
220 enum jz4740_dma_flags flags; 220 enum jz4740_dma_flags flags;
221 uint32_t cmd; 221 uint32_t cmd;
222 222
223 switch (config->direction) { 223 switch (config->direction) {
224 case DMA_MEM_TO_DEV: 224 case DMA_MEM_TO_DEV:
225 flags = JZ4740_DMA_SRC_AUTOINC; 225 flags = JZ4740_DMA_SRC_AUTOINC;
226 transfer_size = jz4740_dma_maxburst(config->dst_maxburst); 226 transfer_size = jz4740_dma_maxburst(config->dst_maxburst);
227 chan->fifo_addr = config->dst_addr; 227 chan->fifo_addr = config->dst_addr;
228 break; 228 break;
229 case DMA_DEV_TO_MEM: 229 case DMA_DEV_TO_MEM:
230 flags = JZ4740_DMA_DST_AUTOINC; 230 flags = JZ4740_DMA_DST_AUTOINC;
231 transfer_size = jz4740_dma_maxburst(config->src_maxburst); 231 transfer_size = jz4740_dma_maxburst(config->src_maxburst);
232 chan->fifo_addr = config->src_addr; 232 chan->fifo_addr = config->src_addr;
233 break; 233 break;
234 default: 234 default:
235 return -EINVAL; 235 return -EINVAL;
236 } 236 }
237 237
238 src_width = jz4740_dma_width(config->src_addr_width); 238 src_width = jz4740_dma_width(config->src_addr_width);
239 dst_width = jz4740_dma_width(config->dst_addr_width); 239 dst_width = jz4740_dma_width(config->dst_addr_width);
240 240
241 switch (transfer_size) { 241 switch (transfer_size) {
242 case JZ4740_DMA_TRANSFER_SIZE_2BYTE: 242 case JZ4740_DMA_TRANSFER_SIZE_2BYTE:
243 chan->transfer_shift = 1; 243 chan->transfer_shift = 1;
244 break; 244 break;
245 case JZ4740_DMA_TRANSFER_SIZE_4BYTE: 245 case JZ4740_DMA_TRANSFER_SIZE_4BYTE:
246 chan->transfer_shift = 2; 246 chan->transfer_shift = 2;
247 break; 247 break;
248 case JZ4740_DMA_TRANSFER_SIZE_16BYTE: 248 case JZ4740_DMA_TRANSFER_SIZE_16BYTE:
249 chan->transfer_shift = 4; 249 chan->transfer_shift = 4;
250 break; 250 break;
251 case JZ4740_DMA_TRANSFER_SIZE_32BYTE: 251 case JZ4740_DMA_TRANSFER_SIZE_32BYTE:
252 chan->transfer_shift = 5; 252 chan->transfer_shift = 5;
253 break; 253 break;
254 default: 254 default:
255 chan->transfer_shift = 0; 255 chan->transfer_shift = 0;
256 break; 256 break;
257 } 257 }
258 258
259 cmd = flags << JZ_DMA_CMD_FLAGS_OFFSET; 259 cmd = flags << JZ_DMA_CMD_FLAGS_OFFSET;
260 cmd |= src_width << JZ_DMA_CMD_SRC_WIDTH_OFFSET; 260 cmd |= src_width << JZ_DMA_CMD_SRC_WIDTH_OFFSET;
261 cmd |= dst_width << JZ_DMA_CMD_DST_WIDTH_OFFSET; 261 cmd |= dst_width << JZ_DMA_CMD_DST_WIDTH_OFFSET;
262 cmd |= transfer_size << JZ_DMA_CMD_TRANSFER_SIZE_OFFSET; 262 cmd |= transfer_size << JZ_DMA_CMD_TRANSFER_SIZE_OFFSET;
263 cmd |= JZ4740_DMA_MODE_SINGLE << JZ_DMA_CMD_MODE_OFFSET; 263 cmd |= JZ4740_DMA_MODE_SINGLE << JZ_DMA_CMD_MODE_OFFSET;
264 cmd |= JZ_DMA_CMD_TRANSFER_IRQ_ENABLE; 264 cmd |= JZ_DMA_CMD_TRANSFER_IRQ_ENABLE;
265 265
266 jz4740_dma_write(dmadev, JZ_REG_DMA_CMD(chan->id), cmd); 266 jz4740_dma_write(dmadev, JZ_REG_DMA_CMD(chan->id), cmd);
267 jz4740_dma_write(dmadev, JZ_REG_DMA_STATUS_CTRL(chan->id), 0); 267 jz4740_dma_write(dmadev, JZ_REG_DMA_STATUS_CTRL(chan->id), 0);
268 jz4740_dma_write(dmadev, JZ_REG_DMA_REQ_TYPE(chan->id), 268 jz4740_dma_write(dmadev, JZ_REG_DMA_REQ_TYPE(chan->id),
269 config->slave_id); 269 config->slave_id);
270 270
271 return 0; 271 return 0;
272 } 272 }
273 273
274 static int jz4740_dma_terminate_all(struct dma_chan *c) 274 static int jz4740_dma_terminate_all(struct dma_chan *c)
275 { 275 {
276 struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c); 276 struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c);
277 struct jz4740_dma_dev *dmadev = jz4740_dma_chan_get_dev(chan); 277 struct jz4740_dma_dev *dmadev = jz4740_dma_chan_get_dev(chan);
278 unsigned long flags; 278 unsigned long flags;
279 LIST_HEAD(head); 279 LIST_HEAD(head);
280 280
281 spin_lock_irqsave(&chan->vchan.lock, flags); 281 spin_lock_irqsave(&chan->vchan.lock, flags);
282 jz4740_dma_write_mask(dmadev, JZ_REG_DMA_STATUS_CTRL(chan->id), 0, 282 jz4740_dma_write_mask(dmadev, JZ_REG_DMA_STATUS_CTRL(chan->id), 0,
283 JZ_DMA_STATUS_CTRL_ENABLE); 283 JZ_DMA_STATUS_CTRL_ENABLE);
284 chan->desc = NULL; 284 chan->desc = NULL;
285 vchan_get_all_descriptors(&chan->vchan, &head); 285 vchan_get_all_descriptors(&chan->vchan, &head);
286 spin_unlock_irqrestore(&chan->vchan.lock, flags); 286 spin_unlock_irqrestore(&chan->vchan.lock, flags);
287 287
288 vchan_dma_desc_free_list(&chan->vchan, &head); 288 vchan_dma_desc_free_list(&chan->vchan, &head);
289 289
290 return 0; 290 return 0;
291 } 291 }
292 292
293 static int jz4740_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, 293 static int jz4740_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
294 unsigned long arg) 294 unsigned long arg)
295 { 295 {
296 struct dma_slave_config *config = (struct dma_slave_config *)arg; 296 struct dma_slave_config *config = (struct dma_slave_config *)arg;
297 297
298 switch (cmd) { 298 switch (cmd) {
299 case DMA_SLAVE_CONFIG: 299 case DMA_SLAVE_CONFIG:
300 return jz4740_dma_slave_config(chan, config); 300 return jz4740_dma_slave_config(chan, config);
301 case DMA_TERMINATE_ALL: 301 case DMA_TERMINATE_ALL:
302 return jz4740_dma_terminate_all(chan); 302 return jz4740_dma_terminate_all(chan);
303 default: 303 default:
304 return -ENOSYS; 304 return -ENOSYS;
305 } 305 }
306 } 306 }
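/*
 * Editor's sketch (not part of the original driver): how a slave client of
 * this era reaches jz4740_dma_control() through the generic dmaengine
 * wrappers.  The FIFO address, bus width, burst size and slave_id below are
 * made-up placeholders, not values taken from JZ4740 documentation.
 */
static int example_configure_tx(struct dma_chan *chan)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_MEM_TO_DEV,
		.dst_addr	= 0x10020010,			/* hypothetical FIFO address */
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_2_BYTES,
		.dst_maxburst	= 16,
		.slave_id	= 0,				/* hypothetical request line */
	};

	/* dispatched as DMA_SLAVE_CONFIG -> jz4740_dma_slave_config() */
	return dmaengine_slave_config(chan, &cfg);
}

static void example_stop(struct dma_chan *chan)
{
	/* dispatched as DMA_TERMINATE_ALL -> jz4740_dma_terminate_all() */
	dmaengine_terminate_all(chan);
}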
307 307
308 static int jz4740_dma_start_transfer(struct jz4740_dmaengine_chan *chan) 308 static int jz4740_dma_start_transfer(struct jz4740_dmaengine_chan *chan)
309 { 309 {
310 struct jz4740_dma_dev *dmadev = jz4740_dma_chan_get_dev(chan); 310 struct jz4740_dma_dev *dmadev = jz4740_dma_chan_get_dev(chan);
311 dma_addr_t src_addr, dst_addr; 311 dma_addr_t src_addr, dst_addr;
312 struct virt_dma_desc *vdesc; 312 struct virt_dma_desc *vdesc;
313 struct jz4740_dma_sg *sg; 313 struct jz4740_dma_sg *sg;
314 314
315 jz4740_dma_write_mask(dmadev, JZ_REG_DMA_STATUS_CTRL(chan->id), 0, 315 jz4740_dma_write_mask(dmadev, JZ_REG_DMA_STATUS_CTRL(chan->id), 0,
316 JZ_DMA_STATUS_CTRL_ENABLE); 316 JZ_DMA_STATUS_CTRL_ENABLE);
317 317
318 if (!chan->desc) { 318 if (!chan->desc) {
319 vdesc = vchan_next_desc(&chan->vchan); 319 vdesc = vchan_next_desc(&chan->vchan);
320 if (!vdesc) 320 if (!vdesc)
321 return 0; 321 return 0;
322 chan->desc = to_jz4740_dma_desc(vdesc); 322 chan->desc = to_jz4740_dma_desc(vdesc);
323 chan->next_sg = 0; 323 chan->next_sg = 0;
324 } 324 }
325 325
326 if (chan->next_sg == chan->desc->num_sgs) 326 if (chan->next_sg == chan->desc->num_sgs)
327 chan->next_sg = 0; 327 chan->next_sg = 0;
328 328
329 sg = &chan->desc->sg[chan->next_sg]; 329 sg = &chan->desc->sg[chan->next_sg];
330 330
331 if (chan->desc->direction == DMA_MEM_TO_DEV) { 331 if (chan->desc->direction == DMA_MEM_TO_DEV) {
332 src_addr = sg->addr; 332 src_addr = sg->addr;
333 dst_addr = chan->fifo_addr; 333 dst_addr = chan->fifo_addr;
334 } else { 334 } else {
335 src_addr = chan->fifo_addr; 335 src_addr = chan->fifo_addr;
336 dst_addr = sg->addr; 336 dst_addr = sg->addr;
337 } 337 }
338 jz4740_dma_write(dmadev, JZ_REG_DMA_SRC_ADDR(chan->id), src_addr); 338 jz4740_dma_write(dmadev, JZ_REG_DMA_SRC_ADDR(chan->id), src_addr);
339 jz4740_dma_write(dmadev, JZ_REG_DMA_DST_ADDR(chan->id), dst_addr); 339 jz4740_dma_write(dmadev, JZ_REG_DMA_DST_ADDR(chan->id), dst_addr);
340 jz4740_dma_write(dmadev, JZ_REG_DMA_TRANSFER_COUNT(chan->id), 340 jz4740_dma_write(dmadev, JZ_REG_DMA_TRANSFER_COUNT(chan->id),
341 sg->len >> chan->transfer_shift); 341 sg->len >> chan->transfer_shift);
342 342
343 chan->next_sg++; 343 chan->next_sg++;
344 344
345 jz4740_dma_write_mask(dmadev, JZ_REG_DMA_STATUS_CTRL(chan->id), 345 jz4740_dma_write_mask(dmadev, JZ_REG_DMA_STATUS_CTRL(chan->id),
346 JZ_DMA_STATUS_CTRL_NO_DESC | JZ_DMA_STATUS_CTRL_ENABLE, 346 JZ_DMA_STATUS_CTRL_NO_DESC | JZ_DMA_STATUS_CTRL_ENABLE,
347 JZ_DMA_STATUS_CTRL_HALT | JZ_DMA_STATUS_CTRL_NO_DESC | 347 JZ_DMA_STATUS_CTRL_HALT | JZ_DMA_STATUS_CTRL_NO_DESC |
348 JZ_DMA_STATUS_CTRL_ENABLE); 348 JZ_DMA_STATUS_CTRL_ENABLE);
349 349
350 jz4740_dma_write_mask(dmadev, JZ_REG_DMA_CTRL, 350 jz4740_dma_write_mask(dmadev, JZ_REG_DMA_CTRL,
351 JZ_DMA_CTRL_ENABLE, 351 JZ_DMA_CTRL_ENABLE,
352 JZ_DMA_CTRL_HALT | JZ_DMA_CTRL_ENABLE); 352 JZ_DMA_CTRL_HALT | JZ_DMA_CTRL_ENABLE);
353 353
354 return 0; 354 return 0;
355 } 355 }
356 356
357 static void jz4740_dma_chan_irq(struct jz4740_dmaengine_chan *chan) 357 static void jz4740_dma_chan_irq(struct jz4740_dmaengine_chan *chan)
358 { 358 {
359 spin_lock(&chan->vchan.lock); 359 spin_lock(&chan->vchan.lock);
360 if (chan->desc) { 360 if (chan->desc) {
361 		if (chan->desc->cyclic) { 361 		if (chan->desc->cyclic) {
362 			vchan_cyclic_callback(&chan->desc->vdesc); 362 			vchan_cyclic_callback(&chan->desc->vdesc);
363 		} else { 363 		} else {
364 			if (chan->next_sg == chan->desc->num_sgs) { 364 			if (chan->next_sg == chan->desc->num_sgs) {
365 				vchan_cookie_complete(&chan->desc->vdesc); 365 				vchan_cookie_complete(&chan->desc->vdesc);
366 				chan->desc = NULL; 366 				chan->desc = NULL;
367 			} 367 			}
368 } 368 }
369 } 369 }
370 jz4740_dma_start_transfer(chan); 370 jz4740_dma_start_transfer(chan);
371 spin_unlock(&chan->vchan.lock); 371 spin_unlock(&chan->vchan.lock);
372 } 372 }
373 373
374 static irqreturn_t jz4740_dma_irq(int irq, void *devid) 374 static irqreturn_t jz4740_dma_irq(int irq, void *devid)
375 { 375 {
376 struct jz4740_dma_dev *dmadev = devid; 376 struct jz4740_dma_dev *dmadev = devid;
377 uint32_t irq_status; 377 uint32_t irq_status;
378 unsigned int i; 378 unsigned int i;
379 379
380 irq_status = readl(dmadev->base + JZ_REG_DMA_IRQ); 380 irq_status = readl(dmadev->base + JZ_REG_DMA_IRQ);
381 381
382 	for (i = 0; i < JZ_DMA_NR_CHANS; ++i) { 382 	for (i = 0; i < JZ_DMA_NR_CHANS; ++i) {
383 if (irq_status & (1 << i)) { 383 if (irq_status & (1 << i)) {
384 jz4740_dma_write_mask(dmadev, 384 jz4740_dma_write_mask(dmadev,
385 JZ_REG_DMA_STATUS_CTRL(i), 0, 385 JZ_REG_DMA_STATUS_CTRL(i), 0,
386 JZ_DMA_STATUS_CTRL_ENABLE | 386 JZ_DMA_STATUS_CTRL_ENABLE |
387 JZ_DMA_STATUS_CTRL_TRANSFER_DONE); 387 JZ_DMA_STATUS_CTRL_TRANSFER_DONE);
388 388
389 jz4740_dma_chan_irq(&dmadev->chan[i]); 389 jz4740_dma_chan_irq(&dmadev->chan[i]);
390 } 390 }
391 } 391 }
392 392
393 return IRQ_HANDLED; 393 return IRQ_HANDLED;
394 } 394 }
395 395
396 static void jz4740_dma_issue_pending(struct dma_chan *c) 396 static void jz4740_dma_issue_pending(struct dma_chan *c)
397 { 397 {
398 struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c); 398 struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c);
399 unsigned long flags; 399 unsigned long flags;
400 400
401 spin_lock_irqsave(&chan->vchan.lock, flags); 401 spin_lock_irqsave(&chan->vchan.lock, flags);
402 if (vchan_issue_pending(&chan->vchan) && !chan->desc) 402 if (vchan_issue_pending(&chan->vchan) && !chan->desc)
403 jz4740_dma_start_transfer(chan); 403 jz4740_dma_start_transfer(chan);
404 spin_unlock_irqrestore(&chan->vchan.lock, flags); 404 spin_unlock_irqrestore(&chan->vchan.lock, flags);
405 } 405 }
406 406
407 static struct dma_async_tx_descriptor *jz4740_dma_prep_slave_sg( 407 static struct dma_async_tx_descriptor *jz4740_dma_prep_slave_sg(
408 struct dma_chan *c, struct scatterlist *sgl, 408 struct dma_chan *c, struct scatterlist *sgl,
409 unsigned int sg_len, enum dma_transfer_direction direction, 409 unsigned int sg_len, enum dma_transfer_direction direction,
410 unsigned long flags, void *context) 410 unsigned long flags, void *context)
411 { 411 {
412 struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c); 412 struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c);
413 struct jz4740_dma_desc *desc; 413 struct jz4740_dma_desc *desc;
414 struct scatterlist *sg; 414 struct scatterlist *sg;
415 unsigned int i; 415 unsigned int i;
416 416
417 desc = jz4740_dma_alloc_desc(sg_len); 417 desc = jz4740_dma_alloc_desc(sg_len);
418 if (!desc) 418 if (!desc)
419 return NULL; 419 return NULL;
420 420
421 for_each_sg(sgl, sg, sg_len, i) { 421 for_each_sg(sgl, sg, sg_len, i) {
422 desc->sg[i].addr = sg_dma_address(sg); 422 desc->sg[i].addr = sg_dma_address(sg);
423 desc->sg[i].len = sg_dma_len(sg); 423 desc->sg[i].len = sg_dma_len(sg);
424 } 424 }
425 425
426 desc->num_sgs = sg_len; 426 desc->num_sgs = sg_len;
427 desc->direction = direction; 427 desc->direction = direction;
428 desc->cyclic = false; 428 desc->cyclic = false;
429 429
430 return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags); 430 return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
431 } 431 }
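/*
 * Editor's sketch (assumption, not from the original source): the client
 * side of the slave_sg path above.  The device, scatterlist and flags are
 * illustrative; the scatterlist must be DMA-mapped before the prep call,
 * and a negative return value is an error (see dma_submit_error()).
 */
static dma_cookie_t example_queue_sg(struct dma_chan *chan, struct device *dev,
	struct scatterlist *sgl, unsigned int nents)
{
	struct dma_async_tx_descriptor *desc;
	int mapped;

	mapped = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
	if (!mapped)
		return -ENOMEM;

	desc = dmaengine_prep_slave_sg(chan, sgl, mapped, DMA_MEM_TO_DEV,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -ENOMEM;

	return dmaengine_submit(desc);	/* then dma_async_issue_pending(chan) */
}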
432 432
433 static struct dma_async_tx_descriptor *jz4740_dma_prep_dma_cyclic( 433 static struct dma_async_tx_descriptor *jz4740_dma_prep_dma_cyclic(
434 struct dma_chan *c, dma_addr_t buf_addr, size_t buf_len, 434 struct dma_chan *c, dma_addr_t buf_addr, size_t buf_len,
435 size_t period_len, enum dma_transfer_direction direction, 435 size_t period_len, enum dma_transfer_direction direction,
436 unsigned long flags, void *context) 436 unsigned long flags, void *context)
437 { 437 {
438 struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c); 438 struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c);
439 struct jz4740_dma_desc *desc; 439 struct jz4740_dma_desc *desc;
440 unsigned int num_periods, i; 440 unsigned int num_periods, i;
441 441
442 if (buf_len % period_len) 442 if (buf_len % period_len)
443 return NULL; 443 return NULL;
444 444
445 num_periods = buf_len / period_len; 445 num_periods = buf_len / period_len;
446 446
447 desc = jz4740_dma_alloc_desc(num_periods); 447 desc = jz4740_dma_alloc_desc(num_periods);
448 if (!desc) 448 if (!desc)
449 return NULL; 449 return NULL;
450 450
451 for (i = 0; i < num_periods; i++) { 451 for (i = 0; i < num_periods; i++) {
452 desc->sg[i].addr = buf_addr; 452 desc->sg[i].addr = buf_addr;
453 desc->sg[i].len = period_len; 453 desc->sg[i].len = period_len;
454 buf_addr += period_len; 454 buf_addr += period_len;
455 } 455 }
456 456
457 desc->num_sgs = num_periods; 457 desc->num_sgs = num_periods;
458 desc->direction = direction; 458 desc->direction = direction;
459 desc->cyclic = true; 459 desc->cyclic = true;
460 460
461 return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags); 461 return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
462 } 462 }
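/*
 * Editor's sketch (assumption): a cyclic request as an audio-style client
 * would issue it.  Buffer address and sizes are illustrative; buf_len must
 * be a whole multiple of period_len or the prep callback above returns NULL.
 */
static int example_start_cyclic(struct dma_chan *chan, dma_addr_t buf,
	void (*period_done)(void *data), void *data)
{
	struct dma_async_tx_descriptor *desc;

	desc = dmaengine_prep_dma_cyclic(chan, buf, 4 * PAGE_SIZE, PAGE_SIZE,
					 DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
	if (!desc)
		return -ENOMEM;

	desc->callback = period_done;	/* fired from vchan_cyclic_callback() */
	desc->callback_param = data;

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);
	return 0;
}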
463 463
464 static size_t jz4740_dma_desc_residue(struct jz4740_dmaengine_chan *chan, 464 static size_t jz4740_dma_desc_residue(struct jz4740_dmaengine_chan *chan,
465 struct jz4740_dma_desc *desc, unsigned int next_sg) 465 struct jz4740_dma_desc *desc, unsigned int next_sg)
466 { 466 {
467 struct jz4740_dma_dev *dmadev = jz4740_dma_chan_get_dev(chan); 467 struct jz4740_dma_dev *dmadev = jz4740_dma_chan_get_dev(chan);
468 unsigned int residue, count; 468 unsigned int residue, count;
469 unsigned int i; 469 unsigned int i;
470 470
471 residue = 0; 471 residue = 0;
472 472
473 for (i = next_sg; i < desc->num_sgs; i++) 473 for (i = next_sg; i < desc->num_sgs; i++)
474 residue += desc->sg[i].len; 474 residue += desc->sg[i].len;
475 475
476 if (next_sg != 0) { 476 if (next_sg != 0) {
477 count = jz4740_dma_read(dmadev, 477 count = jz4740_dma_read(dmadev,
478 JZ_REG_DMA_TRANSFER_COUNT(chan->id)); 478 JZ_REG_DMA_TRANSFER_COUNT(chan->id));
479 residue += count << chan->transfer_shift; 479 residue += count << chan->transfer_shift;
480 } 480 }
481 481
482 return residue; 482 return residue;
483 } 483 }
484 484
485 static enum dma_status jz4740_dma_tx_status(struct dma_chan *c, 485 static enum dma_status jz4740_dma_tx_status(struct dma_chan *c,
486 dma_cookie_t cookie, struct dma_tx_state *state) 486 dma_cookie_t cookie, struct dma_tx_state *state)
487 { 487 {
488 struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c); 488 struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c);
489 struct virt_dma_desc *vdesc; 489 struct virt_dma_desc *vdesc;
490 enum dma_status status; 490 enum dma_status status;
491 unsigned long flags; 491 unsigned long flags;
492 492
493 status = dma_cookie_status(c, cookie, state); 493 status = dma_cookie_status(c, cookie, state);
494 if (status == DMA_SUCCESS || !state) 494 if (status == DMA_COMPLETE || !state)
495 return status; 495 return status;
496 496
497 spin_lock_irqsave(&chan->vchan.lock, flags); 497 spin_lock_irqsave(&chan->vchan.lock, flags);
498 vdesc = vchan_find_desc(&chan->vchan, cookie); 498 vdesc = vchan_find_desc(&chan->vchan, cookie);
499 	if (chan->desc && cookie == chan->desc->vdesc.tx.cookie) { 499 	if (chan->desc && cookie == chan->desc->vdesc.tx.cookie) {
500 state->residue = jz4740_dma_desc_residue(chan, chan->desc, 500 state->residue = jz4740_dma_desc_residue(chan, chan->desc,
501 chan->next_sg); 501 chan->next_sg);
502 } else if (vdesc) { 502 } else if (vdesc) {
503 state->residue = jz4740_dma_desc_residue(chan, 503 state->residue = jz4740_dma_desc_residue(chan,
504 to_jz4740_dma_desc(vdesc), 0); 504 to_jz4740_dma_desc(vdesc), 0);
505 } else { 505 } else {
506 state->residue = 0; 506 state->residue = 0;
507 } 507 }
508 spin_unlock_irqrestore(&chan->vchan.lock, flags); 508 spin_unlock_irqrestore(&chan->vchan.lock, flags);
509 509
510 return status; 510 return status;
511 } 511 }
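/*
 * Editor's sketch (assumption): how a caller might read back the residue
 * reported by jz4740_dma_tx_status().  Only the dmaengine wrappers are
 * real; the helper itself is illustrative.
 */
static size_t example_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
{
	struct dma_tx_state state;

	if (dmaengine_tx_status(chan, cookie, &state) == DMA_COMPLETE)
		return 0;

	return state.residue;	/* remaining sg bytes plus the in-flight count */
}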
512 512
513 static int jz4740_dma_alloc_chan_resources(struct dma_chan *c) 513 static int jz4740_dma_alloc_chan_resources(struct dma_chan *c)
514 { 514 {
515 return 0; 515 return 0;
516 } 516 }
517 517
518 static void jz4740_dma_free_chan_resources(struct dma_chan *c) 518 static void jz4740_dma_free_chan_resources(struct dma_chan *c)
519 { 519 {
520 vchan_free_chan_resources(to_virt_chan(c)); 520 vchan_free_chan_resources(to_virt_chan(c));
521 } 521 }
522 522
523 static void jz4740_dma_desc_free(struct virt_dma_desc *vdesc) 523 static void jz4740_dma_desc_free(struct virt_dma_desc *vdesc)
524 { 524 {
525 kfree(container_of(vdesc, struct jz4740_dma_desc, vdesc)); 525 kfree(container_of(vdesc, struct jz4740_dma_desc, vdesc));
526 } 526 }
527 527
528 static int jz4740_dma_probe(struct platform_device *pdev) 528 static int jz4740_dma_probe(struct platform_device *pdev)
529 { 529 {
530 struct jz4740_dmaengine_chan *chan; 530 struct jz4740_dmaengine_chan *chan;
531 struct jz4740_dma_dev *dmadev; 531 struct jz4740_dma_dev *dmadev;
532 struct dma_device *dd; 532 struct dma_device *dd;
533 unsigned int i; 533 unsigned int i;
534 struct resource *res; 534 struct resource *res;
535 int ret; 535 int ret;
536 int irq; 536 int irq;
537 537
538 dmadev = devm_kzalloc(&pdev->dev, sizeof(*dmadev), GFP_KERNEL); 538 dmadev = devm_kzalloc(&pdev->dev, sizeof(*dmadev), GFP_KERNEL);
539 if (!dmadev) 539 if (!dmadev)
540 		return -ENOMEM; 540 		return -ENOMEM;
541 541
542 dd = &dmadev->ddev; 542 dd = &dmadev->ddev;
543 543
544 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 544 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
545 dmadev->base = devm_ioremap_resource(&pdev->dev, res); 545 dmadev->base = devm_ioremap_resource(&pdev->dev, res);
546 if (IS_ERR(dmadev->base)) 546 if (IS_ERR(dmadev->base))
547 return PTR_ERR(dmadev->base); 547 return PTR_ERR(dmadev->base);
548 548
549 dmadev->clk = clk_get(&pdev->dev, "dma"); 549 dmadev->clk = clk_get(&pdev->dev, "dma");
550 if (IS_ERR(dmadev->clk)) 550 if (IS_ERR(dmadev->clk))
551 return PTR_ERR(dmadev->clk); 551 return PTR_ERR(dmadev->clk);
552 552
553 clk_prepare_enable(dmadev->clk); 553 clk_prepare_enable(dmadev->clk);
554 554
555 dma_cap_set(DMA_SLAVE, dd->cap_mask); 555 dma_cap_set(DMA_SLAVE, dd->cap_mask);
556 dma_cap_set(DMA_CYCLIC, dd->cap_mask); 556 dma_cap_set(DMA_CYCLIC, dd->cap_mask);
557 dd->device_alloc_chan_resources = jz4740_dma_alloc_chan_resources; 557 dd->device_alloc_chan_resources = jz4740_dma_alloc_chan_resources;
558 dd->device_free_chan_resources = jz4740_dma_free_chan_resources; 558 dd->device_free_chan_resources = jz4740_dma_free_chan_resources;
559 dd->device_tx_status = jz4740_dma_tx_status; 559 dd->device_tx_status = jz4740_dma_tx_status;
560 dd->device_issue_pending = jz4740_dma_issue_pending; 560 dd->device_issue_pending = jz4740_dma_issue_pending;
561 dd->device_prep_slave_sg = jz4740_dma_prep_slave_sg; 561 dd->device_prep_slave_sg = jz4740_dma_prep_slave_sg;
562 dd->device_prep_dma_cyclic = jz4740_dma_prep_dma_cyclic; 562 dd->device_prep_dma_cyclic = jz4740_dma_prep_dma_cyclic;
563 dd->device_control = jz4740_dma_control; 563 dd->device_control = jz4740_dma_control;
564 dd->dev = &pdev->dev; 564 dd->dev = &pdev->dev;
565 dd->chancnt = JZ_DMA_NR_CHANS; 565 dd->chancnt = JZ_DMA_NR_CHANS;
566 INIT_LIST_HEAD(&dd->channels); 566 INIT_LIST_HEAD(&dd->channels);
567 567
568 for (i = 0; i < dd->chancnt; i++) { 568 for (i = 0; i < dd->chancnt; i++) {
569 chan = &dmadev->chan[i]; 569 chan = &dmadev->chan[i];
570 chan->id = i; 570 chan->id = i;
571 chan->vchan.desc_free = jz4740_dma_desc_free; 571 chan->vchan.desc_free = jz4740_dma_desc_free;
572 vchan_init(&chan->vchan, dd); 572 vchan_init(&chan->vchan, dd);
573 } 573 }
574 574
575 ret = dma_async_device_register(dd); 575 ret = dma_async_device_register(dd);
576 if (ret) 576 if (ret)
577 return ret; 577 return ret;
578 578
579 irq = platform_get_irq(pdev, 0); 579 irq = platform_get_irq(pdev, 0);
580 ret = request_irq(irq, jz4740_dma_irq, 0, dev_name(&pdev->dev), dmadev); 580 ret = request_irq(irq, jz4740_dma_irq, 0, dev_name(&pdev->dev), dmadev);
581 if (ret) 581 if (ret)
582 goto err_unregister; 582 goto err_unregister;
583 583
584 platform_set_drvdata(pdev, dmadev); 584 platform_set_drvdata(pdev, dmadev);
585 585
586 return 0; 586 return 0;
587 587
588 err_unregister: 588 err_unregister:
589 dma_async_device_unregister(dd); 589 dma_async_device_unregister(dd);
590 return ret; 590 return ret;
591 } 591 }
592 592
593 static int jz4740_dma_remove(struct platform_device *pdev) 593 static int jz4740_dma_remove(struct platform_device *pdev)
594 { 594 {
595 struct jz4740_dma_dev *dmadev = platform_get_drvdata(pdev); 595 struct jz4740_dma_dev *dmadev = platform_get_drvdata(pdev);
596 int irq = platform_get_irq(pdev, 0); 596 int irq = platform_get_irq(pdev, 0);
597 597
598 free_irq(irq, dmadev); 598 free_irq(irq, dmadev);
599 dma_async_device_unregister(&dmadev->ddev); 599 dma_async_device_unregister(&dmadev->ddev);
600 clk_disable_unprepare(dmadev->clk); 600 clk_disable_unprepare(dmadev->clk);
601 601
602 return 0; 602 return 0;
603 } 603 }
604 604
605 static struct platform_driver jz4740_dma_driver = { 605 static struct platform_driver jz4740_dma_driver = {
606 .probe = jz4740_dma_probe, 606 .probe = jz4740_dma_probe,
607 .remove = jz4740_dma_remove, 607 .remove = jz4740_dma_remove,
608 .driver = { 608 .driver = {
609 .name = "jz4740-dma", 609 .name = "jz4740-dma",
610 .owner = THIS_MODULE, 610 .owner = THIS_MODULE,
611 }, 611 },
612 }; 612 };
613 module_platform_driver(jz4740_dma_driver); 613 module_platform_driver(jz4740_dma_driver);
614 614
615 MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>"); 615 MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
616 MODULE_DESCRIPTION("JZ4740 DMA driver"); 616 MODULE_DESCRIPTION("JZ4740 DMA driver");
617 MODULE_LICENSE("GPLv2"); 617 MODULE_LICENSE("GPLv2");
618 618
drivers/dma/dmaengine.c
1 /* 1 /*
2 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved. 2 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
3 * 3 *
4 * This program is free software; you can redistribute it and/or modify it 4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License as published by the Free 5 * under the terms of the GNU General Public License as published by the Free
6 * Software Foundation; either version 2 of the License, or (at your option) 6 * Software Foundation; either version 2 of the License, or (at your option)
7 * any later version. 7 * any later version.
8 * 8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT 9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details. 12 * more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License along with 14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc., 59 15 * this program; if not, write to the Free Software Foundation, Inc., 59
16 * Temple Place - Suite 330, Boston, MA 02111-1307, USA. 16 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17 * 17 *
18 * The full GNU General Public License is included in this distribution in the 18 * The full GNU General Public License is included in this distribution in the
19 * file called COPYING. 19 * file called COPYING.
20 */ 20 */
21 21
22 /* 22 /*
23 * This code implements the DMA subsystem. It provides a HW-neutral interface 23 * This code implements the DMA subsystem. It provides a HW-neutral interface
24 * for other kernel code to use asynchronous memory copy capabilities, 24 * for other kernel code to use asynchronous memory copy capabilities,
25 * if present, and allows different HW DMA drivers to register as providing 25 * if present, and allows different HW DMA drivers to register as providing
26 * this capability. 26 * this capability.
27 * 27 *
28 * Because we are accelerating what is already a relatively fast 28 * Because we are accelerating what is already a relatively fast
29 * operation, the code goes to great lengths to avoid additional overhead, 29 * operation, the code goes to great lengths to avoid additional overhead,
30 * such as locking. 30 * such as locking.
31 * 31 *
32 * LOCKING: 32 * LOCKING:
33 * 33 *
34 * The subsystem keeps a global list of dma_device structs which is protected by a 34 * The subsystem keeps a global list of dma_device structs which is protected by a
35 * mutex, dma_list_mutex. 35 * mutex, dma_list_mutex.
36 * 36 *
37 * A subsystem can get access to a channel by calling dmaengine_get() followed 37 * A subsystem can get access to a channel by calling dmaengine_get() followed
38 * by dma_find_channel(), or, if it needs an exclusive channel, it can call 38 * by dma_find_channel(), or, if it needs an exclusive channel, it can call
39 * dma_request_channel(). Once a channel is allocated a reference is taken 39 * dma_request_channel(). Once a channel is allocated a reference is taken
40 * against its corresponding driver to disable removal. 40 * against its corresponding driver to disable removal.
41 * 41 *
42 * Each device has a channels list, which runs unlocked but is never modified 42 * Each device has a channels list, which runs unlocked but is never modified
43 * once the device is registered, it's just set up by the driver. 43 * once the device is registered, it's just set up by the driver.
44 * 44 *
45 * See Documentation/dmaengine.txt for more details 45 * See Documentation/dmaengine.txt for more details
46 */ 46 */
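/*
 * Editor's sketch (assumption, mirroring the usage described above): the
 * public-channel pattern with dmaengine_get()/dma_find_channel(), with all
 * error handling and the actual offload work omitted.
 */
static void example_use_public_channel(void)
{
	struct dma_chan *chan;

	dmaengine_get();			/* take a client reference */

	chan = dma_find_channel(DMA_MEMCPY);	/* per-cpu lookup, may be NULL */
	if (chan)
		dma_async_issue_pending(chan);	/* ...after submitting descriptors */

	dmaengine_put();			/* let providers unload again */
}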
47 47
48 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 48 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
49 49
50 #include <linux/dma-mapping.h> 50 #include <linux/dma-mapping.h>
51 #include <linux/init.h> 51 #include <linux/init.h>
52 #include <linux/module.h> 52 #include <linux/module.h>
53 #include <linux/mm.h> 53 #include <linux/mm.h>
54 #include <linux/device.h> 54 #include <linux/device.h>
55 #include <linux/dmaengine.h> 55 #include <linux/dmaengine.h>
56 #include <linux/hardirq.h> 56 #include <linux/hardirq.h>
57 #include <linux/spinlock.h> 57 #include <linux/spinlock.h>
58 #include <linux/percpu.h> 58 #include <linux/percpu.h>
59 #include <linux/rcupdate.h> 59 #include <linux/rcupdate.h>
60 #include <linux/mutex.h> 60 #include <linux/mutex.h>
61 #include <linux/jiffies.h> 61 #include <linux/jiffies.h>
62 #include <linux/rculist.h> 62 #include <linux/rculist.h>
63 #include <linux/idr.h> 63 #include <linux/idr.h>
64 #include <linux/slab.h> 64 #include <linux/slab.h>
65 #include <linux/acpi.h> 65 #include <linux/acpi.h>
66 #include <linux/acpi_dma.h> 66 #include <linux/acpi_dma.h>
67 #include <linux/of_dma.h> 67 #include <linux/of_dma.h>
68 68
69 static DEFINE_MUTEX(dma_list_mutex); 69 static DEFINE_MUTEX(dma_list_mutex);
70 static DEFINE_IDR(dma_idr); 70 static DEFINE_IDR(dma_idr);
71 static LIST_HEAD(dma_device_list); 71 static LIST_HEAD(dma_device_list);
72 static long dmaengine_ref_count; 72 static long dmaengine_ref_count;
73 73
74 /* --- sysfs implementation --- */ 74 /* --- sysfs implementation --- */
75 75
76 /** 76 /**
77 * dev_to_dma_chan - convert a device pointer to its sysfs container object 77 * dev_to_dma_chan - convert a device pointer to its sysfs container object
78 * @dev - device node 78 * @dev - device node
79 * 79 *
80 * Must be called under dma_list_mutex 80 * Must be called under dma_list_mutex
81 */ 81 */
82 static struct dma_chan *dev_to_dma_chan(struct device *dev) 82 static struct dma_chan *dev_to_dma_chan(struct device *dev)
83 { 83 {
84 struct dma_chan_dev *chan_dev; 84 struct dma_chan_dev *chan_dev;
85 85
86 chan_dev = container_of(dev, typeof(*chan_dev), device); 86 chan_dev = container_of(dev, typeof(*chan_dev), device);
87 return chan_dev->chan; 87 return chan_dev->chan;
88 } 88 }
89 89
90 static ssize_t memcpy_count_show(struct device *dev, 90 static ssize_t memcpy_count_show(struct device *dev,
91 struct device_attribute *attr, char *buf) 91 struct device_attribute *attr, char *buf)
92 { 92 {
93 struct dma_chan *chan; 93 struct dma_chan *chan;
94 unsigned long count = 0; 94 unsigned long count = 0;
95 int i; 95 int i;
96 int err; 96 int err;
97 97
98 mutex_lock(&dma_list_mutex); 98 mutex_lock(&dma_list_mutex);
99 chan = dev_to_dma_chan(dev); 99 chan = dev_to_dma_chan(dev);
100 if (chan) { 100 if (chan) {
101 for_each_possible_cpu(i) 101 for_each_possible_cpu(i)
102 count += per_cpu_ptr(chan->local, i)->memcpy_count; 102 count += per_cpu_ptr(chan->local, i)->memcpy_count;
103 err = sprintf(buf, "%lu\n", count); 103 err = sprintf(buf, "%lu\n", count);
104 } else 104 } else
105 err = -ENODEV; 105 err = -ENODEV;
106 mutex_unlock(&dma_list_mutex); 106 mutex_unlock(&dma_list_mutex);
107 107
108 return err; 108 return err;
109 } 109 }
110 static DEVICE_ATTR_RO(memcpy_count); 110 static DEVICE_ATTR_RO(memcpy_count);
111 111
112 static ssize_t bytes_transferred_show(struct device *dev, 112 static ssize_t bytes_transferred_show(struct device *dev,
113 struct device_attribute *attr, char *buf) 113 struct device_attribute *attr, char *buf)
114 { 114 {
115 struct dma_chan *chan; 115 struct dma_chan *chan;
116 unsigned long count = 0; 116 unsigned long count = 0;
117 int i; 117 int i;
118 int err; 118 int err;
119 119
120 mutex_lock(&dma_list_mutex); 120 mutex_lock(&dma_list_mutex);
121 chan = dev_to_dma_chan(dev); 121 chan = dev_to_dma_chan(dev);
122 if (chan) { 122 if (chan) {
123 for_each_possible_cpu(i) 123 for_each_possible_cpu(i)
124 count += per_cpu_ptr(chan->local, i)->bytes_transferred; 124 count += per_cpu_ptr(chan->local, i)->bytes_transferred;
125 err = sprintf(buf, "%lu\n", count); 125 err = sprintf(buf, "%lu\n", count);
126 } else 126 } else
127 err = -ENODEV; 127 err = -ENODEV;
128 mutex_unlock(&dma_list_mutex); 128 mutex_unlock(&dma_list_mutex);
129 129
130 return err; 130 return err;
131 } 131 }
132 static DEVICE_ATTR_RO(bytes_transferred); 132 static DEVICE_ATTR_RO(bytes_transferred);
133 133
134 static ssize_t in_use_show(struct device *dev, struct device_attribute *attr, 134 static ssize_t in_use_show(struct device *dev, struct device_attribute *attr,
135 char *buf) 135 char *buf)
136 { 136 {
137 struct dma_chan *chan; 137 struct dma_chan *chan;
138 int err; 138 int err;
139 139
140 mutex_lock(&dma_list_mutex); 140 mutex_lock(&dma_list_mutex);
141 chan = dev_to_dma_chan(dev); 141 chan = dev_to_dma_chan(dev);
142 if (chan) 142 if (chan)
143 err = sprintf(buf, "%d\n", chan->client_count); 143 err = sprintf(buf, "%d\n", chan->client_count);
144 else 144 else
145 err = -ENODEV; 145 err = -ENODEV;
146 mutex_unlock(&dma_list_mutex); 146 mutex_unlock(&dma_list_mutex);
147 147
148 return err; 148 return err;
149 } 149 }
150 static DEVICE_ATTR_RO(in_use); 150 static DEVICE_ATTR_RO(in_use);
151 151
152 static struct attribute *dma_dev_attrs[] = { 152 static struct attribute *dma_dev_attrs[] = {
153 &dev_attr_memcpy_count.attr, 153 &dev_attr_memcpy_count.attr,
154 &dev_attr_bytes_transferred.attr, 154 &dev_attr_bytes_transferred.attr,
155 &dev_attr_in_use.attr, 155 &dev_attr_in_use.attr,
156 NULL, 156 NULL,
157 }; 157 };
158 ATTRIBUTE_GROUPS(dma_dev); 158 ATTRIBUTE_GROUPS(dma_dev);
159 159
160 static void chan_dev_release(struct device *dev) 160 static void chan_dev_release(struct device *dev)
161 { 161 {
162 struct dma_chan_dev *chan_dev; 162 struct dma_chan_dev *chan_dev;
163 163
164 chan_dev = container_of(dev, typeof(*chan_dev), device); 164 chan_dev = container_of(dev, typeof(*chan_dev), device);
165 if (atomic_dec_and_test(chan_dev->idr_ref)) { 165 if (atomic_dec_and_test(chan_dev->idr_ref)) {
166 mutex_lock(&dma_list_mutex); 166 mutex_lock(&dma_list_mutex);
167 idr_remove(&dma_idr, chan_dev->dev_id); 167 idr_remove(&dma_idr, chan_dev->dev_id);
168 mutex_unlock(&dma_list_mutex); 168 mutex_unlock(&dma_list_mutex);
169 kfree(chan_dev->idr_ref); 169 kfree(chan_dev->idr_ref);
170 } 170 }
171 kfree(chan_dev); 171 kfree(chan_dev);
172 } 172 }
173 173
174 static struct class dma_devclass = { 174 static struct class dma_devclass = {
175 .name = "dma", 175 .name = "dma",
176 .dev_groups = dma_dev_groups, 176 .dev_groups = dma_dev_groups,
177 .dev_release = chan_dev_release, 177 .dev_release = chan_dev_release,
178 }; 178 };
179 179
180 /* --- client and device registration --- */ 180 /* --- client and device registration --- */
181 181
182 #define dma_device_satisfies_mask(device, mask) \ 182 #define dma_device_satisfies_mask(device, mask) \
183 __dma_device_satisfies_mask((device), &(mask)) 183 __dma_device_satisfies_mask((device), &(mask))
184 static int 184 static int
185 __dma_device_satisfies_mask(struct dma_device *device, 185 __dma_device_satisfies_mask(struct dma_device *device,
186 const dma_cap_mask_t *want) 186 const dma_cap_mask_t *want)
187 { 187 {
188 dma_cap_mask_t has; 188 dma_cap_mask_t has;
189 189
190 bitmap_and(has.bits, want->bits, device->cap_mask.bits, 190 bitmap_and(has.bits, want->bits, device->cap_mask.bits,
191 DMA_TX_TYPE_END); 191 DMA_TX_TYPE_END);
192 return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END); 192 return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
193 } 193 }
194 194
195 static struct module *dma_chan_to_owner(struct dma_chan *chan) 195 static struct module *dma_chan_to_owner(struct dma_chan *chan)
196 { 196 {
197 return chan->device->dev->driver->owner; 197 return chan->device->dev->driver->owner;
198 } 198 }
199 199
200 /** 200 /**
201 * balance_ref_count - catch up the channel reference count 201 * balance_ref_count - catch up the channel reference count
202 * @chan - channel to balance ->client_count versus dmaengine_ref_count 202 * @chan - channel to balance ->client_count versus dmaengine_ref_count
203 * 203 *
204 * balance_ref_count must be called under dma_list_mutex 204 * balance_ref_count must be called under dma_list_mutex
205 */ 205 */
206 static void balance_ref_count(struct dma_chan *chan) 206 static void balance_ref_count(struct dma_chan *chan)
207 { 207 {
208 struct module *owner = dma_chan_to_owner(chan); 208 struct module *owner = dma_chan_to_owner(chan);
209 209
210 while (chan->client_count < dmaengine_ref_count) { 210 while (chan->client_count < dmaengine_ref_count) {
211 __module_get(owner); 211 __module_get(owner);
212 chan->client_count++; 212 chan->client_count++;
213 } 213 }
214 } 214 }
215 215
216 /** 216 /**
217 * dma_chan_get - try to grab a dma channel's parent driver module 217 * dma_chan_get - try to grab a dma channel's parent driver module
218 * @chan - channel to grab 218 * @chan - channel to grab
219 * 219 *
220 * Must be called under dma_list_mutex 220 * Must be called under dma_list_mutex
221 */ 221 */
222 static int dma_chan_get(struct dma_chan *chan) 222 static int dma_chan_get(struct dma_chan *chan)
223 { 223 {
224 int err = -ENODEV; 224 int err = -ENODEV;
225 struct module *owner = dma_chan_to_owner(chan); 225 struct module *owner = dma_chan_to_owner(chan);
226 226
227 if (chan->client_count) { 227 if (chan->client_count) {
228 __module_get(owner); 228 __module_get(owner);
229 err = 0; 229 err = 0;
230 } else if (try_module_get(owner)) 230 } else if (try_module_get(owner))
231 err = 0; 231 err = 0;
232 232
233 if (err == 0) 233 if (err == 0)
234 chan->client_count++; 234 chan->client_count++;
235 235
236 /* allocate upon first client reference */ 236 /* allocate upon first client reference */
237 if (chan->client_count == 1 && err == 0) { 237 if (chan->client_count == 1 && err == 0) {
238 int desc_cnt = chan->device->device_alloc_chan_resources(chan); 238 int desc_cnt = chan->device->device_alloc_chan_resources(chan);
239 239
240 if (desc_cnt < 0) { 240 if (desc_cnt < 0) {
241 err = desc_cnt; 241 err = desc_cnt;
242 chan->client_count = 0; 242 chan->client_count = 0;
243 module_put(owner); 243 module_put(owner);
244 } else if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask)) 244 } else if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
245 balance_ref_count(chan); 245 balance_ref_count(chan);
246 } 246 }
247 247
248 return err; 248 return err;
249 } 249 }
250 250
251 /** 251 /**
252 * dma_chan_put - drop a reference to a dma channel's parent driver module 252 * dma_chan_put - drop a reference to a dma channel's parent driver module
253 * @chan - channel to release 253 * @chan - channel to release
254 * 254 *
255 * Must be called under dma_list_mutex 255 * Must be called under dma_list_mutex
256 */ 256 */
257 static void dma_chan_put(struct dma_chan *chan) 257 static void dma_chan_put(struct dma_chan *chan)
258 { 258 {
259 if (!chan->client_count) 259 if (!chan->client_count)
260 return; /* this channel failed alloc_chan_resources */ 260 return; /* this channel failed alloc_chan_resources */
261 chan->client_count--; 261 chan->client_count--;
262 module_put(dma_chan_to_owner(chan)); 262 module_put(dma_chan_to_owner(chan));
263 if (chan->client_count == 0) 263 if (chan->client_count == 0)
264 chan->device->device_free_chan_resources(chan); 264 chan->device->device_free_chan_resources(chan);
265 } 265 }
266 266
267 enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie) 267 enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
268 { 268 {
269 enum dma_status status; 269 enum dma_status status;
270 unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000); 270 unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);
271 271
272 dma_async_issue_pending(chan); 272 dma_async_issue_pending(chan);
273 do { 273 do {
274 status = dma_async_is_tx_complete(chan, cookie, NULL, NULL); 274 status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
275 if (time_after_eq(jiffies, dma_sync_wait_timeout)) { 275 if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
276 pr_err("%s: timeout!\n", __func__); 276 pr_err("%s: timeout!\n", __func__);
277 return DMA_ERROR; 277 return DMA_ERROR;
278 } 278 }
279 if (status != DMA_IN_PROGRESS) 279 if (status != DMA_IN_PROGRESS)
280 break; 280 break;
281 cpu_relax(); 281 cpu_relax();
282 } while (1); 282 } while (1);
283 283
284 return status; 284 return status;
285 } 285 }
286 EXPORT_SYMBOL(dma_sync_wait); 286 EXPORT_SYMBOL(dma_sync_wait);
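/*
 * Editor's sketch (assumption): typical use of dma_sync_wait() after a
 * descriptor has been submitted; the cookie comes from dmaengine_submit().
 */
static int example_wait_for(struct dma_chan *chan, dma_cookie_t cookie)
{
	return dma_sync_wait(chan, cookie) == DMA_COMPLETE ? 0 : -EIO;
}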
287 287
288 /** 288 /**
289 * dma_cap_mask_all - enable iteration over all operation types 289 * dma_cap_mask_all - enable iteration over all operation types
290 */ 290 */
291 static dma_cap_mask_t dma_cap_mask_all; 291 static dma_cap_mask_t dma_cap_mask_all;
292 292
293 /** 293 /**
294 * dma_chan_tbl_ent - tracks channel allocations per core/operation 294 * dma_chan_tbl_ent - tracks channel allocations per core/operation
295 * @chan - associated channel for this entry 295 * @chan - associated channel for this entry
296 */ 296 */
297 struct dma_chan_tbl_ent { 297 struct dma_chan_tbl_ent {
298 struct dma_chan *chan; 298 struct dma_chan *chan;
299 }; 299 };
300 300
301 /** 301 /**
302 * channel_table - percpu lookup table for memory-to-memory offload providers 302 * channel_table - percpu lookup table for memory-to-memory offload providers
303 */ 303 */
304 static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END]; 304 static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END];
305 305
306 static int __init dma_channel_table_init(void) 306 static int __init dma_channel_table_init(void)
307 { 307 {
308 enum dma_transaction_type cap; 308 enum dma_transaction_type cap;
309 int err = 0; 309 int err = 0;
310 310
311 bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END); 311 bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);
312 312
313 /* 'interrupt', 'private', and 'slave' are channel capabilities, 313 /* 'interrupt', 'private', and 'slave' are channel capabilities,
314 * but are not associated with an operation so they do not need 314 * but are not associated with an operation so they do not need
315 * an entry in the channel_table 315 * an entry in the channel_table
316 */ 316 */
317 clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits); 317 clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);
318 clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits); 318 clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits);
319 clear_bit(DMA_SLAVE, dma_cap_mask_all.bits); 319 clear_bit(DMA_SLAVE, dma_cap_mask_all.bits);
320 320
321 for_each_dma_cap_mask(cap, dma_cap_mask_all) { 321 for_each_dma_cap_mask(cap, dma_cap_mask_all) {
322 channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent); 322 channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent);
323 if (!channel_table[cap]) { 323 if (!channel_table[cap]) {
324 err = -ENOMEM; 324 err = -ENOMEM;
325 break; 325 break;
326 } 326 }
327 } 327 }
328 328
329 if (err) { 329 if (err) {
330 pr_err("initialization failure\n"); 330 pr_err("initialization failure\n");
331 for_each_dma_cap_mask(cap, dma_cap_mask_all) 331 for_each_dma_cap_mask(cap, dma_cap_mask_all)
332 if (channel_table[cap]) 332 if (channel_table[cap])
333 free_percpu(channel_table[cap]); 333 free_percpu(channel_table[cap]);
334 } 334 }
335 335
336 return err; 336 return err;
337 } 337 }
338 arch_initcall(dma_channel_table_init); 338 arch_initcall(dma_channel_table_init);
339 339
340 /** 340 /**
341 * dma_find_channel - find a channel to carry out the operation 341 * dma_find_channel - find a channel to carry out the operation
342 * @tx_type: transaction type 342 * @tx_type: transaction type
343 */ 343 */
344 struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type) 344 struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
345 { 345 {
346 return this_cpu_read(channel_table[tx_type]->chan); 346 return this_cpu_read(channel_table[tx_type]->chan);
347 } 347 }
348 EXPORT_SYMBOL(dma_find_channel); 348 EXPORT_SYMBOL(dma_find_channel);
349 349
350 /* 350 /*
351 * net_dma_find_channel - find a channel for net_dma 351 * net_dma_find_channel - find a channel for net_dma
352 * net_dma has alignment requirements 352 * net_dma has alignment requirements
353 */ 353 */
354 struct dma_chan *net_dma_find_channel(void) 354 struct dma_chan *net_dma_find_channel(void)
355 { 355 {
356 struct dma_chan *chan = dma_find_channel(DMA_MEMCPY); 356 struct dma_chan *chan = dma_find_channel(DMA_MEMCPY);
357 if (chan && !is_dma_copy_aligned(chan->device, 1, 1, 1)) 357 if (chan && !is_dma_copy_aligned(chan->device, 1, 1, 1))
358 return NULL; 358 return NULL;
359 359
360 return chan; 360 return chan;
361 } 361 }
362 EXPORT_SYMBOL(net_dma_find_channel); 362 EXPORT_SYMBOL(net_dma_find_channel);
363 363
364 /** 364 /**
365 * dma_issue_pending_all - flush all pending operations across all channels 365 * dma_issue_pending_all - flush all pending operations across all channels
366 */ 366 */
367 void dma_issue_pending_all(void) 367 void dma_issue_pending_all(void)
368 { 368 {
369 struct dma_device *device; 369 struct dma_device *device;
370 struct dma_chan *chan; 370 struct dma_chan *chan;
371 371
372 rcu_read_lock(); 372 rcu_read_lock();
373 list_for_each_entry_rcu(device, &dma_device_list, global_node) { 373 list_for_each_entry_rcu(device, &dma_device_list, global_node) {
374 if (dma_has_cap(DMA_PRIVATE, device->cap_mask)) 374 if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
375 continue; 375 continue;
376 list_for_each_entry(chan, &device->channels, device_node) 376 list_for_each_entry(chan, &device->channels, device_node)
377 if (chan->client_count) 377 if (chan->client_count)
378 device->device_issue_pending(chan); 378 device->device_issue_pending(chan);
379 } 379 }
380 rcu_read_unlock(); 380 rcu_read_unlock();
381 } 381 }
382 EXPORT_SYMBOL(dma_issue_pending_all); 382 EXPORT_SYMBOL(dma_issue_pending_all);
383 383
384 /** 384 /**
385 * dma_chan_is_local - returns true if the channel is in the same numa-node as the cpu 385 * dma_chan_is_local - returns true if the channel is in the same numa-node as the cpu
386 */ 386 */
387 static bool dma_chan_is_local(struct dma_chan *chan, int cpu) 387 static bool dma_chan_is_local(struct dma_chan *chan, int cpu)
388 { 388 {
389 int node = dev_to_node(chan->device->dev); 389 int node = dev_to_node(chan->device->dev);
390 return node == -1 || cpumask_test_cpu(cpu, cpumask_of_node(node)); 390 return node == -1 || cpumask_test_cpu(cpu, cpumask_of_node(node));
391 } 391 }
392 392
393 /** 393 /**
394 * min_chan - returns the channel with min count and in the same numa-node as the cpu 394 * min_chan - returns the channel with min count and in the same numa-node as the cpu
395 * @cap: capability to match 395 * @cap: capability to match
396 * @cpu: cpu index which the channel should be close to 396 * @cpu: cpu index which the channel should be close to
397 * 397 *
398 * If some channels are close to the given cpu, the one with the lowest 398 * If some channels are close to the given cpu, the one with the lowest
399 * reference count is returned. Otherwise, cpu is ignored and only the 399 * reference count is returned. Otherwise, cpu is ignored and only the
400 * reference count is taken into account. 400 * reference count is taken into account.
401 * Must be called under dma_list_mutex. 401 * Must be called under dma_list_mutex.
402 */ 402 */
403 static struct dma_chan *min_chan(enum dma_transaction_type cap, int cpu) 403 static struct dma_chan *min_chan(enum dma_transaction_type cap, int cpu)
404 { 404 {
405 struct dma_device *device; 405 struct dma_device *device;
406 struct dma_chan *chan; 406 struct dma_chan *chan;
407 struct dma_chan *min = NULL; 407 struct dma_chan *min = NULL;
408 struct dma_chan *localmin = NULL; 408 struct dma_chan *localmin = NULL;
409 409
410 list_for_each_entry(device, &dma_device_list, global_node) { 410 list_for_each_entry(device, &dma_device_list, global_node) {
411 if (!dma_has_cap(cap, device->cap_mask) || 411 if (!dma_has_cap(cap, device->cap_mask) ||
412 dma_has_cap(DMA_PRIVATE, device->cap_mask)) 412 dma_has_cap(DMA_PRIVATE, device->cap_mask))
413 continue; 413 continue;
414 list_for_each_entry(chan, &device->channels, device_node) { 414 list_for_each_entry(chan, &device->channels, device_node) {
415 if (!chan->client_count) 415 if (!chan->client_count)
416 continue; 416 continue;
417 if (!min || chan->table_count < min->table_count) 417 if (!min || chan->table_count < min->table_count)
418 min = chan; 418 min = chan;
419 419
420 if (dma_chan_is_local(chan, cpu)) 420 if (dma_chan_is_local(chan, cpu))
421 if (!localmin || 421 if (!localmin ||
422 chan->table_count < localmin->table_count) 422 chan->table_count < localmin->table_count)
423 localmin = chan; 423 localmin = chan;
424 } 424 }
425 } 425 }
426 426
427 chan = localmin ? localmin : min; 427 chan = localmin ? localmin : min;
428 428
429 if (chan) 429 if (chan)
430 chan->table_count++; 430 chan->table_count++;
431 431
432 return chan; 432 return chan;
433 } 433 }
434 434
435 /** 435 /**
436 * dma_channel_rebalance - redistribute the available channels 436 * dma_channel_rebalance - redistribute the available channels
437 * 437 *
438 * Optimize for cpu isolation (each cpu gets a dedicated channel for an 438 * Optimize for cpu isolation (each cpu gets a dedicated channel for an
439 * operation type) in the SMP case, and operation isolation (avoid 439 * operation type) in the SMP case, and operation isolation (avoid
440 * multi-tasking channels) in the non-SMP case. Must be called under 440 * multi-tasking channels) in the non-SMP case. Must be called under
441 * dma_list_mutex. 441 * dma_list_mutex.
442 */ 442 */
443 static void dma_channel_rebalance(void) 443 static void dma_channel_rebalance(void)
444 { 444 {
445 struct dma_chan *chan; 445 struct dma_chan *chan;
446 struct dma_device *device; 446 struct dma_device *device;
447 int cpu; 447 int cpu;
448 int cap; 448 int cap;
449 449
450 /* undo the last distribution */ 450 /* undo the last distribution */
451 for_each_dma_cap_mask(cap, dma_cap_mask_all) 451 for_each_dma_cap_mask(cap, dma_cap_mask_all)
452 for_each_possible_cpu(cpu) 452 for_each_possible_cpu(cpu)
453 per_cpu_ptr(channel_table[cap], cpu)->chan = NULL; 453 per_cpu_ptr(channel_table[cap], cpu)->chan = NULL;
454 454
455 list_for_each_entry(device, &dma_device_list, global_node) { 455 list_for_each_entry(device, &dma_device_list, global_node) {
456 if (dma_has_cap(DMA_PRIVATE, device->cap_mask)) 456 if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
457 continue; 457 continue;
458 list_for_each_entry(chan, &device->channels, device_node) 458 list_for_each_entry(chan, &device->channels, device_node)
459 chan->table_count = 0; 459 chan->table_count = 0;
460 } 460 }
461 461
462 /* don't populate the channel_table if no clients are available */ 462 /* don't populate the channel_table if no clients are available */
463 if (!dmaengine_ref_count) 463 if (!dmaengine_ref_count)
464 return; 464 return;
465 465
466 /* redistribute available channels */ 466 /* redistribute available channels */
467 for_each_dma_cap_mask(cap, dma_cap_mask_all) 467 for_each_dma_cap_mask(cap, dma_cap_mask_all)
468 for_each_online_cpu(cpu) { 468 for_each_online_cpu(cpu) {
469 chan = min_chan(cap, cpu); 469 chan = min_chan(cap, cpu);
470 per_cpu_ptr(channel_table[cap], cpu)->chan = chan; 470 per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
471 } 471 }
472 } 472 }
473 473
474 static struct dma_chan *private_candidate(const dma_cap_mask_t *mask, 474 static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,
475 struct dma_device *dev, 475 struct dma_device *dev,
476 dma_filter_fn fn, void *fn_param) 476 dma_filter_fn fn, void *fn_param)
477 { 477 {
478 struct dma_chan *chan; 478 struct dma_chan *chan;
479 479
480 if (!__dma_device_satisfies_mask(dev, mask)) { 480 if (!__dma_device_satisfies_mask(dev, mask)) {
481 pr_debug("%s: wrong capabilities\n", __func__); 481 pr_debug("%s: wrong capabilities\n", __func__);
482 return NULL; 482 return NULL;
483 } 483 }
484 /* devices with multiple channels need special handling as we need to 484 /* devices with multiple channels need special handling as we need to
485 * ensure that all channels are either private or public. 485 * ensure that all channels are either private or public.
486 */ 486 */
487 if (dev->chancnt > 1 && !dma_has_cap(DMA_PRIVATE, dev->cap_mask)) 487 if (dev->chancnt > 1 && !dma_has_cap(DMA_PRIVATE, dev->cap_mask))
488 list_for_each_entry(chan, &dev->channels, device_node) { 488 list_for_each_entry(chan, &dev->channels, device_node) {
489 /* some channels are already publicly allocated */ 489 /* some channels are already publicly allocated */
490 if (chan->client_count) 490 if (chan->client_count)
491 return NULL; 491 return NULL;
492 } 492 }
493 493
494 list_for_each_entry(chan, &dev->channels, device_node) { 494 list_for_each_entry(chan, &dev->channels, device_node) {
495 if (chan->client_count) { 495 if (chan->client_count) {
496 pr_debug("%s: %s busy\n", 496 pr_debug("%s: %s busy\n",
497 __func__, dma_chan_name(chan)); 497 __func__, dma_chan_name(chan));
498 continue; 498 continue;
499 } 499 }
500 if (fn && !fn(chan, fn_param)) { 500 if (fn && !fn(chan, fn_param)) {
501 pr_debug("%s: %s filter said false\n", 501 pr_debug("%s: %s filter said false\n",
502 __func__, dma_chan_name(chan)); 502 __func__, dma_chan_name(chan));
503 continue; 503 continue;
504 } 504 }
505 return chan; 505 return chan;
506 } 506 }
507 507
508 return NULL; 508 return NULL;
509 } 509 }
510 510
511 /** 511 /**
512 * dma_get_slave_channel - try to get a specific channel exclusively 512 * dma_get_slave_channel - try to get a specific channel exclusively
513 * @chan: target channel 513 * @chan: target channel
514 */ 514 */
515 struct dma_chan *dma_get_slave_channel(struct dma_chan *chan) 515 struct dma_chan *dma_get_slave_channel(struct dma_chan *chan)
516 { 516 {
517 int err = -EBUSY; 517 int err = -EBUSY;
518 518
519 /* lock against __dma_request_channel */ 519 /* lock against __dma_request_channel */
520 mutex_lock(&dma_list_mutex); 520 mutex_lock(&dma_list_mutex);
521 521
522 if (chan->client_count == 0) { 522 if (chan->client_count == 0) {
523 err = dma_chan_get(chan); 523 err = dma_chan_get(chan);
524 if (err) 524 if (err)
525 pr_debug("%s: failed to get %s: (%d)\n", 525 pr_debug("%s: failed to get %s: (%d)\n",
526 __func__, dma_chan_name(chan), err); 526 __func__, dma_chan_name(chan), err);
527 } else 527 } else
528 chan = NULL; 528 chan = NULL;
529 529
530 mutex_unlock(&dma_list_mutex); 530 mutex_unlock(&dma_list_mutex);
531 531
532 532
533 return chan; 533 return chan;
534 } 534 }
535 EXPORT_SYMBOL_GPL(dma_get_slave_channel); 535 EXPORT_SYMBOL_GPL(dma_get_slave_channel);
536 536
537 /** 537 /**
538 * __dma_request_channel - try to allocate an exclusive channel 538 * __dma_request_channel - try to allocate an exclusive channel
539 * @mask: capabilities that the channel must satisfy 539 * @mask: capabilities that the channel must satisfy
540 * @fn: optional callback to disposition available channels 540 * @fn: optional callback to disposition available channels
541 * @fn_param: opaque parameter to pass to dma_filter_fn 541 * @fn_param: opaque parameter to pass to dma_filter_fn
542 */ 542 */
543 struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask, 543 struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
544 dma_filter_fn fn, void *fn_param) 544 dma_filter_fn fn, void *fn_param)
545 { 545 {
546 struct dma_device *device, *_d; 546 struct dma_device *device, *_d;
547 struct dma_chan *chan = NULL; 547 struct dma_chan *chan = NULL;
548 int err; 548 int err;
549 549
550 /* Find a channel */ 550 /* Find a channel */
551 mutex_lock(&dma_list_mutex); 551 mutex_lock(&dma_list_mutex);
552 list_for_each_entry_safe(device, _d, &dma_device_list, global_node) { 552 list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
553 chan = private_candidate(mask, device, fn, fn_param); 553 chan = private_candidate(mask, device, fn, fn_param);
554 if (chan) { 554 if (chan) {
555 /* Found a suitable channel, try to grab, prep, and 555 /* Found a suitable channel, try to grab, prep, and
556 * return it. We first set DMA_PRIVATE to disable 556 * return it. We first set DMA_PRIVATE to disable
557 * balance_ref_count as this channel will not be 557 * balance_ref_count as this channel will not be
558 * published in the general-purpose allocator 558 * published in the general-purpose allocator
559 */ 559 */
560 dma_cap_set(DMA_PRIVATE, device->cap_mask); 560 dma_cap_set(DMA_PRIVATE, device->cap_mask);
561 device->privatecnt++; 561 device->privatecnt++;
562 err = dma_chan_get(chan); 562 err = dma_chan_get(chan);
563 563
564 if (err == -ENODEV) { 564 if (err == -ENODEV) {
565 pr_debug("%s: %s module removed\n", 565 pr_debug("%s: %s module removed\n",
566 __func__, dma_chan_name(chan)); 566 __func__, dma_chan_name(chan));
567 list_del_rcu(&device->global_node); 567 list_del_rcu(&device->global_node);
568 } else if (err) 568 } else if (err)
569 pr_debug("%s: failed to get %s: (%d)\n", 569 pr_debug("%s: failed to get %s: (%d)\n",
570 __func__, dma_chan_name(chan), err); 570 __func__, dma_chan_name(chan), err);
571 else 571 else
572 break; 572 break;
573 if (--device->privatecnt == 0) 573 if (--device->privatecnt == 0)
574 dma_cap_clear(DMA_PRIVATE, device->cap_mask); 574 dma_cap_clear(DMA_PRIVATE, device->cap_mask);
575 chan = NULL; 575 chan = NULL;
576 } 576 }
577 } 577 }
578 mutex_unlock(&dma_list_mutex); 578 mutex_unlock(&dma_list_mutex);
579 579
580 pr_debug("%s: %s (%s)\n", 580 pr_debug("%s: %s (%s)\n",
581 __func__, 581 __func__,
582 chan ? "success" : "fail", 582 chan ? "success" : "fail",
583 chan ? dma_chan_name(chan) : NULL); 583 chan ? dma_chan_name(chan) : NULL);
584 584
585 return chan; 585 return chan;
586 } 586 }
587 EXPORT_SYMBOL_GPL(__dma_request_channel); 587 EXPORT_SYMBOL_GPL(__dma_request_channel);
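/*
 * Editor's sketch (assumption): a typical caller of __dma_request_channel()
 * via the dma_request_channel() convenience macro.  The filter function and
 * the channel-id criterion are illustrative placeholders.
 */
static bool example_filter(struct dma_chan *chan, void *param)
{
	return chan->chan_id == *(int *)param;	/* pick one specific channel */
}

static struct dma_chan *example_grab_memcpy_chan(int wanted_id)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	return dma_request_channel(mask, example_filter, &wanted_id);
}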
588 588
589 /** 589 /**
590 * dma_request_slave_channel - try to allocate an exclusive slave channel 590 * dma_request_slave_channel - try to allocate an exclusive slave channel
591 * @dev: pointer to client device structure 591 * @dev: pointer to client device structure
592 * @name: slave channel name 592 * @name: slave channel name
593 */ 593 */
594 struct dma_chan *dma_request_slave_channel(struct device *dev, const char *name) 594 struct dma_chan *dma_request_slave_channel(struct device *dev, const char *name)
595 { 595 {
596 /* If device-tree is present get slave info from here */ 596 /* If device-tree is present get slave info from here */
597 if (dev->of_node) 597 if (dev->of_node)
598 return of_dma_request_slave_channel(dev->of_node, name); 598 return of_dma_request_slave_channel(dev->of_node, name);
599 599
600 /* If device was enumerated by ACPI get slave info from here */ 600 /* If device was enumerated by ACPI get slave info from here */
601 if (ACPI_HANDLE(dev)) 601 if (ACPI_HANDLE(dev))
602 return acpi_dma_request_slave_chan_by_name(dev, name); 602 return acpi_dma_request_slave_chan_by_name(dev, name);
603 603
604 return NULL; 604 return NULL;
605 } 605 }
606 EXPORT_SYMBOL_GPL(dma_request_slave_channel); 606 EXPORT_SYMBOL_GPL(dma_request_slave_channel);
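/*
 * Editor's sketch (assumption): the slave-channel lookup above as seen from
 * a client driver's probe().  The channel name "rx" is a placeholder that
 * would normally come from the device's "dma-names" property.
 */
static struct dma_chan *example_probe_get_rx(struct device *dev)
{
	struct dma_chan *chan = dma_request_slave_channel(dev, "rx");

	if (!chan)
		return NULL;	/* no DT or ACPI mapping was found */

	/* ... use the channel, then pair with dma_release_channel(chan) ... */
	return chan;
}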
607 607
608 void dma_release_channel(struct dma_chan *chan) 608 void dma_release_channel(struct dma_chan *chan)
609 { 609 {
610 mutex_lock(&dma_list_mutex); 610 mutex_lock(&dma_list_mutex);
611 WARN_ONCE(chan->client_count != 1, 611 WARN_ONCE(chan->client_count != 1,
612 "chan reference count %d != 1\n", chan->client_count); 612 "chan reference count %d != 1\n", chan->client_count);
613 dma_chan_put(chan); 613 dma_chan_put(chan);
614 /* drop PRIVATE cap enabled by __dma_request_channel() */ 614 /* drop PRIVATE cap enabled by __dma_request_channel() */
615 if (--chan->device->privatecnt == 0) 615 if (--chan->device->privatecnt == 0)
616 dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask); 616 dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask);
617 mutex_unlock(&dma_list_mutex); 617 mutex_unlock(&dma_list_mutex);
618 } 618 }
619 EXPORT_SYMBOL_GPL(dma_release_channel); 619 EXPORT_SYMBOL_GPL(dma_release_channel);
620 620
621 /** 621 /**
622 * dmaengine_get - register interest in dma_channels 622 * dmaengine_get - register interest in dma_channels
623 */ 623 */
624 void dmaengine_get(void) 624 void dmaengine_get(void)
625 { 625 {
626 struct dma_device *device, *_d; 626 struct dma_device *device, *_d;
627 struct dma_chan *chan; 627 struct dma_chan *chan;
628 int err; 628 int err;
629 629
630 mutex_lock(&dma_list_mutex); 630 mutex_lock(&dma_list_mutex);
631 dmaengine_ref_count++; 631 dmaengine_ref_count++;
632 632
633 /* try to grab channels */ 633 /* try to grab channels */
634 list_for_each_entry_safe(device, _d, &dma_device_list, global_node) { 634 list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
635 if (dma_has_cap(DMA_PRIVATE, device->cap_mask)) 635 if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
636 continue; 636 continue;
637 list_for_each_entry(chan, &device->channels, device_node) { 637 list_for_each_entry(chan, &device->channels, device_node) {
638 err = dma_chan_get(chan); 638 err = dma_chan_get(chan);
639 if (err == -ENODEV) { 639 if (err == -ENODEV) {
640 /* module removed before we could use it */ 640 /* module removed before we could use it */
641 list_del_rcu(&device->global_node); 641 list_del_rcu(&device->global_node);
642 break; 642 break;
643 } else if (err) 643 } else if (err)
644 pr_debug("%s: failed to get %s: (%d)\n", 644 pr_debug("%s: failed to get %s: (%d)\n",
645 __func__, dma_chan_name(chan), err); 645 __func__, dma_chan_name(chan), err);
646 } 646 }
647 } 647 }
648 648
649 /* if this is the first reference and there were channels 649 /* if this is the first reference and there were channels
650 * waiting we need to rebalance to get those channels 650 * waiting we need to rebalance to get those channels
651 * incorporated into the channel table 651 * incorporated into the channel table
652 */ 652 */
653 if (dmaengine_ref_count == 1) 653 if (dmaengine_ref_count == 1)
654 dma_channel_rebalance(); 654 dma_channel_rebalance();
655 mutex_unlock(&dma_list_mutex); 655 mutex_unlock(&dma_list_mutex);
656 } 656 }
657 EXPORT_SYMBOL(dmaengine_get); 657 EXPORT_SYMBOL(dmaengine_get);
658 658
659 /** 659 /**
660 * dmaengine_put - let dma drivers be removed when ref_count == 0 660 * dmaengine_put - let dma drivers be removed when ref_count == 0
661 */ 661 */
662 void dmaengine_put(void) 662 void dmaengine_put(void)
663 { 663 {
664 struct dma_device *device; 664 struct dma_device *device;
665 struct dma_chan *chan; 665 struct dma_chan *chan;
666 666
667 mutex_lock(&dma_list_mutex); 667 mutex_lock(&dma_list_mutex);
668 dmaengine_ref_count--; 668 dmaengine_ref_count--;
669 BUG_ON(dmaengine_ref_count < 0); 669 BUG_ON(dmaengine_ref_count < 0);
670 /* drop channel references */ 670 /* drop channel references */
671 list_for_each_entry(device, &dma_device_list, global_node) { 671 list_for_each_entry(device, &dma_device_list, global_node) {
672 if (dma_has_cap(DMA_PRIVATE, device->cap_mask)) 672 if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
673 continue; 673 continue;
674 list_for_each_entry(chan, &device->channels, device_node) 674 list_for_each_entry(chan, &device->channels, device_node)
675 dma_chan_put(chan); 675 dma_chan_put(chan);
676 } 676 }
677 mutex_unlock(&dma_list_mutex); 677 mutex_unlock(&dma_list_mutex);
678 } 678 }
679 EXPORT_SYMBOL(dmaengine_put); 679 EXPORT_SYMBOL(dmaengine_put);
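An editorial sketch of the opportunistic-client pattern these two exports implement (example_* names are hypothetical): the client pins the public channel pool for its lifetime, then lets providers unload again.

static int __init example_client_init(void)
{
	dmaengine_get();	/* take refs on public channels, rebalance table */
	return 0;
}

static void __exit example_client_exit(void)
{
	dmaengine_put();	/* refs dropped; providers may be removed again */
}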
680 680
681 static bool device_has_all_tx_types(struct dma_device *device) 681 static bool device_has_all_tx_types(struct dma_device *device)
682 { 682 {
683 /* A device that satisfies this test has channels that will never cause 683 /* A device that satisfies this test has channels that will never cause
684 * an async_tx channel switch event as all possible operation types can 684 * an async_tx channel switch event as all possible operation types can
685 * be handled. 685 * be handled.
686 */ 686 */
687 #ifdef CONFIG_ASYNC_TX_DMA 687 #ifdef CONFIG_ASYNC_TX_DMA
688 if (!dma_has_cap(DMA_INTERRUPT, device->cap_mask)) 688 if (!dma_has_cap(DMA_INTERRUPT, device->cap_mask))
689 return false; 689 return false;
690 #endif 690 #endif
691 691
692 #if defined(CONFIG_ASYNC_MEMCPY) || defined(CONFIG_ASYNC_MEMCPY_MODULE) 692 #if defined(CONFIG_ASYNC_MEMCPY) || defined(CONFIG_ASYNC_MEMCPY_MODULE)
693 if (!dma_has_cap(DMA_MEMCPY, device->cap_mask)) 693 if (!dma_has_cap(DMA_MEMCPY, device->cap_mask))
694 return false; 694 return false;
695 #endif 695 #endif
696 696
697 #if defined(CONFIG_ASYNC_XOR) || defined(CONFIG_ASYNC_XOR_MODULE) 697 #if defined(CONFIG_ASYNC_XOR) || defined(CONFIG_ASYNC_XOR_MODULE)
698 if (!dma_has_cap(DMA_XOR, device->cap_mask)) 698 if (!dma_has_cap(DMA_XOR, device->cap_mask))
699 return false; 699 return false;
700 700
701 #ifndef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA 701 #ifndef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
702 if (!dma_has_cap(DMA_XOR_VAL, device->cap_mask)) 702 if (!dma_has_cap(DMA_XOR_VAL, device->cap_mask))
703 return false; 703 return false;
704 #endif 704 #endif
705 #endif 705 #endif
706 706
707 #if defined(CONFIG_ASYNC_PQ) || defined(CONFIG_ASYNC_PQ_MODULE) 707 #if defined(CONFIG_ASYNC_PQ) || defined(CONFIG_ASYNC_PQ_MODULE)
708 if (!dma_has_cap(DMA_PQ, device->cap_mask)) 708 if (!dma_has_cap(DMA_PQ, device->cap_mask))
709 return false; 709 return false;
710 710
711 #ifndef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA 711 #ifndef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
712 if (!dma_has_cap(DMA_PQ_VAL, device->cap_mask)) 712 if (!dma_has_cap(DMA_PQ_VAL, device->cap_mask))
713 return false; 713 return false;
714 #endif 714 #endif
715 #endif 715 #endif
716 716
717 return true; 717 return true;
718 } 718 }
719 719
720 static int get_dma_id(struct dma_device *device) 720 static int get_dma_id(struct dma_device *device)
721 { 721 {
722 int rc; 722 int rc;
723 723
724 mutex_lock(&dma_list_mutex); 724 mutex_lock(&dma_list_mutex);
725 725
726 rc = idr_alloc(&dma_idr, NULL, 0, 0, GFP_KERNEL); 726 rc = idr_alloc(&dma_idr, NULL, 0, 0, GFP_KERNEL);
727 if (rc >= 0) 727 if (rc >= 0)
728 device->dev_id = rc; 728 device->dev_id = rc;
729 729
730 mutex_unlock(&dma_list_mutex); 730 mutex_unlock(&dma_list_mutex);
731 return rc < 0 ? rc : 0; 731 return rc < 0 ? rc : 0;
732 } 732 }
733 733
734 /** 734 /**
735 * dma_async_device_register - registers DMA devices found 735 * dma_async_device_register - registers DMA devices found
736 * @device: &dma_device 736 * @device: &dma_device
737 */ 737 */
738 int dma_async_device_register(struct dma_device *device) 738 int dma_async_device_register(struct dma_device *device)
739 { 739 {
740 int chancnt = 0, rc; 740 int chancnt = 0, rc;
741 struct dma_chan* chan; 741 struct dma_chan* chan;
742 atomic_t *idr_ref; 742 atomic_t *idr_ref;
743 743
744 if (!device) 744 if (!device)
745 return -ENODEV; 745 return -ENODEV;
746 746
747 /* validate device routines */ 747 /* validate device routines */
748 BUG_ON(dma_has_cap(DMA_MEMCPY, device->cap_mask) && 748 BUG_ON(dma_has_cap(DMA_MEMCPY, device->cap_mask) &&
749 !device->device_prep_dma_memcpy); 749 !device->device_prep_dma_memcpy);
750 BUG_ON(dma_has_cap(DMA_XOR, device->cap_mask) && 750 BUG_ON(dma_has_cap(DMA_XOR, device->cap_mask) &&
751 !device->device_prep_dma_xor); 751 !device->device_prep_dma_xor);
752 BUG_ON(dma_has_cap(DMA_XOR_VAL, device->cap_mask) && 752 BUG_ON(dma_has_cap(DMA_XOR_VAL, device->cap_mask) &&
753 !device->device_prep_dma_xor_val); 753 !device->device_prep_dma_xor_val);
754 BUG_ON(dma_has_cap(DMA_PQ, device->cap_mask) && 754 BUG_ON(dma_has_cap(DMA_PQ, device->cap_mask) &&
755 !device->device_prep_dma_pq); 755 !device->device_prep_dma_pq);
756 BUG_ON(dma_has_cap(DMA_PQ_VAL, device->cap_mask) && 756 BUG_ON(dma_has_cap(DMA_PQ_VAL, device->cap_mask) &&
757 !device->device_prep_dma_pq_val); 757 !device->device_prep_dma_pq_val);
758 BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) && 758 BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) &&
759 !device->device_prep_dma_interrupt); 759 !device->device_prep_dma_interrupt);
760 BUG_ON(dma_has_cap(DMA_SG, device->cap_mask) && 760 BUG_ON(dma_has_cap(DMA_SG, device->cap_mask) &&
761 !device->device_prep_dma_sg); 761 !device->device_prep_dma_sg);
762 BUG_ON(dma_has_cap(DMA_CYCLIC, device->cap_mask) && 762 BUG_ON(dma_has_cap(DMA_CYCLIC, device->cap_mask) &&
763 !device->device_prep_dma_cyclic); 763 !device->device_prep_dma_cyclic);
764 BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) && 764 BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
765 !device->device_control); 765 !device->device_control);
766 BUG_ON(dma_has_cap(DMA_INTERLEAVE, device->cap_mask) && 766 BUG_ON(dma_has_cap(DMA_INTERLEAVE, device->cap_mask) &&
767 !device->device_prep_interleaved_dma); 767 !device->device_prep_interleaved_dma);
768 768
769 BUG_ON(!device->device_alloc_chan_resources); 769 BUG_ON(!device->device_alloc_chan_resources);
770 BUG_ON(!device->device_free_chan_resources); 770 BUG_ON(!device->device_free_chan_resources);
771 BUG_ON(!device->device_tx_status); 771 BUG_ON(!device->device_tx_status);
772 BUG_ON(!device->device_issue_pending); 772 BUG_ON(!device->device_issue_pending);
773 BUG_ON(!device->dev); 773 BUG_ON(!device->dev);
774 774
775 /* note: this only matters in the 775 /* note: this only matters in the
776 * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case 776 * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case
777 */ 777 */
778 if (device_has_all_tx_types(device)) 778 if (device_has_all_tx_types(device))
779 dma_cap_set(DMA_ASYNC_TX, device->cap_mask); 779 dma_cap_set(DMA_ASYNC_TX, device->cap_mask);
780 780
781 idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL); 781 idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL);
782 if (!idr_ref) 782 if (!idr_ref)
783 return -ENOMEM; 783 return -ENOMEM;
784 rc = get_dma_id(device); 784 rc = get_dma_id(device);
785 if (rc != 0) { 785 if (rc != 0) {
786 kfree(idr_ref); 786 kfree(idr_ref);
787 return rc; 787 return rc;
788 } 788 }
789 789
790 atomic_set(idr_ref, 0); 790 atomic_set(idr_ref, 0);
791 791
792 /* represent channels in sysfs. Probably want devs too */ 792 /* represent channels in sysfs. Probably want devs too */
793 list_for_each_entry(chan, &device->channels, device_node) { 793 list_for_each_entry(chan, &device->channels, device_node) {
794 rc = -ENOMEM; 794 rc = -ENOMEM;
795 chan->local = alloc_percpu(typeof(*chan->local)); 795 chan->local = alloc_percpu(typeof(*chan->local));
796 if (chan->local == NULL) 796 if (chan->local == NULL)
797 goto err_out; 797 goto err_out;
798 chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL); 798 chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
799 if (chan->dev == NULL) { 799 if (chan->dev == NULL) {
800 free_percpu(chan->local); 800 free_percpu(chan->local);
801 chan->local = NULL; 801 chan->local = NULL;
802 goto err_out; 802 goto err_out;
803 } 803 }
804 804
805 chan->chan_id = chancnt++; 805 chan->chan_id = chancnt++;
806 chan->dev->device.class = &dma_devclass; 806 chan->dev->device.class = &dma_devclass;
807 chan->dev->device.parent = device->dev; 807 chan->dev->device.parent = device->dev;
808 chan->dev->chan = chan; 808 chan->dev->chan = chan;
809 chan->dev->idr_ref = idr_ref; 809 chan->dev->idr_ref = idr_ref;
810 chan->dev->dev_id = device->dev_id; 810 chan->dev->dev_id = device->dev_id;
811 atomic_inc(idr_ref); 811 atomic_inc(idr_ref);
812 dev_set_name(&chan->dev->device, "dma%dchan%d", 812 dev_set_name(&chan->dev->device, "dma%dchan%d",
813 device->dev_id, chan->chan_id); 813 device->dev_id, chan->chan_id);
814 814
815 rc = device_register(&chan->dev->device); 815 rc = device_register(&chan->dev->device);
816 if (rc) { 816 if (rc) {
817 free_percpu(chan->local); 817 free_percpu(chan->local);
818 chan->local = NULL; 818 chan->local = NULL;
819 kfree(chan->dev); 819 kfree(chan->dev);
820 atomic_dec(idr_ref); 820 atomic_dec(idr_ref);
821 goto err_out; 821 goto err_out;
822 } 822 }
823 chan->client_count = 0; 823 chan->client_count = 0;
824 } 824 }
825 device->chancnt = chancnt; 825 device->chancnt = chancnt;
826 826
827 mutex_lock(&dma_list_mutex); 827 mutex_lock(&dma_list_mutex);
828 /* take references on public channels */ 828 /* take references on public channels */
829 if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask)) 829 if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
830 list_for_each_entry(chan, &device->channels, device_node) { 830 list_for_each_entry(chan, &device->channels, device_node) {
831 /* if clients are already waiting for channels we need 831 /* if clients are already waiting for channels we need
832 * to take references on their behalf 832 * to take references on their behalf
833 */ 833 */
834 if (dma_chan_get(chan) == -ENODEV) { 834 if (dma_chan_get(chan) == -ENODEV) {
835 /* note we can only get here for the first 835 /* note we can only get here for the first
836 * channel as the remaining channels are 836 * channel as the remaining channels are
837 * guaranteed to get a reference 837 * guaranteed to get a reference
838 */ 838 */
839 rc = -ENODEV; 839 rc = -ENODEV;
840 mutex_unlock(&dma_list_mutex); 840 mutex_unlock(&dma_list_mutex);
841 goto err_out; 841 goto err_out;
842 } 842 }
843 } 843 }
844 list_add_tail_rcu(&device->global_node, &dma_device_list); 844 list_add_tail_rcu(&device->global_node, &dma_device_list);
845 if (dma_has_cap(DMA_PRIVATE, device->cap_mask)) 845 if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
846 device->privatecnt++; /* Always private */ 846 device->privatecnt++; /* Always private */
847 dma_channel_rebalance(); 847 dma_channel_rebalance();
848 mutex_unlock(&dma_list_mutex); 848 mutex_unlock(&dma_list_mutex);
849 849
850 return 0; 850 return 0;
851 851
852 err_out: 852 err_out:
853 /* if we never registered a channel just release the idr */ 853 /* if we never registered a channel just release the idr */
854 if (atomic_read(idr_ref) == 0) { 854 if (atomic_read(idr_ref) == 0) {
855 mutex_lock(&dma_list_mutex); 855 mutex_lock(&dma_list_mutex);
856 idr_remove(&dma_idr, device->dev_id); 856 idr_remove(&dma_idr, device->dev_id);
857 mutex_unlock(&dma_list_mutex); 857 mutex_unlock(&dma_list_mutex);
858 kfree(idr_ref); 858 kfree(idr_ref);
859 return rc; 859 return rc;
860 } 860 }
861 861
862 list_for_each_entry(chan, &device->channels, device_node) { 862 list_for_each_entry(chan, &device->channels, device_node) {
863 if (chan->local == NULL) 863 if (chan->local == NULL)
864 continue; 864 continue;
865 mutex_lock(&dma_list_mutex); 865 mutex_lock(&dma_list_mutex);
866 chan->dev->chan = NULL; 866 chan->dev->chan = NULL;
867 mutex_unlock(&dma_list_mutex); 867 mutex_unlock(&dma_list_mutex);
868 device_unregister(&chan->dev->device); 868 device_unregister(&chan->dev->device);
869 free_percpu(chan->local); 869 free_percpu(chan->local);
870 } 870 }
871 return rc; 871 return rc;
872 } 872 }
873 EXPORT_SYMBOL(dma_async_device_register); 873 EXPORT_SYMBOL(dma_async_device_register);
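A provider-side sketch of the registration path just defined (editorial; foo_register() and the foo_* callbacks are placeholders assumed to exist elsewhere in a driver). It sets only the capability/callback pairs that the BUG_ON() validation above requires for a memcpy-only engine.

static struct dma_device foo_dma_dev;

static int foo_register(struct device *dev)
{
	struct dma_device *dd = &foo_dma_dev;

	dma_cap_set(DMA_MEMCPY, dd->cap_mask);
	dd->dev = dev;
	dd->device_alloc_chan_resources = foo_alloc_chan_resources;
	dd->device_free_chan_resources = foo_free_chan_resources;
	dd->device_prep_dma_memcpy = foo_prep_memcpy;
	dd->device_tx_status = foo_tx_status;
	dd->device_issue_pending = foo_issue_pending;

	INIT_LIST_HEAD(&dd->channels);
	/* each struct dma_chan must be linked into dd->channels before this */

	return dma_async_device_register(dd);
}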
874 874
875 /** 875 /**
876 * dma_async_device_unregister - unregister a DMA device 876 * dma_async_device_unregister - unregister a DMA device
877 * @device: &dma_device 877 * @device: &dma_device
878 * 878 *
 879 * This routine is called by dma driver exit routines; dmaengine holds module 879 * This routine is called by dma driver exit routines; dmaengine holds module
 880 * references to prevent it from being called while channels are in use. 880 * references to prevent it from being called while channels are in use.
881 */ 881 */
882 void dma_async_device_unregister(struct dma_device *device) 882 void dma_async_device_unregister(struct dma_device *device)
883 { 883 {
884 struct dma_chan *chan; 884 struct dma_chan *chan;
885 885
886 mutex_lock(&dma_list_mutex); 886 mutex_lock(&dma_list_mutex);
887 list_del_rcu(&device->global_node); 887 list_del_rcu(&device->global_node);
888 dma_channel_rebalance(); 888 dma_channel_rebalance();
889 mutex_unlock(&dma_list_mutex); 889 mutex_unlock(&dma_list_mutex);
890 890
891 list_for_each_entry(chan, &device->channels, device_node) { 891 list_for_each_entry(chan, &device->channels, device_node) {
892 WARN_ONCE(chan->client_count, 892 WARN_ONCE(chan->client_count,
893 "%s called while %d clients hold a reference\n", 893 "%s called while %d clients hold a reference\n",
894 __func__, chan->client_count); 894 __func__, chan->client_count);
895 mutex_lock(&dma_list_mutex); 895 mutex_lock(&dma_list_mutex);
896 chan->dev->chan = NULL; 896 chan->dev->chan = NULL;
897 mutex_unlock(&dma_list_mutex); 897 mutex_unlock(&dma_list_mutex);
898 device_unregister(&chan->dev->device); 898 device_unregister(&chan->dev->device);
899 free_percpu(chan->local); 899 free_percpu(chan->local);
900 } 900 }
901 } 901 }
902 EXPORT_SYMBOL(dma_async_device_unregister); 902 EXPORT_SYMBOL(dma_async_device_unregister);
903 903
904 /** 904 /**
905 * dma_async_memcpy_buf_to_buf - offloaded copy between virtual addresses 905 * dma_async_memcpy_buf_to_buf - offloaded copy between virtual addresses
906 * @chan: DMA channel to offload copy to 906 * @chan: DMA channel to offload copy to
907 * @dest: destination address (virtual) 907 * @dest: destination address (virtual)
908 * @src: source address (virtual) 908 * @src: source address (virtual)
909 * @len: length 909 * @len: length
910 * 910 *
911 * Both @dest and @src must be mappable to a bus address according to the 911 * Both @dest and @src must be mappable to a bus address according to the
912 * DMA mapping API rules for streaming mappings. 912 * DMA mapping API rules for streaming mappings.
913 * Both @dest and @src must stay memory resident (kernel memory or locked 913 * Both @dest and @src must stay memory resident (kernel memory or locked
914 * user space pages). 914 * user space pages).
915 */ 915 */
916 dma_cookie_t 916 dma_cookie_t
917 dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest, 917 dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
918 void *src, size_t len) 918 void *src, size_t len)
919 { 919 {
920 struct dma_device *dev = chan->device; 920 struct dma_device *dev = chan->device;
921 struct dma_async_tx_descriptor *tx; 921 struct dma_async_tx_descriptor *tx;
922 dma_addr_t dma_dest, dma_src; 922 dma_addr_t dma_dest, dma_src;
923 dma_cookie_t cookie; 923 dma_cookie_t cookie;
924 unsigned long flags; 924 unsigned long flags;
925 925
926 dma_src = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE); 926 dma_src = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE);
927 dma_dest = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE); 927 dma_dest = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE);
928 flags = DMA_CTRL_ACK | 928 flags = DMA_CTRL_ACK |
929 DMA_COMPL_SRC_UNMAP_SINGLE | 929 DMA_COMPL_SRC_UNMAP_SINGLE |
930 DMA_COMPL_DEST_UNMAP_SINGLE; 930 DMA_COMPL_DEST_UNMAP_SINGLE;
931 tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags); 931 tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);
932 932
933 if (!tx) { 933 if (!tx) {
934 dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE); 934 dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
935 dma_unmap_single(dev->dev, dma_dest, len, DMA_FROM_DEVICE); 935 dma_unmap_single(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
936 return -ENOMEM; 936 return -ENOMEM;
937 } 937 }
938 938
939 tx->callback = NULL; 939 tx->callback = NULL;
940 cookie = tx->tx_submit(tx); 940 cookie = tx->tx_submit(tx);
941 941
942 preempt_disable(); 942 preempt_disable();
943 __this_cpu_add(chan->local->bytes_transferred, len); 943 __this_cpu_add(chan->local->bytes_transferred, len);
944 __this_cpu_inc(chan->local->memcpy_count); 944 __this_cpu_inc(chan->local->memcpy_count);
945 preempt_enable(); 945 preempt_enable();
946 946
947 return cookie; 947 return cookie;
948 } 948 }
949 EXPORT_SYMBOL(dma_async_memcpy_buf_to_buf); 949 EXPORT_SYMBOL(dma_async_memcpy_buf_to_buf);
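A caller-side sketch for the helper above (editorial; example_offload_copy() is hypothetical and assumes the caller already owns a DMA_MEMCPY-capable channel): submit the copy, kick the engine, and poll for completion with dma_sync_wait().

static int example_offload_copy(struct dma_chan *chan, void *dst, void *src,
				size_t len)
{
	dma_cookie_t cookie;

	cookie = dma_async_memcpy_buf_to_buf(chan, dst, src, len);
	if (dma_submit_error(cookie))
		return -EIO;		/* fall back to a plain memcpy() */

	dma_async_issue_pending(chan);
	return dma_sync_wait(chan, cookie) == DMA_COMPLETE ? 0 : -EIO;
}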
950 950
951 /** 951 /**
952 * dma_async_memcpy_buf_to_pg - offloaded copy from address to page 952 * dma_async_memcpy_buf_to_pg - offloaded copy from address to page
953 * @chan: DMA channel to offload copy to 953 * @chan: DMA channel to offload copy to
954 * @page: destination page 954 * @page: destination page
955 * @offset: offset in page to copy to 955 * @offset: offset in page to copy to
956 * @kdata: source address (virtual) 956 * @kdata: source address (virtual)
957 * @len: length 957 * @len: length
958 * 958 *
959 * Both @page/@offset and @kdata must be mappable to a bus address according 959 * Both @page/@offset and @kdata must be mappable to a bus address according
960 * to the DMA mapping API rules for streaming mappings. 960 * to the DMA mapping API rules for streaming mappings.
961 * Both @page/@offset and @kdata must stay memory resident (kernel memory or 961 * Both @page/@offset and @kdata must stay memory resident (kernel memory or
 962 * locked user space pages). 962 * locked user space pages).
963 */ 963 */
964 dma_cookie_t 964 dma_cookie_t
965 dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page, 965 dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
966 unsigned int offset, void *kdata, size_t len) 966 unsigned int offset, void *kdata, size_t len)
967 { 967 {
968 struct dma_device *dev = chan->device; 968 struct dma_device *dev = chan->device;
969 struct dma_async_tx_descriptor *tx; 969 struct dma_async_tx_descriptor *tx;
970 dma_addr_t dma_dest, dma_src; 970 dma_addr_t dma_dest, dma_src;
971 dma_cookie_t cookie; 971 dma_cookie_t cookie;
972 unsigned long flags; 972 unsigned long flags;
973 973
974 dma_src = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE); 974 dma_src = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE);
975 dma_dest = dma_map_page(dev->dev, page, offset, len, DMA_FROM_DEVICE); 975 dma_dest = dma_map_page(dev->dev, page, offset, len, DMA_FROM_DEVICE);
976 flags = DMA_CTRL_ACK | DMA_COMPL_SRC_UNMAP_SINGLE; 976 flags = DMA_CTRL_ACK | DMA_COMPL_SRC_UNMAP_SINGLE;
977 tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags); 977 tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);
978 978
979 if (!tx) { 979 if (!tx) {
980 dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE); 980 dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
981 dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE); 981 dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
982 return -ENOMEM; 982 return -ENOMEM;
983 } 983 }
984 984
985 tx->callback = NULL; 985 tx->callback = NULL;
986 cookie = tx->tx_submit(tx); 986 cookie = tx->tx_submit(tx);
987 987
988 preempt_disable(); 988 preempt_disable();
989 __this_cpu_add(chan->local->bytes_transferred, len); 989 __this_cpu_add(chan->local->bytes_transferred, len);
990 __this_cpu_inc(chan->local->memcpy_count); 990 __this_cpu_inc(chan->local->memcpy_count);
991 preempt_enable(); 991 preempt_enable();
992 992
993 return cookie; 993 return cookie;
994 } 994 }
995 EXPORT_SYMBOL(dma_async_memcpy_buf_to_pg); 995 EXPORT_SYMBOL(dma_async_memcpy_buf_to_pg);
996 996
997 /** 997 /**
998 * dma_async_memcpy_pg_to_pg - offloaded copy from page to page 998 * dma_async_memcpy_pg_to_pg - offloaded copy from page to page
999 * @chan: DMA channel to offload copy to 999 * @chan: DMA channel to offload copy to
1000 * @dest_pg: destination page 1000 * @dest_pg: destination page
1001 * @dest_off: offset in page to copy to 1001 * @dest_off: offset in page to copy to
1002 * @src_pg: source page 1002 * @src_pg: source page
1003 * @src_off: offset in page to copy from 1003 * @src_off: offset in page to copy from
1004 * @len: length 1004 * @len: length
1005 * 1005 *
1006 * Both @dest_page/@dest_off and @src_page/@src_off must be mappable to a bus 1006 * Both @dest_page/@dest_off and @src_page/@src_off must be mappable to a bus
1007 * address according to the DMA mapping API rules for streaming mappings. 1007 * address according to the DMA mapping API rules for streaming mappings.
1008 * Both @dest_page/@dest_off and @src_page/@src_off must stay memory resident 1008 * Both @dest_page/@dest_off and @src_page/@src_off must stay memory resident
1009 * (kernel memory or locked user space pages). 1009 * (kernel memory or locked user space pages).
1010 */ 1010 */
1011 dma_cookie_t 1011 dma_cookie_t
1012 dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg, 1012 dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
1013 unsigned int dest_off, struct page *src_pg, unsigned int src_off, 1013 unsigned int dest_off, struct page *src_pg, unsigned int src_off,
1014 size_t len) 1014 size_t len)
1015 { 1015 {
1016 struct dma_device *dev = chan->device; 1016 struct dma_device *dev = chan->device;
1017 struct dma_async_tx_descriptor *tx; 1017 struct dma_async_tx_descriptor *tx;
1018 dma_addr_t dma_dest, dma_src; 1018 dma_addr_t dma_dest, dma_src;
1019 dma_cookie_t cookie; 1019 dma_cookie_t cookie;
1020 unsigned long flags; 1020 unsigned long flags;
1021 1021
1022 dma_src = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE); 1022 dma_src = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE);
1023 dma_dest = dma_map_page(dev->dev, dest_pg, dest_off, len, 1023 dma_dest = dma_map_page(dev->dev, dest_pg, dest_off, len,
1024 DMA_FROM_DEVICE); 1024 DMA_FROM_DEVICE);
1025 flags = DMA_CTRL_ACK; 1025 flags = DMA_CTRL_ACK;
1026 tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags); 1026 tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);
1027 1027
1028 if (!tx) { 1028 if (!tx) {
1029 dma_unmap_page(dev->dev, dma_src, len, DMA_TO_DEVICE); 1029 dma_unmap_page(dev->dev, dma_src, len, DMA_TO_DEVICE);
1030 dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE); 1030 dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
1031 return -ENOMEM; 1031 return -ENOMEM;
1032 } 1032 }
1033 1033
1034 tx->callback = NULL; 1034 tx->callback = NULL;
1035 cookie = tx->tx_submit(tx); 1035 cookie = tx->tx_submit(tx);
1036 1036
1037 preempt_disable(); 1037 preempt_disable();
1038 __this_cpu_add(chan->local->bytes_transferred, len); 1038 __this_cpu_add(chan->local->bytes_transferred, len);
1039 __this_cpu_inc(chan->local->memcpy_count); 1039 __this_cpu_inc(chan->local->memcpy_count);
1040 preempt_enable(); 1040 preempt_enable();
1041 1041
1042 return cookie; 1042 return cookie;
1043 } 1043 }
1044 EXPORT_SYMBOL(dma_async_memcpy_pg_to_pg); 1044 EXPORT_SYMBOL(dma_async_memcpy_pg_to_pg);
1045 1045
1046 void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx, 1046 void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
1047 struct dma_chan *chan) 1047 struct dma_chan *chan)
1048 { 1048 {
1049 tx->chan = chan; 1049 tx->chan = chan;
1050 #ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH 1050 #ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
1051 spin_lock_init(&tx->lock); 1051 spin_lock_init(&tx->lock);
1052 #endif 1052 #endif
1053 } 1053 }
1054 EXPORT_SYMBOL(dma_async_tx_descriptor_init); 1054 EXPORT_SYMBOL(dma_async_tx_descriptor_init);
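A provider-side sketch of when this initializer runs (editorial; struct foo_desc and foo_tx_submit() are placeholders): the driver embeds the descriptor, ties it to its channel, and installs its ->tx_submit() hook.

static void example_init_desc(struct foo_desc *d, struct dma_chan *chan)
{
	dma_async_tx_descriptor_init(&d->txd, chan);
	d->txd.tx_submit = foo_tx_submit;	/* assigns a cookie, queues d */
	d->txd.flags = DMA_CTRL_ACK;
}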
1055 1055
1056 /* dma_wait_for_async_tx - spin wait for a transaction to complete 1056 /* dma_wait_for_async_tx - spin wait for a transaction to complete
1057 * @tx: in-flight transaction to wait on 1057 * @tx: in-flight transaction to wait on
1058 */ 1058 */
1059 enum dma_status 1059 enum dma_status
1060 dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx) 1060 dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
1061 { 1061 {
1062 unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000); 1062 unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);
1063 1063
1064 if (!tx) 1064 if (!tx)
1065 return DMA_SUCCESS; 1065 return DMA_COMPLETE;
1066 1066
1067 while (tx->cookie == -EBUSY) { 1067 while (tx->cookie == -EBUSY) {
1068 if (time_after_eq(jiffies, dma_sync_wait_timeout)) { 1068 if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
1069 pr_err("%s timeout waiting for descriptor submission\n", 1069 pr_err("%s timeout waiting for descriptor submission\n",
1070 __func__); 1070 __func__);
1071 return DMA_ERROR; 1071 return DMA_ERROR;
1072 } 1072 }
1073 cpu_relax(); 1073 cpu_relax();
1074 } 1074 }
1075 return dma_sync_wait(tx->chan, tx->cookie); 1075 return dma_sync_wait(tx->chan, tx->cookie);
1076 } 1076 }
1077 EXPORT_SYMBOL_GPL(dma_wait_for_async_tx); 1077 EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);
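An editorial sketch of the async_tx-layer pattern this helper serves (example_async_copy() is hypothetical; async_memcpy() and init_async_submit() are the crypto/async_tx entry points).

static void example_async_copy(struct page *dst, struct page *src, size_t len)
{
	struct async_submit_ctl submit;
	struct dma_async_tx_descriptor *tx;

	init_async_submit(&submit, ASYNC_TX_ACK, NULL, NULL, NULL, NULL);
	tx = async_memcpy(dst, src, 0, 0, len, &submit);

	/* dma_sync_wait() underneath issues pending work before polling */
	if (dma_wait_for_async_tx(tx) != DMA_COMPLETE)
		pr_err("example: copy did not complete\n");
}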
1078 1078
1079 /* dma_run_dependencies - helper routine for dma drivers to process 1079 /* dma_run_dependencies - helper routine for dma drivers to process
1080 * (start) dependent operations on their target channel 1080 * (start) dependent operations on their target channel
1081 * @tx: transaction with dependencies 1081 * @tx: transaction with dependencies
1082 */ 1082 */
1083 void dma_run_dependencies(struct dma_async_tx_descriptor *tx) 1083 void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
1084 { 1084 {
1085 struct dma_async_tx_descriptor *dep = txd_next(tx); 1085 struct dma_async_tx_descriptor *dep = txd_next(tx);
1086 struct dma_async_tx_descriptor *dep_next; 1086 struct dma_async_tx_descriptor *dep_next;
1087 struct dma_chan *chan; 1087 struct dma_chan *chan;
1088 1088
1089 if (!dep) 1089 if (!dep)
1090 return; 1090 return;
1091 1091
1092 /* we'll submit tx->next now, so clear the link */ 1092 /* we'll submit tx->next now, so clear the link */
1093 txd_clear_next(tx); 1093 txd_clear_next(tx);
1094 chan = dep->chan; 1094 chan = dep->chan;
1095 1095
1096 /* keep submitting up until a channel switch is detected 1096 /* keep submitting up until a channel switch is detected
1097 * in that case we will be called again as a result of 1097 * in that case we will be called again as a result of
1098 * processing the interrupt from async_tx_channel_switch 1098 * processing the interrupt from async_tx_channel_switch
1099 */ 1099 */
1100 for (; dep; dep = dep_next) { 1100 for (; dep; dep = dep_next) {
1101 txd_lock(dep); 1101 txd_lock(dep);
1102 txd_clear_parent(dep); 1102 txd_clear_parent(dep);
1103 dep_next = txd_next(dep); 1103 dep_next = txd_next(dep);
1104 if (dep_next && dep_next->chan == chan) 1104 if (dep_next && dep_next->chan == chan)
1105 txd_clear_next(dep); /* ->next will be submitted */ 1105 txd_clear_next(dep); /* ->next will be submitted */
1106 else 1106 else
1107 dep_next = NULL; /* submit current dep and terminate */ 1107 dep_next = NULL; /* submit current dep and terminate */
1108 txd_unlock(dep); 1108 txd_unlock(dep);
1109 1109
1110 dep->tx_submit(dep); 1110 dep->tx_submit(dep);
1111 } 1111 }
1112 1112
1113 chan->device->device_issue_pending(chan); 1113 chan->device->device_issue_pending(chan);
1114 } 1114 }
1115 EXPORT_SYMBOL_GPL(dma_run_dependencies); 1115 EXPORT_SYMBOL_GPL(dma_run_dependencies);
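A driver-side sketch of where this helper is normally called from (editorial; struct foo_desc and foo_complete_desc() are placeholders): the channel's completion path records the cookie, runs the client callback, then lets dependent async_tx operations proceed.

static void foo_complete_desc(struct foo_desc *d)
{
	struct dma_async_tx_descriptor *txd = &d->txd;

	dma_cookie_complete(txd);		/* record the completed cookie */
	if (txd->callback)
		txd->callback(txd->callback_param);
	dma_run_dependencies(txd);		/* submit dependent descriptors */
}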
1116 1116
1117 static int __init dma_bus_init(void) 1117 static int __init dma_bus_init(void)
1118 { 1118 {
1119 return class_register(&dma_devclass); 1119 return class_register(&dma_devclass);
1120 } 1120 }
1121 arch_initcall(dma_bus_init); 1121 arch_initcall(dma_bus_init);
1122 1122
1123 1123
1124 1124
drivers/dma/dmatest.c
1 /* 1 /*
2 * DMA Engine test module 2 * DMA Engine test module
3 * 3 *
4 * Copyright (C) 2007 Atmel Corporation 4 * Copyright (C) 2007 Atmel Corporation
5 * Copyright (C) 2013 Intel Corporation 5 * Copyright (C) 2013 Intel Corporation
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as 8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation. 9 * published by the Free Software Foundation.
10 */ 10 */
11 #include <linux/delay.h> 11 #include <linux/delay.h>
12 #include <linux/dma-mapping.h> 12 #include <linux/dma-mapping.h>
13 #include <linux/dmaengine.h> 13 #include <linux/dmaengine.h>
14 #include <linux/freezer.h> 14 #include <linux/freezer.h>
15 #include <linux/init.h> 15 #include <linux/init.h>
16 #include <linux/kthread.h> 16 #include <linux/kthread.h>
17 #include <linux/module.h> 17 #include <linux/module.h>
18 #include <linux/moduleparam.h> 18 #include <linux/moduleparam.h>
19 #include <linux/random.h> 19 #include <linux/random.h>
20 #include <linux/slab.h> 20 #include <linux/slab.h>
21 #include <linux/wait.h> 21 #include <linux/wait.h>
22 #include <linux/ctype.h> 22 #include <linux/ctype.h>
23 #include <linux/debugfs.h> 23 #include <linux/debugfs.h>
24 #include <linux/uaccess.h> 24 #include <linux/uaccess.h>
25 #include <linux/seq_file.h> 25 #include <linux/seq_file.h>
26 26
27 static unsigned int test_buf_size = 16384; 27 static unsigned int test_buf_size = 16384;
28 module_param(test_buf_size, uint, S_IRUGO | S_IWUSR); 28 module_param(test_buf_size, uint, S_IRUGO | S_IWUSR);
29 MODULE_PARM_DESC(test_buf_size, "Size of the memcpy test buffer"); 29 MODULE_PARM_DESC(test_buf_size, "Size of the memcpy test buffer");
30 30
31 static char test_channel[20]; 31 static char test_channel[20];
32 module_param_string(channel, test_channel, sizeof(test_channel), 32 module_param_string(channel, test_channel, sizeof(test_channel),
33 S_IRUGO | S_IWUSR); 33 S_IRUGO | S_IWUSR);
34 MODULE_PARM_DESC(channel, "Bus ID of the channel to test (default: any)"); 34 MODULE_PARM_DESC(channel, "Bus ID of the channel to test (default: any)");
35 35
36 static char test_device[20]; 36 static char test_device[20];
37 module_param_string(device, test_device, sizeof(test_device), 37 module_param_string(device, test_device, sizeof(test_device),
38 S_IRUGO | S_IWUSR); 38 S_IRUGO | S_IWUSR);
39 MODULE_PARM_DESC(device, "Bus ID of the DMA Engine to test (default: any)"); 39 MODULE_PARM_DESC(device, "Bus ID of the DMA Engine to test (default: any)");
40 40
41 static unsigned int threads_per_chan = 1; 41 static unsigned int threads_per_chan = 1;
42 module_param(threads_per_chan, uint, S_IRUGO | S_IWUSR); 42 module_param(threads_per_chan, uint, S_IRUGO | S_IWUSR);
43 MODULE_PARM_DESC(threads_per_chan, 43 MODULE_PARM_DESC(threads_per_chan,
44 "Number of threads to start per channel (default: 1)"); 44 "Number of threads to start per channel (default: 1)");
45 45
46 static unsigned int max_channels; 46 static unsigned int max_channels;
47 module_param(max_channels, uint, S_IRUGO | S_IWUSR); 47 module_param(max_channels, uint, S_IRUGO | S_IWUSR);
48 MODULE_PARM_DESC(max_channels, 48 MODULE_PARM_DESC(max_channels,
49 "Maximum number of channels to use (default: all)"); 49 "Maximum number of channels to use (default: all)");
50 50
51 static unsigned int iterations; 51 static unsigned int iterations;
52 module_param(iterations, uint, S_IRUGO | S_IWUSR); 52 module_param(iterations, uint, S_IRUGO | S_IWUSR);
53 MODULE_PARM_DESC(iterations, 53 MODULE_PARM_DESC(iterations,
54 "Iterations before stopping test (default: infinite)"); 54 "Iterations before stopping test (default: infinite)");
55 55
56 static unsigned int xor_sources = 3; 56 static unsigned int xor_sources = 3;
57 module_param(xor_sources, uint, S_IRUGO | S_IWUSR); 57 module_param(xor_sources, uint, S_IRUGO | S_IWUSR);
58 MODULE_PARM_DESC(xor_sources, 58 MODULE_PARM_DESC(xor_sources,
59 "Number of xor source buffers (default: 3)"); 59 "Number of xor source buffers (default: 3)");
60 60
61 static unsigned int pq_sources = 3; 61 static unsigned int pq_sources = 3;
62 module_param(pq_sources, uint, S_IRUGO | S_IWUSR); 62 module_param(pq_sources, uint, S_IRUGO | S_IWUSR);
63 MODULE_PARM_DESC(pq_sources, 63 MODULE_PARM_DESC(pq_sources,
64 "Number of p+q source buffers (default: 3)"); 64 "Number of p+q source buffers (default: 3)");
65 65
66 static int timeout = 3000; 66 static int timeout = 3000;
67 module_param(timeout, uint, S_IRUGO | S_IWUSR); 67 module_param(timeout, uint, S_IRUGO | S_IWUSR);
68 MODULE_PARM_DESC(timeout, "Transfer Timeout in msec (default: 3000), " 68 MODULE_PARM_DESC(timeout, "Transfer Timeout in msec (default: 3000), "
69 "Pass -1 for infinite timeout"); 69 "Pass -1 for infinite timeout");
70 70
71 /* Maximum amount of mismatched bytes in buffer to print */ 71 /* Maximum amount of mismatched bytes in buffer to print */
72 #define MAX_ERROR_COUNT 32 72 #define MAX_ERROR_COUNT 32
73 73
74 /* 74 /*
 75 * Initialization patterns. All bytes in the source buffer have bit 7 75 * Initialization patterns. All bytes in the source buffer have bit 7
 76 * set, all bytes in the destination buffer have bit 7 cleared. 76 * set, all bytes in the destination buffer have bit 7 cleared.
77 * 77 *
78 * Bit 6 is set for all bytes which are to be copied by the DMA 78 * Bit 6 is set for all bytes which are to be copied by the DMA
79 * engine. Bit 5 is set for all bytes which are to be overwritten by 79 * engine. Bit 5 is set for all bytes which are to be overwritten by
80 * the DMA engine. 80 * the DMA engine.
81 * 81 *
82 * The remaining bits are the inverse of a counter which increments by 82 * The remaining bits are the inverse of a counter which increments by
83 * one for each byte address. 83 * one for each byte address.
84 */ 84 */
85 #define PATTERN_SRC 0x80 85 #define PATTERN_SRC 0x80
86 #define PATTERN_DST 0x00 86 #define PATTERN_DST 0x00
87 #define PATTERN_COPY 0x40 87 #define PATTERN_COPY 0x40
88 #define PATTERN_OVERWRITE 0x20 88 #define PATTERN_OVERWRITE 0x20
89 #define PATTERN_COUNT_MASK 0x1f 89 #define PATTERN_COUNT_MASK 0x1f
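A small editorial sketch of the byte these macros encode for index i, matching the comment above and the init routines below (example_src_copy_byte() is hypothetical).

static inline u8 example_src_copy_byte(unsigned int i)
{
	/* i = 2: 0x80 | 0x40 | (~2 & 0x1f) = 0xdd */
	return PATTERN_SRC | PATTERN_COPY | (~i & PATTERN_COUNT_MASK);
}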
90 90
91 enum dmatest_error_type { 91 enum dmatest_error_type {
92 DMATEST_ET_OK, 92 DMATEST_ET_OK,
93 DMATEST_ET_MAP_SRC, 93 DMATEST_ET_MAP_SRC,
94 DMATEST_ET_MAP_DST, 94 DMATEST_ET_MAP_DST,
95 DMATEST_ET_PREP, 95 DMATEST_ET_PREP,
96 DMATEST_ET_SUBMIT, 96 DMATEST_ET_SUBMIT,
97 DMATEST_ET_TIMEOUT, 97 DMATEST_ET_TIMEOUT,
98 DMATEST_ET_DMA_ERROR, 98 DMATEST_ET_DMA_ERROR,
99 DMATEST_ET_DMA_IN_PROGRESS, 99 DMATEST_ET_DMA_IN_PROGRESS,
100 DMATEST_ET_VERIFY, 100 DMATEST_ET_VERIFY,
101 DMATEST_ET_VERIFY_BUF, 101 DMATEST_ET_VERIFY_BUF,
102 }; 102 };
103 103
104 struct dmatest_verify_buffer { 104 struct dmatest_verify_buffer {
105 unsigned int index; 105 unsigned int index;
106 u8 expected; 106 u8 expected;
107 u8 actual; 107 u8 actual;
108 }; 108 };
109 109
110 struct dmatest_verify_result { 110 struct dmatest_verify_result {
111 unsigned int error_count; 111 unsigned int error_count;
112 struct dmatest_verify_buffer data[MAX_ERROR_COUNT]; 112 struct dmatest_verify_buffer data[MAX_ERROR_COUNT];
113 u8 pattern; 113 u8 pattern;
114 bool is_srcbuf; 114 bool is_srcbuf;
115 }; 115 };
116 116
117 struct dmatest_thread_result { 117 struct dmatest_thread_result {
118 struct list_head node; 118 struct list_head node;
119 unsigned int n; 119 unsigned int n;
120 unsigned int src_off; 120 unsigned int src_off;
121 unsigned int dst_off; 121 unsigned int dst_off;
122 unsigned int len; 122 unsigned int len;
123 enum dmatest_error_type type; 123 enum dmatest_error_type type;
124 union { 124 union {
125 unsigned long data; 125 unsigned long data;
126 dma_cookie_t cookie; 126 dma_cookie_t cookie;
127 enum dma_status status; 127 enum dma_status status;
128 int error; 128 int error;
129 struct dmatest_verify_result *vr; 129 struct dmatest_verify_result *vr;
130 }; 130 };
131 }; 131 };
132 132
133 struct dmatest_result { 133 struct dmatest_result {
134 struct list_head node; 134 struct list_head node;
135 char *name; 135 char *name;
136 struct list_head results; 136 struct list_head results;
137 }; 137 };
138 138
139 struct dmatest_info; 139 struct dmatest_info;
140 140
141 struct dmatest_thread { 141 struct dmatest_thread {
142 struct list_head node; 142 struct list_head node;
143 struct dmatest_info *info; 143 struct dmatest_info *info;
144 struct task_struct *task; 144 struct task_struct *task;
145 struct dma_chan *chan; 145 struct dma_chan *chan;
146 u8 **srcs; 146 u8 **srcs;
147 u8 **dsts; 147 u8 **dsts;
148 enum dma_transaction_type type; 148 enum dma_transaction_type type;
149 bool done; 149 bool done;
150 }; 150 };
151 151
152 struct dmatest_chan { 152 struct dmatest_chan {
153 struct list_head node; 153 struct list_head node;
154 struct dma_chan *chan; 154 struct dma_chan *chan;
155 struct list_head threads; 155 struct list_head threads;
156 }; 156 };
157 157
158 /** 158 /**
159 * struct dmatest_params - test parameters. 159 * struct dmatest_params - test parameters.
160 * @buf_size: size of the memcpy test buffer 160 * @buf_size: size of the memcpy test buffer
161 * @channel: bus ID of the channel to test 161 * @channel: bus ID of the channel to test
162 * @device: bus ID of the DMA Engine to test 162 * @device: bus ID of the DMA Engine to test
163 * @threads_per_chan: number of threads to start per channel 163 * @threads_per_chan: number of threads to start per channel
164 * @max_channels: maximum number of channels to use 164 * @max_channels: maximum number of channels to use
165 * @iterations: iterations before stopping test 165 * @iterations: iterations before stopping test
166 * @xor_sources: number of xor source buffers 166 * @xor_sources: number of xor source buffers
167 * @pq_sources: number of p+q source buffers 167 * @pq_sources: number of p+q source buffers
168 * @timeout: transfer timeout in msec, -1 for infinite timeout 168 * @timeout: transfer timeout in msec, -1 for infinite timeout
169 */ 169 */
170 struct dmatest_params { 170 struct dmatest_params {
171 unsigned int buf_size; 171 unsigned int buf_size;
172 char channel[20]; 172 char channel[20];
173 char device[20]; 173 char device[20];
174 unsigned int threads_per_chan; 174 unsigned int threads_per_chan;
175 unsigned int max_channels; 175 unsigned int max_channels;
176 unsigned int iterations; 176 unsigned int iterations;
177 unsigned int xor_sources; 177 unsigned int xor_sources;
178 unsigned int pq_sources; 178 unsigned int pq_sources;
179 int timeout; 179 int timeout;
180 }; 180 };
181 181
182 /** 182 /**
183 * struct dmatest_info - test information. 183 * struct dmatest_info - test information.
184 * @params: test parameters 184 * @params: test parameters
185 * @lock: access protection to the fields of this structure 185 * @lock: access protection to the fields of this structure
186 */ 186 */
187 struct dmatest_info { 187 struct dmatest_info {
188 /* Test parameters */ 188 /* Test parameters */
189 struct dmatest_params params; 189 struct dmatest_params params;
190 190
191 /* Internal state */ 191 /* Internal state */
192 struct list_head channels; 192 struct list_head channels;
193 unsigned int nr_channels; 193 unsigned int nr_channels;
194 struct mutex lock; 194 struct mutex lock;
195 195
196 /* debugfs related stuff */ 196 /* debugfs related stuff */
197 struct dentry *root; 197 struct dentry *root;
198 198
199 /* Test results */ 199 /* Test results */
200 struct list_head results; 200 struct list_head results;
201 struct mutex results_lock; 201 struct mutex results_lock;
202 }; 202 };
203 203
204 static struct dmatest_info test_info; 204 static struct dmatest_info test_info;
205 205
206 static bool dmatest_match_channel(struct dmatest_params *params, 206 static bool dmatest_match_channel(struct dmatest_params *params,
207 struct dma_chan *chan) 207 struct dma_chan *chan)
208 { 208 {
209 if (params->channel[0] == '\0') 209 if (params->channel[0] == '\0')
210 return true; 210 return true;
211 return strcmp(dma_chan_name(chan), params->channel) == 0; 211 return strcmp(dma_chan_name(chan), params->channel) == 0;
212 } 212 }
213 213
214 static bool dmatest_match_device(struct dmatest_params *params, 214 static bool dmatest_match_device(struct dmatest_params *params,
215 struct dma_device *device) 215 struct dma_device *device)
216 { 216 {
217 if (params->device[0] == '\0') 217 if (params->device[0] == '\0')
218 return true; 218 return true;
219 return strcmp(dev_name(device->dev), params->device) == 0; 219 return strcmp(dev_name(device->dev), params->device) == 0;
220 } 220 }
221 221
222 static unsigned long dmatest_random(void) 222 static unsigned long dmatest_random(void)
223 { 223 {
224 unsigned long buf; 224 unsigned long buf;
225 225
226 get_random_bytes(&buf, sizeof(buf)); 226 get_random_bytes(&buf, sizeof(buf));
227 return buf; 227 return buf;
228 } 228 }
229 229
230 static void dmatest_init_srcs(u8 **bufs, unsigned int start, unsigned int len, 230 static void dmatest_init_srcs(u8 **bufs, unsigned int start, unsigned int len,
231 unsigned int buf_size) 231 unsigned int buf_size)
232 { 232 {
233 unsigned int i; 233 unsigned int i;
234 u8 *buf; 234 u8 *buf;
235 235
236 for (; (buf = *bufs); bufs++) { 236 for (; (buf = *bufs); bufs++) {
237 for (i = 0; i < start; i++) 237 for (i = 0; i < start; i++)
238 buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK); 238 buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
239 for ( ; i < start + len; i++) 239 for ( ; i < start + len; i++)
240 buf[i] = PATTERN_SRC | PATTERN_COPY 240 buf[i] = PATTERN_SRC | PATTERN_COPY
241 | (~i & PATTERN_COUNT_MASK); 241 | (~i & PATTERN_COUNT_MASK);
242 for ( ; i < buf_size; i++) 242 for ( ; i < buf_size; i++)
243 buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK); 243 buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
244 buf++; 244 buf++;
245 } 245 }
246 } 246 }
247 247
248 static void dmatest_init_dsts(u8 **bufs, unsigned int start, unsigned int len, 248 static void dmatest_init_dsts(u8 **bufs, unsigned int start, unsigned int len,
249 unsigned int buf_size) 249 unsigned int buf_size)
250 { 250 {
251 unsigned int i; 251 unsigned int i;
252 u8 *buf; 252 u8 *buf;
253 253
254 for (; (buf = *bufs); bufs++) { 254 for (; (buf = *bufs); bufs++) {
255 for (i = 0; i < start; i++) 255 for (i = 0; i < start; i++)
256 buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK); 256 buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
257 for ( ; i < start + len; i++) 257 for ( ; i < start + len; i++)
258 buf[i] = PATTERN_DST | PATTERN_OVERWRITE 258 buf[i] = PATTERN_DST | PATTERN_OVERWRITE
259 | (~i & PATTERN_COUNT_MASK); 259 | (~i & PATTERN_COUNT_MASK);
260 for ( ; i < buf_size; i++) 260 for ( ; i < buf_size; i++)
261 buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK); 261 buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
262 } 262 }
263 } 263 }
264 264
265 static unsigned int dmatest_verify(struct dmatest_verify_result *vr, u8 **bufs, 265 static unsigned int dmatest_verify(struct dmatest_verify_result *vr, u8 **bufs,
266 unsigned int start, unsigned int end, unsigned int counter, 266 unsigned int start, unsigned int end, unsigned int counter,
267 u8 pattern, bool is_srcbuf) 267 u8 pattern, bool is_srcbuf)
268 { 268 {
269 unsigned int i; 269 unsigned int i;
270 unsigned int error_count = 0; 270 unsigned int error_count = 0;
271 u8 actual; 271 u8 actual;
272 u8 expected; 272 u8 expected;
273 u8 *buf; 273 u8 *buf;
274 unsigned int counter_orig = counter; 274 unsigned int counter_orig = counter;
275 struct dmatest_verify_buffer *vb; 275 struct dmatest_verify_buffer *vb;
276 276
277 for (; (buf = *bufs); bufs++) { 277 for (; (buf = *bufs); bufs++) {
278 counter = counter_orig; 278 counter = counter_orig;
279 for (i = start; i < end; i++) { 279 for (i = start; i < end; i++) {
280 actual = buf[i]; 280 actual = buf[i];
281 expected = pattern | (~counter & PATTERN_COUNT_MASK); 281 expected = pattern | (~counter & PATTERN_COUNT_MASK);
282 if (actual != expected) { 282 if (actual != expected) {
283 if (error_count < MAX_ERROR_COUNT && vr) { 283 if (error_count < MAX_ERROR_COUNT && vr) {
284 vb = &vr->data[error_count]; 284 vb = &vr->data[error_count];
285 vb->index = i; 285 vb->index = i;
286 vb->expected = expected; 286 vb->expected = expected;
287 vb->actual = actual; 287 vb->actual = actual;
288 } 288 }
289 error_count++; 289 error_count++;
290 } 290 }
291 counter++; 291 counter++;
292 } 292 }
293 } 293 }
294 294
295 if (error_count > MAX_ERROR_COUNT) 295 if (error_count > MAX_ERROR_COUNT)
296 pr_warning("%s: %u errors suppressed\n", 296 pr_warning("%s: %u errors suppressed\n",
297 current->comm, error_count - MAX_ERROR_COUNT); 297 current->comm, error_count - MAX_ERROR_COUNT);
298 298
299 return error_count; 299 return error_count;
300 } 300 }
301 301
302 /* poor man's completion - we want to use wait_event_freezable() on it */ 302 /* poor man's completion - we want to use wait_event_freezable() on it */
303 struct dmatest_done { 303 struct dmatest_done {
304 bool done; 304 bool done;
305 wait_queue_head_t *wait; 305 wait_queue_head_t *wait;
306 }; 306 };
307 307
308 static void dmatest_callback(void *arg) 308 static void dmatest_callback(void *arg)
309 { 309 {
310 struct dmatest_done *done = arg; 310 struct dmatest_done *done = arg;
311 311
312 done->done = true; 312 done->done = true;
313 wake_up_all(done->wait); 313 wake_up_all(done->wait);
314 } 314 }
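The consumer side of this "poor man's completion", as an editorial sketch; dmatest_func() below does the equivalent inline with its own locals (the names here are placeholders).

static bool example_wait_done(wait_queue_head_t *wait,
			      struct dmatest_done *done,
			      unsigned int timeout_ms)
{
	wait_event_freezable_timeout(*wait, done->done,
				     msecs_to_jiffies(timeout_ms));
	return done->done;	/* false means the transfer timed out */
}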
315 315
316 static inline void unmap_src(struct device *dev, dma_addr_t *addr, size_t len, 316 static inline void unmap_src(struct device *dev, dma_addr_t *addr, size_t len,
317 unsigned int count) 317 unsigned int count)
318 { 318 {
319 while (count--) 319 while (count--)
320 dma_unmap_single(dev, addr[count], len, DMA_TO_DEVICE); 320 dma_unmap_single(dev, addr[count], len, DMA_TO_DEVICE);
321 } 321 }
322 322
323 static inline void unmap_dst(struct device *dev, dma_addr_t *addr, size_t len, 323 static inline void unmap_dst(struct device *dev, dma_addr_t *addr, size_t len,
324 unsigned int count) 324 unsigned int count)
325 { 325 {
326 while (count--) 326 while (count--)
327 dma_unmap_single(dev, addr[count], len, DMA_BIDIRECTIONAL); 327 dma_unmap_single(dev, addr[count], len, DMA_BIDIRECTIONAL);
328 } 328 }
329 329
330 static unsigned int min_odd(unsigned int x, unsigned int y) 330 static unsigned int min_odd(unsigned int x, unsigned int y)
331 { 331 {
332 unsigned int val = min(x, y); 332 unsigned int val = min(x, y);
333 333
334 return val % 2 ? val : val - 1; 334 return val % 2 ? val : val - 1;
335 } 335 }
336 336
337 static char *verify_result_get_one(struct dmatest_verify_result *vr, 337 static char *verify_result_get_one(struct dmatest_verify_result *vr,
338 unsigned int i) 338 unsigned int i)
339 { 339 {
340 struct dmatest_verify_buffer *vb = &vr->data[i]; 340 struct dmatest_verify_buffer *vb = &vr->data[i];
341 u8 diff = vb->actual ^ vr->pattern; 341 u8 diff = vb->actual ^ vr->pattern;
342 static char buf[512]; 342 static char buf[512];
343 char *msg; 343 char *msg;
344 344
345 if (vr->is_srcbuf) 345 if (vr->is_srcbuf)
346 msg = "srcbuf overwritten!"; 346 msg = "srcbuf overwritten!";
347 else if ((vr->pattern & PATTERN_COPY) 347 else if ((vr->pattern & PATTERN_COPY)
348 && (diff & (PATTERN_COPY | PATTERN_OVERWRITE))) 348 && (diff & (PATTERN_COPY | PATTERN_OVERWRITE)))
349 msg = "dstbuf not copied!"; 349 msg = "dstbuf not copied!";
350 else if (diff & PATTERN_SRC) 350 else if (diff & PATTERN_SRC)
351 msg = "dstbuf was copied!"; 351 msg = "dstbuf was copied!";
352 else 352 else
353 msg = "dstbuf mismatch!"; 353 msg = "dstbuf mismatch!";
354 354
355 snprintf(buf, sizeof(buf) - 1, "%s [0x%x] Expected %02x, got %02x", msg, 355 snprintf(buf, sizeof(buf) - 1, "%s [0x%x] Expected %02x, got %02x", msg,
356 vb->index, vb->expected, vb->actual); 356 vb->index, vb->expected, vb->actual);
357 357
358 return buf; 358 return buf;
359 } 359 }
360 360
361 static char *thread_result_get(const char *name, 361 static char *thread_result_get(const char *name,
362 struct dmatest_thread_result *tr) 362 struct dmatest_thread_result *tr)
363 { 363 {
364 static const char * const messages[] = { 364 static const char * const messages[] = {
365 [DMATEST_ET_OK] = "No errors", 365 [DMATEST_ET_OK] = "No errors",
366 [DMATEST_ET_MAP_SRC] = "src mapping error", 366 [DMATEST_ET_MAP_SRC] = "src mapping error",
367 [DMATEST_ET_MAP_DST] = "dst mapping error", 367 [DMATEST_ET_MAP_DST] = "dst mapping error",
368 [DMATEST_ET_PREP] = "prep error", 368 [DMATEST_ET_PREP] = "prep error",
369 [DMATEST_ET_SUBMIT] = "submit error", 369 [DMATEST_ET_SUBMIT] = "submit error",
370 [DMATEST_ET_TIMEOUT] = "test timed out", 370 [DMATEST_ET_TIMEOUT] = "test timed out",
371 [DMATEST_ET_DMA_ERROR] = 371 [DMATEST_ET_DMA_ERROR] =
372 "got completion callback (DMA_ERROR)", 372 "got completion callback (DMA_ERROR)",
373 [DMATEST_ET_DMA_IN_PROGRESS] = 373 [DMATEST_ET_DMA_IN_PROGRESS] =
374 "got completion callback (DMA_IN_PROGRESS)", 374 "got completion callback (DMA_IN_PROGRESS)",
375 [DMATEST_ET_VERIFY] = "errors", 375 [DMATEST_ET_VERIFY] = "errors",
376 [DMATEST_ET_VERIFY_BUF] = "verify errors", 376 [DMATEST_ET_VERIFY_BUF] = "verify errors",
377 }; 377 };
378 static char buf[512]; 378 static char buf[512];
379 379
380 snprintf(buf, sizeof(buf) - 1, 380 snprintf(buf, sizeof(buf) - 1,
381 "%s: #%u: %s with src_off=0x%x ""dst_off=0x%x len=0x%x (%lu)", 381 "%s: #%u: %s with src_off=0x%x ""dst_off=0x%x len=0x%x (%lu)",
382 name, tr->n, messages[tr->type], tr->src_off, tr->dst_off, 382 name, tr->n, messages[tr->type], tr->src_off, tr->dst_off,
383 tr->len, tr->data); 383 tr->len, tr->data);
384 384
385 return buf; 385 return buf;
386 } 386 }
387 387
388 static int thread_result_add(struct dmatest_info *info, 388 static int thread_result_add(struct dmatest_info *info,
389 struct dmatest_result *r, enum dmatest_error_type type, 389 struct dmatest_result *r, enum dmatest_error_type type,
390 unsigned int n, unsigned int src_off, unsigned int dst_off, 390 unsigned int n, unsigned int src_off, unsigned int dst_off,
391 unsigned int len, unsigned long data) 391 unsigned int len, unsigned long data)
392 { 392 {
393 struct dmatest_thread_result *tr; 393 struct dmatest_thread_result *tr;
394 394
395 tr = kzalloc(sizeof(*tr), GFP_KERNEL); 395 tr = kzalloc(sizeof(*tr), GFP_KERNEL);
396 if (!tr) 396 if (!tr)
397 return -ENOMEM; 397 return -ENOMEM;
398 398
399 tr->type = type; 399 tr->type = type;
400 tr->n = n; 400 tr->n = n;
401 tr->src_off = src_off; 401 tr->src_off = src_off;
402 tr->dst_off = dst_off; 402 tr->dst_off = dst_off;
403 tr->len = len; 403 tr->len = len;
404 tr->data = data; 404 tr->data = data;
405 405
406 mutex_lock(&info->results_lock); 406 mutex_lock(&info->results_lock);
407 list_add_tail(&tr->node, &r->results); 407 list_add_tail(&tr->node, &r->results);
408 mutex_unlock(&info->results_lock); 408 mutex_unlock(&info->results_lock);
409 409
410 if (tr->type == DMATEST_ET_OK) 410 if (tr->type == DMATEST_ET_OK)
411 pr_debug("%s\n", thread_result_get(r->name, tr)); 411 pr_debug("%s\n", thread_result_get(r->name, tr));
412 else 412 else
413 pr_warn("%s\n", thread_result_get(r->name, tr)); 413 pr_warn("%s\n", thread_result_get(r->name, tr));
414 414
415 return 0; 415 return 0;
416 } 416 }
417 417
418 static unsigned int verify_result_add(struct dmatest_info *info, 418 static unsigned int verify_result_add(struct dmatest_info *info,
419 struct dmatest_result *r, unsigned int n, 419 struct dmatest_result *r, unsigned int n,
420 unsigned int src_off, unsigned int dst_off, unsigned int len, 420 unsigned int src_off, unsigned int dst_off, unsigned int len,
421 u8 **bufs, int whence, unsigned int counter, u8 pattern, 421 u8 **bufs, int whence, unsigned int counter, u8 pattern,
422 bool is_srcbuf) 422 bool is_srcbuf)
423 { 423 {
424 struct dmatest_verify_result *vr; 424 struct dmatest_verify_result *vr;
425 unsigned int error_count; 425 unsigned int error_count;
426 unsigned int buf_off = is_srcbuf ? src_off : dst_off; 426 unsigned int buf_off = is_srcbuf ? src_off : dst_off;
427 unsigned int start, end; 427 unsigned int start, end;
428 428
429 if (whence < 0) { 429 if (whence < 0) {
430 start = 0; 430 start = 0;
431 end = buf_off; 431 end = buf_off;
432 } else if (whence > 0) { 432 } else if (whence > 0) {
433 start = buf_off + len; 433 start = buf_off + len;
434 end = info->params.buf_size; 434 end = info->params.buf_size;
435 } else { 435 } else {
436 start = buf_off; 436 start = buf_off;
437 end = buf_off + len; 437 end = buf_off + len;
438 } 438 }
439 439
440 vr = kmalloc(sizeof(*vr), GFP_KERNEL); 440 vr = kmalloc(sizeof(*vr), GFP_KERNEL);
441 if (!vr) { 441 if (!vr) {
442 pr_warn("dmatest: No memory to store verify result\n"); 442 pr_warn("dmatest: No memory to store verify result\n");
443 return dmatest_verify(NULL, bufs, start, end, counter, pattern, 443 return dmatest_verify(NULL, bufs, start, end, counter, pattern,
444 is_srcbuf); 444 is_srcbuf);
445 } 445 }
446 446
447 vr->pattern = pattern; 447 vr->pattern = pattern;
448 vr->is_srcbuf = is_srcbuf; 448 vr->is_srcbuf = is_srcbuf;
449 449
450 error_count = dmatest_verify(vr, bufs, start, end, counter, pattern, 450 error_count = dmatest_verify(vr, bufs, start, end, counter, pattern,
451 is_srcbuf); 451 is_srcbuf);
452 if (error_count) { 452 if (error_count) {
453 vr->error_count = error_count; 453 vr->error_count = error_count;
454 thread_result_add(info, r, DMATEST_ET_VERIFY_BUF, n, src_off, 454 thread_result_add(info, r, DMATEST_ET_VERIFY_BUF, n, src_off,
455 dst_off, len, (unsigned long)vr); 455 dst_off, len, (unsigned long)vr);
456 return error_count; 456 return error_count;
457 } 457 }
458 458
459 kfree(vr); 459 kfree(vr);
460 return 0; 460 return 0;
461 } 461 }
462 462
463 static void result_free(struct dmatest_info *info, const char *name) 463 static void result_free(struct dmatest_info *info, const char *name)
464 { 464 {
465 struct dmatest_result *r, *_r; 465 struct dmatest_result *r, *_r;
466 466
467 mutex_lock(&info->results_lock); 467 mutex_lock(&info->results_lock);
468 list_for_each_entry_safe(r, _r, &info->results, node) { 468 list_for_each_entry_safe(r, _r, &info->results, node) {
469 struct dmatest_thread_result *tr, *_tr; 469 struct dmatest_thread_result *tr, *_tr;
470 470
471 if (name && strcmp(r->name, name)) 471 if (name && strcmp(r->name, name))
472 continue; 472 continue;
473 473
474 list_for_each_entry_safe(tr, _tr, &r->results, node) { 474 list_for_each_entry_safe(tr, _tr, &r->results, node) {
475 if (tr->type == DMATEST_ET_VERIFY_BUF) 475 if (tr->type == DMATEST_ET_VERIFY_BUF)
476 kfree(tr->vr); 476 kfree(tr->vr);
477 list_del(&tr->node); 477 list_del(&tr->node);
478 kfree(tr); 478 kfree(tr);
479 } 479 }
480 480
481 kfree(r->name); 481 kfree(r->name);
482 list_del(&r->node); 482 list_del(&r->node);
483 kfree(r); 483 kfree(r);
484 } 484 }
485 485
486 mutex_unlock(&info->results_lock); 486 mutex_unlock(&info->results_lock);
487 } 487 }
488 488
489 static struct dmatest_result *result_init(struct dmatest_info *info, 489 static struct dmatest_result *result_init(struct dmatest_info *info,
490 const char *name) 490 const char *name)
491 { 491 {
492 struct dmatest_result *r; 492 struct dmatest_result *r;
493 493
494 r = kzalloc(sizeof(*r), GFP_KERNEL); 494 r = kzalloc(sizeof(*r), GFP_KERNEL);
495 if (r) { 495 if (r) {
496 r->name = kstrdup(name, GFP_KERNEL); 496 r->name = kstrdup(name, GFP_KERNEL);
497 INIT_LIST_HEAD(&r->results); 497 INIT_LIST_HEAD(&r->results);
498 mutex_lock(&info->results_lock); 498 mutex_lock(&info->results_lock);
499 list_add_tail(&r->node, &info->results); 499 list_add_tail(&r->node, &info->results);
500 mutex_unlock(&info->results_lock); 500 mutex_unlock(&info->results_lock);
501 } 501 }
502 return r; 502 return r;
503 } 503 }
504 504
505 /* 505 /*
506 * This function repeatedly tests DMA transfers of various lengths and 506 * This function repeatedly tests DMA transfers of various lengths and
507 * offsets for a given operation type until it is told to exit by 507 * offsets for a given operation type until it is told to exit by
508 * kthread_stop(). There may be multiple threads running this function 508 * kthread_stop(). There may be multiple threads running this function
509 * in parallel for a single channel, and there may be multiple channels 509 * in parallel for a single channel, and there may be multiple channels
510 * being tested in parallel. 510 * being tested in parallel.
511 * 511 *
512 * Before each test, the source and destination buffer is initialized 512 * Before each test, the source and destination buffer is initialized
513 * with a known pattern. This pattern is different depending on 513 * with a known pattern. This pattern is different depending on
514 * whether it's in an area which is supposed to be copied or 514 * whether it's in an area which is supposed to be copied or
515 * overwritten, and different in the source and destination buffers. 515 * overwritten, and different in the source and destination buffers.
516 * So if the DMA engine doesn't copy exactly what we tell it to copy, 516 * So if the DMA engine doesn't copy exactly what we tell it to copy,
517 * we'll notice. 517 * we'll notice.
518 */ 518 */
519 static int dmatest_func(void *data) 519 static int dmatest_func(void *data)
520 { 520 {
521 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_wait); 521 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_wait);
522 struct dmatest_thread *thread = data; 522 struct dmatest_thread *thread = data;
523 struct dmatest_done done = { .wait = &done_wait }; 523 struct dmatest_done done = { .wait = &done_wait };
524 struct dmatest_info *info; 524 struct dmatest_info *info;
525 struct dmatest_params *params; 525 struct dmatest_params *params;
526 struct dma_chan *chan; 526 struct dma_chan *chan;
527 struct dma_device *dev; 527 struct dma_device *dev;
528 const char *thread_name; 528 const char *thread_name;
529 unsigned int src_off, dst_off, len; 529 unsigned int src_off, dst_off, len;
530 unsigned int error_count; 530 unsigned int error_count;
531 unsigned int failed_tests = 0; 531 unsigned int failed_tests = 0;
532 unsigned int total_tests = 0; 532 unsigned int total_tests = 0;
533 dma_cookie_t cookie; 533 dma_cookie_t cookie;
534 enum dma_status status; 534 enum dma_status status;
535 enum dma_ctrl_flags flags; 535 enum dma_ctrl_flags flags;
536 u8 *pq_coefs = NULL; 536 u8 *pq_coefs = NULL;
537 int ret; 537 int ret;
538 int src_cnt; 538 int src_cnt;
539 int dst_cnt; 539 int dst_cnt;
540 int i; 540 int i;
541 struct dmatest_result *result; 541 struct dmatest_result *result;
542 542
543 thread_name = current->comm; 543 thread_name = current->comm;
544 set_freezable(); 544 set_freezable();
545 545
546 ret = -ENOMEM; 546 ret = -ENOMEM;
547 547
548 smp_rmb(); 548 smp_rmb();
549 info = thread->info; 549 info = thread->info;
550 params = &info->params; 550 params = &info->params;
551 chan = thread->chan; 551 chan = thread->chan;
552 dev = chan->device; 552 dev = chan->device;
553 if (thread->type == DMA_MEMCPY) 553 if (thread->type == DMA_MEMCPY)
554 src_cnt = dst_cnt = 1; 554 src_cnt = dst_cnt = 1;
555 else if (thread->type == DMA_XOR) { 555 else if (thread->type == DMA_XOR) {
556 /* force odd to ensure dst = src */ 556 /* force odd to ensure dst = src */
557 src_cnt = min_odd(params->xor_sources | 1, dev->max_xor); 557 src_cnt = min_odd(params->xor_sources | 1, dev->max_xor);
558 dst_cnt = 1; 558 dst_cnt = 1;
559 } else if (thread->type == DMA_PQ) { 559 } else if (thread->type == DMA_PQ) {
560 /* force odd to ensure dst = src */ 560 /* force odd to ensure dst = src */
561 src_cnt = min_odd(params->pq_sources | 1, dma_maxpq(dev, 0)); 561 src_cnt = min_odd(params->pq_sources | 1, dma_maxpq(dev, 0));
562 dst_cnt = 2; 562 dst_cnt = 2;
563 563
564 pq_coefs = kmalloc(params->pq_sources+1, GFP_KERNEL); 564 pq_coefs = kmalloc(params->pq_sources+1, GFP_KERNEL);
565 if (!pq_coefs) 565 if (!pq_coefs)
566 goto err_thread_type; 566 goto err_thread_type;
567 567
568 for (i = 0; i < src_cnt; i++) 568 for (i = 0; i < src_cnt; i++)
569 pq_coefs[i] = 1; 569 pq_coefs[i] = 1;
570 } else 570 } else
571 goto err_thread_type; 571 goto err_thread_type;
572 572
573 result = result_init(info, thread_name); 573 result = result_init(info, thread_name);
574 if (!result) 574 if (!result)
575 goto err_srcs; 575 goto err_srcs;
576 576
577 thread->srcs = kcalloc(src_cnt+1, sizeof(u8 *), GFP_KERNEL); 577 thread->srcs = kcalloc(src_cnt+1, sizeof(u8 *), GFP_KERNEL);
578 if (!thread->srcs) 578 if (!thread->srcs)
579 goto err_srcs; 579 goto err_srcs;
580 for (i = 0; i < src_cnt; i++) { 580 for (i = 0; i < src_cnt; i++) {
581 thread->srcs[i] = kmalloc(params->buf_size, GFP_KERNEL); 581 thread->srcs[i] = kmalloc(params->buf_size, GFP_KERNEL);
582 if (!thread->srcs[i]) 582 if (!thread->srcs[i])
583 goto err_srcbuf; 583 goto err_srcbuf;
584 } 584 }
585 thread->srcs[i] = NULL; 585 thread->srcs[i] = NULL;
586 586
587 thread->dsts = kcalloc(dst_cnt+1, sizeof(u8 *), GFP_KERNEL); 587 thread->dsts = kcalloc(dst_cnt+1, sizeof(u8 *), GFP_KERNEL);
588 if (!thread->dsts) 588 if (!thread->dsts)
589 goto err_dsts; 589 goto err_dsts;
590 for (i = 0; i < dst_cnt; i++) { 590 for (i = 0; i < dst_cnt; i++) {
591 thread->dsts[i] = kmalloc(params->buf_size, GFP_KERNEL); 591 thread->dsts[i] = kmalloc(params->buf_size, GFP_KERNEL);
592 if (!thread->dsts[i]) 592 if (!thread->dsts[i])
593 goto err_dstbuf; 593 goto err_dstbuf;
594 } 594 }
595 thread->dsts[i] = NULL; 595 thread->dsts[i] = NULL;
596 596
597 set_user_nice(current, 10); 597 set_user_nice(current, 10);
598 598
599 /* 599 /*
600 * src buffers are freed by the DMAEngine code with dma_unmap_single() 600 * src buffers are freed by the DMAEngine code with dma_unmap_single()
601 * dst buffers are freed by ourselves below 601 * dst buffers are freed by ourselves below
602 */ 602 */
603 flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT 603 flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT
604 | DMA_COMPL_SKIP_DEST_UNMAP | DMA_COMPL_SRC_UNMAP_SINGLE; 604 | DMA_COMPL_SKIP_DEST_UNMAP | DMA_COMPL_SRC_UNMAP_SINGLE;
605 605
606 while (!kthread_should_stop() 606 while (!kthread_should_stop()
607 && !(params->iterations && total_tests >= params->iterations)) { 607 && !(params->iterations && total_tests >= params->iterations)) {
608 struct dma_async_tx_descriptor *tx = NULL; 608 struct dma_async_tx_descriptor *tx = NULL;
609 dma_addr_t dma_srcs[src_cnt]; 609 dma_addr_t dma_srcs[src_cnt];
610 dma_addr_t dma_dsts[dst_cnt]; 610 dma_addr_t dma_dsts[dst_cnt];
611 u8 align = 0; 611 u8 align = 0;
612 612
613 total_tests++; 613 total_tests++;
614 614
615 /* honor alignment restrictions */ 615 /* honor alignment restrictions */
616 if (thread->type == DMA_MEMCPY) 616 if (thread->type == DMA_MEMCPY)
617 align = dev->copy_align; 617 align = dev->copy_align;
618 else if (thread->type == DMA_XOR) 618 else if (thread->type == DMA_XOR)
619 align = dev->xor_align; 619 align = dev->xor_align;
620 else if (thread->type == DMA_PQ) 620 else if (thread->type == DMA_PQ)
621 align = dev->pq_align; 621 align = dev->pq_align;
622 622
623 if (1 << align > params->buf_size) { 623 if (1 << align > params->buf_size) {
624 pr_err("%u-byte buffer too small for %d-byte alignment\n", 624 pr_err("%u-byte buffer too small for %d-byte alignment\n",
625 params->buf_size, 1 << align); 625 params->buf_size, 1 << align);
626 break; 626 break;
627 } 627 }
628 628
629 len = dmatest_random() % params->buf_size + 1; 629 len = dmatest_random() % params->buf_size + 1;
630 len = (len >> align) << align; 630 len = (len >> align) << align;
631 if (!len) 631 if (!len)
632 len = 1 << align; 632 len = 1 << align;
633 src_off = dmatest_random() % (params->buf_size - len + 1); 633 src_off = dmatest_random() % (params->buf_size - len + 1);
634 dst_off = dmatest_random() % (params->buf_size - len + 1); 634 dst_off = dmatest_random() % (params->buf_size - len + 1);
635 635
636 src_off = (src_off >> align) << align; 636 src_off = (src_off >> align) << align;
637 dst_off = (dst_off >> align) << align; 637 dst_off = (dst_off >> align) << align;
638 638
639 dmatest_init_srcs(thread->srcs, src_off, len, params->buf_size); 639 dmatest_init_srcs(thread->srcs, src_off, len, params->buf_size);
640 dmatest_init_dsts(thread->dsts, dst_off, len, params->buf_size); 640 dmatest_init_dsts(thread->dsts, dst_off, len, params->buf_size);
641 641
642 for (i = 0; i < src_cnt; i++) { 642 for (i = 0; i < src_cnt; i++) {
643 u8 *buf = thread->srcs[i] + src_off; 643 u8 *buf = thread->srcs[i] + src_off;
644 644
645 dma_srcs[i] = dma_map_single(dev->dev, buf, len, 645 dma_srcs[i] = dma_map_single(dev->dev, buf, len,
646 DMA_TO_DEVICE); 646 DMA_TO_DEVICE);
647 ret = dma_mapping_error(dev->dev, dma_srcs[i]); 647 ret = dma_mapping_error(dev->dev, dma_srcs[i]);
648 if (ret) { 648 if (ret) {
649 unmap_src(dev->dev, dma_srcs, len, i); 649 unmap_src(dev->dev, dma_srcs, len, i);
650 thread_result_add(info, result, 650 thread_result_add(info, result,
651 DMATEST_ET_MAP_SRC, 651 DMATEST_ET_MAP_SRC,
652 total_tests, src_off, dst_off, 652 total_tests, src_off, dst_off,
653 len, ret); 653 len, ret);
654 failed_tests++; 654 failed_tests++;
655 continue; 655 continue;
656 } 656 }
657 } 657 }
658 /* map with DMA_BIDIRECTIONAL to force writeback/invalidate */ 658 /* map with DMA_BIDIRECTIONAL to force writeback/invalidate */
659 for (i = 0; i < dst_cnt; i++) { 659 for (i = 0; i < dst_cnt; i++) {
660 dma_dsts[i] = dma_map_single(dev->dev, thread->dsts[i], 660 dma_dsts[i] = dma_map_single(dev->dev, thread->dsts[i],
661 params->buf_size, 661 params->buf_size,
662 DMA_BIDIRECTIONAL); 662 DMA_BIDIRECTIONAL);
663 ret = dma_mapping_error(dev->dev, dma_dsts[i]); 663 ret = dma_mapping_error(dev->dev, dma_dsts[i]);
664 if (ret) { 664 if (ret) {
665 unmap_src(dev->dev, dma_srcs, len, src_cnt); 665 unmap_src(dev->dev, dma_srcs, len, src_cnt);
666 unmap_dst(dev->dev, dma_dsts, params->buf_size, 666 unmap_dst(dev->dev, dma_dsts, params->buf_size,
667 i); 667 i);
668 thread_result_add(info, result, 668 thread_result_add(info, result,
669 DMATEST_ET_MAP_DST, 669 DMATEST_ET_MAP_DST,
670 total_tests, src_off, dst_off, 670 total_tests, src_off, dst_off,
671 len, ret); 671 len, ret);
672 failed_tests++; 672 failed_tests++;
673 continue; 673 continue;
674 } 674 }
675 } 675 }
676 676
677 if (thread->type == DMA_MEMCPY) 677 if (thread->type == DMA_MEMCPY)
678 tx = dev->device_prep_dma_memcpy(chan, 678 tx = dev->device_prep_dma_memcpy(chan,
679 dma_dsts[0] + dst_off, 679 dma_dsts[0] + dst_off,
680 dma_srcs[0], len, 680 dma_srcs[0], len,
681 flags); 681 flags);
682 else if (thread->type == DMA_XOR) 682 else if (thread->type == DMA_XOR)
683 tx = dev->device_prep_dma_xor(chan, 683 tx = dev->device_prep_dma_xor(chan,
684 dma_dsts[0] + dst_off, 684 dma_dsts[0] + dst_off,
685 dma_srcs, src_cnt, 685 dma_srcs, src_cnt,
686 len, flags); 686 len, flags);
687 else if (thread->type == DMA_PQ) { 687 else if (thread->type == DMA_PQ) {
688 dma_addr_t dma_pq[dst_cnt]; 688 dma_addr_t dma_pq[dst_cnt];
689 689
690 for (i = 0; i < dst_cnt; i++) 690 for (i = 0; i < dst_cnt; i++)
691 dma_pq[i] = dma_dsts[i] + dst_off; 691 dma_pq[i] = dma_dsts[i] + dst_off;
692 tx = dev->device_prep_dma_pq(chan, dma_pq, dma_srcs, 692 tx = dev->device_prep_dma_pq(chan, dma_pq, dma_srcs,
693 src_cnt, pq_coefs, 693 src_cnt, pq_coefs,
694 len, flags); 694 len, flags);
695 } 695 }
696 696
697 if (!tx) { 697 if (!tx) {
698 unmap_src(dev->dev, dma_srcs, len, src_cnt); 698 unmap_src(dev->dev, dma_srcs, len, src_cnt);
699 unmap_dst(dev->dev, dma_dsts, params->buf_size, 699 unmap_dst(dev->dev, dma_dsts, params->buf_size,
700 dst_cnt); 700 dst_cnt);
701 thread_result_add(info, result, DMATEST_ET_PREP, 701 thread_result_add(info, result, DMATEST_ET_PREP,
702 total_tests, src_off, dst_off, 702 total_tests, src_off, dst_off,
703 len, 0); 703 len, 0);
704 msleep(100); 704 msleep(100);
705 failed_tests++; 705 failed_tests++;
706 continue; 706 continue;
707 } 707 }
708 708
709 done.done = false; 709 done.done = false;
710 tx->callback = dmatest_callback; 710 tx->callback = dmatest_callback;
711 tx->callback_param = &done; 711 tx->callback_param = &done;
712 cookie = tx->tx_submit(tx); 712 cookie = tx->tx_submit(tx);
713 713
714 if (dma_submit_error(cookie)) { 714 if (dma_submit_error(cookie)) {
715 thread_result_add(info, result, DMATEST_ET_SUBMIT, 715 thread_result_add(info, result, DMATEST_ET_SUBMIT,
716 total_tests, src_off, dst_off, 716 total_tests, src_off, dst_off,
717 len, cookie); 717 len, cookie);
718 msleep(100); 718 msleep(100);
719 failed_tests++; 719 failed_tests++;
720 continue; 720 continue;
721 } 721 }
722 dma_async_issue_pending(chan); 722 dma_async_issue_pending(chan);
723 723
724 wait_event_freezable_timeout(done_wait, done.done, 724 wait_event_freezable_timeout(done_wait, done.done,
725 msecs_to_jiffies(params->timeout)); 725 msecs_to_jiffies(params->timeout));
726 726
727 status = dma_async_is_tx_complete(chan, cookie, NULL, NULL); 727 status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
728 728
729 if (!done.done) { 729 if (!done.done) {
730 /* 730 /*
731 * We're leaving the timed out dma operation with 731 * We're leaving the timed out dma operation with
732 * dangling pointer to done_wait. To make this 732 * dangling pointer to done_wait. To make this
733 * correct, we'll need to allocate wait_done for 733 * correct, we'll need to allocate wait_done for
734 * each test iteration and perform "who's gonna 734 * each test iteration and perform "who's gonna
735 * free it this time?" dancing. For now, just 735 * free it this time?" dancing. For now, just
736 * leave it dangling. 736 * leave it dangling.
737 */ 737 */
738 thread_result_add(info, result, DMATEST_ET_TIMEOUT, 738 thread_result_add(info, result, DMATEST_ET_TIMEOUT,
739 total_tests, src_off, dst_off, 739 total_tests, src_off, dst_off,
740 len, 0); 740 len, 0);
741 failed_tests++; 741 failed_tests++;
742 continue; 742 continue;
743 } else if (status != DMA_SUCCESS) { 743 } else if (status != DMA_COMPLETE) {
744 enum dmatest_error_type type = (status == DMA_ERROR) ? 744 enum dmatest_error_type type = (status == DMA_ERROR) ?
745 DMATEST_ET_DMA_ERROR : DMATEST_ET_DMA_IN_PROGRESS; 745 DMATEST_ET_DMA_ERROR : DMATEST_ET_DMA_IN_PROGRESS;
746 thread_result_add(info, result, type, 746 thread_result_add(info, result, type,
747 total_tests, src_off, dst_off, 747 total_tests, src_off, dst_off,
748 len, status); 748 len, status);
749 failed_tests++; 749 failed_tests++;
750 continue; 750 continue;
751 } 751 }
752 752
753 /* Unmap by myself (see DMA_COMPL_SKIP_DEST_UNMAP above) */ 753 /* Unmap by myself (see DMA_COMPL_SKIP_DEST_UNMAP above) */
754 unmap_dst(dev->dev, dma_dsts, params->buf_size, dst_cnt); 754 unmap_dst(dev->dev, dma_dsts, params->buf_size, dst_cnt);
755 755
756 error_count = 0; 756 error_count = 0;
757 757
758 pr_debug("%s: verifying source buffer...\n", thread_name); 758 pr_debug("%s: verifying source buffer...\n", thread_name);
759 error_count += verify_result_add(info, result, total_tests, 759 error_count += verify_result_add(info, result, total_tests,
760 src_off, dst_off, len, thread->srcs, -1, 760 src_off, dst_off, len, thread->srcs, -1,
761 0, PATTERN_SRC, true); 761 0, PATTERN_SRC, true);
762 error_count += verify_result_add(info, result, total_tests, 762 error_count += verify_result_add(info, result, total_tests,
763 src_off, dst_off, len, thread->srcs, 0, 763 src_off, dst_off, len, thread->srcs, 0,
764 src_off, PATTERN_SRC | PATTERN_COPY, true); 764 src_off, PATTERN_SRC | PATTERN_COPY, true);
765 error_count += verify_result_add(info, result, total_tests, 765 error_count += verify_result_add(info, result, total_tests,
766 src_off, dst_off, len, thread->srcs, 1, 766 src_off, dst_off, len, thread->srcs, 1,
767 src_off + len, PATTERN_SRC, true); 767 src_off + len, PATTERN_SRC, true);
768 768
769 pr_debug("%s: verifying dest buffer...\n", thread_name); 769 pr_debug("%s: verifying dest buffer...\n", thread_name);
770 error_count += verify_result_add(info, result, total_tests, 770 error_count += verify_result_add(info, result, total_tests,
771 src_off, dst_off, len, thread->dsts, -1, 771 src_off, dst_off, len, thread->dsts, -1,
772 0, PATTERN_DST, false); 772 0, PATTERN_DST, false);
773 error_count += verify_result_add(info, result, total_tests, 773 error_count += verify_result_add(info, result, total_tests,
774 src_off, dst_off, len, thread->dsts, 0, 774 src_off, dst_off, len, thread->dsts, 0,
775 src_off, PATTERN_SRC | PATTERN_COPY, false); 775 src_off, PATTERN_SRC | PATTERN_COPY, false);
776 error_count += verify_result_add(info, result, total_tests, 776 error_count += verify_result_add(info, result, total_tests,
777 src_off, dst_off, len, thread->dsts, 1, 777 src_off, dst_off, len, thread->dsts, 1,
778 dst_off + len, PATTERN_DST, false); 778 dst_off + len, PATTERN_DST, false);
779 779
780 if (error_count) { 780 if (error_count) {
781 thread_result_add(info, result, DMATEST_ET_VERIFY, 781 thread_result_add(info, result, DMATEST_ET_VERIFY,
782 total_tests, src_off, dst_off, 782 total_tests, src_off, dst_off,
783 len, error_count); 783 len, error_count);
784 failed_tests++; 784 failed_tests++;
785 } else { 785 } else {
786 thread_result_add(info, result, DMATEST_ET_OK, 786 thread_result_add(info, result, DMATEST_ET_OK,
787 total_tests, src_off, dst_off, 787 total_tests, src_off, dst_off,
788 len, 0); 788 len, 0);
789 } 789 }
790 } 790 }
791 791
792 ret = 0; 792 ret = 0;
793 for (i = 0; thread->dsts[i]; i++) 793 for (i = 0; thread->dsts[i]; i++)
794 kfree(thread->dsts[i]); 794 kfree(thread->dsts[i]);
795 err_dstbuf: 795 err_dstbuf:
796 kfree(thread->dsts); 796 kfree(thread->dsts);
797 err_dsts: 797 err_dsts:
798 for (i = 0; thread->srcs[i]; i++) 798 for (i = 0; thread->srcs[i]; i++)
799 kfree(thread->srcs[i]); 799 kfree(thread->srcs[i]);
800 err_srcbuf: 800 err_srcbuf:
801 kfree(thread->srcs); 801 kfree(thread->srcs);
802 err_srcs: 802 err_srcs:
803 kfree(pq_coefs); 803 kfree(pq_coefs);
804 err_thread_type: 804 err_thread_type:
805 pr_notice("%s: terminating after %u tests, %u failures (status %d)\n", 805 pr_notice("%s: terminating after %u tests, %u failures (status %d)\n",
806 thread_name, total_tests, failed_tests, ret); 806 thread_name, total_tests, failed_tests, ret);
807 807
808 /* terminate all transfers on specified channels */ 808 /* terminate all transfers on specified channels */
809 if (ret) 809 if (ret)
810 dmaengine_terminate_all(chan); 810 dmaengine_terminate_all(chan);
811 811
812 thread->done = true; 812 thread->done = true;
813 813
814 if (params->iterations > 0) 814 if (params->iterations > 0)
815 while (!kthread_should_stop()) { 815 while (!kthread_should_stop()) {
816 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait_dmatest_exit); 816 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait_dmatest_exit);
817 interruptible_sleep_on(&wait_dmatest_exit); 817 interruptible_sleep_on(&wait_dmatest_exit);
818 } 818 }
819 819
820 return ret; 820 return ret;
821 } 821 }
822 822
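/*
 * Illustrative sketch, not part of this driver: the bare submit/complete
 * sequence that dmatest_func() exercises above, with buffer setup, mapping,
 * the completion callback and error bookkeeping stripped out.  The function
 * name and the pre-mapped "dma_dst"/"dma_src" arguments are hypothetical;
 * only the dmaengine calls themselves are taken from the code above.
 */
static int __maybe_unused dmatest_submit_one_sketch(struct dma_chan *chan,
						    dma_addr_t dma_dst,
						    dma_addr_t dma_src,
						    size_t len,
						    unsigned long flags)
{
	struct dma_device *dev = chan->device;
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	/* Build the descriptor; a NULL return is the DMATEST_ET_PREP case. */
	tx = dev->device_prep_dma_memcpy(chan, dma_dst, dma_src, len, flags);
	if (!tx)
		return -ENOMEM;

	/* Queue the descriptor and push the queue to hardware. */
	cookie = tx->tx_submit(tx);
	if (dma_submit_error(cookie))
		return -EIO;
	dma_async_issue_pending(chan);

	/*
	 * After waiting for the callback (or timing out), the final status is
	 * polled; DMA_COMPLETE is the name this series uses for the old
	 * DMA_SUCCESS value.
	 */
	if (dma_async_is_tx_complete(chan, cookie, NULL, NULL) != DMA_COMPLETE)
		return -EIO;

	return 0;
}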
823 static void dmatest_cleanup_channel(struct dmatest_chan *dtc) 823 static void dmatest_cleanup_channel(struct dmatest_chan *dtc)
824 { 824 {
825 struct dmatest_thread *thread; 825 struct dmatest_thread *thread;
826 struct dmatest_thread *_thread; 826 struct dmatest_thread *_thread;
827 int ret; 827 int ret;
828 828
829 list_for_each_entry_safe(thread, _thread, &dtc->threads, node) { 829 list_for_each_entry_safe(thread, _thread, &dtc->threads, node) {
830 ret = kthread_stop(thread->task); 830 ret = kthread_stop(thread->task);
831 pr_debug("dmatest: thread %s exited with status %d\n", 831 pr_debug("dmatest: thread %s exited with status %d\n",
832 thread->task->comm, ret); 832 thread->task->comm, ret);
833 list_del(&thread->node); 833 list_del(&thread->node);
834 kfree(thread); 834 kfree(thread);
835 } 835 }
836 836
837 /* terminate all transfers on specified channels */ 837 /* terminate all transfers on specified channels */
838 dmaengine_terminate_all(dtc->chan); 838 dmaengine_terminate_all(dtc->chan);
839 839
840 kfree(dtc); 840 kfree(dtc);
841 } 841 }
842 842
843 static int dmatest_add_threads(struct dmatest_info *info, 843 static int dmatest_add_threads(struct dmatest_info *info,
844 struct dmatest_chan *dtc, enum dma_transaction_type type) 844 struct dmatest_chan *dtc, enum dma_transaction_type type)
845 { 845 {
846 struct dmatest_params *params = &info->params; 846 struct dmatest_params *params = &info->params;
847 struct dmatest_thread *thread; 847 struct dmatest_thread *thread;
848 struct dma_chan *chan = dtc->chan; 848 struct dma_chan *chan = dtc->chan;
849 char *op; 849 char *op;
850 unsigned int i; 850 unsigned int i;
851 851
852 if (type == DMA_MEMCPY) 852 if (type == DMA_MEMCPY)
853 op = "copy"; 853 op = "copy";
854 else if (type == DMA_XOR) 854 else if (type == DMA_XOR)
855 op = "xor"; 855 op = "xor";
856 else if (type == DMA_PQ) 856 else if (type == DMA_PQ)
857 op = "pq"; 857 op = "pq";
858 else 858 else
859 return -EINVAL; 859 return -EINVAL;
860 860
861 for (i = 0; i < params->threads_per_chan; i++) { 861 for (i = 0; i < params->threads_per_chan; i++) {
862 thread = kzalloc(sizeof(struct dmatest_thread), GFP_KERNEL); 862 thread = kzalloc(sizeof(struct dmatest_thread), GFP_KERNEL);
863 if (!thread) { 863 if (!thread) {
864 pr_warning("dmatest: No memory for %s-%s%u\n", 864 pr_warning("dmatest: No memory for %s-%s%u\n",
865 dma_chan_name(chan), op, i); 865 dma_chan_name(chan), op, i);
866 866
867 break; 867 break;
868 } 868 }
869 thread->info = info; 869 thread->info = info;
870 thread->chan = dtc->chan; 870 thread->chan = dtc->chan;
871 thread->type = type; 871 thread->type = type;
872 smp_wmb(); 872 smp_wmb();
873 thread->task = kthread_run(dmatest_func, thread, "%s-%s%u", 873 thread->task = kthread_run(dmatest_func, thread, "%s-%s%u",
874 dma_chan_name(chan), op, i); 874 dma_chan_name(chan), op, i);
875 if (IS_ERR(thread->task)) { 875 if (IS_ERR(thread->task)) {
876 pr_warning("dmatest: Failed to run thread %s-%s%u\n", 876 pr_warning("dmatest: Failed to run thread %s-%s%u\n",
877 dma_chan_name(chan), op, i); 877 dma_chan_name(chan), op, i);
878 kfree(thread); 878 kfree(thread);
879 break; 879 break;
880 } 880 }
881 881
882 /* srcbuf and dstbuf are allocated by the thread itself */ 882 /* srcbuf and dstbuf are allocated by the thread itself */
883 883
884 list_add_tail(&thread->node, &dtc->threads); 884 list_add_tail(&thread->node, &dtc->threads);
885 } 885 }
886 886
887 return i; 887 return i;
888 } 888 }
889 889
890 static int dmatest_add_channel(struct dmatest_info *info, 890 static int dmatest_add_channel(struct dmatest_info *info,
891 struct dma_chan *chan) 891 struct dma_chan *chan)
892 { 892 {
893 struct dmatest_chan *dtc; 893 struct dmatest_chan *dtc;
894 struct dma_device *dma_dev = chan->device; 894 struct dma_device *dma_dev = chan->device;
895 unsigned int thread_count = 0; 895 unsigned int thread_count = 0;
896 int cnt; 896 int cnt;
897 897
898 dtc = kmalloc(sizeof(struct dmatest_chan), GFP_KERNEL); 898 dtc = kmalloc(sizeof(struct dmatest_chan), GFP_KERNEL);
899 if (!dtc) { 899 if (!dtc) {
900 pr_warning("dmatest: No memory for %s\n", dma_chan_name(chan)); 900 pr_warning("dmatest: No memory for %s\n", dma_chan_name(chan));
901 return -ENOMEM; 901 return -ENOMEM;
902 } 902 }
903 903
904 dtc->chan = chan; 904 dtc->chan = chan;
905 INIT_LIST_HEAD(&dtc->threads); 905 INIT_LIST_HEAD(&dtc->threads);
906 906
907 if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) { 907 if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
908 cnt = dmatest_add_threads(info, dtc, DMA_MEMCPY); 908 cnt = dmatest_add_threads(info, dtc, DMA_MEMCPY);
909 thread_count += cnt > 0 ? cnt : 0; 909 thread_count += cnt > 0 ? cnt : 0;
910 } 910 }
911 if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) { 911 if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
912 cnt = dmatest_add_threads(info, dtc, DMA_XOR); 912 cnt = dmatest_add_threads(info, dtc, DMA_XOR);
913 thread_count += cnt > 0 ? cnt : 0; 913 thread_count += cnt > 0 ? cnt : 0;
914 } 914 }
915 if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) { 915 if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
916 cnt = dmatest_add_threads(info, dtc, DMA_PQ); 916 cnt = dmatest_add_threads(info, dtc, DMA_PQ);
917 thread_count += cnt > 0 ? cnt : 0; 917 thread_count += cnt > 0 ? cnt : 0;
918 } 918 }
919 919
920 pr_info("dmatest: Started %u threads using %s\n", 920 pr_info("dmatest: Started %u threads using %s\n",
921 thread_count, dma_chan_name(chan)); 921 thread_count, dma_chan_name(chan));
922 922
923 list_add_tail(&dtc->node, &info->channels); 923 list_add_tail(&dtc->node, &info->channels);
924 info->nr_channels++; 924 info->nr_channels++;
925 925
926 return 0; 926 return 0;
927 } 927 }
928 928
929 static bool filter(struct dma_chan *chan, void *param) 929 static bool filter(struct dma_chan *chan, void *param)
930 { 930 {
931 struct dmatest_params *params = param; 931 struct dmatest_params *params = param;
932 932
933 if (!dmatest_match_channel(params, chan) || 933 if (!dmatest_match_channel(params, chan) ||
934 !dmatest_match_device(params, chan->device)) 934 !dmatest_match_device(params, chan->device))
935 return false; 935 return false;
936 else 936 else
937 return true; 937 return true;
938 } 938 }
939 939
940 static int __run_threaded_test(struct dmatest_info *info) 940 static int __run_threaded_test(struct dmatest_info *info)
941 { 941 {
942 dma_cap_mask_t mask; 942 dma_cap_mask_t mask;
943 struct dma_chan *chan; 943 struct dma_chan *chan;
944 struct dmatest_params *params = &info->params; 944 struct dmatest_params *params = &info->params;
945 int err = 0; 945 int err = 0;
946 946
947 dma_cap_zero(mask); 947 dma_cap_zero(mask);
948 dma_cap_set(DMA_MEMCPY, mask); 948 dma_cap_set(DMA_MEMCPY, mask);
949 for (;;) { 949 for (;;) {
950 chan = dma_request_channel(mask, filter, params); 950 chan = dma_request_channel(mask, filter, params);
951 if (chan) { 951 if (chan) {
952 err = dmatest_add_channel(info, chan); 952 err = dmatest_add_channel(info, chan);
953 if (err) { 953 if (err) {
954 dma_release_channel(chan); 954 dma_release_channel(chan);
955 break; /* add_channel failed, punt */ 955 break; /* add_channel failed, punt */
956 } 956 }
957 } else 957 } else
958 break; /* no more channels available */ 958 break; /* no more channels available */
959 if (params->max_channels && 959 if (params->max_channels &&
960 info->nr_channels >= params->max_channels) 960 info->nr_channels >= params->max_channels)
961 break; /* we have all we need */ 961 break; /* we have all we need */
962 } 962 }
963 return err; 963 return err;
964 } 964 }
965 965
966 #ifndef MODULE 966 #ifndef MODULE
967 static int run_threaded_test(struct dmatest_info *info) 967 static int run_threaded_test(struct dmatest_info *info)
968 { 968 {
969 int ret; 969 int ret;
970 970
971 mutex_lock(&info->lock); 971 mutex_lock(&info->lock);
972 ret = __run_threaded_test(info); 972 ret = __run_threaded_test(info);
973 mutex_unlock(&info->lock); 973 mutex_unlock(&info->lock);
974 return ret; 974 return ret;
975 } 975 }
976 #endif 976 #endif
977 977
978 static void __stop_threaded_test(struct dmatest_info *info) 978 static void __stop_threaded_test(struct dmatest_info *info)
979 { 979 {
980 struct dmatest_chan *dtc, *_dtc; 980 struct dmatest_chan *dtc, *_dtc;
981 struct dma_chan *chan; 981 struct dma_chan *chan;
982 982
983 list_for_each_entry_safe(dtc, _dtc, &info->channels, node) { 983 list_for_each_entry_safe(dtc, _dtc, &info->channels, node) {
984 list_del(&dtc->node); 984 list_del(&dtc->node);
985 chan = dtc->chan; 985 chan = dtc->chan;
986 dmatest_cleanup_channel(dtc); 986 dmatest_cleanup_channel(dtc);
987 pr_debug("dmatest: dropped channel %s\n", dma_chan_name(chan)); 987 pr_debug("dmatest: dropped channel %s\n", dma_chan_name(chan));
988 dma_release_channel(chan); 988 dma_release_channel(chan);
989 } 989 }
990 990
991 info->nr_channels = 0; 991 info->nr_channels = 0;
992 } 992 }
993 993
994 static void stop_threaded_test(struct dmatest_info *info) 994 static void stop_threaded_test(struct dmatest_info *info)
995 { 995 {
996 mutex_lock(&info->lock); 996 mutex_lock(&info->lock);
997 __stop_threaded_test(info); 997 __stop_threaded_test(info);
998 mutex_unlock(&info->lock); 998 mutex_unlock(&info->lock);
999 } 999 }
1000 1000
1001 static int __restart_threaded_test(struct dmatest_info *info, bool run) 1001 static int __restart_threaded_test(struct dmatest_info *info, bool run)
1002 { 1002 {
1003 struct dmatest_params *params = &info->params; 1003 struct dmatest_params *params = &info->params;
1004 1004
1005 /* Stop any running test first */ 1005 /* Stop any running test first */
1006 __stop_threaded_test(info); 1006 __stop_threaded_test(info);
1007 1007
1008 if (run == false) 1008 if (run == false)
1009 return 0; 1009 return 0;
1010 1010
1011 /* Clear results from previous run */ 1011 /* Clear results from previous run */
1012 result_free(info, NULL); 1012 result_free(info, NULL);
1013 1013
1014 /* Copy test parameters */ 1014 /* Copy test parameters */
1015 params->buf_size = test_buf_size; 1015 params->buf_size = test_buf_size;
1016 strlcpy(params->channel, strim(test_channel), sizeof(params->channel)); 1016 strlcpy(params->channel, strim(test_channel), sizeof(params->channel));
1017 strlcpy(params->device, strim(test_device), sizeof(params->device)); 1017 strlcpy(params->device, strim(test_device), sizeof(params->device));
1018 params->threads_per_chan = threads_per_chan; 1018 params->threads_per_chan = threads_per_chan;
1019 params->max_channels = max_channels; 1019 params->max_channels = max_channels;
1020 params->iterations = iterations; 1020 params->iterations = iterations;
1021 params->xor_sources = xor_sources; 1021 params->xor_sources = xor_sources;
1022 params->pq_sources = pq_sources; 1022 params->pq_sources = pq_sources;
1023 params->timeout = timeout; 1023 params->timeout = timeout;
1024 1024
1025 /* Run test with new parameters */ 1025 /* Run test with new parameters */
1026 return __run_threaded_test(info); 1026 return __run_threaded_test(info);
1027 } 1027 }
1028 1028
1029 static bool __is_threaded_test_run(struct dmatest_info *info) 1029 static bool __is_threaded_test_run(struct dmatest_info *info)
1030 { 1030 {
1031 struct dmatest_chan *dtc; 1031 struct dmatest_chan *dtc;
1032 1032
1033 list_for_each_entry(dtc, &info->channels, node) { 1033 list_for_each_entry(dtc, &info->channels, node) {
1034 struct dmatest_thread *thread; 1034 struct dmatest_thread *thread;
1035 1035
1036 list_for_each_entry(thread, &dtc->threads, node) { 1036 list_for_each_entry(thread, &dtc->threads, node) {
1037 if (!thread->done) 1037 if (!thread->done)
1038 return true; 1038 return true;
1039 } 1039 }
1040 } 1040 }
1041 1041
1042 return false; 1042 return false;
1043 } 1043 }
1044 1044
1045 static ssize_t dtf_read_run(struct file *file, char __user *user_buf, 1045 static ssize_t dtf_read_run(struct file *file, char __user *user_buf,
1046 size_t count, loff_t *ppos) 1046 size_t count, loff_t *ppos)
1047 { 1047 {
1048 struct dmatest_info *info = file->private_data; 1048 struct dmatest_info *info = file->private_data;
1049 char buf[3]; 1049 char buf[3];
1050 1050
1051 mutex_lock(&info->lock); 1051 mutex_lock(&info->lock);
1052 1052
1053 if (__is_threaded_test_run(info)) { 1053 if (__is_threaded_test_run(info)) {
1054 buf[0] = 'Y'; 1054 buf[0] = 'Y';
1055 } else { 1055 } else {
1056 __stop_threaded_test(info); 1056 __stop_threaded_test(info);
1057 buf[0] = 'N'; 1057 buf[0] = 'N';
1058 } 1058 }
1059 1059
1060 mutex_unlock(&info->lock); 1060 mutex_unlock(&info->lock);
1061 buf[1] = '\n'; 1061 buf[1] = '\n';
1062 buf[2] = 0x00; 1062 buf[2] = 0x00;
1063 return simple_read_from_buffer(user_buf, count, ppos, buf, 2); 1063 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
1064 } 1064 }
1065 1065
1066 static ssize_t dtf_write_run(struct file *file, const char __user *user_buf, 1066 static ssize_t dtf_write_run(struct file *file, const char __user *user_buf,
1067 size_t count, loff_t *ppos) 1067 size_t count, loff_t *ppos)
1068 { 1068 {
1069 struct dmatest_info *info = file->private_data; 1069 struct dmatest_info *info = file->private_data;
1070 char buf[16]; 1070 char buf[16];
1071 bool bv; 1071 bool bv;
1072 int ret = 0; 1072 int ret = 0;
1073 1073
1074 if (copy_from_user(buf, user_buf, min(count, (sizeof(buf) - 1)))) 1074 if (copy_from_user(buf, user_buf, min(count, (sizeof(buf) - 1))))
1075 return -EFAULT; 1075 return -EFAULT;
1076 1076
1077 if (strtobool(buf, &bv) == 0) { 1077 if (strtobool(buf, &bv) == 0) {
1078 mutex_lock(&info->lock); 1078 mutex_lock(&info->lock);
1079 1079
1080 if (__is_threaded_test_run(info)) 1080 if (__is_threaded_test_run(info))
1081 ret = -EBUSY; 1081 ret = -EBUSY;
1082 else 1082 else
1083 ret = __restart_threaded_test(info, bv); 1083 ret = __restart_threaded_test(info, bv);
1084 1084
1085 mutex_unlock(&info->lock); 1085 mutex_unlock(&info->lock);
1086 } 1086 }
1087 1087
1088 return ret ? ret : count; 1088 return ret ? ret : count;
1089 } 1089 }
1090 1090
1091 static const struct file_operations dtf_run_fops = { 1091 static const struct file_operations dtf_run_fops = {
1092 .read = dtf_read_run, 1092 .read = dtf_read_run,
1093 .write = dtf_write_run, 1093 .write = dtf_write_run,
1094 .open = simple_open, 1094 .open = simple_open,
1095 .llseek = default_llseek, 1095 .llseek = default_llseek,
1096 }; 1096 };
1097 1097
1098 static int dtf_results_show(struct seq_file *sf, void *data) 1098 static int dtf_results_show(struct seq_file *sf, void *data)
1099 { 1099 {
1100 struct dmatest_info *info = sf->private; 1100 struct dmatest_info *info = sf->private;
1101 struct dmatest_result *result; 1101 struct dmatest_result *result;
1102 struct dmatest_thread_result *tr; 1102 struct dmatest_thread_result *tr;
1103 unsigned int i; 1103 unsigned int i;
1104 1104
1105 mutex_lock(&info->results_lock); 1105 mutex_lock(&info->results_lock);
1106 list_for_each_entry(result, &info->results, node) { 1106 list_for_each_entry(result, &info->results, node) {
1107 list_for_each_entry(tr, &result->results, node) { 1107 list_for_each_entry(tr, &result->results, node) {
1108 seq_printf(sf, "%s\n", 1108 seq_printf(sf, "%s\n",
1109 thread_result_get(result->name, tr)); 1109 thread_result_get(result->name, tr));
1110 if (tr->type == DMATEST_ET_VERIFY_BUF) { 1110 if (tr->type == DMATEST_ET_VERIFY_BUF) {
1111 for (i = 0; i < tr->vr->error_count; i++) { 1111 for (i = 0; i < tr->vr->error_count; i++) {
1112 seq_printf(sf, "\t%s\n", 1112 seq_printf(sf, "\t%s\n",
1113 verify_result_get_one(tr->vr, i)); 1113 verify_result_get_one(tr->vr, i));
1114 } 1114 }
1115 } 1115 }
1116 } 1116 }
1117 } 1117 }
1118 1118
1119 mutex_unlock(&info->results_lock); 1119 mutex_unlock(&info->results_lock);
1120 return 0; 1120 return 0;
1121 } 1121 }
1122 1122
1123 static int dtf_results_open(struct inode *inode, struct file *file) 1123 static int dtf_results_open(struct inode *inode, struct file *file)
1124 { 1124 {
1125 return single_open(file, dtf_results_show, inode->i_private); 1125 return single_open(file, dtf_results_show, inode->i_private);
1126 } 1126 }
1127 1127
1128 static const struct file_operations dtf_results_fops = { 1128 static const struct file_operations dtf_results_fops = {
1129 .open = dtf_results_open, 1129 .open = dtf_results_open,
1130 .read = seq_read, 1130 .read = seq_read,
1131 .llseek = seq_lseek, 1131 .llseek = seq_lseek,
1132 .release = single_release, 1132 .release = single_release,
1133 }; 1133 };
1134 1134
1135 static int dmatest_register_dbgfs(struct dmatest_info *info) 1135 static int dmatest_register_dbgfs(struct dmatest_info *info)
1136 { 1136 {
1137 struct dentry *d; 1137 struct dentry *d;
1138 1138
1139 d = debugfs_create_dir("dmatest", NULL); 1139 d = debugfs_create_dir("dmatest", NULL);
1140 if (IS_ERR(d)) 1140 if (IS_ERR(d))
1141 return PTR_ERR(d); 1141 return PTR_ERR(d);
1142 if (!d) 1142 if (!d)
1143 goto err_root; 1143 goto err_root;
1144 1144
1145 info->root = d; 1145 info->root = d;
1146 1146
1147 /* Run or stop threaded test */ 1147 /* Run or stop threaded test */
1148 debugfs_create_file("run", S_IWUSR | S_IRUGO, info->root, info, 1148 debugfs_create_file("run", S_IWUSR | S_IRUGO, info->root, info,
1149 &dtf_run_fops); 1149 &dtf_run_fops);
1150 1150
1151 /* Results of test in progress */ 1151 /* Results of test in progress */
1152 debugfs_create_file("results", S_IRUGO, info->root, info, 1152 debugfs_create_file("results", S_IRUGO, info->root, info,
1153 &dtf_results_fops); 1153 &dtf_results_fops);
1154 1154
1155 return 0; 1155 return 0;
1156 1156
1157 err_root: 1157 err_root:
1158 pr_err("dmatest: Failed to initialize debugfs\n"); 1158 pr_err("dmatest: Failed to initialize debugfs\n");
1159 return -ENOMEM; 1159 return -ENOMEM;
1160 } 1160 }
1161 1161
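/*
 * With debugfs mounted (typically at /sys/kernel/debug), the two files
 * registered above are driven from user space roughly as follows:
 *
 *   echo 1 > /sys/kernel/debug/dmatest/run     start (or restart) the threads
 *   cat /sys/kernel/debug/dmatest/run          'Y' while any thread still runs
 *   cat /sys/kernel/debug/dmatest/results      dump the accumulated results
 *
 * dtf_write_run() accepts any value strtobool() understands and returns
 * -EBUSY while __is_threaded_test_run() still reports active threads;
 * reading "run" after the threads have finished also cleans them up.
 */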
1162 static int __init dmatest_init(void) 1162 static int __init dmatest_init(void)
1163 { 1163 {
1164 struct dmatest_info *info = &test_info; 1164 struct dmatest_info *info = &test_info;
1165 int ret; 1165 int ret;
1166 1166
1167 memset(info, 0, sizeof(*info)); 1167 memset(info, 0, sizeof(*info));
1168 1168
1169 mutex_init(&info->lock); 1169 mutex_init(&info->lock);
1170 INIT_LIST_HEAD(&info->channels); 1170 INIT_LIST_HEAD(&info->channels);
1171 1171
1172 mutex_init(&info->results_lock); 1172 mutex_init(&info->results_lock);
1173 INIT_LIST_HEAD(&info->results); 1173 INIT_LIST_HEAD(&info->results);
1174 1174
1175 ret = dmatest_register_dbgfs(info); 1175 ret = dmatest_register_dbgfs(info);
1176 if (ret) 1176 if (ret)
1177 return ret; 1177 return ret;
1178 1178
1179 #ifdef MODULE 1179 #ifdef MODULE
1180 return 0; 1180 return 0;
1181 #else 1181 #else
1182 return run_threaded_test(info); 1182 return run_threaded_test(info);
1183 #endif 1183 #endif
1184 } 1184 }
1185 /* when compiled-in wait for drivers to load first */ 1185 /* when compiled-in wait for drivers to load first */
1186 late_initcall(dmatest_init); 1186 late_initcall(dmatest_init);
1187 1187
1188 static void __exit dmatest_exit(void) 1188 static void __exit dmatest_exit(void)
1189 { 1189 {
1190 struct dmatest_info *info = &test_info; 1190 struct dmatest_info *info = &test_info;
1191 1191
1192 debugfs_remove_recursive(info->root); 1192 debugfs_remove_recursive(info->root);
1193 stop_threaded_test(info); 1193 stop_threaded_test(info);
1194 result_free(info, NULL); 1194 result_free(info, NULL);
1195 } 1195 }
1196 module_exit(dmatest_exit); 1196 module_exit(dmatest_exit);
1197 1197
1198 MODULE_AUTHOR("Haavard Skinnemoen (Atmel)"); 1198 MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
1199 MODULE_LICENSE("GPL v2"); 1199 MODULE_LICENSE("GPL v2");
1200 1200
drivers/dma/dw/core.c
1 /* 1 /*
2 * Core driver for the Synopsys DesignWare DMA Controller 2 * Core driver for the Synopsys DesignWare DMA Controller
3 * 3 *
4 * Copyright (C) 2007-2008 Atmel Corporation 4 * Copyright (C) 2007-2008 Atmel Corporation
5 * Copyright (C) 2010-2011 ST Microelectronics 5 * Copyright (C) 2010-2011 ST Microelectronics
6 * Copyright (C) 2013 Intel Corporation 6 * Copyright (C) 2013 Intel Corporation
7 * 7 *
8 * This program is free software; you can redistribute it and/or modify 8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as 9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation. 10 * published by the Free Software Foundation.
11 */ 11 */
12 12
13 #include <linux/bitops.h> 13 #include <linux/bitops.h>
14 #include <linux/clk.h> 14 #include <linux/clk.h>
15 #include <linux/delay.h> 15 #include <linux/delay.h>
16 #include <linux/dmaengine.h> 16 #include <linux/dmaengine.h>
17 #include <linux/dma-mapping.h> 17 #include <linux/dma-mapping.h>
18 #include <linux/dmapool.h> 18 #include <linux/dmapool.h>
19 #include <linux/err.h> 19 #include <linux/err.h>
20 #include <linux/init.h> 20 #include <linux/init.h>
21 #include <linux/interrupt.h> 21 #include <linux/interrupt.h>
22 #include <linux/io.h> 22 #include <linux/io.h>
23 #include <linux/mm.h> 23 #include <linux/mm.h>
24 #include <linux/module.h> 24 #include <linux/module.h>
25 #include <linux/slab.h> 25 #include <linux/slab.h>
26 26
27 #include "../dmaengine.h" 27 #include "../dmaengine.h"
28 #include "internal.h" 28 #include "internal.h"
29 29
30 /* 30 /*
31 * This supports the Synopsys "DesignWare AHB Central DMA Controller", 31 * This supports the Synopsys "DesignWare AHB Central DMA Controller",
32 * (DW_ahb_dmac) which is used with various AMBA 2.0 systems (not all 32 * (DW_ahb_dmac) which is used with various AMBA 2.0 systems (not all
33 * of which use ARM any more). See the "Databook" from Synopsys for 33 * of which use ARM any more). See the "Databook" from Synopsys for
34 * information beyond what licensees probably provide. 34 * information beyond what licensees probably provide.
35 * 35 *
36 * The driver has currently been tested only with the Atmel AT32AP7000, 36 * The driver has currently been tested only with the Atmel AT32AP7000,
37 * which does not support descriptor writeback. 37 * which does not support descriptor writeback.
38 */ 38 */
39 39
40 static inline bool is_request_line_unset(struct dw_dma_chan *dwc) 40 static inline bool is_request_line_unset(struct dw_dma_chan *dwc)
41 { 41 {
42 return dwc->request_line == (typeof(dwc->request_line))~0; 42 return dwc->request_line == (typeof(dwc->request_line))~0;
43 } 43 }
44 44
45 static inline void dwc_set_masters(struct dw_dma_chan *dwc) 45 static inline void dwc_set_masters(struct dw_dma_chan *dwc)
46 { 46 {
47 struct dw_dma *dw = to_dw_dma(dwc->chan.device); 47 struct dw_dma *dw = to_dw_dma(dwc->chan.device);
48 struct dw_dma_slave *dws = dwc->chan.private; 48 struct dw_dma_slave *dws = dwc->chan.private;
49 unsigned char mmax = dw->nr_masters - 1; 49 unsigned char mmax = dw->nr_masters - 1;
50 50
51 if (!is_request_line_unset(dwc)) 51 if (!is_request_line_unset(dwc))
52 return; 52 return;
53 53
54 dwc->src_master = min_t(unsigned char, mmax, dwc_get_sms(dws)); 54 dwc->src_master = min_t(unsigned char, mmax, dwc_get_sms(dws));
55 dwc->dst_master = min_t(unsigned char, mmax, dwc_get_dms(dws)); 55 dwc->dst_master = min_t(unsigned char, mmax, dwc_get_dms(dws));
56 } 56 }
57 57
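/*
 * DWC_DEFAULT_CTLLO() builds the common CTL_LO template for a channel:
 * source and destination burst sizes from the slave configuration (or a
 * 16-item default for memory-to-memory transfers), linked-list pointer
 * updates enabled on both sides, and the AHB masters chosen for this
 * channel via DMS/SMS.
 */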
58 #define DWC_DEFAULT_CTLLO(_chan) ({ \ 58 #define DWC_DEFAULT_CTLLO(_chan) ({ \
59 struct dw_dma_chan *_dwc = to_dw_dma_chan(_chan); \ 59 struct dw_dma_chan *_dwc = to_dw_dma_chan(_chan); \
60 struct dma_slave_config *_sconfig = &_dwc->dma_sconfig; \ 60 struct dma_slave_config *_sconfig = &_dwc->dma_sconfig; \
61 bool _is_slave = is_slave_direction(_dwc->direction); \ 61 bool _is_slave = is_slave_direction(_dwc->direction); \
62 u8 _smsize = _is_slave ? _sconfig->src_maxburst : \ 62 u8 _smsize = _is_slave ? _sconfig->src_maxburst : \
63 DW_DMA_MSIZE_16; \ 63 DW_DMA_MSIZE_16; \
64 u8 _dmsize = _is_slave ? _sconfig->dst_maxburst : \ 64 u8 _dmsize = _is_slave ? _sconfig->dst_maxburst : \
65 DW_DMA_MSIZE_16; \ 65 DW_DMA_MSIZE_16; \
66 \ 66 \
67 (DWC_CTLL_DST_MSIZE(_dmsize) \ 67 (DWC_CTLL_DST_MSIZE(_dmsize) \
68 | DWC_CTLL_SRC_MSIZE(_smsize) \ 68 | DWC_CTLL_SRC_MSIZE(_smsize) \
69 | DWC_CTLL_LLP_D_EN \ 69 | DWC_CTLL_LLP_D_EN \
70 | DWC_CTLL_LLP_S_EN \ 70 | DWC_CTLL_LLP_S_EN \
71 | DWC_CTLL_DMS(_dwc->dst_master) \ 71 | DWC_CTLL_DMS(_dwc->dst_master) \
72 | DWC_CTLL_SMS(_dwc->src_master)); \ 72 | DWC_CTLL_SMS(_dwc->src_master)); \
73 }) 73 })
74 74
75 /* 75 /*
76 * Number of descriptors to allocate for each channel. This should be 76 * Number of descriptors to allocate for each channel. This should be
77 * made configurable somehow; preferably, the clients (at least the 77 * made configurable somehow; preferably, the clients (at least the
78 * ones using slave transfers) should be able to give us a hint. 78 * ones using slave transfers) should be able to give us a hint.
79 */ 79 */
80 #define NR_DESCS_PER_CHANNEL 64 80 #define NR_DESCS_PER_CHANNEL 64
81 81
82 /*----------------------------------------------------------------------*/ 82 /*----------------------------------------------------------------------*/
83 83
84 static struct device *chan2dev(struct dma_chan *chan) 84 static struct device *chan2dev(struct dma_chan *chan)
85 { 85 {
86 return &chan->dev->device; 86 return &chan->dev->device;
87 } 87 }
88 static struct device *chan2parent(struct dma_chan *chan) 88 static struct device *chan2parent(struct dma_chan *chan)
89 { 89 {
90 return chan->dev->device.parent; 90 return chan->dev->device.parent;
91 } 91 }
92 92
93 static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc) 93 static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc)
94 { 94 {
95 return to_dw_desc(dwc->active_list.next); 95 return to_dw_desc(dwc->active_list.next);
96 } 96 }
97 97
98 static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc) 98 static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
99 { 99 {
100 struct dw_desc *desc, *_desc; 100 struct dw_desc *desc, *_desc;
101 struct dw_desc *ret = NULL; 101 struct dw_desc *ret = NULL;
102 unsigned int i = 0; 102 unsigned int i = 0;
103 unsigned long flags; 103 unsigned long flags;
104 104
105 spin_lock_irqsave(&dwc->lock, flags); 105 spin_lock_irqsave(&dwc->lock, flags);
106 list_for_each_entry_safe(desc, _desc, &dwc->free_list, desc_node) { 106 list_for_each_entry_safe(desc, _desc, &dwc->free_list, desc_node) {
107 i++; 107 i++;
108 if (async_tx_test_ack(&desc->txd)) { 108 if (async_tx_test_ack(&desc->txd)) {
109 list_del(&desc->desc_node); 109 list_del(&desc->desc_node);
110 ret = desc; 110 ret = desc;
111 break; 111 break;
112 } 112 }
113 dev_dbg(chan2dev(&dwc->chan), "desc %p not ACKed\n", desc); 113 dev_dbg(chan2dev(&dwc->chan), "desc %p not ACKed\n", desc);
114 } 114 }
115 spin_unlock_irqrestore(&dwc->lock, flags); 115 spin_unlock_irqrestore(&dwc->lock, flags);
116 116
117 dev_vdbg(chan2dev(&dwc->chan), "scanned %u descriptors on freelist\n", i); 117 dev_vdbg(chan2dev(&dwc->chan), "scanned %u descriptors on freelist\n", i);
118 118
119 return ret; 119 return ret;
120 } 120 }
121 121
122 /* 122 /*
123 * Move a descriptor, including any children, to the free list. 123 * Move a descriptor, including any children, to the free list.
124 * `desc' must not be on any lists. 124 * `desc' must not be on any lists.
125 */ 125 */
126 static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc) 126 static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
127 { 127 {
128 unsigned long flags; 128 unsigned long flags;
129 129
130 if (desc) { 130 if (desc) {
131 struct dw_desc *child; 131 struct dw_desc *child;
132 132
133 spin_lock_irqsave(&dwc->lock, flags); 133 spin_lock_irqsave(&dwc->lock, flags);
134 list_for_each_entry(child, &desc->tx_list, desc_node) 134 list_for_each_entry(child, &desc->tx_list, desc_node)
135 dev_vdbg(chan2dev(&dwc->chan), 135 dev_vdbg(chan2dev(&dwc->chan),
136 "moving child desc %p to freelist\n", 136 "moving child desc %p to freelist\n",
137 child); 137 child);
138 list_splice_init(&desc->tx_list, &dwc->free_list); 138 list_splice_init(&desc->tx_list, &dwc->free_list);
139 dev_vdbg(chan2dev(&dwc->chan), "moving desc %p to freelist\n", desc); 139 dev_vdbg(chan2dev(&dwc->chan), "moving desc %p to freelist\n", desc);
140 list_add(&desc->desc_node, &dwc->free_list); 140 list_add(&desc->desc_node, &dwc->free_list);
141 spin_unlock_irqrestore(&dwc->lock, flags); 141 spin_unlock_irqrestore(&dwc->lock, flags);
142 } 142 }
143 } 143 }
144 144
145 static void dwc_initialize(struct dw_dma_chan *dwc) 145 static void dwc_initialize(struct dw_dma_chan *dwc)
146 { 146 {
147 struct dw_dma *dw = to_dw_dma(dwc->chan.device); 147 struct dw_dma *dw = to_dw_dma(dwc->chan.device);
148 struct dw_dma_slave *dws = dwc->chan.private; 148 struct dw_dma_slave *dws = dwc->chan.private;
149 u32 cfghi = DWC_CFGH_FIFO_MODE; 149 u32 cfghi = DWC_CFGH_FIFO_MODE;
150 u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority); 150 u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority);
151 151
152 if (dwc->initialized == true) 152 if (dwc->initialized == true)
153 return; 153 return;
154 154
155 if (dws) { 155 if (dws) {
156 /* 156 /*
157 * We need controller-specific data to set up slave 157 * We need controller-specific data to set up slave
158 * transfers. 158 * transfers.
159 */ 159 */
160 BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev); 160 BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev);
161 161
162 cfghi = dws->cfg_hi; 162 cfghi = dws->cfg_hi;
163 cfglo |= dws->cfg_lo & ~DWC_CFGL_CH_PRIOR_MASK; 163 cfglo |= dws->cfg_lo & ~DWC_CFGL_CH_PRIOR_MASK;
164 } else { 164 } else {
165 if (dwc->direction == DMA_MEM_TO_DEV) 165 if (dwc->direction == DMA_MEM_TO_DEV)
166 cfghi = DWC_CFGH_DST_PER(dwc->request_line); 166 cfghi = DWC_CFGH_DST_PER(dwc->request_line);
167 else if (dwc->direction == DMA_DEV_TO_MEM) 167 else if (dwc->direction == DMA_DEV_TO_MEM)
168 cfghi = DWC_CFGH_SRC_PER(dwc->request_line); 168 cfghi = DWC_CFGH_SRC_PER(dwc->request_line);
169 } 169 }
170 170
171 channel_writel(dwc, CFG_LO, cfglo); 171 channel_writel(dwc, CFG_LO, cfglo);
172 channel_writel(dwc, CFG_HI, cfghi); 172 channel_writel(dwc, CFG_HI, cfghi);
173 173
174 /* Enable interrupts */ 174 /* Enable interrupts */
175 channel_set_bit(dw, MASK.XFER, dwc->mask); 175 channel_set_bit(dw, MASK.XFER, dwc->mask);
176 channel_set_bit(dw, MASK.ERROR, dwc->mask); 176 channel_set_bit(dw, MASK.ERROR, dwc->mask);
177 177
178 dwc->initialized = true; 178 dwc->initialized = true;
179 } 179 }
180 180
181 /*----------------------------------------------------------------------*/ 181 /*----------------------------------------------------------------------*/
182 182
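/*
 * Despite its name, dwc_fast_fls() returns the number of trailing zero bits
 * in @v, capped at 3 - i.e. the log2 of the widest transfer width (1, 2, 4
 * or 8 bytes) that @v is a multiple of.  For example dwc_fast_fls(0x18) is 3,
 * dwc_fast_fls(6) is 1 and dwc_fast_fls(5) is 0.
 */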
183 static inline unsigned int dwc_fast_fls(unsigned long long v) 183 static inline unsigned int dwc_fast_fls(unsigned long long v)
184 { 184 {
185 /* 185 /*
186 * We can be a lot more clever here, but this should take care 186 * We can be a lot more clever here, but this should take care
187 * of the most common optimization. 187 * of the most common optimization.
188 */ 188 */
189 if (!(v & 7)) 189 if (!(v & 7))
190 return 3; 190 return 3;
191 else if (!(v & 3)) 191 else if (!(v & 3))
192 return 2; 192 return 2;
193 else if (!(v & 1)) 193 else if (!(v & 1))
194 return 1; 194 return 1;
195 return 0; 195 return 0;
196 } 196 }
197 197
198 static inline void dwc_dump_chan_regs(struct dw_dma_chan *dwc) 198 static inline void dwc_dump_chan_regs(struct dw_dma_chan *dwc)
199 { 199 {
200 dev_err(chan2dev(&dwc->chan), 200 dev_err(chan2dev(&dwc->chan),
201 " SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n", 201 " SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
202 channel_readl(dwc, SAR), 202 channel_readl(dwc, SAR),
203 channel_readl(dwc, DAR), 203 channel_readl(dwc, DAR),
204 channel_readl(dwc, LLP), 204 channel_readl(dwc, LLP),
205 channel_readl(dwc, CTL_HI), 205 channel_readl(dwc, CTL_HI),
206 channel_readl(dwc, CTL_LO)); 206 channel_readl(dwc, CTL_LO));
207 } 207 }
208 208
209 static inline void dwc_chan_disable(struct dw_dma *dw, struct dw_dma_chan *dwc) 209 static inline void dwc_chan_disable(struct dw_dma *dw, struct dw_dma_chan *dwc)
210 { 210 {
211 channel_clear_bit(dw, CH_EN, dwc->mask); 211 channel_clear_bit(dw, CH_EN, dwc->mask);
212 while (dma_readl(dw, CH_EN) & dwc->mask) 212 while (dma_readl(dw, CH_EN) & dwc->mask)
213 cpu_relax(); 213 cpu_relax();
214 } 214 }
215 215
216 /*----------------------------------------------------------------------*/ 216 /*----------------------------------------------------------------------*/
217 217
218 /* Perform single block transfer */ 218 /* Perform single block transfer */
219 static inline void dwc_do_single_block(struct dw_dma_chan *dwc, 219 static inline void dwc_do_single_block(struct dw_dma_chan *dwc,
220 struct dw_desc *desc) 220 struct dw_desc *desc)
221 { 221 {
222 struct dw_dma *dw = to_dw_dma(dwc->chan.device); 222 struct dw_dma *dw = to_dw_dma(dwc->chan.device);
223 u32 ctllo; 223 u32 ctllo;
224 224
225 /* Software emulation of LLP mode relies on interrupts to continue 225 /* Software emulation of LLP mode relies on interrupts to continue
226 * multi block transfer. */ 226 * multi block transfer. */
227 ctllo = desc->lli.ctllo | DWC_CTLL_INT_EN; 227 ctllo = desc->lli.ctllo | DWC_CTLL_INT_EN;
228 228
229 channel_writel(dwc, SAR, desc->lli.sar); 229 channel_writel(dwc, SAR, desc->lli.sar);
230 channel_writel(dwc, DAR, desc->lli.dar); 230 channel_writel(dwc, DAR, desc->lli.dar);
231 channel_writel(dwc, CTL_LO, ctllo); 231 channel_writel(dwc, CTL_LO, ctllo);
232 channel_writel(dwc, CTL_HI, desc->lli.ctlhi); 232 channel_writel(dwc, CTL_HI, desc->lli.ctlhi);
233 channel_set_bit(dw, CH_EN, dwc->mask); 233 channel_set_bit(dw, CH_EN, dwc->mask);
234 234
235 /* Move pointer to next descriptor */ 235 /* Move pointer to next descriptor */
236 dwc->tx_node_active = dwc->tx_node_active->next; 236 dwc->tx_node_active = dwc->tx_node_active->next;
237 } 237 }
238 238
239 /* Called with dwc->lock held and bh disabled */ 239 /* Called with dwc->lock held and bh disabled */
240 static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first) 240 static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
241 { 241 {
242 struct dw_dma *dw = to_dw_dma(dwc->chan.device); 242 struct dw_dma *dw = to_dw_dma(dwc->chan.device);
243 unsigned long was_soft_llp; 243 unsigned long was_soft_llp;
244 244
245 /* ASSERT: channel is idle */ 245 /* ASSERT: channel is idle */
246 if (dma_readl(dw, CH_EN) & dwc->mask) { 246 if (dma_readl(dw, CH_EN) & dwc->mask) {
247 dev_err(chan2dev(&dwc->chan), 247 dev_err(chan2dev(&dwc->chan),
248 "BUG: Attempted to start non-idle channel\n"); 248 "BUG: Attempted to start non-idle channel\n");
249 dwc_dump_chan_regs(dwc); 249 dwc_dump_chan_regs(dwc);
250 250
251 /* The tasklet will hopefully advance the queue... */ 251 /* The tasklet will hopefully advance the queue... */
252 return; 252 return;
253 } 253 }
254 254
255 if (dwc->nollp) { 255 if (dwc->nollp) {
256 was_soft_llp = test_and_set_bit(DW_DMA_IS_SOFT_LLP, 256 was_soft_llp = test_and_set_bit(DW_DMA_IS_SOFT_LLP,
257 &dwc->flags); 257 &dwc->flags);
258 if (was_soft_llp) { 258 if (was_soft_llp) {
259 dev_err(chan2dev(&dwc->chan), 259 dev_err(chan2dev(&dwc->chan),
260 "BUG: Attempted to start new LLP transfer " 260 "BUG: Attempted to start new LLP transfer "
261 "inside ongoing one\n"); 261 "inside ongoing one\n");
262 return; 262 return;
263 } 263 }
264 264
265 dwc_initialize(dwc); 265 dwc_initialize(dwc);
266 266
267 dwc->residue = first->total_len; 267 dwc->residue = first->total_len;
268 dwc->tx_node_active = &first->tx_list; 268 dwc->tx_node_active = &first->tx_list;
269 269
270 /* Submit first block */ 270 /* Submit first block */
271 dwc_do_single_block(dwc, first); 271 dwc_do_single_block(dwc, first);
272 272
273 return; 273 return;
274 } 274 }
275 275
276 dwc_initialize(dwc); 276 dwc_initialize(dwc);
277 277
278 channel_writel(dwc, LLP, first->txd.phys); 278 channel_writel(dwc, LLP, first->txd.phys);
279 channel_writel(dwc, CTL_LO, 279 channel_writel(dwc, CTL_LO,
280 DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN); 280 DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
281 channel_writel(dwc, CTL_HI, 0); 281 channel_writel(dwc, CTL_HI, 0);
282 channel_set_bit(dw, CH_EN, dwc->mask); 282 channel_set_bit(dw, CH_EN, dwc->mask);
283 } 283 }
284 284
285 /*----------------------------------------------------------------------*/ 285 /*----------------------------------------------------------------------*/
286 286
287 static void 287 static void
288 dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc, 288 dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
289 bool callback_required) 289 bool callback_required)
290 { 290 {
291 dma_async_tx_callback callback = NULL; 291 dma_async_tx_callback callback = NULL;
292 void *param = NULL; 292 void *param = NULL;
293 struct dma_async_tx_descriptor *txd = &desc->txd; 293 struct dma_async_tx_descriptor *txd = &desc->txd;
294 struct dw_desc *child; 294 struct dw_desc *child;
295 unsigned long flags; 295 unsigned long flags;
296 296
297 dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie); 297 dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie);
298 298
299 spin_lock_irqsave(&dwc->lock, flags); 299 spin_lock_irqsave(&dwc->lock, flags);
300 dma_cookie_complete(txd); 300 dma_cookie_complete(txd);
301 if (callback_required) { 301 if (callback_required) {
302 callback = txd->callback; 302 callback = txd->callback;
303 param = txd->callback_param; 303 param = txd->callback_param;
304 } 304 }
305 305
306 /* async_tx_ack */ 306 /* async_tx_ack */
307 list_for_each_entry(child, &desc->tx_list, desc_node) 307 list_for_each_entry(child, &desc->tx_list, desc_node)
308 async_tx_ack(&child->txd); 308 async_tx_ack(&child->txd);
309 async_tx_ack(&desc->txd); 309 async_tx_ack(&desc->txd);
310 310
311 list_splice_init(&desc->tx_list, &dwc->free_list); 311 list_splice_init(&desc->tx_list, &dwc->free_list);
312 list_move(&desc->desc_node, &dwc->free_list); 312 list_move(&desc->desc_node, &dwc->free_list);
313 313
314 if (!is_slave_direction(dwc->direction)) { 314 if (!is_slave_direction(dwc->direction)) {
315 struct device *parent = chan2parent(&dwc->chan); 315 struct device *parent = chan2parent(&dwc->chan);
316 if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) { 316 if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
317 if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE) 317 if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
318 dma_unmap_single(parent, desc->lli.dar, 318 dma_unmap_single(parent, desc->lli.dar,
319 desc->total_len, DMA_FROM_DEVICE); 319 desc->total_len, DMA_FROM_DEVICE);
320 else 320 else
321 dma_unmap_page(parent, desc->lli.dar, 321 dma_unmap_page(parent, desc->lli.dar,
322 desc->total_len, DMA_FROM_DEVICE); 322 desc->total_len, DMA_FROM_DEVICE);
323 } 323 }
324 if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) { 324 if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
325 if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE) 325 if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
326 dma_unmap_single(parent, desc->lli.sar, 326 dma_unmap_single(parent, desc->lli.sar,
327 desc->total_len, DMA_TO_DEVICE); 327 desc->total_len, DMA_TO_DEVICE);
328 else 328 else
329 dma_unmap_page(parent, desc->lli.sar, 329 dma_unmap_page(parent, desc->lli.sar,
330 desc->total_len, DMA_TO_DEVICE); 330 desc->total_len, DMA_TO_DEVICE);
331 } 331 }
332 } 332 }
333 333
334 spin_unlock_irqrestore(&dwc->lock, flags); 334 spin_unlock_irqrestore(&dwc->lock, flags);
335 335
336 if (callback) 336 if (callback)
337 callback(param); 337 callback(param);
338 } 338 }
339 339
340 static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc) 340 static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
341 { 341 {
342 struct dw_desc *desc, *_desc; 342 struct dw_desc *desc, *_desc;
343 LIST_HEAD(list); 343 LIST_HEAD(list);
344 unsigned long flags; 344 unsigned long flags;
345 345
346 spin_lock_irqsave(&dwc->lock, flags); 346 spin_lock_irqsave(&dwc->lock, flags);
347 if (dma_readl(dw, CH_EN) & dwc->mask) { 347 if (dma_readl(dw, CH_EN) & dwc->mask) {
348 dev_err(chan2dev(&dwc->chan), 348 dev_err(chan2dev(&dwc->chan),
349 "BUG: XFER bit set, but channel not idle!\n"); 349 "BUG: XFER bit set, but channel not idle!\n");
350 350
351 /* Try to continue after resetting the channel... */ 351 /* Try to continue after resetting the channel... */
352 dwc_chan_disable(dw, dwc); 352 dwc_chan_disable(dw, dwc);
353 } 353 }
354 354
355 /* 355 /*
356 * Submit queued descriptors ASAP, i.e. before we go through 356 * Submit queued descriptors ASAP, i.e. before we go through
357 * the completed ones. 357 * the completed ones.
358 */ 358 */
359 list_splice_init(&dwc->active_list, &list); 359 list_splice_init(&dwc->active_list, &list);
360 if (!list_empty(&dwc->queue)) { 360 if (!list_empty(&dwc->queue)) {
361 list_move(dwc->queue.next, &dwc->active_list); 361 list_move(dwc->queue.next, &dwc->active_list);
362 dwc_dostart(dwc, dwc_first_active(dwc)); 362 dwc_dostart(dwc, dwc_first_active(dwc));
363 } 363 }
364 364
365 spin_unlock_irqrestore(&dwc->lock, flags); 365 spin_unlock_irqrestore(&dwc->lock, flags);
366 366
367 list_for_each_entry_safe(desc, _desc, &list, desc_node) 367 list_for_each_entry_safe(desc, _desc, &list, desc_node)
368 dwc_descriptor_complete(dwc, desc, true); 368 dwc_descriptor_complete(dwc, desc, true);
369 } 369 }
370 370
371 /* Returns how many bytes were already received from source */ 371 /* Returns how many bytes were already received from source */
372 static inline u32 dwc_get_sent(struct dw_dma_chan *dwc) 372 static inline u32 dwc_get_sent(struct dw_dma_chan *dwc)
373 { 373 {
374 u32 ctlhi = channel_readl(dwc, CTL_HI); 374 u32 ctlhi = channel_readl(dwc, CTL_HI);
375 u32 ctllo = channel_readl(dwc, CTL_LO); 375 u32 ctllo = channel_readl(dwc, CTL_LO);
376 376
377 return (ctlhi & DWC_CTLH_BLOCK_TS_MASK) * (1 << (ctllo >> 4 & 7)); 377 return (ctlhi & DWC_CTLH_BLOCK_TS_MASK) * (1 << (ctllo >> 4 & 7));
378 } 378 }
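
The residue arithmetic in dwc_get_sent() above combines two pieces of channel state: CTL_HI.BLOCK_TS counts the source-width items the hardware has already moved, and bits [6:4] of CTL_LO (SRC_TR_WIDTH) encode the item width as a power of two in bytes. A standalone sketch of the same calculation follows; the 12-bit BLOCK_TS mask and the bit positions are assumptions based on this register layout rather than values quoted from the driver headers.

#include <stdio.h>
#include <stdint.h>

/* Assumed layout, mirroring dwc_get_sent(): BLOCK_TS in the low 12 bits of
 * CTL_HI, SRC_TR_WIDTH in bits [6:4] of CTL_LO (item width = 2^n bytes). */
#define BLOCK_TS_MASK	0xfffu

static uint32_t bytes_sent(uint32_t ctlhi, uint32_t ctllo)
{
	uint32_t items = ctlhi & BLOCK_TS_MASK;		/* items already transferred */
	uint32_t item_bytes = 1u << ((ctllo >> 4) & 7);	/* 2^SRC_TR_WIDTH bytes each */

	return items * item_bytes;
}

int main(void)
{
	/* e.g. 10 items at SRC_TR_WIDTH = 2 (32-bit) -> 40 bytes already sent */
	printf("%u bytes sent\n", bytes_sent(10, 2u << 4));
	return 0;
}
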
379 379
380 static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc) 380 static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
381 { 381 {
382 dma_addr_t llp; 382 dma_addr_t llp;
383 struct dw_desc *desc, *_desc; 383 struct dw_desc *desc, *_desc;
384 struct dw_desc *child; 384 struct dw_desc *child;
385 u32 status_xfer; 385 u32 status_xfer;
386 unsigned long flags; 386 unsigned long flags;
387 387
388 spin_lock_irqsave(&dwc->lock, flags); 388 spin_lock_irqsave(&dwc->lock, flags);
389 llp = channel_readl(dwc, LLP); 389 llp = channel_readl(dwc, LLP);
390 status_xfer = dma_readl(dw, RAW.XFER); 390 status_xfer = dma_readl(dw, RAW.XFER);
391 391
392 if (status_xfer & dwc->mask) { 392 if (status_xfer & dwc->mask) {
393 /* Everything we've submitted is done */ 393 /* Everything we've submitted is done */
394 dma_writel(dw, CLEAR.XFER, dwc->mask); 394 dma_writel(dw, CLEAR.XFER, dwc->mask);
395 395
396 if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) { 396 if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) {
397 struct list_head *head, *active = dwc->tx_node_active; 397 struct list_head *head, *active = dwc->tx_node_active;
398 398
399 /* 399 /*
400 * We are inside the first active descriptor here; 400 * We are inside the first active descriptor here;
401 * otherwise something has gone badly wrong. 401 * otherwise something has gone badly wrong.
402 */ 402 */
403 desc = dwc_first_active(dwc); 403 desc = dwc_first_active(dwc);
404 404
405 head = &desc->tx_list; 405 head = &desc->tx_list;
406 if (active != head) { 406 if (active != head) {
407 /* Update desc to reflect last sent one */ 407 /* Update desc to reflect last sent one */
408 if (active != head->next) 408 if (active != head->next)
409 desc = to_dw_desc(active->prev); 409 desc = to_dw_desc(active->prev);
410 410
411 dwc->residue -= desc->len; 411 dwc->residue -= desc->len;
412 412
413 child = to_dw_desc(active); 413 child = to_dw_desc(active);
414 414
415 /* Submit next block */ 415 /* Submit next block */
416 dwc_do_single_block(dwc, child); 416 dwc_do_single_block(dwc, child);
417 417
418 spin_unlock_irqrestore(&dwc->lock, flags); 418 spin_unlock_irqrestore(&dwc->lock, flags);
419 return; 419 return;
420 } 420 }
421 421
422 /* We are done here */ 422 /* We are done here */
423 clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags); 423 clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);
424 } 424 }
425 425
426 dwc->residue = 0; 426 dwc->residue = 0;
427 427
428 spin_unlock_irqrestore(&dwc->lock, flags); 428 spin_unlock_irqrestore(&dwc->lock, flags);
429 429
430 dwc_complete_all(dw, dwc); 430 dwc_complete_all(dw, dwc);
431 return; 431 return;
432 } 432 }
433 433
434 if (list_empty(&dwc->active_list)) { 434 if (list_empty(&dwc->active_list)) {
435 dwc->residue = 0; 435 dwc->residue = 0;
436 spin_unlock_irqrestore(&dwc->lock, flags); 436 spin_unlock_irqrestore(&dwc->lock, flags);
437 return; 437 return;
438 } 438 }
439 439
440 if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) { 440 if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) {
441 dev_vdbg(chan2dev(&dwc->chan), "%s: soft LLP mode\n", __func__); 441 dev_vdbg(chan2dev(&dwc->chan), "%s: soft LLP mode\n", __func__);
442 spin_unlock_irqrestore(&dwc->lock, flags); 442 spin_unlock_irqrestore(&dwc->lock, flags);
443 return; 443 return;
444 } 444 }
445 445
446 dev_vdbg(chan2dev(&dwc->chan), "%s: llp=0x%llx\n", __func__, 446 dev_vdbg(chan2dev(&dwc->chan), "%s: llp=0x%llx\n", __func__,
447 (unsigned long long)llp); 447 (unsigned long long)llp);
448 448
449 list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) { 449 list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
450 /* Initial residue value */ 450 /* Initial residue value */
451 dwc->residue = desc->total_len; 451 dwc->residue = desc->total_len;
452 452
453 /* Check first descriptor's address */ 453 /* Check first descriptor's address */
454 if (desc->txd.phys == llp) { 454 if (desc->txd.phys == llp) {
455 spin_unlock_irqrestore(&dwc->lock, flags); 455 spin_unlock_irqrestore(&dwc->lock, flags);
456 return; 456 return;
457 } 457 }
458 458
459 /* Check first descriptor's llp */ 459 /* Check first descriptor's llp */
460 if (desc->lli.llp == llp) { 460 if (desc->lli.llp == llp) {
461 /* This one is currently in progress */ 461 /* This one is currently in progress */
462 dwc->residue -= dwc_get_sent(dwc); 462 dwc->residue -= dwc_get_sent(dwc);
463 spin_unlock_irqrestore(&dwc->lock, flags); 463 spin_unlock_irqrestore(&dwc->lock, flags);
464 return; 464 return;
465 } 465 }
466 466
467 dwc->residue -= desc->len; 467 dwc->residue -= desc->len;
468 list_for_each_entry(child, &desc->tx_list, desc_node) { 468 list_for_each_entry(child, &desc->tx_list, desc_node) {
469 if (child->lli.llp == llp) { 469 if (child->lli.llp == llp) {
470 /* Currently in progress */ 470 /* Currently in progress */
471 dwc->residue -= dwc_get_sent(dwc); 471 dwc->residue -= dwc_get_sent(dwc);
472 spin_unlock_irqrestore(&dwc->lock, flags); 472 spin_unlock_irqrestore(&dwc->lock, flags);
473 return; 473 return;
474 } 474 }
475 dwc->residue -= child->len; 475 dwc->residue -= child->len;
476 } 476 }
477 477
478 /* 478 /*
479 * No descriptors so far seem to be in progress, i.e. 479 * No descriptors so far seem to be in progress, i.e.
480 * this one must be done. 480 * this one must be done.
481 */ 481 */
482 spin_unlock_irqrestore(&dwc->lock, flags); 482 spin_unlock_irqrestore(&dwc->lock, flags);
483 dwc_descriptor_complete(dwc, desc, true); 483 dwc_descriptor_complete(dwc, desc, true);
484 spin_lock_irqsave(&dwc->lock, flags); 484 spin_lock_irqsave(&dwc->lock, flags);
485 } 485 }
486 486
487 dev_err(chan2dev(&dwc->chan), 487 dev_err(chan2dev(&dwc->chan),
488 "BUG: All descriptors done, but channel not idle!\n"); 488 "BUG: All descriptors done, but channel not idle!\n");
489 489
490 /* Try to continue after resetting the channel... */ 490 /* Try to continue after resetting the channel... */
491 dwc_chan_disable(dw, dwc); 491 dwc_chan_disable(dw, dwc);
492 492
493 if (!list_empty(&dwc->queue)) { 493 if (!list_empty(&dwc->queue)) {
494 list_move(dwc->queue.next, &dwc->active_list); 494 list_move(dwc->queue.next, &dwc->active_list);
495 dwc_dostart(dwc, dwc_first_active(dwc)); 495 dwc_dostart(dwc, dwc_first_active(dwc));
496 } 496 }
497 spin_unlock_irqrestore(&dwc->lock, flags); 497 spin_unlock_irqrestore(&dwc->lock, flags);
498 } 498 }
499 499
500 static inline void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli) 500 static inline void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli)
501 { 501 {
502 dev_crit(chan2dev(&dwc->chan), " desc: s0x%x d0x%x l0x%x c0x%x:%x\n", 502 dev_crit(chan2dev(&dwc->chan), " desc: s0x%x d0x%x l0x%x c0x%x:%x\n",
503 lli->sar, lli->dar, lli->llp, lli->ctlhi, lli->ctllo); 503 lli->sar, lli->dar, lli->llp, lli->ctlhi, lli->ctllo);
504 } 504 }
505 505
506 static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc) 506 static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
507 { 507 {
508 struct dw_desc *bad_desc; 508 struct dw_desc *bad_desc;
509 struct dw_desc *child; 509 struct dw_desc *child;
510 unsigned long flags; 510 unsigned long flags;
511 511
512 dwc_scan_descriptors(dw, dwc); 512 dwc_scan_descriptors(dw, dwc);
513 513
514 spin_lock_irqsave(&dwc->lock, flags); 514 spin_lock_irqsave(&dwc->lock, flags);
515 515
516 /* 516 /*
517 * The descriptor currently at the head of the active list is 517 * The descriptor currently at the head of the active list is
518 * borked. Since we don't have any way to report errors, we'll 518 * borked. Since we don't have any way to report errors, we'll
519 * just have to scream loudly and try to carry on. 519 * just have to scream loudly and try to carry on.
520 */ 520 */
521 bad_desc = dwc_first_active(dwc); 521 bad_desc = dwc_first_active(dwc);
522 list_del_init(&bad_desc->desc_node); 522 list_del_init(&bad_desc->desc_node);
523 list_move(dwc->queue.next, dwc->active_list.prev); 523 list_move(dwc->queue.next, dwc->active_list.prev);
524 524
525 /* Clear the error flag and try to restart the controller */ 525 /* Clear the error flag and try to restart the controller */
526 dma_writel(dw, CLEAR.ERROR, dwc->mask); 526 dma_writel(dw, CLEAR.ERROR, dwc->mask);
527 if (!list_empty(&dwc->active_list)) 527 if (!list_empty(&dwc->active_list))
528 dwc_dostart(dwc, dwc_first_active(dwc)); 528 dwc_dostart(dwc, dwc_first_active(dwc));
529 529
530 /* 530 /*
531 * WARN may seem harsh, but since this only happens 531 * WARN may seem harsh, but since this only happens
532 * when someone submits a bad physical address in a 532 * when someone submits a bad physical address in a
533 * descriptor, we should consider ourselves lucky that the 533 * descriptor, we should consider ourselves lucky that the
534 * controller flagged an error instead of scribbling over 534 * controller flagged an error instead of scribbling over
535 * random memory locations. 535 * random memory locations.
536 */ 536 */
537 dev_WARN(chan2dev(&dwc->chan), "Bad descriptor submitted for DMA!\n" 537 dev_WARN(chan2dev(&dwc->chan), "Bad descriptor submitted for DMA!\n"
538 " cookie: %d\n", bad_desc->txd.cookie); 538 " cookie: %d\n", bad_desc->txd.cookie);
539 dwc_dump_lli(dwc, &bad_desc->lli); 539 dwc_dump_lli(dwc, &bad_desc->lli);
540 list_for_each_entry(child, &bad_desc->tx_list, desc_node) 540 list_for_each_entry(child, &bad_desc->tx_list, desc_node)
541 dwc_dump_lli(dwc, &child->lli); 541 dwc_dump_lli(dwc, &child->lli);
542 542
543 spin_unlock_irqrestore(&dwc->lock, flags); 543 spin_unlock_irqrestore(&dwc->lock, flags);
544 544
545 /* Pretend the descriptor completed successfully */ 545 /* Pretend the descriptor completed successfully */
546 dwc_descriptor_complete(dwc, bad_desc, true); 546 dwc_descriptor_complete(dwc, bad_desc, true);
547 } 547 }
548 548
549 /* --------------------- Cyclic DMA API extensions -------------------- */ 549 /* --------------------- Cyclic DMA API extensions -------------------- */
550 550
551 dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan) 551 dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan)
552 { 552 {
553 struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 553 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
554 return channel_readl(dwc, SAR); 554 return channel_readl(dwc, SAR);
555 } 555 }
556 EXPORT_SYMBOL(dw_dma_get_src_addr); 556 EXPORT_SYMBOL(dw_dma_get_src_addr);
557 557
558 dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan) 558 dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan)
559 { 559 {
560 struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 560 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
561 return channel_readl(dwc, DAR); 561 return channel_readl(dwc, DAR);
562 } 562 }
563 EXPORT_SYMBOL(dw_dma_get_dst_addr); 563 EXPORT_SYMBOL(dw_dma_get_dst_addr);
564 564
565 /* Called with dwc->lock held and all DMAC interrupts disabled */ 565 /* Called with dwc->lock held and all DMAC interrupts disabled */
566 static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc, 566 static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
567 u32 status_err, u32 status_xfer) 567 u32 status_err, u32 status_xfer)
568 { 568 {
569 unsigned long flags; 569 unsigned long flags;
570 570
571 if (dwc->mask) { 571 if (dwc->mask) {
572 void (*callback)(void *param); 572 void (*callback)(void *param);
573 void *callback_param; 573 void *callback_param;
574 574
575 dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n", 575 dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n",
576 channel_readl(dwc, LLP)); 576 channel_readl(dwc, LLP));
577 577
578 callback = dwc->cdesc->period_callback; 578 callback = dwc->cdesc->period_callback;
579 callback_param = dwc->cdesc->period_callback_param; 579 callback_param = dwc->cdesc->period_callback_param;
580 580
581 if (callback) 581 if (callback)
582 callback(callback_param); 582 callback(callback_param);
583 } 583 }
584 584
585 /* 585 /*
586 * Error and transfer complete are highly unlikely, and will most 586 * Error and transfer complete are highly unlikely, and will most
587 * likely be due to a configuration error by the user. 587 * likely be due to a configuration error by the user.
588 */ 588 */
589 if (unlikely(status_err & dwc->mask) || 589 if (unlikely(status_err & dwc->mask) ||
590 unlikely(status_xfer & dwc->mask)) { 590 unlikely(status_xfer & dwc->mask)) {
591 int i; 591 int i;
592 592
593 dev_err(chan2dev(&dwc->chan), "cyclic DMA unexpected %s " 593 dev_err(chan2dev(&dwc->chan), "cyclic DMA unexpected %s "
594 "interrupt, stopping DMA transfer\n", 594 "interrupt, stopping DMA transfer\n",
595 status_xfer ? "xfer" : "error"); 595 status_xfer ? "xfer" : "error");
596 596
597 spin_lock_irqsave(&dwc->lock, flags); 597 spin_lock_irqsave(&dwc->lock, flags);
598 598
599 dwc_dump_chan_regs(dwc); 599 dwc_dump_chan_regs(dwc);
600 600
601 dwc_chan_disable(dw, dwc); 601 dwc_chan_disable(dw, dwc);
602 602
603 /* Make sure DMA does not restart by loading a new list */ 603 /* Make sure DMA does not restart by loading a new list */
604 channel_writel(dwc, LLP, 0); 604 channel_writel(dwc, LLP, 0);
605 channel_writel(dwc, CTL_LO, 0); 605 channel_writel(dwc, CTL_LO, 0);
606 channel_writel(dwc, CTL_HI, 0); 606 channel_writel(dwc, CTL_HI, 0);
607 607
608 dma_writel(dw, CLEAR.ERROR, dwc->mask); 608 dma_writel(dw, CLEAR.ERROR, dwc->mask);
609 dma_writel(dw, CLEAR.XFER, dwc->mask); 609 dma_writel(dw, CLEAR.XFER, dwc->mask);
610 610
611 for (i = 0; i < dwc->cdesc->periods; i++) 611 for (i = 0; i < dwc->cdesc->periods; i++)
612 dwc_dump_lli(dwc, &dwc->cdesc->desc[i]->lli); 612 dwc_dump_lli(dwc, &dwc->cdesc->desc[i]->lli);
613 613
614 spin_unlock_irqrestore(&dwc->lock, flags); 614 spin_unlock_irqrestore(&dwc->lock, flags);
615 } 615 }
616 } 616 }
617 617
618 /* ------------------------------------------------------------------------- */ 618 /* ------------------------------------------------------------------------- */
619 619
620 static void dw_dma_tasklet(unsigned long data) 620 static void dw_dma_tasklet(unsigned long data)
621 { 621 {
622 struct dw_dma *dw = (struct dw_dma *)data; 622 struct dw_dma *dw = (struct dw_dma *)data;
623 struct dw_dma_chan *dwc; 623 struct dw_dma_chan *dwc;
624 u32 status_xfer; 624 u32 status_xfer;
625 u32 status_err; 625 u32 status_err;
626 int i; 626 int i;
627 627
628 status_xfer = dma_readl(dw, RAW.XFER); 628 status_xfer = dma_readl(dw, RAW.XFER);
629 status_err = dma_readl(dw, RAW.ERROR); 629 status_err = dma_readl(dw, RAW.ERROR);
630 630
631 dev_vdbg(dw->dma.dev, "%s: status_err=%x\n", __func__, status_err); 631 dev_vdbg(dw->dma.dev, "%s: status_err=%x\n", __func__, status_err);
632 632
633 for (i = 0; i < dw->dma.chancnt; i++) { 633 for (i = 0; i < dw->dma.chancnt; i++) {
634 dwc = &dw->chan[i]; 634 dwc = &dw->chan[i];
635 if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) 635 if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags))
636 dwc_handle_cyclic(dw, dwc, status_err, status_xfer); 636 dwc_handle_cyclic(dw, dwc, status_err, status_xfer);
637 else if (status_err & (1 << i)) 637 else if (status_err & (1 << i))
638 dwc_handle_error(dw, dwc); 638 dwc_handle_error(dw, dwc);
639 else if (status_xfer & (1 << i)) 639 else if (status_xfer & (1 << i))
640 dwc_scan_descriptors(dw, dwc); 640 dwc_scan_descriptors(dw, dwc);
641 } 641 }
642 642
643 /* 643 /*
644 * Re-enable interrupts. 644 * Re-enable interrupts.
645 */ 645 */
646 channel_set_bit(dw, MASK.XFER, dw->all_chan_mask); 646 channel_set_bit(dw, MASK.XFER, dw->all_chan_mask);
647 channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask); 647 channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask);
648 } 648 }
649 649
650 static irqreturn_t dw_dma_interrupt(int irq, void *dev_id) 650 static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
651 { 651 {
652 struct dw_dma *dw = dev_id; 652 struct dw_dma *dw = dev_id;
653 u32 status = dma_readl(dw, STATUS_INT); 653 u32 status = dma_readl(dw, STATUS_INT);
654 654
655 dev_vdbg(dw->dma.dev, "%s: status=0x%x\n", __func__, status); 655 dev_vdbg(dw->dma.dev, "%s: status=0x%x\n", __func__, status);
656 656
657 /* Check if we have any interrupt from the DMAC */ 657 /* Check if we have any interrupt from the DMAC */
658 if (!status) 658 if (!status)
659 return IRQ_NONE; 659 return IRQ_NONE;
660 660
661 /* 661 /*
662 * Just disable the interrupts. We'll turn them back on in the 662 * Just disable the interrupts. We'll turn them back on in the
663 * softirq handler. 663 * softirq handler.
664 */ 664 */
665 channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask); 665 channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
666 channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask); 666 channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
667 667
668 status = dma_readl(dw, STATUS_INT); 668 status = dma_readl(dw, STATUS_INT);
669 if (status) { 669 if (status) {
670 dev_err(dw->dma.dev, 670 dev_err(dw->dma.dev,
671 "BUG: Unexpected interrupts pending: 0x%x\n", 671 "BUG: Unexpected interrupts pending: 0x%x\n",
672 status); 672 status);
673 673
674 /* Try to recover */ 674 /* Try to recover */
675 channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1); 675 channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1);
676 channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1); 676 channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1);
677 channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1); 677 channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1);
678 channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1); 678 channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1);
679 } 679 }
680 680
681 tasklet_schedule(&dw->tasklet); 681 tasklet_schedule(&dw->tasklet);
682 682
683 return IRQ_HANDLED; 683 return IRQ_HANDLED;
684 } 684 }
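
dw_dma_interrupt() and dw_dma_tasklet() form the usual top-half/bottom-half split: the hard IRQ handler checks STATUS_INT, masks the per-channel XFER/ERROR interrupts and schedules the tasklet, which dispatches each channel to the cyclic, error or scan path and unmasks the interrupts when it is done. The pairing is wired up in the driver's probe path, which lies outside this hunk; the sketch below only illustrates that wiring, with the variable names assumed from context.

#include <linux/interrupt.h>

/* Illustrative wiring only -- the real registration lives in the probe code,
 * not in this hunk. "dw" and "irq" are assumed to come from that context. */
static int my_wire_up_irq(struct dw_dma *dw, int irq)
{
	/* bottom half: dw_dma_tasklet() receives the dw_dma pointer as data */
	tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw);

	/* top half: dw_dma_interrupt() masks interrupts and schedules the tasklet */
	return request_irq(irq, dw_dma_interrupt, IRQF_SHARED, "dw_dmac", dw);
}
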
685 685
686 /*----------------------------------------------------------------------*/ 686 /*----------------------------------------------------------------------*/
687 687
688 static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx) 688 static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
689 { 689 {
690 struct dw_desc *desc = txd_to_dw_desc(tx); 690 struct dw_desc *desc = txd_to_dw_desc(tx);
691 struct dw_dma_chan *dwc = to_dw_dma_chan(tx->chan); 691 struct dw_dma_chan *dwc = to_dw_dma_chan(tx->chan);
692 dma_cookie_t cookie; 692 dma_cookie_t cookie;
693 unsigned long flags; 693 unsigned long flags;
694 694
695 spin_lock_irqsave(&dwc->lock, flags); 695 spin_lock_irqsave(&dwc->lock, flags);
696 cookie = dma_cookie_assign(tx); 696 cookie = dma_cookie_assign(tx);
697 697
698 /* 698 /*
699 * REVISIT: We should attempt to chain as many descriptors as 699 * REVISIT: We should attempt to chain as many descriptors as
700 * possible, perhaps even appending to those already submitted 700 * possible, perhaps even appending to those already submitted
701 * for DMA. But this is hard to do in a race-free manner. 701 * for DMA. But this is hard to do in a race-free manner.
702 */ 702 */
703 if (list_empty(&dwc->active_list)) { 703 if (list_empty(&dwc->active_list)) {
704 dev_vdbg(chan2dev(tx->chan), "%s: started %u\n", __func__, 704 dev_vdbg(chan2dev(tx->chan), "%s: started %u\n", __func__,
705 desc->txd.cookie); 705 desc->txd.cookie);
706 list_add_tail(&desc->desc_node, &dwc->active_list); 706 list_add_tail(&desc->desc_node, &dwc->active_list);
707 dwc_dostart(dwc, dwc_first_active(dwc)); 707 dwc_dostart(dwc, dwc_first_active(dwc));
708 } else { 708 } else {
709 dev_vdbg(chan2dev(tx->chan), "%s: queued %u\n", __func__, 709 dev_vdbg(chan2dev(tx->chan), "%s: queued %u\n", __func__,
710 desc->txd.cookie); 710 desc->txd.cookie);
711 711
712 list_add_tail(&desc->desc_node, &dwc->queue); 712 list_add_tail(&desc->desc_node, &dwc->queue);
713 } 713 }
714 714
715 spin_unlock_irqrestore(&dwc->lock, flags); 715 spin_unlock_irqrestore(&dwc->lock, flags);
716 716
717 return cookie; 717 return cookie;
718 } 718 }
719 719
720 static struct dma_async_tx_descriptor * 720 static struct dma_async_tx_descriptor *
721 dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, 721 dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
722 size_t len, unsigned long flags) 722 size_t len, unsigned long flags)
723 { 723 {
724 struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 724 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
725 struct dw_dma *dw = to_dw_dma(chan->device); 725 struct dw_dma *dw = to_dw_dma(chan->device);
726 struct dw_desc *desc; 726 struct dw_desc *desc;
727 struct dw_desc *first; 727 struct dw_desc *first;
728 struct dw_desc *prev; 728 struct dw_desc *prev;
729 size_t xfer_count; 729 size_t xfer_count;
730 size_t offset; 730 size_t offset;
731 unsigned int src_width; 731 unsigned int src_width;
732 unsigned int dst_width; 732 unsigned int dst_width;
733 unsigned int data_width; 733 unsigned int data_width;
734 u32 ctllo; 734 u32 ctllo;
735 735
736 dev_vdbg(chan2dev(chan), 736 dev_vdbg(chan2dev(chan),
737 "%s: d0x%llx s0x%llx l0x%zx f0x%lx\n", __func__, 737 "%s: d0x%llx s0x%llx l0x%zx f0x%lx\n", __func__,
738 (unsigned long long)dest, (unsigned long long)src, 738 (unsigned long long)dest, (unsigned long long)src,
739 len, flags); 739 len, flags);
740 740
741 if (unlikely(!len)) { 741 if (unlikely(!len)) {
742 dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__); 742 dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__);
743 return NULL; 743 return NULL;
744 } 744 }
745 745
746 dwc->direction = DMA_MEM_TO_MEM; 746 dwc->direction = DMA_MEM_TO_MEM;
747 747
748 data_width = min_t(unsigned int, dw->data_width[dwc->src_master], 748 data_width = min_t(unsigned int, dw->data_width[dwc->src_master],
749 dw->data_width[dwc->dst_master]); 749 dw->data_width[dwc->dst_master]);
750 750
751 src_width = dst_width = min_t(unsigned int, data_width, 751 src_width = dst_width = min_t(unsigned int, data_width,
752 dwc_fast_fls(src | dest | len)); 752 dwc_fast_fls(src | dest | len));
753 753
754 ctllo = DWC_DEFAULT_CTLLO(chan) 754 ctllo = DWC_DEFAULT_CTLLO(chan)
755 | DWC_CTLL_DST_WIDTH(dst_width) 755 | DWC_CTLL_DST_WIDTH(dst_width)
756 | DWC_CTLL_SRC_WIDTH(src_width) 756 | DWC_CTLL_SRC_WIDTH(src_width)
757 | DWC_CTLL_DST_INC 757 | DWC_CTLL_DST_INC
758 | DWC_CTLL_SRC_INC 758 | DWC_CTLL_SRC_INC
759 | DWC_CTLL_FC_M2M; 759 | DWC_CTLL_FC_M2M;
760 prev = first = NULL; 760 prev = first = NULL;
761 761
762 for (offset = 0; offset < len; offset += xfer_count << src_width) { 762 for (offset = 0; offset < len; offset += xfer_count << src_width) {
763 xfer_count = min_t(size_t, (len - offset) >> src_width, 763 xfer_count = min_t(size_t, (len - offset) >> src_width,
764 dwc->block_size); 764 dwc->block_size);
765 765
766 desc = dwc_desc_get(dwc); 766 desc = dwc_desc_get(dwc);
767 if (!desc) 767 if (!desc)
768 goto err_desc_get; 768 goto err_desc_get;
769 769
770 desc->lli.sar = src + offset; 770 desc->lli.sar = src + offset;
771 desc->lli.dar = dest + offset; 771 desc->lli.dar = dest + offset;
772 desc->lli.ctllo = ctllo; 772 desc->lli.ctllo = ctllo;
773 desc->lli.ctlhi = xfer_count; 773 desc->lli.ctlhi = xfer_count;
774 desc->len = xfer_count << src_width; 774 desc->len = xfer_count << src_width;
775 775
776 if (!first) { 776 if (!first) {
777 first = desc; 777 first = desc;
778 } else { 778 } else {
779 prev->lli.llp = desc->txd.phys; 779 prev->lli.llp = desc->txd.phys;
780 list_add_tail(&desc->desc_node, 780 list_add_tail(&desc->desc_node,
781 &first->tx_list); 781 &first->tx_list);
782 } 782 }
783 prev = desc; 783 prev = desc;
784 } 784 }
785 785
786 if (flags & DMA_PREP_INTERRUPT) 786 if (flags & DMA_PREP_INTERRUPT)
787 /* Trigger interrupt after last block */ 787 /* Trigger interrupt after last block */
788 prev->lli.ctllo |= DWC_CTLL_INT_EN; 788 prev->lli.ctllo |= DWC_CTLL_INT_EN;
789 789
790 prev->lli.llp = 0; 790 prev->lli.llp = 0;
791 first->txd.flags = flags; 791 first->txd.flags = flags;
792 first->total_len = len; 792 first->total_len = len;
793 793
794 return &first->txd; 794 return &first->txd;
795 795
796 err_desc_get: 796 err_desc_get:
797 dwc_desc_put(dwc, first); 797 dwc_desc_put(dwc, first);
798 return NULL; 798 return NULL;
799 } 799 }
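
dwc_prep_dma_memcpy() splits one memcpy request into hardware blocks: the transfer width is the widest power of two that divides src, dest and len (further limited by the masters' data width in the driver, ignored here), and each linked-list item moves at most dwc->block_size items of that width. The standalone sketch below reproduces only that chunking arithmetic with made-up numbers, to show how many descriptors a request ends up needing.

#include <stdio.h>
#include <stddef.h>

/* Largest power-of-two width, as a shift capped at 3 (8 bytes), that divides
 * the OR of src, dest and len -- a local stand-in for dwc_fast_fls(). */
static unsigned int xfer_width(size_t mask)
{
	if (!(mask & 7))
		return 3;
	if (!(mask & 3))
		return 2;
	if (!(mask & 1))
		return 1;
	return 0;
}

int main(void)
{
	size_t src = 0x1000, dest = 0x9000, len = 65536;
	size_t block_size = 4095;	/* assumed per-block item limit */
	unsigned int width = xfer_width(src | dest | len);
	size_t offset, xfer_count, descs = 0;

	for (offset = 0; offset < len; offset += xfer_count << width) {
		xfer_count = (len - offset) >> width;
		if (xfer_count > block_size)
			xfer_count = block_size;
		descs++;		/* one lli/descriptor per block */
	}
	printf("%u-byte width, %zu descriptors\n", 1u << width, descs);
	return 0;
}
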
800 800
801 static struct dma_async_tx_descriptor * 801 static struct dma_async_tx_descriptor *
802 dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, 802 dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
803 unsigned int sg_len, enum dma_transfer_direction direction, 803 unsigned int sg_len, enum dma_transfer_direction direction,
804 unsigned long flags, void *context) 804 unsigned long flags, void *context)
805 { 805 {
806 struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 806 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
807 struct dw_dma *dw = to_dw_dma(chan->device); 807 struct dw_dma *dw = to_dw_dma(chan->device);
808 struct dma_slave_config *sconfig = &dwc->dma_sconfig; 808 struct dma_slave_config *sconfig = &dwc->dma_sconfig;
809 struct dw_desc *prev; 809 struct dw_desc *prev;
810 struct dw_desc *first; 810 struct dw_desc *first;
811 u32 ctllo; 811 u32 ctllo;
812 dma_addr_t reg; 812 dma_addr_t reg;
813 unsigned int reg_width; 813 unsigned int reg_width;
814 unsigned int mem_width; 814 unsigned int mem_width;
815 unsigned int data_width; 815 unsigned int data_width;
816 unsigned int i; 816 unsigned int i;
817 struct scatterlist *sg; 817 struct scatterlist *sg;
818 size_t total_len = 0; 818 size_t total_len = 0;
819 819
820 dev_vdbg(chan2dev(chan), "%s\n", __func__); 820 dev_vdbg(chan2dev(chan), "%s\n", __func__);
821 821
822 if (unlikely(!is_slave_direction(direction) || !sg_len)) 822 if (unlikely(!is_slave_direction(direction) || !sg_len))
823 return NULL; 823 return NULL;
824 824
825 dwc->direction = direction; 825 dwc->direction = direction;
826 826
827 prev = first = NULL; 827 prev = first = NULL;
828 828
829 switch (direction) { 829 switch (direction) {
830 case DMA_MEM_TO_DEV: 830 case DMA_MEM_TO_DEV:
831 reg_width = __fls(sconfig->dst_addr_width); 831 reg_width = __fls(sconfig->dst_addr_width);
832 reg = sconfig->dst_addr; 832 reg = sconfig->dst_addr;
833 ctllo = (DWC_DEFAULT_CTLLO(chan) 833 ctllo = (DWC_DEFAULT_CTLLO(chan)
834 | DWC_CTLL_DST_WIDTH(reg_width) 834 | DWC_CTLL_DST_WIDTH(reg_width)
835 | DWC_CTLL_DST_FIX 835 | DWC_CTLL_DST_FIX
836 | DWC_CTLL_SRC_INC); 836 | DWC_CTLL_SRC_INC);
837 837
838 ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_M2P) : 838 ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
839 DWC_CTLL_FC(DW_DMA_FC_D_M2P); 839 DWC_CTLL_FC(DW_DMA_FC_D_M2P);
840 840
841 data_width = dw->data_width[dwc->src_master]; 841 data_width = dw->data_width[dwc->src_master];
842 842
843 for_each_sg(sgl, sg, sg_len, i) { 843 for_each_sg(sgl, sg, sg_len, i) {
844 struct dw_desc *desc; 844 struct dw_desc *desc;
845 u32 len, dlen, mem; 845 u32 len, dlen, mem;
846 846
847 mem = sg_dma_address(sg); 847 mem = sg_dma_address(sg);
848 len = sg_dma_len(sg); 848 len = sg_dma_len(sg);
849 849
850 mem_width = min_t(unsigned int, 850 mem_width = min_t(unsigned int,
851 data_width, dwc_fast_fls(mem | len)); 851 data_width, dwc_fast_fls(mem | len));
852 852
853 slave_sg_todev_fill_desc: 853 slave_sg_todev_fill_desc:
854 desc = dwc_desc_get(dwc); 854 desc = dwc_desc_get(dwc);
855 if (!desc) { 855 if (!desc) {
856 dev_err(chan2dev(chan), 856 dev_err(chan2dev(chan),
857 "not enough descriptors available\n"); 857 "not enough descriptors available\n");
858 goto err_desc_get; 858 goto err_desc_get;
859 } 859 }
860 860
861 desc->lli.sar = mem; 861 desc->lli.sar = mem;
862 desc->lli.dar = reg; 862 desc->lli.dar = reg;
863 desc->lli.ctllo = ctllo | DWC_CTLL_SRC_WIDTH(mem_width); 863 desc->lli.ctllo = ctllo | DWC_CTLL_SRC_WIDTH(mem_width);
864 if ((len >> mem_width) > dwc->block_size) { 864 if ((len >> mem_width) > dwc->block_size) {
865 dlen = dwc->block_size << mem_width; 865 dlen = dwc->block_size << mem_width;
866 mem += dlen; 866 mem += dlen;
867 len -= dlen; 867 len -= dlen;
868 } else { 868 } else {
869 dlen = len; 869 dlen = len;
870 len = 0; 870 len = 0;
871 } 871 }
872 872
873 desc->lli.ctlhi = dlen >> mem_width; 873 desc->lli.ctlhi = dlen >> mem_width;
874 desc->len = dlen; 874 desc->len = dlen;
875 875
876 if (!first) { 876 if (!first) {
877 first = desc; 877 first = desc;
878 } else { 878 } else {
879 prev->lli.llp = desc->txd.phys; 879 prev->lli.llp = desc->txd.phys;
880 list_add_tail(&desc->desc_node, 880 list_add_tail(&desc->desc_node,
881 &first->tx_list); 881 &first->tx_list);
882 } 882 }
883 prev = desc; 883 prev = desc;
884 total_len += dlen; 884 total_len += dlen;
885 885
886 if (len) 886 if (len)
887 goto slave_sg_todev_fill_desc; 887 goto slave_sg_todev_fill_desc;
888 } 888 }
889 break; 889 break;
890 case DMA_DEV_TO_MEM: 890 case DMA_DEV_TO_MEM:
891 reg_width = __fls(sconfig->src_addr_width); 891 reg_width = __fls(sconfig->src_addr_width);
892 reg = sconfig->src_addr; 892 reg = sconfig->src_addr;
893 ctllo = (DWC_DEFAULT_CTLLO(chan) 893 ctllo = (DWC_DEFAULT_CTLLO(chan)
894 | DWC_CTLL_SRC_WIDTH(reg_width) 894 | DWC_CTLL_SRC_WIDTH(reg_width)
895 | DWC_CTLL_DST_INC 895 | DWC_CTLL_DST_INC
896 | DWC_CTLL_SRC_FIX); 896 | DWC_CTLL_SRC_FIX);
897 897
898 ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_P2M) : 898 ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
899 DWC_CTLL_FC(DW_DMA_FC_D_P2M); 899 DWC_CTLL_FC(DW_DMA_FC_D_P2M);
900 900
901 data_width = dw->data_width[dwc->dst_master]; 901 data_width = dw->data_width[dwc->dst_master];
902 902
903 for_each_sg(sgl, sg, sg_len, i) { 903 for_each_sg(sgl, sg, sg_len, i) {
904 struct dw_desc *desc; 904 struct dw_desc *desc;
905 u32 len, dlen, mem; 905 u32 len, dlen, mem;
906 906
907 mem = sg_dma_address(sg); 907 mem = sg_dma_address(sg);
908 len = sg_dma_len(sg); 908 len = sg_dma_len(sg);
909 909
910 mem_width = min_t(unsigned int, 910 mem_width = min_t(unsigned int,
911 data_width, dwc_fast_fls(mem | len)); 911 data_width, dwc_fast_fls(mem | len));
912 912
913 slave_sg_fromdev_fill_desc: 913 slave_sg_fromdev_fill_desc:
914 desc = dwc_desc_get(dwc); 914 desc = dwc_desc_get(dwc);
915 if (!desc) { 915 if (!desc) {
916 dev_err(chan2dev(chan), 916 dev_err(chan2dev(chan),
917 "not enough descriptors available\n"); 917 "not enough descriptors available\n");
918 goto err_desc_get; 918 goto err_desc_get;
919 } 919 }
920 920
921 desc->lli.sar = reg; 921 desc->lli.sar = reg;
922 desc->lli.dar = mem; 922 desc->lli.dar = mem;
923 desc->lli.ctllo = ctllo | DWC_CTLL_DST_WIDTH(mem_width); 923 desc->lli.ctllo = ctllo | DWC_CTLL_DST_WIDTH(mem_width);
924 if ((len >> reg_width) > dwc->block_size) { 924 if ((len >> reg_width) > dwc->block_size) {
925 dlen = dwc->block_size << reg_width; 925 dlen = dwc->block_size << reg_width;
926 mem += dlen; 926 mem += dlen;
927 len -= dlen; 927 len -= dlen;
928 } else { 928 } else {
929 dlen = len; 929 dlen = len;
930 len = 0; 930 len = 0;
931 } 931 }
932 desc->lli.ctlhi = dlen >> reg_width; 932 desc->lli.ctlhi = dlen >> reg_width;
933 desc->len = dlen; 933 desc->len = dlen;
934 934
935 if (!first) { 935 if (!first) {
936 first = desc; 936 first = desc;
937 } else { 937 } else {
938 prev->lli.llp = desc->txd.phys; 938 prev->lli.llp = desc->txd.phys;
939 list_add_tail(&desc->desc_node, 939 list_add_tail(&desc->desc_node,
940 &first->tx_list); 940 &first->tx_list);
941 } 941 }
942 prev = desc; 942 prev = desc;
943 total_len += dlen; 943 total_len += dlen;
944 944
945 if (len) 945 if (len)
946 goto slave_sg_fromdev_fill_desc; 946 goto slave_sg_fromdev_fill_desc;
947 } 947 }
948 break; 948 break;
949 default: 949 default:
950 return NULL; 950 return NULL;
951 } 951 }
952 952
953 if (flags & DMA_PREP_INTERRUPT) 953 if (flags & DMA_PREP_INTERRUPT)
954 /* Trigger interrupt after last block */ 954 /* Trigger interrupt after last block */
955 prev->lli.ctllo |= DWC_CTLL_INT_EN; 955 prev->lli.ctllo |= DWC_CTLL_INT_EN;
956 956
957 prev->lli.llp = 0; 957 prev->lli.llp = 0;
958 first->total_len = total_len; 958 first->total_len = total_len;
959 959
960 return &first->txd; 960 return &first->txd;
961 961
962 err_desc_get: 962 err_desc_get:
963 dwc_desc_put(dwc, first); 963 dwc_desc_put(dwc, first);
964 return NULL; 964 return NULL;
965 } 965 }
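
Clients do not call dwc_prep_slave_sg() directly; they reach it through the generic dmaengine wrappers. A minimal sketch of that client side follows: MY_FIFO_ADDR, the callback and the burst value are illustrative placeholders, and error handling is trimmed to keep the control flow visible.

#include <linux/dmaengine.h>
#include <linux/scatterlist.h>

#define MY_FIFO_ADDR	0xff000040	/* hypothetical device FIFO address */

static void my_dma_done(void *param)
{
	/* invoked from dwc_descriptor_complete() via the tasklet */
}

static int my_start_tx(struct dma_chan *chan, struct scatterlist *sgl,
		       unsigned int sg_len)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_MEM_TO_DEV,
		.dst_addr	= MY_FIFO_ADDR,
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst	= 8,	/* convert_burst() turns this into 2 */
	};
	struct dma_async_tx_descriptor *desc;
	int ret;

	ret = dmaengine_slave_config(chan, &cfg);	/* -> set_runtime_config() */
	if (ret)
		return ret;

	desc = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_MEM_TO_DEV,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -ENOMEM;

	desc->callback = my_dma_done;
	dmaengine_submit(desc);			/* -> dwc_tx_submit() */
	dma_async_issue_pending(chan);		/* -> dwc_issue_pending() */
	return 0;
}
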
966 966
967 /* 967 /*
968 * Fix sconfig's burst size according to dw_dmac. We need to convert them as: 968 * Fix sconfig's burst size according to dw_dmac. We need to convert them as:
969 * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3. 969 * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3.
970 * 970 *
971 * NOTE: burst size 2 is not supported by controller. 971 * NOTE: burst size 2 is not supported by controller.
972 * 972 *
973 * The conversion is done by taking the position of the most significant set bit: fls(n) - 2. 973 * The conversion is done by taking the position of the most significant set bit: fls(n) - 2.
974 */ 974 */
975 static inline void convert_burst(u32 *maxburst) 975 static inline void convert_burst(u32 *maxburst)
976 { 976 {
977 if (*maxburst > 1) 977 if (*maxburst > 1)
978 *maxburst = fls(*maxburst) - 2; 978 *maxburst = fls(*maxburst) - 2;
979 else 979 else
980 *maxburst = 0; 980 *maxburst = 0;
981 } 981 }
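
convert_burst() collapses the client-visible burst length into the two-bit MSIZE encoding described in the comment above. A small standalone check of the 1/4/8/16 mapping, with a local fls() standing in for the kernel helper:

#include <stdio.h>

/* Local stand-in for the kernel's fls(): 1-based position of the highest
 * set bit, 0 when no bit is set. */
static int fls_local(unsigned int x)
{
	int r = 0;

	while (x) {
		r++;
		x >>= 1;
	}
	return r;
}

static unsigned int convert_burst(unsigned int maxburst)
{
	return maxburst > 1 ? fls_local(maxburst) - 2 : 0;
}

int main(void)
{
	unsigned int bursts[] = { 1, 4, 8, 16 };
	unsigned int i;

	/* Expected: 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3 */
	for (i = 0; i < 4; i++)
		printf("%u -> %u\n", bursts[i], convert_burst(bursts[i]));
	return 0;
}
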
982 982
983 static int 983 static int
984 set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig) 984 set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
985 { 985 {
986 struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 986 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
987 987
988 /* Check if chan will be configured for slave transfers */ 988 /* Check if chan will be configured for slave transfers */
989 if (!is_slave_direction(sconfig->direction)) 989 if (!is_slave_direction(sconfig->direction))
990 return -EINVAL; 990 return -EINVAL;
991 991
992 memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig)); 992 memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig));
993 dwc->direction = sconfig->direction; 993 dwc->direction = sconfig->direction;
994 994
995 /* Take the request line from slave_id member */ 995 /* Take the request line from slave_id member */
996 if (is_request_line_unset(dwc)) 996 if (is_request_line_unset(dwc))
997 dwc->request_line = sconfig->slave_id; 997 dwc->request_line = sconfig->slave_id;
998 998
999 convert_burst(&dwc->dma_sconfig.src_maxburst); 999 convert_burst(&dwc->dma_sconfig.src_maxburst);
1000 convert_burst(&dwc->dma_sconfig.dst_maxburst); 1000 convert_burst(&dwc->dma_sconfig.dst_maxburst);
1001 1001
1002 return 0; 1002 return 0;
1003 } 1003 }
1004 1004
1005 static inline void dwc_chan_pause(struct dw_dma_chan *dwc) 1005 static inline void dwc_chan_pause(struct dw_dma_chan *dwc)
1006 { 1006 {
1007 u32 cfglo = channel_readl(dwc, CFG_LO); 1007 u32 cfglo = channel_readl(dwc, CFG_LO);
1008 unsigned int count = 20; /* timeout iterations */ 1008 unsigned int count = 20; /* timeout iterations */
1009 1009
1010 channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP); 1010 channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP);
1011 while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY) && count--) 1011 while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY) && count--)
1012 udelay(2); 1012 udelay(2);
1013 1013
1014 dwc->paused = true; 1014 dwc->paused = true;
1015 } 1015 }
1016 1016
1017 static inline void dwc_chan_resume(struct dw_dma_chan *dwc) 1017 static inline void dwc_chan_resume(struct dw_dma_chan *dwc)
1018 { 1018 {
1019 u32 cfglo = channel_readl(dwc, CFG_LO); 1019 u32 cfglo = channel_readl(dwc, CFG_LO);
1020 1020
1021 channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP); 1021 channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP);
1022 1022
1023 dwc->paused = false; 1023 dwc->paused = false;
1024 } 1024 }
1025 1025
1026 static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, 1026 static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
1027 unsigned long arg) 1027 unsigned long arg)
1028 { 1028 {
1029 struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 1029 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
1030 struct dw_dma *dw = to_dw_dma(chan->device); 1030 struct dw_dma *dw = to_dw_dma(chan->device);
1031 struct dw_desc *desc, *_desc; 1031 struct dw_desc *desc, *_desc;
1032 unsigned long flags; 1032 unsigned long flags;
1033 LIST_HEAD(list); 1033 LIST_HEAD(list);
1034 1034
1035 if (cmd == DMA_PAUSE) { 1035 if (cmd == DMA_PAUSE) {
1036 spin_lock_irqsave(&dwc->lock, flags); 1036 spin_lock_irqsave(&dwc->lock, flags);
1037 1037
1038 dwc_chan_pause(dwc); 1038 dwc_chan_pause(dwc);
1039 1039
1040 spin_unlock_irqrestore(&dwc->lock, flags); 1040 spin_unlock_irqrestore(&dwc->lock, flags);
1041 } else if (cmd == DMA_RESUME) { 1041 } else if (cmd == DMA_RESUME) {
1042 if (!dwc->paused) 1042 if (!dwc->paused)
1043 return 0; 1043 return 0;
1044 1044
1045 spin_lock_irqsave(&dwc->lock, flags); 1045 spin_lock_irqsave(&dwc->lock, flags);
1046 1046
1047 dwc_chan_resume(dwc); 1047 dwc_chan_resume(dwc);
1048 1048
1049 spin_unlock_irqrestore(&dwc->lock, flags); 1049 spin_unlock_irqrestore(&dwc->lock, flags);
1050 } else if (cmd == DMA_TERMINATE_ALL) { 1050 } else if (cmd == DMA_TERMINATE_ALL) {
1051 spin_lock_irqsave(&dwc->lock, flags); 1051 spin_lock_irqsave(&dwc->lock, flags);
1052 1052
1053 clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags); 1053 clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);
1054 1054
1055 dwc_chan_disable(dw, dwc); 1055 dwc_chan_disable(dw, dwc);
1056 1056
1057 dwc_chan_resume(dwc); 1057 dwc_chan_resume(dwc);
1058 1058
1059 /* active_list entries will end up before queued entries */ 1059 /* active_list entries will end up before queued entries */
1060 list_splice_init(&dwc->queue, &list); 1060 list_splice_init(&dwc->queue, &list);
1061 list_splice_init(&dwc->active_list, &list); 1061 list_splice_init(&dwc->active_list, &list);
1062 1062
1063 spin_unlock_irqrestore(&dwc->lock, flags); 1063 spin_unlock_irqrestore(&dwc->lock, flags);
1064 1064
1065 /* Flush all pending and queued descriptors */ 1065 /* Flush all pending and queued descriptors */
1066 list_for_each_entry_safe(desc, _desc, &list, desc_node) 1066 list_for_each_entry_safe(desc, _desc, &list, desc_node)
1067 dwc_descriptor_complete(dwc, desc, false); 1067 dwc_descriptor_complete(dwc, desc, false);
1068 } else if (cmd == DMA_SLAVE_CONFIG) { 1068 } else if (cmd == DMA_SLAVE_CONFIG) {
1069 return set_runtime_config(chan, (struct dma_slave_config *)arg); 1069 return set_runtime_config(chan, (struct dma_slave_config *)arg);
1070 } else { 1070 } else {
1071 return -ENXIO; 1071 return -ENXIO;
1072 } 1072 }
1073 1073
1074 return 0; 1074 return 0;
1075 } 1075 }
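
A dmaengine client never calls dwc_control() directly; the commands arrive through device_control via the generic wrappers. A hedged sketch of the pause/resume/terminate flow from the client side, with "chan" assumed to be a channel obtained earlier from dma_request_channel():

#include <linux/dmaengine.h>

static void my_pause_resume_stop(struct dma_chan *chan)
{
	dmaengine_pause(chan);		/* DMA_PAUSE: suspend the channel and
					 * wait for its FIFO to drain */

	/* ... inspect progress, reconfigure, etc. ... */

	dmaengine_resume(chan);		/* DMA_RESUME: clear CH_SUSP */

	dmaengine_terminate_all(chan);	/* DMA_TERMINATE_ALL: disable the channel
					 * and complete active + queued descriptors
					 * without invoking their callbacks */
}
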
1076 1076
1077 static inline u32 dwc_get_residue(struct dw_dma_chan *dwc) 1077 static inline u32 dwc_get_residue(struct dw_dma_chan *dwc)
1078 { 1078 {
1079 unsigned long flags; 1079 unsigned long flags;
1080 u32 residue; 1080 u32 residue;
1081 1081
1082 spin_lock_irqsave(&dwc->lock, flags); 1082 spin_lock_irqsave(&dwc->lock, flags);
1083 1083
1084 residue = dwc->residue; 1084 residue = dwc->residue;
1085 if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags) && residue) 1085 if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags) && residue)
1086 residue -= dwc_get_sent(dwc); 1086 residue -= dwc_get_sent(dwc);
1087 1087
1088 spin_unlock_irqrestore(&dwc->lock, flags); 1088 spin_unlock_irqrestore(&dwc->lock, flags);
1089 return residue; 1089 return residue;
1090 } 1090 }
1091 1091
1092 static enum dma_status 1092 static enum dma_status
1093 dwc_tx_status(struct dma_chan *chan, 1093 dwc_tx_status(struct dma_chan *chan,
1094 dma_cookie_t cookie, 1094 dma_cookie_t cookie,
1095 struct dma_tx_state *txstate) 1095 struct dma_tx_state *txstate)
1096 { 1096 {
1097 struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 1097 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
1098 enum dma_status ret; 1098 enum dma_status ret;
1099 1099
1100 ret = dma_cookie_status(chan, cookie, txstate); 1100 ret = dma_cookie_status(chan, cookie, txstate);
1101 if (ret == DMA_SUCCESS) 1101 if (ret == DMA_COMPLETE)
1102 return ret; 1102 return ret;
1103 1103
1104 dwc_scan_descriptors(to_dw_dma(chan->device), dwc); 1104 dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
1105 1105
1106 ret = dma_cookie_status(chan, cookie, txstate); 1106 ret = dma_cookie_status(chan, cookie, txstate);
1107 if (ret != DMA_SUCCESS) 1107 if (ret != DMA_COMPLETE)
1108 dma_set_residue(txstate, dwc_get_residue(dwc)); 1108 dma_set_residue(txstate, dwc_get_residue(dwc));
1109 1109
1110 if (dwc->paused && ret == DMA_IN_PROGRESS) 1110 if (dwc->paused && ret == DMA_IN_PROGRESS)
1111 return DMA_PAUSED; 1111 return DMA_PAUSED;
1112 1112
1113 return ret; 1113 return ret;
1114 } 1114 }
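
dwc_tx_status() is one of the call sites touched by the dma_complete merge: the cookie state DMA_SUCCESS is now spelled DMA_COMPLETE. From a client, the status and the residue maintained by dwc_get_residue() are read back as in the sketch below, where "chan" and "cookie" are assumed to come from the prep/submit path shown earlier.

#include <linux/dmaengine.h>

static void my_check_progress(struct dma_chan *chan, dma_cookie_t cookie)
{
	struct dma_tx_state state;
	enum dma_status status;

	status = dmaengine_tx_status(chan, cookie, &state);

	switch (status) {
	case DMA_COMPLETE:	/* was DMA_SUCCESS before this merge */
		/* transfer done, nothing left to report */
		break;
	case DMA_PAUSED:	/* channel suspended via dmaengine_pause() */
	case DMA_IN_PROGRESS:
		pr_info("bytes left: %u\n", state.residue);
		break;
	default:
		break;
	}
}
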
1115 1115
1116 static void dwc_issue_pending(struct dma_chan *chan) 1116 static void dwc_issue_pending(struct dma_chan *chan)
1117 { 1117 {
1118 struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 1118 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
1119 1119
1120 if (!list_empty(&dwc->queue)) 1120 if (!list_empty(&dwc->queue))
1121 dwc_scan_descriptors(to_dw_dma(chan->device), dwc); 1121 dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
1122 } 1122 }
1123 1123
1124 static int dwc_alloc_chan_resources(struct dma_chan *chan) 1124 static int dwc_alloc_chan_resources(struct dma_chan *chan)
1125 { 1125 {
1126 struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 1126 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
1127 struct dw_dma *dw = to_dw_dma(chan->device); 1127 struct dw_dma *dw = to_dw_dma(chan->device);
1128 struct dw_desc *desc; 1128 struct dw_desc *desc;
1129 int i; 1129 int i;
1130 unsigned long flags; 1130 unsigned long flags;
1131 1131
1132 dev_vdbg(chan2dev(chan), "%s\n", __func__); 1132 dev_vdbg(chan2dev(chan), "%s\n", __func__);
1133 1133
1134 /* ASSERT: channel is idle */ 1134 /* ASSERT: channel is idle */
1135 if (dma_readl(dw, CH_EN) & dwc->mask) { 1135 if (dma_readl(dw, CH_EN) & dwc->mask) {
1136 dev_dbg(chan2dev(chan), "DMA channel not idle?\n"); 1136 dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
1137 return -EIO; 1137 return -EIO;
1138 } 1138 }
1139 1139
1140 dma_cookie_init(chan); 1140 dma_cookie_init(chan);
1141 1141
1142 /* 1142 /*
1143 * NOTE: some controllers may have additional features that we 1143 * NOTE: some controllers may have additional features that we
1144 * need to initialize here, like "scatter-gather" (which 1144 * need to initialize here, like "scatter-gather" (which
1145 * doesn't mean what you think it means), and status writeback. 1145 * doesn't mean what you think it means), and status writeback.
1146 */ 1146 */
1147 1147
1148 dwc_set_masters(dwc); 1148 dwc_set_masters(dwc);
1149 1149
1150 spin_lock_irqsave(&dwc->lock, flags); 1150 spin_lock_irqsave(&dwc->lock, flags);
1151 i = dwc->descs_allocated; 1151 i = dwc->descs_allocated;
1152 while (dwc->descs_allocated < NR_DESCS_PER_CHANNEL) { 1152 while (dwc->descs_allocated < NR_DESCS_PER_CHANNEL) {
1153 dma_addr_t phys; 1153 dma_addr_t phys;
1154 1154
1155 spin_unlock_irqrestore(&dwc->lock, flags); 1155 spin_unlock_irqrestore(&dwc->lock, flags);
1156 1156
1157 desc = dma_pool_alloc(dw->desc_pool, GFP_ATOMIC, &phys); 1157 desc = dma_pool_alloc(dw->desc_pool, GFP_ATOMIC, &phys);
1158 if (!desc) 1158 if (!desc)
1159 goto err_desc_alloc; 1159 goto err_desc_alloc;
1160 1160
1161 memset(desc, 0, sizeof(struct dw_desc)); 1161 memset(desc, 0, sizeof(struct dw_desc));
1162 1162
1163 INIT_LIST_HEAD(&desc->tx_list); 1163 INIT_LIST_HEAD(&desc->tx_list);
1164 dma_async_tx_descriptor_init(&desc->txd, chan); 1164 dma_async_tx_descriptor_init(&desc->txd, chan);
1165 desc->txd.tx_submit = dwc_tx_submit; 1165 desc->txd.tx_submit = dwc_tx_submit;
1166 desc->txd.flags = DMA_CTRL_ACK; 1166 desc->txd.flags = DMA_CTRL_ACK;
1167 desc->txd.phys = phys; 1167 desc->txd.phys = phys;
1168 1168
1169 dwc_desc_put(dwc, desc); 1169 dwc_desc_put(dwc, desc);
1170 1170
1171 spin_lock_irqsave(&dwc->lock, flags); 1171 spin_lock_irqsave(&dwc->lock, flags);
1172 i = ++dwc->descs_allocated; 1172 i = ++dwc->descs_allocated;
1173 } 1173 }
1174 1174
1175 spin_unlock_irqrestore(&dwc->lock, flags); 1175 spin_unlock_irqrestore(&dwc->lock, flags);
1176 1176
1177 dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i); 1177 dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i);
1178 1178
1179 return i; 1179 return i;
1180 1180
1181 err_desc_alloc: 1181 err_desc_alloc:
1182 dev_info(chan2dev(chan), "only allocated %d descriptors\n", i); 1182 dev_info(chan2dev(chan), "only allocated %d descriptors\n", i);
1183 1183
1184 return i; 1184 return i;
1185 } 1185 }
1186 1186
1187 static void dwc_free_chan_resources(struct dma_chan *chan) 1187 static void dwc_free_chan_resources(struct dma_chan *chan)
1188 { 1188 {
1189 struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 1189 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
1190 struct dw_dma *dw = to_dw_dma(chan->device); 1190 struct dw_dma *dw = to_dw_dma(chan->device);
1191 struct dw_desc *desc, *_desc; 1191 struct dw_desc *desc, *_desc;
1192 unsigned long flags; 1192 unsigned long flags;
1193 LIST_HEAD(list); 1193 LIST_HEAD(list);
1194 1194
1195 dev_dbg(chan2dev(chan), "%s: descs allocated=%u\n", __func__, 1195 dev_dbg(chan2dev(chan), "%s: descs allocated=%u\n", __func__,
1196 dwc->descs_allocated); 1196 dwc->descs_allocated);
1197 1197
1198 /* ASSERT: channel is idle */ 1198 /* ASSERT: channel is idle */
1199 BUG_ON(!list_empty(&dwc->active_list)); 1199 BUG_ON(!list_empty(&dwc->active_list));
1200 BUG_ON(!list_empty(&dwc->queue)); 1200 BUG_ON(!list_empty(&dwc->queue));
1201 BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask); 1201 BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask);
1202 1202
1203 spin_lock_irqsave(&dwc->lock, flags); 1203 spin_lock_irqsave(&dwc->lock, flags);
1204 list_splice_init(&dwc->free_list, &list); 1204 list_splice_init(&dwc->free_list, &list);
1205 dwc->descs_allocated = 0; 1205 dwc->descs_allocated = 0;
1206 dwc->initialized = false; 1206 dwc->initialized = false;
1207 dwc->request_line = ~0; 1207 dwc->request_line = ~0;
1208 1208
1209 /* Disable interrupts */ 1209 /* Disable interrupts */
1210 channel_clear_bit(dw, MASK.XFER, dwc->mask); 1210 channel_clear_bit(dw, MASK.XFER, dwc->mask);
1211 channel_clear_bit(dw, MASK.ERROR, dwc->mask); 1211 channel_clear_bit(dw, MASK.ERROR, dwc->mask);
1212 1212
1213 spin_unlock_irqrestore(&dwc->lock, flags); 1213 spin_unlock_irqrestore(&dwc->lock, flags);
1214 1214
1215 list_for_each_entry_safe(desc, _desc, &list, desc_node) { 1215 list_for_each_entry_safe(desc, _desc, &list, desc_node) {
1216 dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc); 1216 dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc);
1217 dma_pool_free(dw->desc_pool, desc, desc->txd.phys); 1217 dma_pool_free(dw->desc_pool, desc, desc->txd.phys);
1218 } 1218 }
1219 1219
1220 dev_vdbg(chan2dev(chan), "%s: done\n", __func__); 1220 dev_vdbg(chan2dev(chan), "%s: done\n", __func__);
1221 } 1221 }
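
dwc_alloc_chan_resources() and dwc_free_chan_resources() are driven by channel acquisition and release in the dmaengine core. A sketch of that life cycle from a client follows; the filter function is a placeholder, since real users typically match on platform data or use dma_request_slave_channel() instead.

#include <linux/dmaengine.h>

/* Placeholder filter: a real one would check chan against platform data. */
static bool my_filter(struct dma_chan *chan, void *param)
{
	return true;
}

static void my_channel_lifecycle(void)
{
	dma_cap_mask_t mask;
	struct dma_chan *chan;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* grabbing the channel ends up in dwc_alloc_chan_resources() */
	chan = dma_request_channel(mask, my_filter, NULL);
	if (!chan)
		return;

	/* ... prep/submit/issue transfers ... */

	/* releasing it ends up in dwc_free_chan_resources() */
	dma_release_channel(chan);
}
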
1222 1222
1223 /* --------------------- Cyclic DMA API extensions -------------------- */ 1223 /* --------------------- Cyclic DMA API extensions -------------------- */
1224 1224
1225 /** 1225 /**
1226 * dw_dma_cyclic_start - start the cyclic DMA transfer 1226 * dw_dma_cyclic_start - start the cyclic DMA transfer
1227 * @chan: the DMA channel to start 1227 * @chan: the DMA channel to start
1228 * 1228 *
1229 * Must be called with soft interrupts disabled. Returns zero on success or 1229 * Must be called with soft interrupts disabled. Returns zero on success or
1230 * -errno on failure. 1230 * -errno on failure.
1231 */ 1231 */
1232 int dw_dma_cyclic_start(struct dma_chan *chan) 1232 int dw_dma_cyclic_start(struct dma_chan *chan)
1233 { 1233 {
1234 struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 1234 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
1235 struct dw_dma *dw = to_dw_dma(dwc->chan.device); 1235 struct dw_dma *dw = to_dw_dma(dwc->chan.device);
1236 unsigned long flags; 1236 unsigned long flags;
1237 1237
1238 if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) { 1238 if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) {
1239 dev_err(chan2dev(&dwc->chan), "missing prep for cyclic DMA\n"); 1239 dev_err(chan2dev(&dwc->chan), "missing prep for cyclic DMA\n");
1240 return -ENODEV; 1240 return -ENODEV;
1241 } 1241 }
1242 1242
1243 spin_lock_irqsave(&dwc->lock, flags); 1243 spin_lock_irqsave(&dwc->lock, flags);
1244 1244
1245 /* Assert channel is idle */ 1245 /* Assert channel is idle */
1246 if (dma_readl(dw, CH_EN) & dwc->mask) { 1246 if (dma_readl(dw, CH_EN) & dwc->mask) {
1247 dev_err(chan2dev(&dwc->chan), 1247 dev_err(chan2dev(&dwc->chan),
1248 "BUG: Attempted to start non-idle channel\n"); 1248 "BUG: Attempted to start non-idle channel\n");
1249 dwc_dump_chan_regs(dwc); 1249 dwc_dump_chan_regs(dwc);
1250 spin_unlock_irqrestore(&dwc->lock, flags); 1250 spin_unlock_irqrestore(&dwc->lock, flags);
1251 return -EBUSY; 1251 return -EBUSY;
1252 } 1252 }
1253 1253
1254 dma_writel(dw, CLEAR.ERROR, dwc->mask); 1254 dma_writel(dw, CLEAR.ERROR, dwc->mask);
1255 dma_writel(dw, CLEAR.XFER, dwc->mask); 1255 dma_writel(dw, CLEAR.XFER, dwc->mask);
1256 1256
1257 /* Setup DMAC channel registers */ 1257 /* Setup DMAC channel registers */
1258 channel_writel(dwc, LLP, dwc->cdesc->desc[0]->txd.phys); 1258 channel_writel(dwc, LLP, dwc->cdesc->desc[0]->txd.phys);
1259 channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN); 1259 channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
1260 channel_writel(dwc, CTL_HI, 0); 1260 channel_writel(dwc, CTL_HI, 0);
1261 1261
1262 channel_set_bit(dw, CH_EN, dwc->mask); 1262 channel_set_bit(dw, CH_EN, dwc->mask);
1263 1263
1264 spin_unlock_irqrestore(&dwc->lock, flags); 1264 spin_unlock_irqrestore(&dwc->lock, flags);
1265 1265
1266 return 0; 1266 return 0;
1267 } 1267 }
1268 EXPORT_SYMBOL(dw_dma_cyclic_start); 1268 EXPORT_SYMBOL(dw_dma_cyclic_start);
1269 1269
1270 /** 1270 /**
1271 * dw_dma_cyclic_stop - stop the cyclic DMA transfer 1271 * dw_dma_cyclic_stop - stop the cyclic DMA transfer
1272 * @chan: the DMA channel to stop 1272 * @chan: the DMA channel to stop
1273 * 1273 *
1274 * Must be called with soft interrupts disabled. 1274 * Must be called with soft interrupts disabled.
1275 */ 1275 */
1276 void dw_dma_cyclic_stop(struct dma_chan *chan) 1276 void dw_dma_cyclic_stop(struct dma_chan *chan)
1277 { 1277 {
1278 struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 1278 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
1279 struct dw_dma *dw = to_dw_dma(dwc->chan.device); 1279 struct dw_dma *dw = to_dw_dma(dwc->chan.device);
1280 unsigned long flags; 1280 unsigned long flags;
1281 1281
1282 spin_lock_irqsave(&dwc->lock, flags); 1282 spin_lock_irqsave(&dwc->lock, flags);
1283 1283
1284 dwc_chan_disable(dw, dwc); 1284 dwc_chan_disable(dw, dwc);
1285 1285
1286 spin_unlock_irqrestore(&dwc->lock, flags); 1286 spin_unlock_irqrestore(&dwc->lock, flags);
1287 } 1287 }
1288 EXPORT_SYMBOL(dw_dma_cyclic_stop); 1288 EXPORT_SYMBOL(dw_dma_cyclic_stop);
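
The cyclic extension exported here is a dw_dma-specific API rather than the generic device_prep_dma_cyclic path, so a user drives it through these calls directly. A hedged sketch is below; the header location is assumed to be <linux/dw_dmac.h>, and the slave configuration (direction, FIFO address, widths) is assumed to have been set through dmaengine_slave_config() beforehand, as for a normal slave transfer.

#include <linux/dw_dmac.h>	/* assumed location of the dw_cyclic_desc API */
#include <linux/dmaengine.h>
#include <linux/err.h>

static void my_period_elapsed(void *param)
{
	/* runs from dwc_handle_cyclic() once per completed period,
	 * e.g. to advance an audio ring-buffer pointer */
}

static int my_start_cyclic(struct dma_chan *chan, dma_addr_t buf,
			   size_t buf_len, size_t period_len)
{
	struct dw_cyclic_desc *cdesc;

	cdesc = dw_dma_cyclic_prep(chan, buf, buf_len, period_len,
				   DMA_MEM_TO_DEV);
	if (IS_ERR(cdesc))
		return PTR_ERR(cdesc);

	cdesc->period_callback = my_period_elapsed;
	cdesc->period_callback_param = NULL;

	return dw_dma_cyclic_start(chan);	/* later: dw_dma_cyclic_stop(chan) */
}
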
1289 1289
1290 /** 1290 /**
1291 * dw_dma_cyclic_prep - prepare the cyclic DMA transfer 1291 * dw_dma_cyclic_prep - prepare the cyclic DMA transfer
1292 * @chan: the DMA channel to prepare 1292 * @chan: the DMA channel to prepare
1293 * @buf_addr: physical DMA address where the buffer starts 1293 * @buf_addr: physical DMA address where the buffer starts
1294 * @buf_len: total number of bytes for the entire buffer 1294 * @buf_len: total number of bytes for the entire buffer
1295 * @period_len: number of bytes for each period 1295 * @period_len: number of bytes for each period
1296 * @direction: transfer direction, to or from device 1296 * @direction: transfer direction, to or from device
1297 * 1297 *
1298 * Must be called before trying to start the transfer. Returns a valid struct 1298 * Must be called before trying to start the transfer. Returns a valid struct
1299 * dw_cyclic_desc if successful or an ERR_PTR(-errno) if not successful. 1299 * dw_cyclic_desc if successful or an ERR_PTR(-errno) if not successful.
1300 */ 1300 */
1301 struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan, 1301 struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
1302 dma_addr_t buf_addr, size_t buf_len, size_t period_len, 1302 dma_addr_t buf_addr, size_t buf_len, size_t period_len,
1303 enum dma_transfer_direction direction) 1303 enum dma_transfer_direction direction)
1304 { 1304 {
1305 struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 1305 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
1306 struct dma_slave_config *sconfig = &dwc->dma_sconfig; 1306 struct dma_slave_config *sconfig = &dwc->dma_sconfig;
1307 struct dw_cyclic_desc *cdesc; 1307 struct dw_cyclic_desc *cdesc;
1308 struct dw_cyclic_desc *retval = NULL; 1308 struct dw_cyclic_desc *retval = NULL;
1309 struct dw_desc *desc; 1309 struct dw_desc *desc;
1310 struct dw_desc *last = NULL; 1310 struct dw_desc *last = NULL;
1311 unsigned long was_cyclic; 1311 unsigned long was_cyclic;
1312 unsigned int reg_width; 1312 unsigned int reg_width;
1313 unsigned int periods; 1313 unsigned int periods;
1314 unsigned int i; 1314 unsigned int i;
1315 unsigned long flags; 1315 unsigned long flags;
1316 1316
1317 spin_lock_irqsave(&dwc->lock, flags); 1317 spin_lock_irqsave(&dwc->lock, flags);
1318 if (dwc->nollp) { 1318 if (dwc->nollp) {
1319 spin_unlock_irqrestore(&dwc->lock, flags); 1319 spin_unlock_irqrestore(&dwc->lock, flags);
1320 dev_dbg(chan2dev(&dwc->chan), 1320 dev_dbg(chan2dev(&dwc->chan),
1321 "channel doesn't support LLP transfers\n"); 1321 "channel doesn't support LLP transfers\n");
1322 return ERR_PTR(-EINVAL); 1322 return ERR_PTR(-EINVAL);
1323 } 1323 }
1324 1324
1325 if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) { 1325 if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) {
1326 spin_unlock_irqrestore(&dwc->lock, flags); 1326 spin_unlock_irqrestore(&dwc->lock, flags);
1327 dev_dbg(chan2dev(&dwc->chan), 1327 dev_dbg(chan2dev(&dwc->chan),
1328 "queue and/or active list are not empty\n"); 1328 "queue and/or active list are not empty\n");
1329 return ERR_PTR(-EBUSY); 1329 return ERR_PTR(-EBUSY);
1330 } 1330 }
1331 1331
1332 was_cyclic = test_and_set_bit(DW_DMA_IS_CYCLIC, &dwc->flags); 1332 was_cyclic = test_and_set_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
1333 spin_unlock_irqrestore(&dwc->lock, flags); 1333 spin_unlock_irqrestore(&dwc->lock, flags);
1334 if (was_cyclic) { 1334 if (was_cyclic) {
1335 dev_dbg(chan2dev(&dwc->chan), 1335 dev_dbg(chan2dev(&dwc->chan),
1336 "channel already prepared for cyclic DMA\n"); 1336 "channel already prepared for cyclic DMA\n");
1337 return ERR_PTR(-EBUSY); 1337 return ERR_PTR(-EBUSY);
1338 } 1338 }
1339 1339
1340 retval = ERR_PTR(-EINVAL); 1340 retval = ERR_PTR(-EINVAL);
1341 1341
1342 if (unlikely(!is_slave_direction(direction))) 1342 if (unlikely(!is_slave_direction(direction)))
1343 goto out_err; 1343 goto out_err;
1344 1344
1345 dwc->direction = direction; 1345 dwc->direction = direction;
1346 1346
1347 if (direction == DMA_MEM_TO_DEV) 1347 if (direction == DMA_MEM_TO_DEV)
1348 reg_width = __ffs(sconfig->dst_addr_width); 1348 reg_width = __ffs(sconfig->dst_addr_width);
1349 else 1349 else
1350 reg_width = __ffs(sconfig->src_addr_width); 1350 reg_width = __ffs(sconfig->src_addr_width);
1351 1351
1352 periods = buf_len / period_len; 1352 periods = buf_len / period_len;
1353 1353
1354 /* Check for too big/unaligned periods and unaligned DMA buffer. */ 1354 /* Check for too big/unaligned periods and unaligned DMA buffer. */
1355 if (period_len > (dwc->block_size << reg_width)) 1355 if (period_len > (dwc->block_size << reg_width))
1356 goto out_err; 1356 goto out_err;
1357 if (unlikely(period_len & ((1 << reg_width) - 1))) 1357 if (unlikely(period_len & ((1 << reg_width) - 1)))
1358 goto out_err; 1358 goto out_err;
1359 if (unlikely(buf_addr & ((1 << reg_width) - 1))) 1359 if (unlikely(buf_addr & ((1 << reg_width) - 1)))
1360 goto out_err; 1360 goto out_err;
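/*
 * Illustrative note (not part of the original driver): with a 4-byte
 * slave register width, sconfig->dst_addr_width = 4 and reg_width =
 * __ffs(4) = 2. A period_len of 256 bytes then passes the checks above
 * (256 & ((1 << 2) - 1) == 0) and is later programmed as ctlhi =
 * 256 >> 2 = 64 transfer elements, while a period_len of 257 bytes
 * would be rejected as unaligned.
 */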
1361 1361
1362 retval = ERR_PTR(-ENOMEM); 1362 retval = ERR_PTR(-ENOMEM);
1363 1363
1364 if (periods > NR_DESCS_PER_CHANNEL) 1364 if (periods > NR_DESCS_PER_CHANNEL)
1365 goto out_err; 1365 goto out_err;
1366 1366
1367 cdesc = kzalloc(sizeof(struct dw_cyclic_desc), GFP_KERNEL); 1367 cdesc = kzalloc(sizeof(struct dw_cyclic_desc), GFP_KERNEL);
1368 if (!cdesc) 1368 if (!cdesc)
1369 goto out_err; 1369 goto out_err;
1370 1370
1371 cdesc->desc = kzalloc(sizeof(struct dw_desc *) * periods, GFP_KERNEL); 1371 cdesc->desc = kzalloc(sizeof(struct dw_desc *) * periods, GFP_KERNEL);
1372 if (!cdesc->desc) 1372 if (!cdesc->desc)
1373 goto out_err_alloc; 1373 goto out_err_alloc;
1374 1374
1375 for (i = 0; i < periods; i++) { 1375 for (i = 0; i < periods; i++) {
1376 desc = dwc_desc_get(dwc); 1376 desc = dwc_desc_get(dwc);
1377 if (!desc) 1377 if (!desc)
1378 goto out_err_desc_get; 1378 goto out_err_desc_get;
1379 1379
1380 switch (direction) { 1380 switch (direction) {
1381 case DMA_MEM_TO_DEV: 1381 case DMA_MEM_TO_DEV:
1382 desc->lli.dar = sconfig->dst_addr; 1382 desc->lli.dar = sconfig->dst_addr;
1383 desc->lli.sar = buf_addr + (period_len * i); 1383 desc->lli.sar = buf_addr + (period_len * i);
1384 desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan) 1384 desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan)
1385 | DWC_CTLL_DST_WIDTH(reg_width) 1385 | DWC_CTLL_DST_WIDTH(reg_width)
1386 | DWC_CTLL_SRC_WIDTH(reg_width) 1386 | DWC_CTLL_SRC_WIDTH(reg_width)
1387 | DWC_CTLL_DST_FIX 1387 | DWC_CTLL_DST_FIX
1388 | DWC_CTLL_SRC_INC 1388 | DWC_CTLL_SRC_INC
1389 | DWC_CTLL_INT_EN); 1389 | DWC_CTLL_INT_EN);
1390 1390
1391 desc->lli.ctllo |= sconfig->device_fc ? 1391 desc->lli.ctllo |= sconfig->device_fc ?
1392 DWC_CTLL_FC(DW_DMA_FC_P_M2P) : 1392 DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
1393 DWC_CTLL_FC(DW_DMA_FC_D_M2P); 1393 DWC_CTLL_FC(DW_DMA_FC_D_M2P);
1394 1394
1395 break; 1395 break;
1396 case DMA_DEV_TO_MEM: 1396 case DMA_DEV_TO_MEM:
1397 desc->lli.dar = buf_addr + (period_len * i); 1397 desc->lli.dar = buf_addr + (period_len * i);
1398 desc->lli.sar = sconfig->src_addr; 1398 desc->lli.sar = sconfig->src_addr;
1399 desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan) 1399 desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan)
1400 | DWC_CTLL_SRC_WIDTH(reg_width) 1400 | DWC_CTLL_SRC_WIDTH(reg_width)
1401 | DWC_CTLL_DST_WIDTH(reg_width) 1401 | DWC_CTLL_DST_WIDTH(reg_width)
1402 | DWC_CTLL_DST_INC 1402 | DWC_CTLL_DST_INC
1403 | DWC_CTLL_SRC_FIX 1403 | DWC_CTLL_SRC_FIX
1404 | DWC_CTLL_INT_EN); 1404 | DWC_CTLL_INT_EN);
1405 1405
1406 desc->lli.ctllo |= sconfig->device_fc ? 1406 desc->lli.ctllo |= sconfig->device_fc ?
1407 DWC_CTLL_FC(DW_DMA_FC_P_P2M) : 1407 DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
1408 DWC_CTLL_FC(DW_DMA_FC_D_P2M); 1408 DWC_CTLL_FC(DW_DMA_FC_D_P2M);
1409 1409
1410 break; 1410 break;
1411 default: 1411 default:
1412 break; 1412 break;
1413 } 1413 }
1414 1414
1415 desc->lli.ctlhi = (period_len >> reg_width); 1415 desc->lli.ctlhi = (period_len >> reg_width);
1416 cdesc->desc[i] = desc; 1416 cdesc->desc[i] = desc;
1417 1417
1418 if (last) 1418 if (last)
1419 last->lli.llp = desc->txd.phys; 1419 last->lli.llp = desc->txd.phys;
1420 1420
1421 last = desc; 1421 last = desc;
1422 } 1422 }
1423 1423
1424 /* Let's make a cyclic list */ 1424 /* Let's make a cyclic list */
1425 last->lli.llp = cdesc->desc[0]->txd.phys; 1425 last->lli.llp = cdesc->desc[0]->txd.phys;
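/*
 * Illustrative note (not part of the original driver): with buf_len =
 * 4096 and period_len = 1024 there are 4 periods, so desc[0..3] are
 * chained through lli.llp and desc[3]->lli.llp points back at desc[0].
 * The hardware then loops over the ring indefinitely, raising one
 * interrupt per completed period because DWC_CTLL_INT_EN is set in
 * every descriptor.
 */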
1426 1426
1427 dev_dbg(chan2dev(&dwc->chan), "cyclic prepared buf 0x%llx len %zu " 1427 dev_dbg(chan2dev(&dwc->chan), "cyclic prepared buf 0x%llx len %zu "
1428 "period %zu periods %d\n", (unsigned long long)buf_addr, 1428 "period %zu periods %d\n", (unsigned long long)buf_addr,
1429 buf_len, period_len, periods); 1429 buf_len, period_len, periods);
1430 1430
1431 cdesc->periods = periods; 1431 cdesc->periods = periods;
1432 dwc->cdesc = cdesc; 1432 dwc->cdesc = cdesc;
1433 1433
1434 return cdesc; 1434 return cdesc;
1435 1435
1436 out_err_desc_get: 1436 out_err_desc_get:
1437 while (i--) 1437 while (i--)
1438 dwc_desc_put(dwc, cdesc->desc[i]); 1438 dwc_desc_put(dwc, cdesc->desc[i]);
1439 out_err_alloc: 1439 out_err_alloc:
1440 kfree(cdesc); 1440 kfree(cdesc);
1441 out_err: 1441 out_err:
1442 clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags); 1442 clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
1443 return (struct dw_cyclic_desc *)retval; 1443 return (struct dw_cyclic_desc *)retval;
1444 } 1444 }
1445 EXPORT_SYMBOL(dw_dma_cyclic_prep); 1445 EXPORT_SYMBOL(dw_dma_cyclic_prep);
1446 1446
1447 /** 1447 /**
1448 * dw_dma_cyclic_free - free a prepared cyclic DMA transfer 1448 * dw_dma_cyclic_free - free a prepared cyclic DMA transfer
1449 * @chan: the DMA channel to free 1449 * @chan: the DMA channel to free
1450 */ 1450 */
1451 void dw_dma_cyclic_free(struct dma_chan *chan) 1451 void dw_dma_cyclic_free(struct dma_chan *chan)
1452 { 1452 {
1453 struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 1453 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
1454 struct dw_dma *dw = to_dw_dma(dwc->chan.device); 1454 struct dw_dma *dw = to_dw_dma(dwc->chan.device);
1455 struct dw_cyclic_desc *cdesc = dwc->cdesc; 1455 struct dw_cyclic_desc *cdesc = dwc->cdesc;
1456 int i; 1456 int i;
1457 unsigned long flags; 1457 unsigned long flags;
1458 1458
1459 dev_dbg(chan2dev(&dwc->chan), "%s\n", __func__); 1459 dev_dbg(chan2dev(&dwc->chan), "%s\n", __func__);
1460 1460
1461 if (!cdesc) 1461 if (!cdesc)
1462 return; 1462 return;
1463 1463
1464 spin_lock_irqsave(&dwc->lock, flags); 1464 spin_lock_irqsave(&dwc->lock, flags);
1465 1465
1466 dwc_chan_disable(dw, dwc); 1466 dwc_chan_disable(dw, dwc);
1467 1467
1468 dma_writel(dw, CLEAR.ERROR, dwc->mask); 1468 dma_writel(dw, CLEAR.ERROR, dwc->mask);
1469 dma_writel(dw, CLEAR.XFER, dwc->mask); 1469 dma_writel(dw, CLEAR.XFER, dwc->mask);
1470 1470
1471 spin_unlock_irqrestore(&dwc->lock, flags); 1471 spin_unlock_irqrestore(&dwc->lock, flags);
1472 1472
1473 for (i = 0; i < cdesc->periods; i++) 1473 for (i = 0; i < cdesc->periods; i++)
1474 dwc_desc_put(dwc, cdesc->desc[i]); 1474 dwc_desc_put(dwc, cdesc->desc[i]);
1475 1475
1476 kfree(cdesc->desc); 1476 kfree(cdesc->desc);
1477 kfree(cdesc); 1477 kfree(cdesc);
1478 1478
1479 clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags); 1479 clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
1480 } 1480 }
1481 EXPORT_SYMBOL(dw_dma_cyclic_free); 1481 EXPORT_SYMBOL(dw_dma_cyclic_free);
1482 1482
1483 /*----------------------------------------------------------------------*/ 1483 /*----------------------------------------------------------------------*/
1484 1484
1485 static void dw_dma_off(struct dw_dma *dw) 1485 static void dw_dma_off(struct dw_dma *dw)
1486 { 1486 {
1487 int i; 1487 int i;
1488 1488
1489 dma_writel(dw, CFG, 0); 1489 dma_writel(dw, CFG, 0);
1490 1490
1491 channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask); 1491 channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
1492 channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask); 1492 channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
1493 channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask); 1493 channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
1494 channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask); 1494 channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
1495 1495
1496 while (dma_readl(dw, CFG) & DW_CFG_DMA_EN) 1496 while (dma_readl(dw, CFG) & DW_CFG_DMA_EN)
1497 cpu_relax(); 1497 cpu_relax();
1498 1498
1499 for (i = 0; i < dw->dma.chancnt; i++) 1499 for (i = 0; i < dw->dma.chancnt; i++)
1500 dw->chan[i].initialized = false; 1500 dw->chan[i].initialized = false;
1501 } 1501 }
1502 1502
1503 int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata) 1503 int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
1504 { 1504 {
1505 struct dw_dma *dw; 1505 struct dw_dma *dw;
1506 size_t size; 1506 size_t size;
1507 bool autocfg; 1507 bool autocfg;
1508 unsigned int dw_params; 1508 unsigned int dw_params;
1509 unsigned int nr_channels; 1509 unsigned int nr_channels;
1510 unsigned int max_blk_size = 0; 1510 unsigned int max_blk_size = 0;
1511 int err; 1511 int err;
1512 int i; 1512 int i;
1513 1513
1514 dw_params = dma_read_byaddr(chip->regs, DW_PARAMS); 1514 dw_params = dma_read_byaddr(chip->regs, DW_PARAMS);
1515 autocfg = dw_params >> DW_PARAMS_EN & 0x1; 1515 autocfg = dw_params >> DW_PARAMS_EN & 0x1;
1516 1516
1517 dev_dbg(chip->dev, "DW_PARAMS: 0x%08x\n", dw_params); 1517 dev_dbg(chip->dev, "DW_PARAMS: 0x%08x\n", dw_params);
1518 1518
1519 if (!pdata && autocfg) { 1519 if (!pdata && autocfg) {
1520 pdata = devm_kzalloc(chip->dev, sizeof(*pdata), GFP_KERNEL); 1520 pdata = devm_kzalloc(chip->dev, sizeof(*pdata), GFP_KERNEL);
1521 if (!pdata) 1521 if (!pdata)
1522 return -ENOMEM; 1522 return -ENOMEM;
1523 1523
1524 /* Fill platform data with the default values */ 1524 /* Fill platform data with the default values */
1525 pdata->is_private = true; 1525 pdata->is_private = true;
1526 pdata->chan_allocation_order = CHAN_ALLOCATION_ASCENDING; 1526 pdata->chan_allocation_order = CHAN_ALLOCATION_ASCENDING;
1527 pdata->chan_priority = CHAN_PRIORITY_ASCENDING; 1527 pdata->chan_priority = CHAN_PRIORITY_ASCENDING;
1528 } else if (!pdata || pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS) 1528 } else if (!pdata || pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS)
1529 return -EINVAL; 1529 return -EINVAL;
1530 1530
1531 if (autocfg) 1531 if (autocfg)
1532 nr_channels = (dw_params >> DW_PARAMS_NR_CHAN & 0x7) + 1; 1532 nr_channels = (dw_params >> DW_PARAMS_NR_CHAN & 0x7) + 1;
1533 else 1533 else
1534 nr_channels = pdata->nr_channels; 1534 nr_channels = pdata->nr_channels;
1535 1535
1536 size = sizeof(struct dw_dma) + nr_channels * sizeof(struct dw_dma_chan); 1536 size = sizeof(struct dw_dma) + nr_channels * sizeof(struct dw_dma_chan);
1537 dw = devm_kzalloc(chip->dev, size, GFP_KERNEL); 1537 dw = devm_kzalloc(chip->dev, size, GFP_KERNEL);
1538 if (!dw) 1538 if (!dw)
1539 return -ENOMEM; 1539 return -ENOMEM;
1540 1540
1541 dw->clk = devm_clk_get(chip->dev, "hclk"); 1541 dw->clk = devm_clk_get(chip->dev, "hclk");
1542 if (IS_ERR(dw->clk)) 1542 if (IS_ERR(dw->clk))
1543 return PTR_ERR(dw->clk); 1543 return PTR_ERR(dw->clk);
1544 clk_prepare_enable(dw->clk); 1544 clk_prepare_enable(dw->clk);
1545 1545
1546 dw->regs = chip->regs; 1546 dw->regs = chip->regs;
1547 chip->dw = dw; 1547 chip->dw = dw;
1548 1548
1549 /* Get hardware configuration parameters */ 1549 /* Get hardware configuration parameters */
1550 if (autocfg) { 1550 if (autocfg) {
1551 max_blk_size = dma_readl(dw, MAX_BLK_SIZE); 1551 max_blk_size = dma_readl(dw, MAX_BLK_SIZE);
1552 1552
1553 dw->nr_masters = (dw_params >> DW_PARAMS_NR_MASTER & 3) + 1; 1553 dw->nr_masters = (dw_params >> DW_PARAMS_NR_MASTER & 3) + 1;
1554 for (i = 0; i < dw->nr_masters; i++) { 1554 for (i = 0; i < dw->nr_masters; i++) {
1555 dw->data_width[i] = 1555 dw->data_width[i] =
1556 (dw_params >> DW_PARAMS_DATA_WIDTH(i) & 3) + 2; 1556 (dw_params >> DW_PARAMS_DATA_WIDTH(i) & 3) + 2;
1557 } 1557 }
1558 } else { 1558 } else {
1559 dw->nr_masters = pdata->nr_masters; 1559 dw->nr_masters = pdata->nr_masters;
1560 memcpy(dw->data_width, pdata->data_width, 4); 1560 memcpy(dw->data_width, pdata->data_width, 4);
1561 } 1561 }
1562 1562
1563 /* Calculate all channel mask before DMA setup */ 1563 /* Calculate all channel mask before DMA setup */
1564 dw->all_chan_mask = (1 << nr_channels) - 1; 1564 dw->all_chan_mask = (1 << nr_channels) - 1;
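/* Illustrative note: e.g. nr_channels = 8 yields all_chan_mask = 0xff. */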
1565 1565
1566 /* Force dma off, just in case */ 1566 /* Force dma off, just in case */
1567 dw_dma_off(dw); 1567 dw_dma_off(dw);
1568 1568
1569 /* Disable BLOCK interrupts as well */ 1569 /* Disable BLOCK interrupts as well */
1570 channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask); 1570 channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
1571 1571
1572 err = devm_request_irq(chip->dev, chip->irq, dw_dma_interrupt, 1572 err = devm_request_irq(chip->dev, chip->irq, dw_dma_interrupt,
1573 IRQF_SHARED, "dw_dmac", dw); 1573 IRQF_SHARED, "dw_dmac", dw);
1574 if (err) 1574 if (err)
1575 return err; 1575 return err;
1576 1576
1577 /* Create a pool of consistent memory blocks for hardware descriptors */ 1577 /* Create a pool of consistent memory blocks for hardware descriptors */
1578 dw->desc_pool = dmam_pool_create("dw_dmac_desc_pool", chip->dev, 1578 dw->desc_pool = dmam_pool_create("dw_dmac_desc_pool", chip->dev,
1579 sizeof(struct dw_desc), 4, 0); 1579 sizeof(struct dw_desc), 4, 0);
1580 if (!dw->desc_pool) { 1580 if (!dw->desc_pool) {
1581 dev_err(chip->dev, "No memory for descriptors dma pool\n"); 1581 dev_err(chip->dev, "No memory for descriptors dma pool\n");
1582 return -ENOMEM; 1582 return -ENOMEM;
1583 } 1583 }
1584 1584
1585 tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw); 1585 tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw);
1586 1586
1587 INIT_LIST_HEAD(&dw->dma.channels); 1587 INIT_LIST_HEAD(&dw->dma.channels);
1588 for (i = 0; i < nr_channels; i++) { 1588 for (i = 0; i < nr_channels; i++) {
1589 struct dw_dma_chan *dwc = &dw->chan[i]; 1589 struct dw_dma_chan *dwc = &dw->chan[i];
1590 int r = nr_channels - i - 1; 1590 int r = nr_channels - i - 1;
1591 1591
1592 dwc->chan.device = &dw->dma; 1592 dwc->chan.device = &dw->dma;
1593 dma_cookie_init(&dwc->chan); 1593 dma_cookie_init(&dwc->chan);
1594 if (pdata->chan_allocation_order == CHAN_ALLOCATION_ASCENDING) 1594 if (pdata->chan_allocation_order == CHAN_ALLOCATION_ASCENDING)
1595 list_add_tail(&dwc->chan.device_node, 1595 list_add_tail(&dwc->chan.device_node,
1596 &dw->dma.channels); 1596 &dw->dma.channels);
1597 else 1597 else
1598 list_add(&dwc->chan.device_node, &dw->dma.channels); 1598 list_add(&dwc->chan.device_node, &dw->dma.channels);
1599 1599
1600 /* 7 is highest priority & 0 is lowest. */ 1600 /* 7 is highest priority & 0 is lowest. */
1601 if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING) 1601 if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING)
1602 dwc->priority = r; 1602 dwc->priority = r;
1603 else 1603 else
1604 dwc->priority = i; 1604 dwc->priority = i;
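/*
 * Illustrative note (not part of the original driver): with 8 channels
 * and CHAN_PRIORITY_ASCENDING, channel 0 gets priority 7 (highest) and
 * channel 7 gets priority 0; otherwise channel i simply keeps priority i.
 */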
1605 1605
1606 dwc->ch_regs = &__dw_regs(dw)->CHAN[i]; 1606 dwc->ch_regs = &__dw_regs(dw)->CHAN[i];
1607 spin_lock_init(&dwc->lock); 1607 spin_lock_init(&dwc->lock);
1608 dwc->mask = 1 << i; 1608 dwc->mask = 1 << i;
1609 1609
1610 INIT_LIST_HEAD(&dwc->active_list); 1610 INIT_LIST_HEAD(&dwc->active_list);
1611 INIT_LIST_HEAD(&dwc->queue); 1611 INIT_LIST_HEAD(&dwc->queue);
1612 INIT_LIST_HEAD(&dwc->free_list); 1612 INIT_LIST_HEAD(&dwc->free_list);
1613 1613
1614 channel_clear_bit(dw, CH_EN, dwc->mask); 1614 channel_clear_bit(dw, CH_EN, dwc->mask);
1615 1615
1616 dwc->direction = DMA_TRANS_NONE; 1616 dwc->direction = DMA_TRANS_NONE;
1617 dwc->request_line = ~0; 1617 dwc->request_line = ~0;
1618 1618
1619 /* Hardware configuration */ 1619 /* Hardware configuration */
1620 if (autocfg) { 1620 if (autocfg) {
1621 unsigned int dwc_params; 1621 unsigned int dwc_params;
1622 void __iomem *addr = chip->regs + r * sizeof(u32); 1622 void __iomem *addr = chip->regs + r * sizeof(u32);
1623 1623
1624 dwc_params = dma_read_byaddr(addr, DWC_PARAMS); 1624 dwc_params = dma_read_byaddr(addr, DWC_PARAMS);
1625 1625
1626 dev_dbg(chip->dev, "DWC_PARAMS[%d]: 0x%08x\n", i, 1626 dev_dbg(chip->dev, "DWC_PARAMS[%d]: 0x%08x\n", i,
1627 dwc_params); 1627 dwc_params);
1628 1628
1629 /* Decode the maximum block size for the given channel. The 1629 /* Decode the maximum block size for the given channel. The
1630 * stored 4-bit value represents blocks from 0x00 for 3 1630 * stored 4-bit value represents blocks from 0x00 for 3
1631 * up to 0x0a for 4095. */ 1631 * up to 0x0a for 4095. */
1632 dwc->block_size = 1632 dwc->block_size =
1633 (4 << ((max_blk_size >> 4 * i) & 0xf)) - 1; 1633 (4 << ((max_blk_size >> 4 * i) & 0xf)) - 1;
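/*
 * Illustrative note (not part of the original driver): a stored
 * value of 0x0 gives (4 << 0) - 1 = 3, 0x7 gives (4 << 7) - 1 =
 * 511, and 0xa gives (4 << 10) - 1 = 4095, matching the range
 * described in the comment above.
 */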
1634 dwc->nollp = 1634 dwc->nollp =
1635 (dwc_params >> DWC_PARAMS_MBLK_EN & 0x1) == 0; 1635 (dwc_params >> DWC_PARAMS_MBLK_EN & 0x1) == 0;
1636 } else { 1636 } else {
1637 dwc->block_size = pdata->block_size; 1637 dwc->block_size = pdata->block_size;
1638 1638
1639 /* Check if channel supports multi block transfer */ 1639 /* Check if channel supports multi block transfer */
1640 channel_writel(dwc, LLP, 0xfffffffc); 1640 channel_writel(dwc, LLP, 0xfffffffc);
1641 dwc->nollp = 1641 dwc->nollp =
1642 (channel_readl(dwc, LLP) & 0xfffffffc) == 0; 1642 (channel_readl(dwc, LLP) & 0xfffffffc) == 0;
1643 channel_writel(dwc, LLP, 0); 1643 channel_writel(dwc, LLP, 0);
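/*
 * Illustrative note (not part of the original driver): if the test
 * pattern written above reads back as zero, the LLP register is
 * hard-wired on this channel, so linked-list (multi-block) transfers
 * are unavailable and nollp is set.
 */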
1644 } 1644 }
1645 } 1645 }
1646 1646
1647 /* Clear all interrupts on all channels. */ 1647 /* Clear all interrupts on all channels. */
1648 dma_writel(dw, CLEAR.XFER, dw->all_chan_mask); 1648 dma_writel(dw, CLEAR.XFER, dw->all_chan_mask);
1649 dma_writel(dw, CLEAR.BLOCK, dw->all_chan_mask); 1649 dma_writel(dw, CLEAR.BLOCK, dw->all_chan_mask);
1650 dma_writel(dw, CLEAR.SRC_TRAN, dw->all_chan_mask); 1650 dma_writel(dw, CLEAR.SRC_TRAN, dw->all_chan_mask);
1651 dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask); 1651 dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask);
1652 dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask); 1652 dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask);
1653 1653
1654 dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask); 1654 dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);
1655 dma_cap_set(DMA_SLAVE, dw->dma.cap_mask); 1655 dma_cap_set(DMA_SLAVE, dw->dma.cap_mask);
1656 if (pdata->is_private) 1656 if (pdata->is_private)
1657 dma_cap_set(DMA_PRIVATE, dw->dma.cap_mask); 1657 dma_cap_set(DMA_PRIVATE, dw->dma.cap_mask);
1658 dw->dma.dev = chip->dev; 1658 dw->dma.dev = chip->dev;
1659 dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources; 1659 dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources;
1660 dw->dma.device_free_chan_resources = dwc_free_chan_resources; 1660 dw->dma.device_free_chan_resources = dwc_free_chan_resources;
1661 1661
1662 dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy; 1662 dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy;
1663 1663
1664 dw->dma.device_prep_slave_sg = dwc_prep_slave_sg; 1664 dw->dma.device_prep_slave_sg = dwc_prep_slave_sg;
1665 dw->dma.device_control = dwc_control; 1665 dw->dma.device_control = dwc_control;
1666 1666
1667 dw->dma.device_tx_status = dwc_tx_status; 1667 dw->dma.device_tx_status = dwc_tx_status;
1668 dw->dma.device_issue_pending = dwc_issue_pending; 1668 dw->dma.device_issue_pending = dwc_issue_pending;
1669 1669
1670 dma_writel(dw, CFG, DW_CFG_DMA_EN); 1670 dma_writel(dw, CFG, DW_CFG_DMA_EN);
1671 1671
1672 dev_info(chip->dev, "DesignWare DMA Controller, %d channels\n", 1672 dev_info(chip->dev, "DesignWare DMA Controller, %d channels\n",
1673 nr_channels); 1673 nr_channels);
1674 1674
1675 dma_async_device_register(&dw->dma); 1675 dma_async_device_register(&dw->dma);
1676 1676
1677 return 0; 1677 return 0;
1678 } 1678 }
1679 EXPORT_SYMBOL_GPL(dw_dma_probe); 1679 EXPORT_SYMBOL_GPL(dw_dma_probe);
1680 1680
1681 int dw_dma_remove(struct dw_dma_chip *chip) 1681 int dw_dma_remove(struct dw_dma_chip *chip)
1682 { 1682 {
1683 struct dw_dma *dw = chip->dw; 1683 struct dw_dma *dw = chip->dw;
1684 struct dw_dma_chan *dwc, *_dwc; 1684 struct dw_dma_chan *dwc, *_dwc;
1685 1685
1686 dw_dma_off(dw); 1686 dw_dma_off(dw);
1687 dma_async_device_unregister(&dw->dma); 1687 dma_async_device_unregister(&dw->dma);
1688 1688
1689 tasklet_kill(&dw->tasklet); 1689 tasklet_kill(&dw->tasklet);
1690 1690
1691 list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels, 1691 list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels,
1692 chan.device_node) { 1692 chan.device_node) {
1693 list_del(&dwc->chan.device_node); 1693 list_del(&dwc->chan.device_node);
1694 channel_clear_bit(dw, CH_EN, dwc->mask); 1694 channel_clear_bit(dw, CH_EN, dwc->mask);
1695 } 1695 }
1696 1696
1697 return 0; 1697 return 0;
1698 } 1698 }
1699 EXPORT_SYMBOL_GPL(dw_dma_remove); 1699 EXPORT_SYMBOL_GPL(dw_dma_remove);
1700 1700
1701 void dw_dma_shutdown(struct dw_dma_chip *chip) 1701 void dw_dma_shutdown(struct dw_dma_chip *chip)
1702 { 1702 {
1703 struct dw_dma *dw = chip->dw; 1703 struct dw_dma *dw = chip->dw;
1704 1704
1705 dw_dma_off(dw); 1705 dw_dma_off(dw);
1706 clk_disable_unprepare(dw->clk); 1706 clk_disable_unprepare(dw->clk);
1707 } 1707 }
1708 EXPORT_SYMBOL_GPL(dw_dma_shutdown); 1708 EXPORT_SYMBOL_GPL(dw_dma_shutdown);
1709 1709
1710 #ifdef CONFIG_PM_SLEEP 1710 #ifdef CONFIG_PM_SLEEP
1711 1711
1712 int dw_dma_suspend(struct dw_dma_chip *chip) 1712 int dw_dma_suspend(struct dw_dma_chip *chip)
1713 { 1713 {
1714 struct dw_dma *dw = chip->dw; 1714 struct dw_dma *dw = chip->dw;
1715 1715
1716 dw_dma_off(dw); 1716 dw_dma_off(dw);
1717 clk_disable_unprepare(dw->clk); 1717 clk_disable_unprepare(dw->clk);
1718 1718
1719 return 0; 1719 return 0;
1720 } 1720 }
1721 EXPORT_SYMBOL_GPL(dw_dma_suspend); 1721 EXPORT_SYMBOL_GPL(dw_dma_suspend);
1722 1722
1723 int dw_dma_resume(struct dw_dma_chip *chip) 1723 int dw_dma_resume(struct dw_dma_chip *chip)
1724 { 1724 {
1725 struct dw_dma *dw = chip->dw; 1725 struct dw_dma *dw = chip->dw;
1726 1726
1727 clk_prepare_enable(dw->clk); 1727 clk_prepare_enable(dw->clk);
1728 dma_writel(dw, CFG, DW_CFG_DMA_EN); 1728 dma_writel(dw, CFG, DW_CFG_DMA_EN);
1729 1729
1730 return 0; 1730 return 0;
1731 } 1731 }
1732 EXPORT_SYMBOL_GPL(dw_dma_resume); 1732 EXPORT_SYMBOL_GPL(dw_dma_resume);
1733 1733
1734 #endif /* CONFIG_PM_SLEEP */ 1734 #endif /* CONFIG_PM_SLEEP */
1735 1735
1736 MODULE_LICENSE("GPL v2"); 1736 MODULE_LICENSE("GPL v2");
1737 MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller core driver"); 1737 MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller core driver");
1738 MODULE_AUTHOR("Haavard Skinnemoen (Atmel)"); 1738 MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
1739 MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>"); 1739 MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
1740 1740
1 /* 1 /*
2 * TI EDMA DMA engine driver 2 * TI EDMA DMA engine driver
3 * 3 *
4 * Copyright 2012 Texas Instruments 4 * Copyright 2012 Texas Instruments
5 * 5 *
6 * This program is free software; you can redistribute it and/or 6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License as 7 * modify it under the terms of the GNU General Public License as
8 * published by the Free Software Foundation version 2. 8 * published by the Free Software Foundation version 2.
9 * 9 *
10 * This program is distributed "as is" WITHOUT ANY WARRANTY of any 10 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
11 * kind, whether express or implied; without even the implied warranty 11 * kind, whether express or implied; without even the implied warranty
12 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details. 13 * GNU General Public License for more details.
14 */ 14 */
15 15
16 #include <linux/dmaengine.h> 16 #include <linux/dmaengine.h>
17 #include <linux/dma-mapping.h> 17 #include <linux/dma-mapping.h>
18 #include <linux/err.h> 18 #include <linux/err.h>
19 #include <linux/init.h> 19 #include <linux/init.h>
20 #include <linux/interrupt.h> 20 #include <linux/interrupt.h>
21 #include <linux/list.h> 21 #include <linux/list.h>
22 #include <linux/module.h> 22 #include <linux/module.h>
23 #include <linux/platform_device.h> 23 #include <linux/platform_device.h>
24 #include <linux/slab.h> 24 #include <linux/slab.h>
25 #include <linux/spinlock.h> 25 #include <linux/spinlock.h>
26 26
27 #include <linux/platform_data/edma.h> 27 #include <linux/platform_data/edma.h>
28 28
29 #include "dmaengine.h" 29 #include "dmaengine.h"
30 #include "virt-dma.h" 30 #include "virt-dma.h"
31 31
32 /* 32 /*
33 * This will go away when the private EDMA API is folded 33 * This will go away when the private EDMA API is folded
34 * into this driver and the platform device(s) are 34 * into this driver and the platform device(s) are
35 * instantiated in the arch code. We can only get away 35 * instantiated in the arch code. We can only get away
36 * with this simplification because DA8XX may not be built 36 * with this simplification because DA8XX may not be built
37 * in the same kernel image with other DaVinci parts. This 37 * in the same kernel image with other DaVinci parts. This
38 * avoids having to sprinkle dmaengine driver platform devices 38 * avoids having to sprinkle dmaengine driver platform devices
39 * and data throughout all the existing board files. 39 * and data throughout all the existing board files.
40 */ 40 */
41 #ifdef CONFIG_ARCH_DAVINCI_DA8XX 41 #ifdef CONFIG_ARCH_DAVINCI_DA8XX
42 #define EDMA_CTLRS 2 42 #define EDMA_CTLRS 2
43 #define EDMA_CHANS 32 43 #define EDMA_CHANS 32
44 #else 44 #else
45 #define EDMA_CTLRS 1 45 #define EDMA_CTLRS 1
46 #define EDMA_CHANS 64 46 #define EDMA_CHANS 64
47 #endif /* CONFIG_ARCH_DAVINCI_DA8XX */ 47 #endif /* CONFIG_ARCH_DAVINCI_DA8XX */
48 48
49 /* 49 /*
50 * Max of 20 segments per channel to conserve PaRAM slots 50 * Max of 20 segments per channel to conserve PaRAM slots
51 * Also note that MAX_NR_SG should be at least the number of periods 51 * Also note that MAX_NR_SG should be at least the number of periods
52 * that are required for ASoC, otherwise DMA prep calls will 52 * that are required for ASoC, otherwise DMA prep calls will
53 * fail. Today davinci-pcm is the only user of this driver and 53 * fail. Today davinci-pcm is the only user of this driver and
54 * requires at least 17 slots, so we set the default to 20. 54 * requires at least 17 slots, so we set the default to 20.
55 */ 55 */
56 #define MAX_NR_SG 20 56 #define MAX_NR_SG 20
57 #define EDMA_MAX_SLOTS MAX_NR_SG 57 #define EDMA_MAX_SLOTS MAX_NR_SG
58 #define EDMA_DESCRIPTORS 16 58 #define EDMA_DESCRIPTORS 16
59 59
60 struct edma_desc { 60 struct edma_desc {
61 struct virt_dma_desc vdesc; 61 struct virt_dma_desc vdesc;
62 struct list_head node; 62 struct list_head node;
63 int absync; 63 int absync;
64 int pset_nr; 64 int pset_nr;
65 int processed; 65 int processed;
66 struct edmacc_param pset[0]; 66 struct edmacc_param pset[0];
67 }; 67 };
68 68
69 struct edma_cc; 69 struct edma_cc;
70 70
71 struct edma_chan { 71 struct edma_chan {
72 struct virt_dma_chan vchan; 72 struct virt_dma_chan vchan;
73 struct list_head node; 73 struct list_head node;
74 struct edma_desc *edesc; 74 struct edma_desc *edesc;
75 struct edma_cc *ecc; 75 struct edma_cc *ecc;
76 int ch_num; 76 int ch_num;
77 bool alloced; 77 bool alloced;
78 int slot[EDMA_MAX_SLOTS]; 78 int slot[EDMA_MAX_SLOTS];
79 int missed; 79 int missed;
80 struct dma_slave_config cfg; 80 struct dma_slave_config cfg;
81 }; 81 };
82 82
83 struct edma_cc { 83 struct edma_cc {
84 int ctlr; 84 int ctlr;
85 struct dma_device dma_slave; 85 struct dma_device dma_slave;
86 struct edma_chan slave_chans[EDMA_CHANS]; 86 struct edma_chan slave_chans[EDMA_CHANS];
87 int num_slave_chans; 87 int num_slave_chans;
88 int dummy_slot; 88 int dummy_slot;
89 }; 89 };
90 90
91 static inline struct edma_cc *to_edma_cc(struct dma_device *d) 91 static inline struct edma_cc *to_edma_cc(struct dma_device *d)
92 { 92 {
93 return container_of(d, struct edma_cc, dma_slave); 93 return container_of(d, struct edma_cc, dma_slave);
94 } 94 }
95 95
96 static inline struct edma_chan *to_edma_chan(struct dma_chan *c) 96 static inline struct edma_chan *to_edma_chan(struct dma_chan *c)
97 { 97 {
98 return container_of(c, struct edma_chan, vchan.chan); 98 return container_of(c, struct edma_chan, vchan.chan);
99 } 99 }
100 100
101 static inline struct edma_desc 101 static inline struct edma_desc
102 *to_edma_desc(struct dma_async_tx_descriptor *tx) 102 *to_edma_desc(struct dma_async_tx_descriptor *tx)
103 { 103 {
104 return container_of(tx, struct edma_desc, vdesc.tx); 104 return container_of(tx, struct edma_desc, vdesc.tx);
105 } 105 }
106 106
107 static void edma_desc_free(struct virt_dma_desc *vdesc) 107 static void edma_desc_free(struct virt_dma_desc *vdesc)
108 { 108 {
109 kfree(container_of(vdesc, struct edma_desc, vdesc)); 109 kfree(container_of(vdesc, struct edma_desc, vdesc));
110 } 110 }
111 111
112 /* Dispatch a queued descriptor to the controller (caller holds lock) */ 112 /* Dispatch a queued descriptor to the controller (caller holds lock) */
113 static void edma_execute(struct edma_chan *echan) 113 static void edma_execute(struct edma_chan *echan)
114 { 114 {
115 struct virt_dma_desc *vdesc; 115 struct virt_dma_desc *vdesc;
116 struct edma_desc *edesc; 116 struct edma_desc *edesc;
117 struct device *dev = echan->vchan.chan.device->dev; 117 struct device *dev = echan->vchan.chan.device->dev;
118 int i, j, left, nslots; 118 int i, j, left, nslots;
119 119
120 /* If we have processed all psets or have not yet started */ 120 /* If we have processed all psets or have not yet started */
121 if (!echan->edesc || 121 if (!echan->edesc ||
122 echan->edesc->pset_nr == echan->edesc->processed) { 122 echan->edesc->pset_nr == echan->edesc->processed) {
123 /* Get next vdesc */ 123 /* Get next vdesc */
124 vdesc = vchan_next_desc(&echan->vchan); 124 vdesc = vchan_next_desc(&echan->vchan);
125 if (!vdesc) { 125 if (!vdesc) {
126 echan->edesc = NULL; 126 echan->edesc = NULL;
127 return; 127 return;
128 } 128 }
129 list_del(&vdesc->node); 129 list_del(&vdesc->node);
130 echan->edesc = to_edma_desc(&vdesc->tx); 130 echan->edesc = to_edma_desc(&vdesc->tx);
131 } 131 }
132 132
133 edesc = echan->edesc; 133 edesc = echan->edesc;
134 134
135 /* Find out how many left */ 135 /* Find out how many left */
136 left = edesc->pset_nr - edesc->processed; 136 left = edesc->pset_nr - edesc->processed;
137 nslots = min(MAX_NR_SG, left); 137 nslots = min(MAX_NR_SG, left);
138 138
139 /* Write descriptor PaRAM set(s) */ 139 /* Write descriptor PaRAM set(s) */
140 for (i = 0; i < nslots; i++) { 140 for (i = 0; i < nslots; i++) {
141 j = i + edesc->processed; 141 j = i + edesc->processed;
142 edma_write_slot(echan->slot[i], &edesc->pset[j]); 142 edma_write_slot(echan->slot[i], &edesc->pset[j]);
143 dev_dbg(echan->vchan.chan.device->dev, 143 dev_dbg(echan->vchan.chan.device->dev,
144 "\n pset[%d]:\n" 144 "\n pset[%d]:\n"
145 " chnum\t%d\n" 145 " chnum\t%d\n"
146 " slot\t%d\n" 146 " slot\t%d\n"
147 " opt\t%08x\n" 147 " opt\t%08x\n"
148 " src\t%08x\n" 148 " src\t%08x\n"
149 " dst\t%08x\n" 149 " dst\t%08x\n"
150 " abcnt\t%08x\n" 150 " abcnt\t%08x\n"
151 " ccnt\t%08x\n" 151 " ccnt\t%08x\n"
152 " bidx\t%08x\n" 152 " bidx\t%08x\n"
153 " cidx\t%08x\n" 153 " cidx\t%08x\n"
154 " lkrld\t%08x\n", 154 " lkrld\t%08x\n",
155 j, echan->ch_num, echan->slot[i], 155 j, echan->ch_num, echan->slot[i],
156 edesc->pset[j].opt, 156 edesc->pset[j].opt,
157 edesc->pset[j].src, 157 edesc->pset[j].src,
158 edesc->pset[j].dst, 158 edesc->pset[j].dst,
159 edesc->pset[j].a_b_cnt, 159 edesc->pset[j].a_b_cnt,
160 edesc->pset[j].ccnt, 160 edesc->pset[j].ccnt,
161 edesc->pset[j].src_dst_bidx, 161 edesc->pset[j].src_dst_bidx,
162 edesc->pset[j].src_dst_cidx, 162 edesc->pset[j].src_dst_cidx,
163 edesc->pset[j].link_bcntrld); 163 edesc->pset[j].link_bcntrld);
164 /* Link this slot to the next one unless it is the last set */ 164 /* Link this slot to the next one unless it is the last set */
165 if (i != (nslots - 1)) 165 if (i != (nslots - 1))
166 edma_link(echan->slot[i], echan->slot[i+1]); 166 edma_link(echan->slot[i], echan->slot[i+1]);
167 } 167 }
168 168
169 edesc->processed += nslots; 169 edesc->processed += nslots;
170 170
171 /* 171 /*
172 * If this is the last set in a series of SG-list transactions, 172 * If this is the last set in a series of SG-list transactions,
173 * then set up a link to the dummy slot; this results in all future 173 * then set up a link to the dummy slot; this results in all future
174 * events being absorbed, which is OK because we're done. 174 * events being absorbed, which is OK because we're done.
175 */ 175 */
176 if (edesc->processed == edesc->pset_nr) 176 if (edesc->processed == edesc->pset_nr)
177 edma_link(echan->slot[nslots-1], echan->ecc->dummy_slot); 177 edma_link(echan->slot[nslots-1], echan->ecc->dummy_slot);
178 178
179 edma_resume(echan->ch_num); 179 edma_resume(echan->ch_num);
180 180
181 if (edesc->processed <= MAX_NR_SG) { 181 if (edesc->processed <= MAX_NR_SG) {
182 dev_dbg(dev, "first transfer starting %d\n", echan->ch_num); 182 dev_dbg(dev, "first transfer starting %d\n", echan->ch_num);
183 edma_start(echan->ch_num); 183 edma_start(echan->ch_num);
184 } 184 }
185 185
186 /* 186 /*
187 * This happens due to setup times between intermediate transfers 187 * This happens due to setup times between intermediate transfers
188 * in long SG lists which have to be broken up into transfers of 188 * in long SG lists which have to be broken up into transfers of
189 * MAX_NR_SG 189 * MAX_NR_SG
190 */ 190 */
191 if (echan->missed) { 191 if (echan->missed) {
192 dev_dbg(dev, "missed event in execute detected\n"); 192 dev_dbg(dev, "missed event in execute detected\n");
193 edma_clean_channel(echan->ch_num); 193 edma_clean_channel(echan->ch_num);
194 edma_stop(echan->ch_num); 194 edma_stop(echan->ch_num);
195 edma_start(echan->ch_num); 195 edma_start(echan->ch_num);
196 edma_trigger_channel(echan->ch_num); 196 edma_trigger_channel(echan->ch_num);
197 echan->missed = 0; 197 echan->missed = 0;
198 } 198 }
199 } 199 }
200 200
201 static int edma_terminate_all(struct edma_chan *echan) 201 static int edma_terminate_all(struct edma_chan *echan)
202 { 202 {
203 unsigned long flags; 203 unsigned long flags;
204 LIST_HEAD(head); 204 LIST_HEAD(head);
205 205
206 spin_lock_irqsave(&echan->vchan.lock, flags); 206 spin_lock_irqsave(&echan->vchan.lock, flags);
207 207
208 /* 208 /*
209 * Stop DMA activity: we assume the callback will not be called 209 * Stop DMA activity: we assume the callback will not be called
210 * after edma_stop() returns (even if it does, it will see 210 * after edma_stop() returns (even if it does, it will see
211 * echan->edesc is NULL and exit.) 211 * echan->edesc is NULL and exit.)
212 */ 212 */
213 if (echan->edesc) { 213 if (echan->edesc) {
214 echan->edesc = NULL; 214 echan->edesc = NULL;
215 edma_stop(echan->ch_num); 215 edma_stop(echan->ch_num);
216 } 216 }
217 217
218 vchan_get_all_descriptors(&echan->vchan, &head); 218 vchan_get_all_descriptors(&echan->vchan, &head);
219 spin_unlock_irqrestore(&echan->vchan.lock, flags); 219 spin_unlock_irqrestore(&echan->vchan.lock, flags);
220 vchan_dma_desc_free_list(&echan->vchan, &head); 220 vchan_dma_desc_free_list(&echan->vchan, &head);
221 221
222 return 0; 222 return 0;
223 } 223 }
224 224
225 static int edma_slave_config(struct edma_chan *echan, 225 static int edma_slave_config(struct edma_chan *echan,
226 struct dma_slave_config *cfg) 226 struct dma_slave_config *cfg)
227 { 227 {
228 if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES || 228 if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
229 cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES) 229 cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
230 return -EINVAL; 230 return -EINVAL;
231 231
232 memcpy(&echan->cfg, cfg, sizeof(echan->cfg)); 232 memcpy(&echan->cfg, cfg, sizeof(echan->cfg));
233 233
234 return 0; 234 return 0;
235 } 235 }
236 236
237 static int edma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, 237 static int edma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
238 unsigned long arg) 238 unsigned long arg)
239 { 239 {
240 int ret = 0; 240 int ret = 0;
241 struct dma_slave_config *config; 241 struct dma_slave_config *config;
242 struct edma_chan *echan = to_edma_chan(chan); 242 struct edma_chan *echan = to_edma_chan(chan);
243 243
244 switch (cmd) { 244 switch (cmd) {
245 case DMA_TERMINATE_ALL: 245 case DMA_TERMINATE_ALL:
246 edma_terminate_all(echan); 246 edma_terminate_all(echan);
247 break; 247 break;
248 case DMA_SLAVE_CONFIG: 248 case DMA_SLAVE_CONFIG:
249 config = (struct dma_slave_config *)arg; 249 config = (struct dma_slave_config *)arg;
250 ret = edma_slave_config(echan, config); 250 ret = edma_slave_config(echan, config);
251 break; 251 break;
252 default: 252 default:
253 ret = -ENOSYS; 253 ret = -ENOSYS;
254 } 254 }
255 255
256 return ret; 256 return ret;
257 } 257 }
258 258
259 /* 259 /*
260 * A PaRAM set configuration abstraction used by other modes 260 * A PaRAM set configuration abstraction used by other modes
261 * @chan: Channel whose PaRAM set we're configuring 261 * @chan: Channel whose PaRAM set we're configuring
262 * @pset: PaRAM set to initialize and setup. 262 * @pset: PaRAM set to initialize and setup.
263 * @src_addr: Source address of the DMA 263 * @src_addr: Source address of the DMA
264 * @dst_addr: Destination address of the DMA 264 * @dst_addr: Destination address of the DMA
265 * @burst: In units of dev_width, how much to send 265 * @burst: In units of dev_width, how much to send
266 * @dev_width: Bus width of the slave device 266 * @dev_width: Bus width of the slave device
267 * @dma_length: Total length of the DMA transfer 267 * @dma_length: Total length of the DMA transfer
268 * @direction: Direction of the transfer 268 * @direction: Direction of the transfer
269 */ 269 */
270 static int edma_config_pset(struct dma_chan *chan, struct edmacc_param *pset, 270 static int edma_config_pset(struct dma_chan *chan, struct edmacc_param *pset,
271 dma_addr_t src_addr, dma_addr_t dst_addr, u32 burst, 271 dma_addr_t src_addr, dma_addr_t dst_addr, u32 burst,
272 enum dma_slave_buswidth dev_width, unsigned int dma_length, 272 enum dma_slave_buswidth dev_width, unsigned int dma_length,
273 enum dma_transfer_direction direction) 273 enum dma_transfer_direction direction)
274 { 274 {
275 struct edma_chan *echan = to_edma_chan(chan); 275 struct edma_chan *echan = to_edma_chan(chan);
276 struct device *dev = chan->device->dev; 276 struct device *dev = chan->device->dev;
277 int acnt, bcnt, ccnt, cidx; 277 int acnt, bcnt, ccnt, cidx;
278 int src_bidx, dst_bidx, src_cidx, dst_cidx; 278 int src_bidx, dst_bidx, src_cidx, dst_cidx;
279 int absync; 279 int absync;
280 280
281 acnt = dev_width; 281 acnt = dev_width;
282 /* 282 /*
283 * If the maxburst is equal to the fifo width, use 283 * If the maxburst is equal to the fifo width, use
284 * A-synced transfers. This allows for large contiguous 284 * A-synced transfers. This allows for large contiguous
285 * buffer transfers using only one PaRAM set. 285 * buffer transfers using only one PaRAM set.
286 */ 286 */
287 if (burst == 1) { 287 if (burst == 1) {
288 /* 288 /*
289 * For the A-sync case, bcnt and ccnt are the remainder 289 * For the A-sync case, bcnt and ccnt are the remainder
290 * and quotient respectively of the division of: 290 * and quotient respectively of the division of:
291 * (dma_length / acnt) by (SZ_64K - 1). This is so 291 * (dma_length / acnt) by (SZ_64K - 1). This is so
292 * that in case bcnt overflows, we have ccnt to use. 292 * that in case bcnt overflows, we have ccnt to use.
293 * Note: In A-sync transfers only, bcntrld is used, but it 293 * Note: In A-sync transfers only, bcntrld is used, but it
294 * only applies for sg_dma_len(sg) >= SZ_64K. 294 * only applies for sg_dma_len(sg) >= SZ_64K.
295 * In that case, the approach taken is: bcnt for the 295 * In that case, the approach taken is: bcnt for the
296 * first frame will be the remainder below. Then for 296 * first frame will be the remainder below. Then for
297 * every successive frame, bcnt will be SZ_64K-1. This 297 * every successive frame, bcnt will be SZ_64K-1. This
298 * is assured because bcntrld = 0xffff at the end of this function. 298 * is assured because bcntrld = 0xffff at the end of this function.
299 */ 299 */
300 absync = false; 300 absync = false;
301 ccnt = dma_length / acnt / (SZ_64K - 1); 301 ccnt = dma_length / acnt / (SZ_64K - 1);
302 bcnt = dma_length / acnt - ccnt * (SZ_64K - 1); 302 bcnt = dma_length / acnt - ccnt * (SZ_64K - 1);
303 /* 303 /*
304 * If bcnt is non-zero, we have a remainder and hence an 304 * If bcnt is non-zero, we have a remainder and hence an
305 * extra frame to transfer, so increment ccnt. 305 * extra frame to transfer, so increment ccnt.
306 */ 306 */
307 if (bcnt) 307 if (bcnt)
308 ccnt++; 308 ccnt++;
309 else 309 else
310 bcnt = SZ_64K - 1; 310 bcnt = SZ_64K - 1;
311 cidx = acnt; 311 cidx = acnt;
312 } else { 312 } else {
313 /* 313 /*
314 * If maxburst is greater than the fifo address_width, 314 * If maxburst is greater than the fifo address_width,
315 * use AB-synced transfers where A count is the fifo 315 * use AB-synced transfers where A count is the fifo
316 * address_width and B count is the maxburst. In this 316 * address_width and B count is the maxburst. In this
317 * case, we are limited to transfers of C count frames 317 * case, we are limited to transfers of C count frames
318 * of (address_width * maxburst) where C count is limited 318 * of (address_width * maxburst) where C count is limited
319 * to SZ_64K-1. This places an upper bound on the length 319 * to SZ_64K-1. This places an upper bound on the length
320 * of an SG segment that can be handled. 320 * of an SG segment that can be handled.
321 */ 321 */
322 absync = true; 322 absync = true;
323 bcnt = burst; 323 bcnt = burst;
324 ccnt = dma_length / (acnt * bcnt); 324 ccnt = dma_length / (acnt * bcnt);
325 if (ccnt > (SZ_64K - 1)) { 325 if (ccnt > (SZ_64K - 1)) {
326 dev_err(dev, "Exceeded max SG segment size\n"); 326 dev_err(dev, "Exceeded max SG segment size\n");
327 return -EINVAL; 327 return -EINVAL;
328 } 328 }
329 cidx = acnt * bcnt; 329 cidx = acnt * bcnt;
330 } 330 }
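/*
 * Illustrative note (not part of the original driver): for an A-synced
 * transfer with acnt = 1 and dma_length = 1048576, ccnt = 1048576 /
 * 65535 = 16 with remainder bcnt = 16, so ccnt is bumped to 17: one
 * frame of 16 elements followed by 16 frames of 65535 (reloaded via
 * bcntrld), and 16 + 16 * 65535 = 1048576. For an AB-synced transfer
 * with acnt = 4 and burst = 8, each frame moves 32 bytes and cidx = 32.
 */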
331 331
332 if (direction == DMA_MEM_TO_DEV) { 332 if (direction == DMA_MEM_TO_DEV) {
333 src_bidx = acnt; 333 src_bidx = acnt;
334 src_cidx = cidx; 334 src_cidx = cidx;
335 dst_bidx = 0; 335 dst_bidx = 0;
336 dst_cidx = 0; 336 dst_cidx = 0;
337 } else if (direction == DMA_DEV_TO_MEM) { 337 } else if (direction == DMA_DEV_TO_MEM) {
338 src_bidx = 0; 338 src_bidx = 0;
339 src_cidx = 0; 339 src_cidx = 0;
340 dst_bidx = acnt; 340 dst_bidx = acnt;
341 dst_cidx = cidx; 341 dst_cidx = cidx;
342 } else { 342 } else {
343 dev_err(dev, "%s: direction not implemented yet\n", __func__); 343 dev_err(dev, "%s: direction not implemented yet\n", __func__);
344 return -EINVAL; 344 return -EINVAL;
345 } 345 }
346 346
347 pset->opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num)); 347 pset->opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num));
348 /* Configure A or AB synchronized transfers */ 348 /* Configure A or AB synchronized transfers */
349 if (absync) 349 if (absync)
350 pset->opt |= SYNCDIM; 350 pset->opt |= SYNCDIM;
351 351
352 pset->src = src_addr; 352 pset->src = src_addr;
353 pset->dst = dst_addr; 353 pset->dst = dst_addr;
354 354
355 pset->src_dst_bidx = (dst_bidx << 16) | src_bidx; 355 pset->src_dst_bidx = (dst_bidx << 16) | src_bidx;
356 pset->src_dst_cidx = (dst_cidx << 16) | src_cidx; 356 pset->src_dst_cidx = (dst_cidx << 16) | src_cidx;
357 357
358 pset->a_b_cnt = bcnt << 16 | acnt; 358 pset->a_b_cnt = bcnt << 16 | acnt;
359 pset->ccnt = ccnt; 359 pset->ccnt = ccnt;
360 /* 360 /*
361 * The only time (bcntrld) auto reload is required is in the 361 * The only time (bcntrld) auto reload is required is in the
362 * A-sync case, and there only a reload value of SZ_64K-1 is 362 * A-sync case, and there only a reload value of SZ_64K-1 is
363 * ever needed. 'link' is initially set to NULL 363 * ever needed. 'link' is initially set to NULL
364 * and then later will be populated by edma_execute. 364 * and then later will be populated by edma_execute.
365 */ 365 */
366 pset->link_bcntrld = 0xffffffff; 366 pset->link_bcntrld = 0xffffffff;
367 return absync; 367 return absync;
368 } 368 }
369 369
370 static struct dma_async_tx_descriptor *edma_prep_slave_sg( 370 static struct dma_async_tx_descriptor *edma_prep_slave_sg(
371 struct dma_chan *chan, struct scatterlist *sgl, 371 struct dma_chan *chan, struct scatterlist *sgl,
372 unsigned int sg_len, enum dma_transfer_direction direction, 372 unsigned int sg_len, enum dma_transfer_direction direction,
373 unsigned long tx_flags, void *context) 373 unsigned long tx_flags, void *context)
374 { 374 {
375 struct edma_chan *echan = to_edma_chan(chan); 375 struct edma_chan *echan = to_edma_chan(chan);
376 struct device *dev = chan->device->dev; 376 struct device *dev = chan->device->dev;
377 struct edma_desc *edesc; 377 struct edma_desc *edesc;
378 dma_addr_t src_addr = 0, dst_addr = 0; 378 dma_addr_t src_addr = 0, dst_addr = 0;
379 enum dma_slave_buswidth dev_width; 379 enum dma_slave_buswidth dev_width;
380 u32 burst; 380 u32 burst;
381 struct scatterlist *sg; 381 struct scatterlist *sg;
382 int i, nslots, ret; 382 int i, nslots, ret;
383 383
384 if (unlikely(!echan || !sgl || !sg_len)) 384 if (unlikely(!echan || !sgl || !sg_len))
385 return NULL; 385 return NULL;
386 386
387 if (direction == DMA_DEV_TO_MEM) { 387 if (direction == DMA_DEV_TO_MEM) {
388 src_addr = echan->cfg.src_addr; 388 src_addr = echan->cfg.src_addr;
389 dev_width = echan->cfg.src_addr_width; 389 dev_width = echan->cfg.src_addr_width;
390 burst = echan->cfg.src_maxburst; 390 burst = echan->cfg.src_maxburst;
391 } else if (direction == DMA_MEM_TO_DEV) { 391 } else if (direction == DMA_MEM_TO_DEV) {
392 dst_addr = echan->cfg.dst_addr; 392 dst_addr = echan->cfg.dst_addr;
393 dev_width = echan->cfg.dst_addr_width; 393 dev_width = echan->cfg.dst_addr_width;
394 burst = echan->cfg.dst_maxburst; 394 burst = echan->cfg.dst_maxburst;
395 } else { 395 } else {
396 dev_err(dev, "%s: bad direction?\n", __func__); 396 dev_err(dev, "%s: bad direction?\n", __func__);
397 return NULL; 397 return NULL;
398 } 398 }
399 399
400 if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) { 400 if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) {
401 dev_err(dev, "Undefined slave buswidth\n"); 401 dev_err(dev, "Undefined slave buswidth\n");
402 return NULL; 402 return NULL;
403 } 403 }
404 404
405 edesc = kzalloc(sizeof(*edesc) + sg_len * 405 edesc = kzalloc(sizeof(*edesc) + sg_len *
406 sizeof(edesc->pset[0]), GFP_ATOMIC); 406 sizeof(edesc->pset[0]), GFP_ATOMIC);
407 if (!edesc) { 407 if (!edesc) {
408 dev_dbg(dev, "Failed to allocate a descriptor\n"); 408 dev_dbg(dev, "Failed to allocate a descriptor\n");
409 return NULL; 409 return NULL;
410 } 410 }
411 411
412 edesc->pset_nr = sg_len; 412 edesc->pset_nr = sg_len;
413 413
414 /* Allocate a PaRAM slot, if needed */ 414 /* Allocate a PaRAM slot, if needed */
415 nslots = min_t(unsigned, MAX_NR_SG, sg_len); 415 nslots = min_t(unsigned, MAX_NR_SG, sg_len);
416 416
417 for (i = 0; i < nslots; i++) { 417 for (i = 0; i < nslots; i++) {
418 if (echan->slot[i] < 0) { 418 if (echan->slot[i] < 0) {
419 echan->slot[i] = 419 echan->slot[i] =
420 edma_alloc_slot(EDMA_CTLR(echan->ch_num), 420 edma_alloc_slot(EDMA_CTLR(echan->ch_num),
421 EDMA_SLOT_ANY); 421 EDMA_SLOT_ANY);
422 if (echan->slot[i] < 0) { 422 if (echan->slot[i] < 0) {
424 dev_err(dev, "Failed to allocate slot\n"); 424 dev_err(dev, "Failed to allocate slot\n");
425 kfree(edesc); 425 kfree(edesc);
426 return NULL; 426 return NULL;
427 } 427 }
428 } 428 }
429 } 429 }
430 430
431 /* Configure PaRAM sets for each SG */ 431 /* Configure PaRAM sets for each SG */
432 for_each_sg(sgl, sg, sg_len, i) { 432 for_each_sg(sgl, sg, sg_len, i) {
433 /* Get address for each SG */ 433 /* Get address for each SG */
434 if (direction == DMA_DEV_TO_MEM) 434 if (direction == DMA_DEV_TO_MEM)
435 dst_addr = sg_dma_address(sg); 435 dst_addr = sg_dma_address(sg);
436 else 436 else
437 src_addr = sg_dma_address(sg); 437 src_addr = sg_dma_address(sg);
438 438
439 ret = edma_config_pset(chan, &edesc->pset[i], src_addr, 439 ret = edma_config_pset(chan, &edesc->pset[i], src_addr,
440 dst_addr, burst, dev_width, 440 dst_addr, burst, dev_width,
441 sg_dma_len(sg), direction); 441 sg_dma_len(sg), direction);
442 if (ret < 0) { 442 if (ret < 0) {
443 kfree(edesc); 443 kfree(edesc);
444 return NULL; 444 return NULL;
445 } 445 }
446 446
447 edesc->absync = ret; 447 edesc->absync = ret;
448 448
449 /* If this is the last in the current batch of MAX_NR_SG 449 /* If this is the last in the current batch of MAX_NR_SG
450 transactions, enable interrupts so that the next batch is processed */ 450 transactions, enable interrupts so that the next batch is processed */
451 if (!((i+1) % MAX_NR_SG)) 451 if (!((i+1) % MAX_NR_SG))
452 edesc->pset[i].opt |= TCINTEN; 452 edesc->pset[i].opt |= TCINTEN;
453 453
454 /* If this is the last set, enable completion interrupt flag */ 454 /* If this is the last set, enable completion interrupt flag */
455 if (i == sg_len - 1) 455 if (i == sg_len - 1)
456 edesc->pset[i].opt |= TCINTEN; 456 edesc->pset[i].opt |= TCINTEN;
457 } 457 }
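/*
 * Illustrative note (not part of the original driver): with MAX_NR_SG =
 * 20 and sg_len = 45, TCINTEN ends up set on psets 19 and 39 (end of
 * each 20-entry batch, so edma_execute can load the next batch) and on
 * pset 44 (completion of the whole transfer).
 */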
458 458
459 return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags); 459 return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
460 } 460 }
461 461
462 static void edma_callback(unsigned ch_num, u16 ch_status, void *data) 462 static void edma_callback(unsigned ch_num, u16 ch_status, void *data)
463 { 463 {
464 struct edma_chan *echan = data; 464 struct edma_chan *echan = data;
465 struct device *dev = echan->vchan.chan.device->dev; 465 struct device *dev = echan->vchan.chan.device->dev;
466 struct edma_desc *edesc; 466 struct edma_desc *edesc;
467 unsigned long flags; 467 unsigned long flags;
468 struct edmacc_param p; 468 struct edmacc_param p;
469 469
470 /* Pause the channel */ 470 /* Pause the channel */
471 edma_pause(echan->ch_num); 471 edma_pause(echan->ch_num);
472 472
473 switch (ch_status) { 473 switch (ch_status) {
474 case DMA_COMPLETE: 474 case DMA_COMPLETE:
475 spin_lock_irqsave(&echan->vchan.lock, flags); 475 spin_lock_irqsave(&echan->vchan.lock, flags);
476 476
477 edesc = echan->edesc; 477 edesc = echan->edesc;
478 if (edesc) { 478 if (edesc) {
479 if (edesc->processed == edesc->pset_nr) { 479 if (edesc->processed == edesc->pset_nr) {
480 dev_dbg(dev, "Transfer complete, stopping channel %d\n", ch_num); 480 dev_dbg(dev, "Transfer complete, stopping channel %d\n", ch_num);
481 edma_stop(echan->ch_num); 481 edma_stop(echan->ch_num);
482 vchan_cookie_complete(&edesc->vdesc); 482 vchan_cookie_complete(&edesc->vdesc);
483 } else { 483 } else {
484 dev_dbg(dev, "Intermediate transfer complete on channel %d\n", ch_num); 484 dev_dbg(dev, "Intermediate transfer complete on channel %d\n", ch_num);
485 } 485 }
486 486
487 edma_execute(echan); 487 edma_execute(echan);
488 } 488 }
489 489
490 spin_unlock_irqrestore(&echan->vchan.lock, flags); 490 spin_unlock_irqrestore(&echan->vchan.lock, flags);
491 491
492 break; 492 break;
493 case DMA_CC_ERROR: 493 case DMA_CC_ERROR:
494 spin_lock_irqsave(&echan->vchan.lock, flags); 494 spin_lock_irqsave(&echan->vchan.lock, flags);
495 495
496 edma_read_slot(EDMA_CHAN_SLOT(echan->slot[0]), &p); 496 edma_read_slot(EDMA_CHAN_SLOT(echan->slot[0]), &p);
497 497
498 /* 498 /*
499 * Issue later based on the missed flag, which is sure 499 * Issue later based on the missed flag, which is sure
500 * to be handled because either: 500 * to be handled because either:
501 * (1) we finished transmitting an intermediate slot and 501 * (1) we finished transmitting an intermediate slot and
502 * edma_execute is coming up, or 502 * edma_execute is coming up, or
503 * (2) we finished the current transfer and issue will 503 * (2) we finished the current transfer and issue will
504 * call edma_execute. 504 * call edma_execute.
505 * 505 *
506 * Important note: issuing can be dangerous here and 506 * Important note: issuing can be dangerous here and
507 * lead to some nasty recursion when we are in a NULL 507 * lead to some nasty recursion when we are in a NULL
508 * slot. So we avoid doing so and set the missed flag. 508 * slot. So we avoid doing so and set the missed flag.
509 */ 509 */
510 if (p.a_b_cnt == 0 && p.ccnt == 0) { 510 if (p.a_b_cnt == 0 && p.ccnt == 0) {
511 dev_dbg(dev, "Error occurred, looks like slot is null, just setting miss\n"); 511 dev_dbg(dev, "Error occurred, looks like slot is null, just setting miss\n");
512 echan->missed = 1; 512 echan->missed = 1;
513 } else { 513 } else {
514 /* 514 /*
515 * The slot is already programmed but the event got 515 * The slot is already programmed but the event got
516 * missed, so it's safe to issue it here. 516 * missed, so it's safe to issue it here.
517 */ 517 */
518 dev_dbg(dev, "Error occurred but slot is non-null, TRIGGERING\n"); 518 dev_dbg(dev, "Error occurred but slot is non-null, TRIGGERING\n");
519 edma_clean_channel(echan->ch_num); 519 edma_clean_channel(echan->ch_num);
520 edma_stop(echan->ch_num); 520 edma_stop(echan->ch_num);
521 edma_start(echan->ch_num); 521 edma_start(echan->ch_num);
522 edma_trigger_channel(echan->ch_num); 522 edma_trigger_channel(echan->ch_num);
523 } 523 }
524 524
525 spin_unlock_irqrestore(&echan->vchan.lock, flags); 525 spin_unlock_irqrestore(&echan->vchan.lock, flags);
526 526
527 break; 527 break;
528 default: 528 default:
529 break; 529 break;
530 } 530 }
531 } 531 }
532 532
533 /* Alloc channel resources */ 533 /* Alloc channel resources */
534 static int edma_alloc_chan_resources(struct dma_chan *chan) 534 static int edma_alloc_chan_resources(struct dma_chan *chan)
535 { 535 {
536 struct edma_chan *echan = to_edma_chan(chan); 536 struct edma_chan *echan = to_edma_chan(chan);
537 struct device *dev = chan->device->dev; 537 struct device *dev = chan->device->dev;
538 int ret; 538 int ret;
539 int a_ch_num; 539 int a_ch_num;
540 LIST_HEAD(descs); 540 LIST_HEAD(descs);
541 541
542 a_ch_num = edma_alloc_channel(echan->ch_num, edma_callback, 542 a_ch_num = edma_alloc_channel(echan->ch_num, edma_callback,
543 chan, EVENTQ_DEFAULT); 543 chan, EVENTQ_DEFAULT);
544 544
545 if (a_ch_num < 0) { 545 if (a_ch_num < 0) {
546 ret = -ENODEV; 546 ret = -ENODEV;
547 goto err_no_chan; 547 goto err_no_chan;
548 } 548 }
549 549
550 if (a_ch_num != echan->ch_num) { 550 if (a_ch_num != echan->ch_num) {
551 dev_err(dev, "failed to allocate requested channel %u:%u\n", 551 dev_err(dev, "failed to allocate requested channel %u:%u\n",
552 EDMA_CTLR(echan->ch_num), 552 EDMA_CTLR(echan->ch_num),
553 EDMA_CHAN_SLOT(echan->ch_num)); 553 EDMA_CHAN_SLOT(echan->ch_num));
554 ret = -ENODEV; 554 ret = -ENODEV;
555 goto err_wrong_chan; 555 goto err_wrong_chan;
556 } 556 }
557 557
558 echan->alloced = true; 558 echan->alloced = true;
559 echan->slot[0] = echan->ch_num; 559 echan->slot[0] = echan->ch_num;
560 560
561 dev_info(dev, "allocated channel for %u:%u\n", 561 dev_info(dev, "allocated channel for %u:%u\n",
562 EDMA_CTLR(echan->ch_num), EDMA_CHAN_SLOT(echan->ch_num)); 562 EDMA_CTLR(echan->ch_num), EDMA_CHAN_SLOT(echan->ch_num));
563 563
564 return 0; 564 return 0;
565 565
566 err_wrong_chan: 566 err_wrong_chan:
567 edma_free_channel(a_ch_num); 567 edma_free_channel(a_ch_num);
568 err_no_chan: 568 err_no_chan:
569 return ret; 569 return ret;
570 } 570 }
571 571
572 /* Free channel resources */ 572 /* Free channel resources */
573 static void edma_free_chan_resources(struct dma_chan *chan) 573 static void edma_free_chan_resources(struct dma_chan *chan)
574 { 574 {
575 struct edma_chan *echan = to_edma_chan(chan); 575 struct edma_chan *echan = to_edma_chan(chan);
576 struct device *dev = chan->device->dev; 576 struct device *dev = chan->device->dev;
577 int i; 577 int i;
578 578
579 /* Terminate transfers */ 579 /* Terminate transfers */
580 edma_stop(echan->ch_num); 580 edma_stop(echan->ch_num);
581 581
582 vchan_free_chan_resources(&echan->vchan); 582 vchan_free_chan_resources(&echan->vchan);
583 583
584 /* Free EDMA PaRAM slots */ 584 /* Free EDMA PaRAM slots */
585 for (i = 1; i < EDMA_MAX_SLOTS; i++) { 585 for (i = 1; i < EDMA_MAX_SLOTS; i++) {
586 if (echan->slot[i] >= 0) { 586 if (echan->slot[i] >= 0) {
587 edma_free_slot(echan->slot[i]); 587 edma_free_slot(echan->slot[i]);
588 echan->slot[i] = -1; 588 echan->slot[i] = -1;
589 } 589 }
590 } 590 }
591 591
592 /* Free EDMA channel */ 592 /* Free EDMA channel */
593 if (echan->alloced) { 593 if (echan->alloced) {
594 edma_free_channel(echan->ch_num); 594 edma_free_channel(echan->ch_num);
595 echan->alloced = false; 595 echan->alloced = false;
596 } 596 }
597 597
598 dev_info(dev, "freeing channel for %u\n", echan->ch_num); 598 dev_info(dev, "freeing channel for %u\n", echan->ch_num);
599 } 599 }
600 600
601 /* Send pending descriptor to hardware */ 601 /* Send pending descriptor to hardware */
602 static void edma_issue_pending(struct dma_chan *chan) 602 static void edma_issue_pending(struct dma_chan *chan)
603 { 603 {
604 struct edma_chan *echan = to_edma_chan(chan); 604 struct edma_chan *echan = to_edma_chan(chan);
605 unsigned long flags; 605 unsigned long flags;
606 606
607 spin_lock_irqsave(&echan->vchan.lock, flags); 607 spin_lock_irqsave(&echan->vchan.lock, flags);
608 if (vchan_issue_pending(&echan->vchan) && !echan->edesc) 608 if (vchan_issue_pending(&echan->vchan) && !echan->edesc)
609 edma_execute(echan); 609 edma_execute(echan);
610 spin_unlock_irqrestore(&echan->vchan.lock, flags); 610 spin_unlock_irqrestore(&echan->vchan.lock, flags);
611 } 611 }
612 612
613 static size_t edma_desc_size(struct edma_desc *edesc) 613 static size_t edma_desc_size(struct edma_desc *edesc)
614 { 614 {
615 int i; 615 int i;
616 size_t size; 616 size_t size;
617 617
618 if (edesc->absync) 618 if (edesc->absync)
619 for (size = i = 0; i < edesc->pset_nr; i++) 619 for (size = i = 0; i < edesc->pset_nr; i++)
620 size += (edesc->pset[i].a_b_cnt & 0xffff) * 620 size += (edesc->pset[i].a_b_cnt & 0xffff) *
621 (edesc->pset[i].a_b_cnt >> 16) * 621 (edesc->pset[i].a_b_cnt >> 16) *
622 edesc->pset[i].ccnt; 622 edesc->pset[i].ccnt;
623 else 623 else
624 size = (edesc->pset[0].a_b_cnt & 0xffff) * 624 size = (edesc->pset[0].a_b_cnt & 0xffff) *
625 (edesc->pset[0].a_b_cnt >> 16) + 625 (edesc->pset[0].a_b_cnt >> 16) +
626 (edesc->pset[0].a_b_cnt & 0xffff) * 626 (edesc->pset[0].a_b_cnt & 0xffff) *
627 (SZ_64K - 1) * edesc->pset[0].ccnt; 627 (SZ_64K - 1) * edesc->pset[0].ccnt;
628 628
629 return size; 629 return size;
630 } 630 }
631 631
632 /* Check request completion status */ 632 /* Check request completion status */
633 static enum dma_status edma_tx_status(struct dma_chan *chan, 633 static enum dma_status edma_tx_status(struct dma_chan *chan,
634 dma_cookie_t cookie, 634 dma_cookie_t cookie,
635 struct dma_tx_state *txstate) 635 struct dma_tx_state *txstate)
636 { 636 {
637 struct edma_chan *echan = to_edma_chan(chan); 637 struct edma_chan *echan = to_edma_chan(chan);
638 struct virt_dma_desc *vdesc; 638 struct virt_dma_desc *vdesc;
639 enum dma_status ret; 639 enum dma_status ret;
640 unsigned long flags; 640 unsigned long flags;
641 641
642 ret = dma_cookie_status(chan, cookie, txstate); 642 ret = dma_cookie_status(chan, cookie, txstate);
643 if (ret == DMA_SUCCESS || !txstate) 643 if (ret == DMA_COMPLETE || !txstate)
644 return ret; 644 return ret;
645 645
646 spin_lock_irqsave(&echan->vchan.lock, flags); 646 spin_lock_irqsave(&echan->vchan.lock, flags);
647 vdesc = vchan_find_desc(&echan->vchan, cookie); 647 vdesc = vchan_find_desc(&echan->vchan, cookie);
648 if (vdesc) { 648 if (vdesc) {
649 txstate->residue = edma_desc_size(to_edma_desc(&vdesc->tx)); 649 txstate->residue = edma_desc_size(to_edma_desc(&vdesc->tx));
650 } else if (echan->edesc && echan->edesc->vdesc.tx.cookie == cookie) { 650 } else if (echan->edesc && echan->edesc->vdesc.tx.cookie == cookie) {
651 struct edma_desc *edesc = echan->edesc; 651 struct edma_desc *edesc = echan->edesc;
652 txstate->residue = edma_desc_size(edesc); 652 txstate->residue = edma_desc_size(edesc);
653 } 653 }
654 spin_unlock_irqrestore(&echan->vchan.lock, flags); 654 spin_unlock_irqrestore(&echan->vchan.lock, flags);
655 655
656 return ret; 656 return ret;
657 } 657 }
658 658
659 static void __init edma_chan_init(struct edma_cc *ecc, 659 static void __init edma_chan_init(struct edma_cc *ecc,
660 struct dma_device *dma, 660 struct dma_device *dma,
661 struct edma_chan *echans) 661 struct edma_chan *echans)
662 { 662 {
663 int i, j; 663 int i, j;
664 664
665 for (i = 0; i < EDMA_CHANS; i++) { 665 for (i = 0; i < EDMA_CHANS; i++) {
666 struct edma_chan *echan = &echans[i]; 666 struct edma_chan *echan = &echans[i];
667 echan->ch_num = EDMA_CTLR_CHAN(ecc->ctlr, i); 667 echan->ch_num = EDMA_CTLR_CHAN(ecc->ctlr, i);
668 echan->ecc = ecc; 668 echan->ecc = ecc;
669 echan->vchan.desc_free = edma_desc_free; 669 echan->vchan.desc_free = edma_desc_free;
670 670
671 vchan_init(&echan->vchan, dma); 671 vchan_init(&echan->vchan, dma);
672 672
673 INIT_LIST_HEAD(&echan->node); 673 INIT_LIST_HEAD(&echan->node);
674 for (j = 0; j < EDMA_MAX_SLOTS; j++) 674 for (j = 0; j < EDMA_MAX_SLOTS; j++)
675 echan->slot[j] = -1; 675 echan->slot[j] = -1;
676 } 676 }
677 } 677 }
678 678
679 static void edma_dma_init(struct edma_cc *ecc, struct dma_device *dma, 679 static void edma_dma_init(struct edma_cc *ecc, struct dma_device *dma,
680 struct device *dev) 680 struct device *dev)
681 { 681 {
682 dma->device_prep_slave_sg = edma_prep_slave_sg; 682 dma->device_prep_slave_sg = edma_prep_slave_sg;
683 dma->device_alloc_chan_resources = edma_alloc_chan_resources; 683 dma->device_alloc_chan_resources = edma_alloc_chan_resources;
684 dma->device_free_chan_resources = edma_free_chan_resources; 684 dma->device_free_chan_resources = edma_free_chan_resources;
685 dma->device_issue_pending = edma_issue_pending; 685 dma->device_issue_pending = edma_issue_pending;
686 dma->device_tx_status = edma_tx_status; 686 dma->device_tx_status = edma_tx_status;
687 dma->device_control = edma_control; 687 dma->device_control = edma_control;
688 dma->dev = dev; 688 dma->dev = dev;
689 689
690 INIT_LIST_HEAD(&dma->channels); 690 INIT_LIST_HEAD(&dma->channels);
691 } 691 }
692 692
693 static int edma_probe(struct platform_device *pdev) 693 static int edma_probe(struct platform_device *pdev)
694 { 694 {
695 struct edma_cc *ecc; 695 struct edma_cc *ecc;
696 int ret; 696 int ret;
697 697
698 ecc = devm_kzalloc(&pdev->dev, sizeof(*ecc), GFP_KERNEL); 698 ecc = devm_kzalloc(&pdev->dev, sizeof(*ecc), GFP_KERNEL);
699 if (!ecc) { 699 if (!ecc) {
700 dev_err(&pdev->dev, "Can't allocate controller\n"); 700 dev_err(&pdev->dev, "Can't allocate controller\n");
701 return -ENOMEM; 701 return -ENOMEM;
702 } 702 }
703 703
704 ecc->ctlr = pdev->id; 704 ecc->ctlr = pdev->id;
705 ecc->dummy_slot = edma_alloc_slot(ecc->ctlr, EDMA_SLOT_ANY); 705 ecc->dummy_slot = edma_alloc_slot(ecc->ctlr, EDMA_SLOT_ANY);
706 if (ecc->dummy_slot < 0) { 706 if (ecc->dummy_slot < 0) {
707 dev_err(&pdev->dev, "Can't allocate PaRAM dummy slot\n"); 707 dev_err(&pdev->dev, "Can't allocate PaRAM dummy slot\n");
708 return -EIO; 708 return -EIO;
709 } 709 }
710 710
711 dma_cap_zero(ecc->dma_slave.cap_mask); 711 dma_cap_zero(ecc->dma_slave.cap_mask);
712 dma_cap_set(DMA_SLAVE, ecc->dma_slave.cap_mask); 712 dma_cap_set(DMA_SLAVE, ecc->dma_slave.cap_mask);
713 713
714 edma_dma_init(ecc, &ecc->dma_slave, &pdev->dev); 714 edma_dma_init(ecc, &ecc->dma_slave, &pdev->dev);
715 715
716 edma_chan_init(ecc, &ecc->dma_slave, ecc->slave_chans); 716 edma_chan_init(ecc, &ecc->dma_slave, ecc->slave_chans);
717 717
718 ret = dma_async_device_register(&ecc->dma_slave); 718 ret = dma_async_device_register(&ecc->dma_slave);
719 if (ret) 719 if (ret)
720 goto err_reg1; 720 goto err_reg1;
721 721
722 platform_set_drvdata(pdev, ecc); 722 platform_set_drvdata(pdev, ecc);
723 723
724 dev_info(&pdev->dev, "TI EDMA DMA engine driver\n"); 724 dev_info(&pdev->dev, "TI EDMA DMA engine driver\n");
725 725
726 return 0; 726 return 0;
727 727
728 err_reg1: 728 err_reg1:
729 edma_free_slot(ecc->dummy_slot); 729 edma_free_slot(ecc->dummy_slot);
730 return ret; 730 return ret;
731 } 731 }
732 732
733 static int edma_remove(struct platform_device *pdev) 733 static int edma_remove(struct platform_device *pdev)
734 { 734 {
735 struct device *dev = &pdev->dev; 735 struct device *dev = &pdev->dev;
736 struct edma_cc *ecc = dev_get_drvdata(dev); 736 struct edma_cc *ecc = dev_get_drvdata(dev);
737 737
738 dma_async_device_unregister(&ecc->dma_slave); 738 dma_async_device_unregister(&ecc->dma_slave);
739 edma_free_slot(ecc->dummy_slot); 739 edma_free_slot(ecc->dummy_slot);
740 740
741 return 0; 741 return 0;
742 } 742 }
743 743
744 static struct platform_driver edma_driver = { 744 static struct platform_driver edma_driver = {
745 .probe = edma_probe, 745 .probe = edma_probe,
746 .remove = edma_remove, 746 .remove = edma_remove,
747 .driver = { 747 .driver = {
748 .name = "edma-dma-engine", 748 .name = "edma-dma-engine",
749 .owner = THIS_MODULE, 749 .owner = THIS_MODULE,
750 }, 750 },
751 }; 751 };
752 752
753 bool edma_filter_fn(struct dma_chan *chan, void *param) 753 bool edma_filter_fn(struct dma_chan *chan, void *param)
754 { 754 {
755 if (chan->device->dev->driver == &edma_driver.driver) { 755 if (chan->device->dev->driver == &edma_driver.driver) {
756 struct edma_chan *echan = to_edma_chan(chan); 756 struct edma_chan *echan = to_edma_chan(chan);
757 unsigned ch_req = *(unsigned *)param; 757 unsigned ch_req = *(unsigned *)param;
758 return ch_req == echan->ch_num; 758 return ch_req == echan->ch_num;
759 } 759 }
760 return false; 760 return false;
761 } 761 }
762 EXPORT_SYMBOL(edma_filter_fn); 762 EXPORT_SYMBOL(edma_filter_fn);
763 763
764 static struct platform_device *pdev0, *pdev1; 764 static struct platform_device *pdev0, *pdev1;
765 765
766 static const struct platform_device_info edma_dev_info0 = { 766 static const struct platform_device_info edma_dev_info0 = {
767 .name = "edma-dma-engine", 767 .name = "edma-dma-engine",
768 .id = 0, 768 .id = 0,
769 }; 769 };
770 770
771 static const struct platform_device_info edma_dev_info1 = { 771 static const struct platform_device_info edma_dev_info1 = {
772 .name = "edma-dma-engine", 772 .name = "edma-dma-engine",
773 .id = 1, 773 .id = 1,
774 }; 774 };
775 775
776 static int edma_init(void) 776 static int edma_init(void)
777 { 777 {
778 int ret = platform_driver_register(&edma_driver); 778 int ret = platform_driver_register(&edma_driver);
779 779
780 if (ret == 0) { 780 if (ret == 0) {
781 pdev0 = platform_device_register_full(&edma_dev_info0); 781 pdev0 = platform_device_register_full(&edma_dev_info0);
782 if (IS_ERR(pdev0)) { 782 if (IS_ERR(pdev0)) {
783 platform_driver_unregister(&edma_driver); 783 platform_driver_unregister(&edma_driver);
784 ret = PTR_ERR(pdev0); 784 ret = PTR_ERR(pdev0);
785 goto out; 785 goto out;
786 } 786 }
787 pdev0->dev.dma_mask = &pdev0->dev.coherent_dma_mask; 787 pdev0->dev.dma_mask = &pdev0->dev.coherent_dma_mask;
788 pdev0->dev.coherent_dma_mask = DMA_BIT_MASK(32); 788 pdev0->dev.coherent_dma_mask = DMA_BIT_MASK(32);
789 } 789 }
790 790
791 if (EDMA_CTLRS == 2) { 791 if (EDMA_CTLRS == 2) {
792 pdev1 = platform_device_register_full(&edma_dev_info1); 792 pdev1 = platform_device_register_full(&edma_dev_info1);
793 if (IS_ERR(pdev1)) { 793 if (IS_ERR(pdev1)) {
794 platform_driver_unregister(&edma_driver); 794 platform_driver_unregister(&edma_driver);
795 platform_device_unregister(pdev0); 795 platform_device_unregister(pdev0);
796 ret = PTR_ERR(pdev1); 796 ret = PTR_ERR(pdev1);
797 } 797 }
798 pdev1->dev.dma_mask = &pdev1->dev.coherent_dma_mask; 798 pdev1->dev.dma_mask = &pdev1->dev.coherent_dma_mask;
799 pdev1->dev.coherent_dma_mask = DMA_BIT_MASK(32); 799 pdev1->dev.coherent_dma_mask = DMA_BIT_MASK(32);
800 } 800 }
801 801
802 out: 802 out:
803 return ret; 803 return ret;
804 } 804 }
805 subsys_initcall(edma_init); 805 subsys_initcall(edma_init);
806 806
807 static void __exit edma_exit(void) 807 static void __exit edma_exit(void)
808 { 808 {
809 platform_device_unregister(pdev0); 809 platform_device_unregister(pdev0);
810 if (pdev1) 810 if (pdev1)
811 platform_device_unregister(pdev1); 811 platform_device_unregister(pdev1);
812 platform_driver_unregister(&edma_driver); 812 platform_driver_unregister(&edma_driver);
813 } 813 }
814 module_exit(edma_exit); 814 module_exit(edma_exit);
815 815
816 MODULE_AUTHOR("Matt Porter <matt.porter@linaro.org>"); 816 MODULE_AUTHOR("Matt Porter <matt.porter@linaro.org>");
817 MODULE_DESCRIPTION("TI EDMA DMA engine driver"); 817 MODULE_DESCRIPTION("TI EDMA DMA engine driver");
818 MODULE_LICENSE("GPL v2"); 818 MODULE_LICENSE("GPL v2");
819 819
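For orientation only (this is not part of the commit above): a slave driver would normally obtain one of these EDMA channels through the generic dmaengine filter interface, handing edma_filter_fn the channel number it wants. A minimal sketch, assuming controller 0 and channel 20 purely for illustration; the helper name request_edma_chan is made up here, and header paths follow this tree's conventions:

	#include <linux/dmaengine.h>
	#include <linux/platform_data/edma.h>

	static struct dma_chan *request_edma_chan(void)
	{
		dma_cap_mask_t mask;
		unsigned ch = EDMA_CTLR_CHAN(0, 20);	/* illustrative channel number only */
		struct dma_chan *chan;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		/* edma_filter_fn compares *param against echan->ch_num (see above) */
		chan = dma_request_channel(mask, edma_filter_fn, &ch);
		return chan;	/* NULL if no matching channel is available */
	}

dma_request_channel() runs the filter synchronously, so passing the address of a stack variable as the parameter is fine here.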
drivers/dma/imx-dma.c
1 /* 1 /*
2 * drivers/dma/imx-dma.c 2 * drivers/dma/imx-dma.c
3 * 3 *
4 * This file contains a driver for the Freescale i.MX DMA engine 4 * This file contains a driver for the Freescale i.MX DMA engine
5 * found on i.MX1/21/27 5 * found on i.MX1/21/27
6 * 6 *
7 * Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de> 7 * Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
8 * Copyright 2012 Javier Martin, Vista Silicon <javier.martin@vista-silicon.com> 8 * Copyright 2012 Javier Martin, Vista Silicon <javier.martin@vista-silicon.com>
9 * 9 *
10 * The code contained herein is licensed under the GNU General Public 10 * The code contained herein is licensed under the GNU General Public
11 * License. You may obtain a copy of the GNU General Public License 11 * License. You may obtain a copy of the GNU General Public License
12 * Version 2 or later at the following locations: 12 * Version 2 or later at the following locations:
13 * 13 *
14 * http://www.opensource.org/licenses/gpl-license.html 14 * http://www.opensource.org/licenses/gpl-license.html
15 * http://www.gnu.org/copyleft/gpl.html 15 * http://www.gnu.org/copyleft/gpl.html
16 */ 16 */
17 #include <linux/err.h> 17 #include <linux/err.h>
18 #include <linux/init.h> 18 #include <linux/init.h>
19 #include <linux/types.h> 19 #include <linux/types.h>
20 #include <linux/mm.h> 20 #include <linux/mm.h>
21 #include <linux/interrupt.h> 21 #include <linux/interrupt.h>
22 #include <linux/spinlock.h> 22 #include <linux/spinlock.h>
23 #include <linux/device.h> 23 #include <linux/device.h>
24 #include <linux/dma-mapping.h> 24 #include <linux/dma-mapping.h>
25 #include <linux/slab.h> 25 #include <linux/slab.h>
26 #include <linux/platform_device.h> 26 #include <linux/platform_device.h>
27 #include <linux/clk.h> 27 #include <linux/clk.h>
28 #include <linux/dmaengine.h> 28 #include <linux/dmaengine.h>
29 #include <linux/module.h> 29 #include <linux/module.h>
30 #include <linux/of_device.h> 30 #include <linux/of_device.h>
31 #include <linux/of_dma.h> 31 #include <linux/of_dma.h>
32 32
33 #include <asm/irq.h> 33 #include <asm/irq.h>
34 #include <linux/platform_data/dma-imx.h> 34 #include <linux/platform_data/dma-imx.h>
35 35
36 #include "dmaengine.h" 36 #include "dmaengine.h"
37 #define IMXDMA_MAX_CHAN_DESCRIPTORS 16 37 #define IMXDMA_MAX_CHAN_DESCRIPTORS 16
38 #define IMX_DMA_CHANNELS 16 38 #define IMX_DMA_CHANNELS 16
39 39
40 #define IMX_DMA_2D_SLOTS 2 40 #define IMX_DMA_2D_SLOTS 2
41 #define IMX_DMA_2D_SLOT_A 0 41 #define IMX_DMA_2D_SLOT_A 0
42 #define IMX_DMA_2D_SLOT_B 1 42 #define IMX_DMA_2D_SLOT_B 1
43 43
44 #define IMX_DMA_LENGTH_LOOP ((unsigned int)-1) 44 #define IMX_DMA_LENGTH_LOOP ((unsigned int)-1)
45 #define IMX_DMA_MEMSIZE_32 (0 << 4) 45 #define IMX_DMA_MEMSIZE_32 (0 << 4)
46 #define IMX_DMA_MEMSIZE_8 (1 << 4) 46 #define IMX_DMA_MEMSIZE_8 (1 << 4)
47 #define IMX_DMA_MEMSIZE_16 (2 << 4) 47 #define IMX_DMA_MEMSIZE_16 (2 << 4)
48 #define IMX_DMA_TYPE_LINEAR (0 << 10) 48 #define IMX_DMA_TYPE_LINEAR (0 << 10)
49 #define IMX_DMA_TYPE_2D (1 << 10) 49 #define IMX_DMA_TYPE_2D (1 << 10)
50 #define IMX_DMA_TYPE_FIFO (2 << 10) 50 #define IMX_DMA_TYPE_FIFO (2 << 10)
51 51
52 #define IMX_DMA_ERR_BURST (1 << 0) 52 #define IMX_DMA_ERR_BURST (1 << 0)
53 #define IMX_DMA_ERR_REQUEST (1 << 1) 53 #define IMX_DMA_ERR_REQUEST (1 << 1)
54 #define IMX_DMA_ERR_TRANSFER (1 << 2) 54 #define IMX_DMA_ERR_TRANSFER (1 << 2)
55 #define IMX_DMA_ERR_BUFFER (1 << 3) 55 #define IMX_DMA_ERR_BUFFER (1 << 3)
56 #define IMX_DMA_ERR_TIMEOUT (1 << 4) 56 #define IMX_DMA_ERR_TIMEOUT (1 << 4)
57 57
58 #define DMA_DCR 0x00 /* Control Register */ 58 #define DMA_DCR 0x00 /* Control Register */
59 #define DMA_DISR 0x04 /* Interrupt status Register */ 59 #define DMA_DISR 0x04 /* Interrupt status Register */
60 #define DMA_DIMR 0x08 /* Interrupt mask Register */ 60 #define DMA_DIMR 0x08 /* Interrupt mask Register */
61 #define DMA_DBTOSR 0x0c /* Burst timeout status Register */ 61 #define DMA_DBTOSR 0x0c /* Burst timeout status Register */
62 #define DMA_DRTOSR 0x10 /* Request timeout Register */ 62 #define DMA_DRTOSR 0x10 /* Request timeout Register */
63 #define DMA_DSESR 0x14 /* Transfer Error Status Register */ 63 #define DMA_DSESR 0x14 /* Transfer Error Status Register */
64 #define DMA_DBOSR 0x18 /* Buffer overflow status Register */ 64 #define DMA_DBOSR 0x18 /* Buffer overflow status Register */
65 #define DMA_DBTOCR 0x1c /* Burst timeout control Register */ 65 #define DMA_DBTOCR 0x1c /* Burst timeout control Register */
66 #define DMA_WSRA 0x40 /* W-Size Register A */ 66 #define DMA_WSRA 0x40 /* W-Size Register A */
67 #define DMA_XSRA 0x44 /* X-Size Register A */ 67 #define DMA_XSRA 0x44 /* X-Size Register A */
68 #define DMA_YSRA 0x48 /* Y-Size Register A */ 68 #define DMA_YSRA 0x48 /* Y-Size Register A */
69 #define DMA_WSRB 0x4c /* W-Size Register B */ 69 #define DMA_WSRB 0x4c /* W-Size Register B */
70 #define DMA_XSRB 0x50 /* X-Size Register B */ 70 #define DMA_XSRB 0x50 /* X-Size Register B */
71 #define DMA_YSRB 0x54 /* Y-Size Register B */ 71 #define DMA_YSRB 0x54 /* Y-Size Register B */
72 #define DMA_SAR(x) (0x80 + ((x) << 6)) /* Source Address Registers */ 72 #define DMA_SAR(x) (0x80 + ((x) << 6)) /* Source Address Registers */
73 #define DMA_DAR(x) (0x84 + ((x) << 6)) /* Destination Address Registers */ 73 #define DMA_DAR(x) (0x84 + ((x) << 6)) /* Destination Address Registers */
74 #define DMA_CNTR(x) (0x88 + ((x) << 6)) /* Count Registers */ 74 #define DMA_CNTR(x) (0x88 + ((x) << 6)) /* Count Registers */
75 #define DMA_CCR(x) (0x8c + ((x) << 6)) /* Control Registers */ 75 #define DMA_CCR(x) (0x8c + ((x) << 6)) /* Control Registers */
76 #define DMA_RSSR(x) (0x90 + ((x) << 6)) /* Request source select Registers */ 76 #define DMA_RSSR(x) (0x90 + ((x) << 6)) /* Request source select Registers */
77 #define DMA_BLR(x) (0x94 + ((x) << 6)) /* Burst length Registers */ 77 #define DMA_BLR(x) (0x94 + ((x) << 6)) /* Burst length Registers */
78 #define DMA_RTOR(x) (0x98 + ((x) << 6)) /* Request timeout Registers */ 78 #define DMA_RTOR(x) (0x98 + ((x) << 6)) /* Request timeout Registers */
79 #define DMA_BUCR(x) (0x98 + ((x) << 6)) /* Bus Utilization Registers */ 79 #define DMA_BUCR(x) (0x98 + ((x) << 6)) /* Bus Utilization Registers */
80 #define DMA_CCNR(x) (0x9C + ((x) << 6)) /* Channel counter Registers */ 80 #define DMA_CCNR(x) (0x9C + ((x) << 6)) /* Channel counter Registers */
81 81
82 #define DCR_DRST (1<<1) 82 #define DCR_DRST (1<<1)
83 #define DCR_DEN (1<<0) 83 #define DCR_DEN (1<<0)
84 #define DBTOCR_EN (1<<15) 84 #define DBTOCR_EN (1<<15)
85 #define DBTOCR_CNT(x) ((x) & 0x7fff) 85 #define DBTOCR_CNT(x) ((x) & 0x7fff)
86 #define CNTR_CNT(x) ((x) & 0xffffff) 86 #define CNTR_CNT(x) ((x) & 0xffffff)
87 #define CCR_ACRPT (1<<14) 87 #define CCR_ACRPT (1<<14)
88 #define CCR_DMOD_LINEAR (0x0 << 12) 88 #define CCR_DMOD_LINEAR (0x0 << 12)
89 #define CCR_DMOD_2D (0x1 << 12) 89 #define CCR_DMOD_2D (0x1 << 12)
90 #define CCR_DMOD_FIFO (0x2 << 12) 90 #define CCR_DMOD_FIFO (0x2 << 12)
91 #define CCR_DMOD_EOBFIFO (0x3 << 12) 91 #define CCR_DMOD_EOBFIFO (0x3 << 12)
92 #define CCR_SMOD_LINEAR (0x0 << 10) 92 #define CCR_SMOD_LINEAR (0x0 << 10)
93 #define CCR_SMOD_2D (0x1 << 10) 93 #define CCR_SMOD_2D (0x1 << 10)
94 #define CCR_SMOD_FIFO (0x2 << 10) 94 #define CCR_SMOD_FIFO (0x2 << 10)
95 #define CCR_SMOD_EOBFIFO (0x3 << 10) 95 #define CCR_SMOD_EOBFIFO (0x3 << 10)
96 #define CCR_MDIR_DEC (1<<9) 96 #define CCR_MDIR_DEC (1<<9)
97 #define CCR_MSEL_B (1<<8) 97 #define CCR_MSEL_B (1<<8)
98 #define CCR_DSIZ_32 (0x0 << 6) 98 #define CCR_DSIZ_32 (0x0 << 6)
99 #define CCR_DSIZ_8 (0x1 << 6) 99 #define CCR_DSIZ_8 (0x1 << 6)
100 #define CCR_DSIZ_16 (0x2 << 6) 100 #define CCR_DSIZ_16 (0x2 << 6)
101 #define CCR_SSIZ_32 (0x0 << 4) 101 #define CCR_SSIZ_32 (0x0 << 4)
102 #define CCR_SSIZ_8 (0x1 << 4) 102 #define CCR_SSIZ_8 (0x1 << 4)
103 #define CCR_SSIZ_16 (0x2 << 4) 103 #define CCR_SSIZ_16 (0x2 << 4)
104 #define CCR_REN (1<<3) 104 #define CCR_REN (1<<3)
105 #define CCR_RPT (1<<2) 105 #define CCR_RPT (1<<2)
106 #define CCR_FRC (1<<1) 106 #define CCR_FRC (1<<1)
107 #define CCR_CEN (1<<0) 107 #define CCR_CEN (1<<0)
108 #define RTOR_EN (1<<15) 108 #define RTOR_EN (1<<15)
109 #define RTOR_CLK (1<<14) 109 #define RTOR_CLK (1<<14)
110 #define RTOR_PSC (1<<13) 110 #define RTOR_PSC (1<<13)
111 111
112 enum imxdma_prep_type { 112 enum imxdma_prep_type {
113 IMXDMA_DESC_MEMCPY, 113 IMXDMA_DESC_MEMCPY,
114 IMXDMA_DESC_INTERLEAVED, 114 IMXDMA_DESC_INTERLEAVED,
115 IMXDMA_DESC_SLAVE_SG, 115 IMXDMA_DESC_SLAVE_SG,
116 IMXDMA_DESC_CYCLIC, 116 IMXDMA_DESC_CYCLIC,
117 }; 117 };
118 118
119 struct imx_dma_2d_config { 119 struct imx_dma_2d_config {
120 u16 xsr; 120 u16 xsr;
121 u16 ysr; 121 u16 ysr;
122 u16 wsr; 122 u16 wsr;
123 int count; 123 int count;
124 }; 124 };
125 125
126 struct imxdma_desc { 126 struct imxdma_desc {
127 struct list_head node; 127 struct list_head node;
128 struct dma_async_tx_descriptor desc; 128 struct dma_async_tx_descriptor desc;
129 enum dma_status status; 129 enum dma_status status;
130 dma_addr_t src; 130 dma_addr_t src;
131 dma_addr_t dest; 131 dma_addr_t dest;
132 size_t len; 132 size_t len;
133 enum dma_transfer_direction direction; 133 enum dma_transfer_direction direction;
134 enum imxdma_prep_type type; 134 enum imxdma_prep_type type;
135 /* For memcpy and interleaved */ 135 /* For memcpy and interleaved */
136 unsigned int config_port; 136 unsigned int config_port;
137 unsigned int config_mem; 137 unsigned int config_mem;
138 /* For interleaved transfers */ 138 /* For interleaved transfers */
139 unsigned int x; 139 unsigned int x;
140 unsigned int y; 140 unsigned int y;
141 unsigned int w; 141 unsigned int w;
142 /* For slave sg and cyclic */ 142 /* For slave sg and cyclic */
143 struct scatterlist *sg; 143 struct scatterlist *sg;
144 unsigned int sgcount; 144 unsigned int sgcount;
145 }; 145 };
146 146
147 struct imxdma_channel { 147 struct imxdma_channel {
148 int hw_chaining; 148 int hw_chaining;
149 struct timer_list watchdog; 149 struct timer_list watchdog;
150 struct imxdma_engine *imxdma; 150 struct imxdma_engine *imxdma;
151 unsigned int channel; 151 unsigned int channel;
152 152
153 struct tasklet_struct dma_tasklet; 153 struct tasklet_struct dma_tasklet;
154 struct list_head ld_free; 154 struct list_head ld_free;
155 struct list_head ld_queue; 155 struct list_head ld_queue;
156 struct list_head ld_active; 156 struct list_head ld_active;
157 int descs_allocated; 157 int descs_allocated;
158 enum dma_slave_buswidth word_size; 158 enum dma_slave_buswidth word_size;
159 dma_addr_t per_address; 159 dma_addr_t per_address;
160 u32 watermark_level; 160 u32 watermark_level;
161 struct dma_chan chan; 161 struct dma_chan chan;
162 struct dma_async_tx_descriptor desc; 162 struct dma_async_tx_descriptor desc;
163 enum dma_status status; 163 enum dma_status status;
164 int dma_request; 164 int dma_request;
165 struct scatterlist *sg_list; 165 struct scatterlist *sg_list;
166 u32 ccr_from_device; 166 u32 ccr_from_device;
167 u32 ccr_to_device; 167 u32 ccr_to_device;
168 bool enabled_2d; 168 bool enabled_2d;
169 int slot_2d; 169 int slot_2d;
170 }; 170 };
171 171
172 enum imx_dma_type { 172 enum imx_dma_type {
173 IMX1_DMA, 173 IMX1_DMA,
174 IMX21_DMA, 174 IMX21_DMA,
175 IMX27_DMA, 175 IMX27_DMA,
176 }; 176 };
177 177
178 struct imxdma_engine { 178 struct imxdma_engine {
179 struct device *dev; 179 struct device *dev;
180 struct device_dma_parameters dma_parms; 180 struct device_dma_parameters dma_parms;
181 struct dma_device dma_device; 181 struct dma_device dma_device;
182 void __iomem *base; 182 void __iomem *base;
183 struct clk *dma_ahb; 183 struct clk *dma_ahb;
184 struct clk *dma_ipg; 184 struct clk *dma_ipg;
185 spinlock_t lock; 185 spinlock_t lock;
186 struct imx_dma_2d_config slots_2d[IMX_DMA_2D_SLOTS]; 186 struct imx_dma_2d_config slots_2d[IMX_DMA_2D_SLOTS];
187 struct imxdma_channel channel[IMX_DMA_CHANNELS]; 187 struct imxdma_channel channel[IMX_DMA_CHANNELS];
188 enum imx_dma_type devtype; 188 enum imx_dma_type devtype;
189 }; 189 };
190 190
191 struct imxdma_filter_data { 191 struct imxdma_filter_data {
192 struct imxdma_engine *imxdma; 192 struct imxdma_engine *imxdma;
193 int request; 193 int request;
194 }; 194 };
195 195
196 static struct platform_device_id imx_dma_devtype[] = { 196 static struct platform_device_id imx_dma_devtype[] = {
197 { 197 {
198 .name = "imx1-dma", 198 .name = "imx1-dma",
199 .driver_data = IMX1_DMA, 199 .driver_data = IMX1_DMA,
200 }, { 200 }, {
201 .name = "imx21-dma", 201 .name = "imx21-dma",
202 .driver_data = IMX21_DMA, 202 .driver_data = IMX21_DMA,
203 }, { 203 }, {
204 .name = "imx27-dma", 204 .name = "imx27-dma",
205 .driver_data = IMX27_DMA, 205 .driver_data = IMX27_DMA,
206 }, { 206 }, {
207 /* sentinel */ 207 /* sentinel */
208 } 208 }
209 }; 209 };
210 MODULE_DEVICE_TABLE(platform, imx_dma_devtype); 210 MODULE_DEVICE_TABLE(platform, imx_dma_devtype);
211 211
212 static const struct of_device_id imx_dma_of_dev_id[] = { 212 static const struct of_device_id imx_dma_of_dev_id[] = {
213 { 213 {
214 .compatible = "fsl,imx1-dma", 214 .compatible = "fsl,imx1-dma",
215 .data = &imx_dma_devtype[IMX1_DMA], 215 .data = &imx_dma_devtype[IMX1_DMA],
216 }, { 216 }, {
217 .compatible = "fsl,imx21-dma", 217 .compatible = "fsl,imx21-dma",
218 .data = &imx_dma_devtype[IMX21_DMA], 218 .data = &imx_dma_devtype[IMX21_DMA],
219 }, { 219 }, {
220 .compatible = "fsl,imx27-dma", 220 .compatible = "fsl,imx27-dma",
221 .data = &imx_dma_devtype[IMX27_DMA], 221 .data = &imx_dma_devtype[IMX27_DMA],
222 }, { 222 }, {
223 /* sentinel */ 223 /* sentinel */
224 } 224 }
225 }; 225 };
226 MODULE_DEVICE_TABLE(of, imx_dma_of_dev_id); 226 MODULE_DEVICE_TABLE(of, imx_dma_of_dev_id);
227 227
228 static inline int is_imx1_dma(struct imxdma_engine *imxdma) 228 static inline int is_imx1_dma(struct imxdma_engine *imxdma)
229 { 229 {
230 return imxdma->devtype == IMX1_DMA; 230 return imxdma->devtype == IMX1_DMA;
231 } 231 }
232 232
233 static inline int is_imx21_dma(struct imxdma_engine *imxdma) 233 static inline int is_imx21_dma(struct imxdma_engine *imxdma)
234 { 234 {
235 return imxdma->devtype == IMX21_DMA; 235 return imxdma->devtype == IMX21_DMA;
236 } 236 }
237 237
238 static inline int is_imx27_dma(struct imxdma_engine *imxdma) 238 static inline int is_imx27_dma(struct imxdma_engine *imxdma)
239 { 239 {
240 return imxdma->devtype == IMX27_DMA; 240 return imxdma->devtype == IMX27_DMA;
241 } 241 }
242 242
243 static struct imxdma_channel *to_imxdma_chan(struct dma_chan *chan) 243 static struct imxdma_channel *to_imxdma_chan(struct dma_chan *chan)
244 { 244 {
245 return container_of(chan, struct imxdma_channel, chan); 245 return container_of(chan, struct imxdma_channel, chan);
246 } 246 }
247 247
248 static inline bool imxdma_chan_is_doing_cyclic(struct imxdma_channel *imxdmac) 248 static inline bool imxdma_chan_is_doing_cyclic(struct imxdma_channel *imxdmac)
249 { 249 {
250 struct imxdma_desc *desc; 250 struct imxdma_desc *desc;
251 251
252 if (!list_empty(&imxdmac->ld_active)) { 252 if (!list_empty(&imxdmac->ld_active)) {
253 desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc, 253 desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc,
254 node); 254 node);
255 if (desc->type == IMXDMA_DESC_CYCLIC) 255 if (desc->type == IMXDMA_DESC_CYCLIC)
256 return true; 256 return true;
257 } 257 }
258 return false; 258 return false;
259 } 259 }
260 260
261 261
262 262
263 static void imx_dmav1_writel(struct imxdma_engine *imxdma, unsigned val, 263 static void imx_dmav1_writel(struct imxdma_engine *imxdma, unsigned val,
264 unsigned offset) 264 unsigned offset)
265 { 265 {
266 __raw_writel(val, imxdma->base + offset); 266 __raw_writel(val, imxdma->base + offset);
267 } 267 }
268 268
269 static unsigned imx_dmav1_readl(struct imxdma_engine *imxdma, unsigned offset) 269 static unsigned imx_dmav1_readl(struct imxdma_engine *imxdma, unsigned offset)
270 { 270 {
271 return __raw_readl(imxdma->base + offset); 271 return __raw_readl(imxdma->base + offset);
272 } 272 }
273 273
274 static int imxdma_hw_chain(struct imxdma_channel *imxdmac) 274 static int imxdma_hw_chain(struct imxdma_channel *imxdmac)
275 { 275 {
276 struct imxdma_engine *imxdma = imxdmac->imxdma; 276 struct imxdma_engine *imxdma = imxdmac->imxdma;
277 277
278 if (is_imx27_dma(imxdma)) 278 if (is_imx27_dma(imxdma))
279 return imxdmac->hw_chaining; 279 return imxdmac->hw_chaining;
280 else 280 else
281 return 0; 281 return 0;
282 } 282 }
283 283
284 /* 284 /*
285 * imxdma_sg_next - prepare next chunk for scatter-gather DMA emulation 285 * imxdma_sg_next - prepare next chunk for scatter-gather DMA emulation
286 */ 286 */
287 static inline int imxdma_sg_next(struct imxdma_desc *d) 287 static inline int imxdma_sg_next(struct imxdma_desc *d)
288 { 288 {
289 struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan); 289 struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
290 struct imxdma_engine *imxdma = imxdmac->imxdma; 290 struct imxdma_engine *imxdma = imxdmac->imxdma;
291 struct scatterlist *sg = d->sg; 291 struct scatterlist *sg = d->sg;
292 unsigned long now; 292 unsigned long now;
293 293
294 now = min(d->len, sg_dma_len(sg)); 294 now = min(d->len, sg_dma_len(sg));
295 if (d->len != IMX_DMA_LENGTH_LOOP) 295 if (d->len != IMX_DMA_LENGTH_LOOP)
296 d->len -= now; 296 d->len -= now;
297 297
298 if (d->direction == DMA_DEV_TO_MEM) 298 if (d->direction == DMA_DEV_TO_MEM)
299 imx_dmav1_writel(imxdma, sg->dma_address, 299 imx_dmav1_writel(imxdma, sg->dma_address,
300 DMA_DAR(imxdmac->channel)); 300 DMA_DAR(imxdmac->channel));
301 else 301 else
302 imx_dmav1_writel(imxdma, sg->dma_address, 302 imx_dmav1_writel(imxdma, sg->dma_address,
303 DMA_SAR(imxdmac->channel)); 303 DMA_SAR(imxdmac->channel));
304 304
305 imx_dmav1_writel(imxdma, now, DMA_CNTR(imxdmac->channel)); 305 imx_dmav1_writel(imxdma, now, DMA_CNTR(imxdmac->channel));
306 306
307 dev_dbg(imxdma->dev, " %s channel: %d dst 0x%08x, src 0x%08x, " 307 dev_dbg(imxdma->dev, " %s channel: %d dst 0x%08x, src 0x%08x, "
308 "size 0x%08x\n", __func__, imxdmac->channel, 308 "size 0x%08x\n", __func__, imxdmac->channel,
309 imx_dmav1_readl(imxdma, DMA_DAR(imxdmac->channel)), 309 imx_dmav1_readl(imxdma, DMA_DAR(imxdmac->channel)),
310 imx_dmav1_readl(imxdma, DMA_SAR(imxdmac->channel)), 310 imx_dmav1_readl(imxdma, DMA_SAR(imxdmac->channel)),
311 imx_dmav1_readl(imxdma, DMA_CNTR(imxdmac->channel))); 311 imx_dmav1_readl(imxdma, DMA_CNTR(imxdmac->channel)));
312 312
313 return now; 313 return now;
314 } 314 }
315 315
316 static void imxdma_enable_hw(struct imxdma_desc *d) 316 static void imxdma_enable_hw(struct imxdma_desc *d)
317 { 317 {
318 struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan); 318 struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
319 struct imxdma_engine *imxdma = imxdmac->imxdma; 319 struct imxdma_engine *imxdma = imxdmac->imxdma;
320 int channel = imxdmac->channel; 320 int channel = imxdmac->channel;
321 unsigned long flags; 321 unsigned long flags;
322 322
323 dev_dbg(imxdma->dev, "%s channel %d\n", __func__, channel); 323 dev_dbg(imxdma->dev, "%s channel %d\n", __func__, channel);
324 324
325 local_irq_save(flags); 325 local_irq_save(flags);
326 326
327 imx_dmav1_writel(imxdma, 1 << channel, DMA_DISR); 327 imx_dmav1_writel(imxdma, 1 << channel, DMA_DISR);
328 imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_DIMR) & 328 imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_DIMR) &
329 ~(1 << channel), DMA_DIMR); 329 ~(1 << channel), DMA_DIMR);
330 imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_CCR(channel)) | 330 imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_CCR(channel)) |
331 CCR_CEN | CCR_ACRPT, DMA_CCR(channel)); 331 CCR_CEN | CCR_ACRPT, DMA_CCR(channel));
332 332
333 if (!is_imx1_dma(imxdma) && 333 if (!is_imx1_dma(imxdma) &&
334 d->sg && imxdma_hw_chain(imxdmac)) { 334 d->sg && imxdma_hw_chain(imxdmac)) {
335 d->sg = sg_next(d->sg); 335 d->sg = sg_next(d->sg);
336 if (d->sg) { 336 if (d->sg) {
337 u32 tmp; 337 u32 tmp;
338 imxdma_sg_next(d); 338 imxdma_sg_next(d);
339 tmp = imx_dmav1_readl(imxdma, DMA_CCR(channel)); 339 tmp = imx_dmav1_readl(imxdma, DMA_CCR(channel));
340 imx_dmav1_writel(imxdma, tmp | CCR_RPT | CCR_ACRPT, 340 imx_dmav1_writel(imxdma, tmp | CCR_RPT | CCR_ACRPT,
341 DMA_CCR(channel)); 341 DMA_CCR(channel));
342 } 342 }
343 } 343 }
344 344
345 local_irq_restore(flags); 345 local_irq_restore(flags);
346 } 346 }
347 347
348 static void imxdma_disable_hw(struct imxdma_channel *imxdmac) 348 static void imxdma_disable_hw(struct imxdma_channel *imxdmac)
349 { 349 {
350 struct imxdma_engine *imxdma = imxdmac->imxdma; 350 struct imxdma_engine *imxdma = imxdmac->imxdma;
351 int channel = imxdmac->channel; 351 int channel = imxdmac->channel;
352 unsigned long flags; 352 unsigned long flags;
353 353
354 dev_dbg(imxdma->dev, "%s channel %d\n", __func__, channel); 354 dev_dbg(imxdma->dev, "%s channel %d\n", __func__, channel);
355 355
356 if (imxdma_hw_chain(imxdmac)) 356 if (imxdma_hw_chain(imxdmac))
357 del_timer(&imxdmac->watchdog); 357 del_timer(&imxdmac->watchdog);
358 358
359 local_irq_save(flags); 359 local_irq_save(flags);
360 imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_DIMR) | 360 imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_DIMR) |
361 (1 << channel), DMA_DIMR); 361 (1 << channel), DMA_DIMR);
362 imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_CCR(channel)) & 362 imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_CCR(channel)) &
363 ~CCR_CEN, DMA_CCR(channel)); 363 ~CCR_CEN, DMA_CCR(channel));
364 imx_dmav1_writel(imxdma, 1 << channel, DMA_DISR); 364 imx_dmav1_writel(imxdma, 1 << channel, DMA_DISR);
365 local_irq_restore(flags); 365 local_irq_restore(flags);
366 } 366 }
367 367
368 static void imxdma_watchdog(unsigned long data) 368 static void imxdma_watchdog(unsigned long data)
369 { 369 {
370 struct imxdma_channel *imxdmac = (struct imxdma_channel *)data; 370 struct imxdma_channel *imxdmac = (struct imxdma_channel *)data;
371 struct imxdma_engine *imxdma = imxdmac->imxdma; 371 struct imxdma_engine *imxdma = imxdmac->imxdma;
372 int channel = imxdmac->channel; 372 int channel = imxdmac->channel;
373 373
374 imx_dmav1_writel(imxdma, 0, DMA_CCR(channel)); 374 imx_dmav1_writel(imxdma, 0, DMA_CCR(channel));
375 375
376 /* Tasklet watchdog error handler */ 376 /* Tasklet watchdog error handler */
377 tasklet_schedule(&imxdmac->dma_tasklet); 377 tasklet_schedule(&imxdmac->dma_tasklet);
378 dev_dbg(imxdma->dev, "channel %d: watchdog timeout!\n", 378 dev_dbg(imxdma->dev, "channel %d: watchdog timeout!\n",
379 imxdmac->channel); 379 imxdmac->channel);
380 } 380 }
381 381
382 static irqreturn_t imxdma_err_handler(int irq, void *dev_id) 382 static irqreturn_t imxdma_err_handler(int irq, void *dev_id)
383 { 383 {
384 struct imxdma_engine *imxdma = dev_id; 384 struct imxdma_engine *imxdma = dev_id;
385 unsigned int err_mask; 385 unsigned int err_mask;
386 int i, disr; 386 int i, disr;
387 int errcode; 387 int errcode;
388 388
389 disr = imx_dmav1_readl(imxdma, DMA_DISR); 389 disr = imx_dmav1_readl(imxdma, DMA_DISR);
390 390
391 err_mask = imx_dmav1_readl(imxdma, DMA_DBTOSR) | 391 err_mask = imx_dmav1_readl(imxdma, DMA_DBTOSR) |
392 imx_dmav1_readl(imxdma, DMA_DRTOSR) | 392 imx_dmav1_readl(imxdma, DMA_DRTOSR) |
393 imx_dmav1_readl(imxdma, DMA_DSESR) | 393 imx_dmav1_readl(imxdma, DMA_DSESR) |
394 imx_dmav1_readl(imxdma, DMA_DBOSR); 394 imx_dmav1_readl(imxdma, DMA_DBOSR);
395 395
396 if (!err_mask) 396 if (!err_mask)
397 return IRQ_HANDLED; 397 return IRQ_HANDLED;
398 398
399 imx_dmav1_writel(imxdma, disr & err_mask, DMA_DISR); 399 imx_dmav1_writel(imxdma, disr & err_mask, DMA_DISR);
400 400
401 for (i = 0; i < IMX_DMA_CHANNELS; i++) { 401 for (i = 0; i < IMX_DMA_CHANNELS; i++) {
402 if (!(err_mask & (1 << i))) 402 if (!(err_mask & (1 << i)))
403 continue; 403 continue;
404 errcode = 0; 404 errcode = 0;
405 405
406 if (imx_dmav1_readl(imxdma, DMA_DBTOSR) & (1 << i)) { 406 if (imx_dmav1_readl(imxdma, DMA_DBTOSR) & (1 << i)) {
407 imx_dmav1_writel(imxdma, 1 << i, DMA_DBTOSR); 407 imx_dmav1_writel(imxdma, 1 << i, DMA_DBTOSR);
408 errcode |= IMX_DMA_ERR_BURST; 408 errcode |= IMX_DMA_ERR_BURST;
409 } 409 }
410 if (imx_dmav1_readl(imxdma, DMA_DRTOSR) & (1 << i)) { 410 if (imx_dmav1_readl(imxdma, DMA_DRTOSR) & (1 << i)) {
411 imx_dmav1_writel(imxdma, 1 << i, DMA_DRTOSR); 411 imx_dmav1_writel(imxdma, 1 << i, DMA_DRTOSR);
412 errcode |= IMX_DMA_ERR_REQUEST; 412 errcode |= IMX_DMA_ERR_REQUEST;
413 } 413 }
414 if (imx_dmav1_readl(imxdma, DMA_DSESR) & (1 << i)) { 414 if (imx_dmav1_readl(imxdma, DMA_DSESR) & (1 << i)) {
415 imx_dmav1_writel(imxdma, 1 << i, DMA_DSESR); 415 imx_dmav1_writel(imxdma, 1 << i, DMA_DSESR);
416 errcode |= IMX_DMA_ERR_TRANSFER; 416 errcode |= IMX_DMA_ERR_TRANSFER;
417 } 417 }
418 if (imx_dmav1_readl(imxdma, DMA_DBOSR) & (1 << i)) { 418 if (imx_dmav1_readl(imxdma, DMA_DBOSR) & (1 << i)) {
419 imx_dmav1_writel(imxdma, 1 << i, DMA_DBOSR); 419 imx_dmav1_writel(imxdma, 1 << i, DMA_DBOSR);
420 errcode |= IMX_DMA_ERR_BUFFER; 420 errcode |= IMX_DMA_ERR_BUFFER;
421 } 421 }
422 /* Tasklet error handler */ 422 /* Tasklet error handler */
423 tasklet_schedule(&imxdma->channel[i].dma_tasklet); 423 tasklet_schedule(&imxdma->channel[i].dma_tasklet);
424 424
425 printk(KERN_WARNING 425 printk(KERN_WARNING
426 "DMA timeout on channel %d -%s%s%s%s\n", i, 426 "DMA timeout on channel %d -%s%s%s%s\n", i,
427 errcode & IMX_DMA_ERR_BURST ? " burst" : "", 427 errcode & IMX_DMA_ERR_BURST ? " burst" : "",
428 errcode & IMX_DMA_ERR_REQUEST ? " request" : "", 428 errcode & IMX_DMA_ERR_REQUEST ? " request" : "",
429 errcode & IMX_DMA_ERR_TRANSFER ? " transfer" : "", 429 errcode & IMX_DMA_ERR_TRANSFER ? " transfer" : "",
430 errcode & IMX_DMA_ERR_BUFFER ? " buffer" : ""); 430 errcode & IMX_DMA_ERR_BUFFER ? " buffer" : "");
431 } 431 }
432 return IRQ_HANDLED; 432 return IRQ_HANDLED;
433 } 433 }
434 434
435 static void dma_irq_handle_channel(struct imxdma_channel *imxdmac) 435 static void dma_irq_handle_channel(struct imxdma_channel *imxdmac)
436 { 436 {
437 struct imxdma_engine *imxdma = imxdmac->imxdma; 437 struct imxdma_engine *imxdma = imxdmac->imxdma;
438 int chno = imxdmac->channel; 438 int chno = imxdmac->channel;
439 struct imxdma_desc *desc; 439 struct imxdma_desc *desc;
440 unsigned long flags; 440 unsigned long flags;
441 441
442 spin_lock_irqsave(&imxdma->lock, flags); 442 spin_lock_irqsave(&imxdma->lock, flags);
443 if (list_empty(&imxdmac->ld_active)) { 443 if (list_empty(&imxdmac->ld_active)) {
444 spin_unlock_irqrestore(&imxdma->lock, flags); 444 spin_unlock_irqrestore(&imxdma->lock, flags);
445 goto out; 445 goto out;
446 } 446 }
447 447
448 desc = list_first_entry(&imxdmac->ld_active, 448 desc = list_first_entry(&imxdmac->ld_active,
449 struct imxdma_desc, 449 struct imxdma_desc,
450 node); 450 node);
451 spin_unlock_irqrestore(&imxdma->lock, flags); 451 spin_unlock_irqrestore(&imxdma->lock, flags);
452 452
453 if (desc->sg) { 453 if (desc->sg) {
454 u32 tmp; 454 u32 tmp;
455 desc->sg = sg_next(desc->sg); 455 desc->sg = sg_next(desc->sg);
456 456
457 if (desc->sg) { 457 if (desc->sg) {
458 imxdma_sg_next(desc); 458 imxdma_sg_next(desc);
459 459
460 tmp = imx_dmav1_readl(imxdma, DMA_CCR(chno)); 460 tmp = imx_dmav1_readl(imxdma, DMA_CCR(chno));
461 461
462 if (imxdma_hw_chain(imxdmac)) { 462 if (imxdma_hw_chain(imxdmac)) {
463 /* FIXME: The timeout should probably be 463 /* FIXME: The timeout should probably be
464 * configurable 464 * configurable
465 */ 465 */
466 mod_timer(&imxdmac->watchdog, 466 mod_timer(&imxdmac->watchdog,
467 jiffies + msecs_to_jiffies(500)); 467 jiffies + msecs_to_jiffies(500));
468 468
469 tmp |= CCR_CEN | CCR_RPT | CCR_ACRPT; 469 tmp |= CCR_CEN | CCR_RPT | CCR_ACRPT;
470 imx_dmav1_writel(imxdma, tmp, DMA_CCR(chno)); 470 imx_dmav1_writel(imxdma, tmp, DMA_CCR(chno));
471 } else { 471 } else {
472 imx_dmav1_writel(imxdma, tmp & ~CCR_CEN, 472 imx_dmav1_writel(imxdma, tmp & ~CCR_CEN,
473 DMA_CCR(chno)); 473 DMA_CCR(chno));
474 tmp |= CCR_CEN; 474 tmp |= CCR_CEN;
475 } 475 }
476 476
477 imx_dmav1_writel(imxdma, tmp, DMA_CCR(chno)); 477 imx_dmav1_writel(imxdma, tmp, DMA_CCR(chno));
478 478
479 if (imxdma_chan_is_doing_cyclic(imxdmac)) 479 if (imxdma_chan_is_doing_cyclic(imxdmac))
480 /* Tasklet progression */ 480 /* Tasklet progression */
481 tasklet_schedule(&imxdmac->dma_tasklet); 481 tasklet_schedule(&imxdmac->dma_tasklet);
482 482
483 return; 483 return;
484 } 484 }
485 485
486 if (imxdma_hw_chain(imxdmac)) { 486 if (imxdma_hw_chain(imxdmac)) {
487 del_timer(&imxdmac->watchdog); 487 del_timer(&imxdmac->watchdog);
488 return; 488 return;
489 } 489 }
490 } 490 }
491 491
492 out: 492 out:
493 imx_dmav1_writel(imxdma, 0, DMA_CCR(chno)); 493 imx_dmav1_writel(imxdma, 0, DMA_CCR(chno));
494 /* Tasklet irq */ 494 /* Tasklet irq */
495 tasklet_schedule(&imxdmac->dma_tasklet); 495 tasklet_schedule(&imxdmac->dma_tasklet);
496 } 496 }
497 497
498 static irqreturn_t dma_irq_handler(int irq, void *dev_id) 498 static irqreturn_t dma_irq_handler(int irq, void *dev_id)
499 { 499 {
500 struct imxdma_engine *imxdma = dev_id; 500 struct imxdma_engine *imxdma = dev_id;
501 int i, disr; 501 int i, disr;
502 502
503 if (!is_imx1_dma(imxdma)) 503 if (!is_imx1_dma(imxdma))
504 imxdma_err_handler(irq, dev_id); 504 imxdma_err_handler(irq, dev_id);
505 505
506 disr = imx_dmav1_readl(imxdma, DMA_DISR); 506 disr = imx_dmav1_readl(imxdma, DMA_DISR);
507 507
508 dev_dbg(imxdma->dev, "%s called, disr=0x%08x\n", __func__, disr); 508 dev_dbg(imxdma->dev, "%s called, disr=0x%08x\n", __func__, disr);
509 509
510 imx_dmav1_writel(imxdma, disr, DMA_DISR); 510 imx_dmav1_writel(imxdma, disr, DMA_DISR);
511 for (i = 0; i < IMX_DMA_CHANNELS; i++) { 511 for (i = 0; i < IMX_DMA_CHANNELS; i++) {
512 if (disr & (1 << i)) 512 if (disr & (1 << i))
513 dma_irq_handle_channel(&imxdma->channel[i]); 513 dma_irq_handle_channel(&imxdma->channel[i]);
514 } 514 }
515 515
516 return IRQ_HANDLED; 516 return IRQ_HANDLED;
517 } 517 }
518 518
519 static int imxdma_xfer_desc(struct imxdma_desc *d) 519 static int imxdma_xfer_desc(struct imxdma_desc *d)
520 { 520 {
521 struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan); 521 struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
522 struct imxdma_engine *imxdma = imxdmac->imxdma; 522 struct imxdma_engine *imxdma = imxdmac->imxdma;
523 int slot = -1; 523 int slot = -1;
524 int i; 524 int i;
525 525
526 /* Configure and enable */ 526 /* Configure and enable */
527 switch (d->type) { 527 switch (d->type) {
528 case IMXDMA_DESC_INTERLEAVED: 528 case IMXDMA_DESC_INTERLEAVED:
529 /* Try to get a free 2D slot */ 529 /* Try to get a free 2D slot */
530 for (i = 0; i < IMX_DMA_2D_SLOTS; i++) { 530 for (i = 0; i < IMX_DMA_2D_SLOTS; i++) {
531 if ((imxdma->slots_2d[i].count > 0) && 531 if ((imxdma->slots_2d[i].count > 0) &&
532 ((imxdma->slots_2d[i].xsr != d->x) || 532 ((imxdma->slots_2d[i].xsr != d->x) ||
533 (imxdma->slots_2d[i].ysr != d->y) || 533 (imxdma->slots_2d[i].ysr != d->y) ||
534 (imxdma->slots_2d[i].wsr != d->w))) 534 (imxdma->slots_2d[i].wsr != d->w)))
535 continue; 535 continue;
536 slot = i; 536 slot = i;
537 break; 537 break;
538 } 538 }
539 if (slot < 0) 539 if (slot < 0)
540 return -EBUSY; 540 return -EBUSY;
541 541
542 imxdma->slots_2d[slot].xsr = d->x; 542 imxdma->slots_2d[slot].xsr = d->x;
543 imxdma->slots_2d[slot].ysr = d->y; 543 imxdma->slots_2d[slot].ysr = d->y;
544 imxdma->slots_2d[slot].wsr = d->w; 544 imxdma->slots_2d[slot].wsr = d->w;
545 imxdma->slots_2d[slot].count++; 545 imxdma->slots_2d[slot].count++;
546 546
547 imxdmac->slot_2d = slot; 547 imxdmac->slot_2d = slot;
548 imxdmac->enabled_2d = true; 548 imxdmac->enabled_2d = true;
549 549
550 if (slot == IMX_DMA_2D_SLOT_A) { 550 if (slot == IMX_DMA_2D_SLOT_A) {
551 d->config_mem &= ~CCR_MSEL_B; 551 d->config_mem &= ~CCR_MSEL_B;
552 d->config_port &= ~CCR_MSEL_B; 552 d->config_port &= ~CCR_MSEL_B;
553 imx_dmav1_writel(imxdma, d->x, DMA_XSRA); 553 imx_dmav1_writel(imxdma, d->x, DMA_XSRA);
554 imx_dmav1_writel(imxdma, d->y, DMA_YSRA); 554 imx_dmav1_writel(imxdma, d->y, DMA_YSRA);
555 imx_dmav1_writel(imxdma, d->w, DMA_WSRA); 555 imx_dmav1_writel(imxdma, d->w, DMA_WSRA);
556 } else { 556 } else {
557 d->config_mem |= CCR_MSEL_B; 557 d->config_mem |= CCR_MSEL_B;
558 d->config_port |= CCR_MSEL_B; 558 d->config_port |= CCR_MSEL_B;
559 imx_dmav1_writel(imxdma, d->x, DMA_XSRB); 559 imx_dmav1_writel(imxdma, d->x, DMA_XSRB);
560 imx_dmav1_writel(imxdma, d->y, DMA_YSRB); 560 imx_dmav1_writel(imxdma, d->y, DMA_YSRB);
561 imx_dmav1_writel(imxdma, d->w, DMA_WSRB); 561 imx_dmav1_writel(imxdma, d->w, DMA_WSRB);
562 } 562 }
563 /* 563 /*
564 * We fall-through here intentionally, since a 2D transfer is 564 * We fall-through here intentionally, since a 2D transfer is
565 * similar to MEMCPY, just with the 2D slot configuration added. 565 * similar to MEMCPY, just with the 2D slot configuration added.
566 */ 566 */
567 case IMXDMA_DESC_MEMCPY: 567 case IMXDMA_DESC_MEMCPY:
568 imx_dmav1_writel(imxdma, d->src, DMA_SAR(imxdmac->channel)); 568 imx_dmav1_writel(imxdma, d->src, DMA_SAR(imxdmac->channel));
569 imx_dmav1_writel(imxdma, d->dest, DMA_DAR(imxdmac->channel)); 569 imx_dmav1_writel(imxdma, d->dest, DMA_DAR(imxdmac->channel));
570 imx_dmav1_writel(imxdma, d->config_mem | (d->config_port << 2), 570 imx_dmav1_writel(imxdma, d->config_mem | (d->config_port << 2),
571 DMA_CCR(imxdmac->channel)); 571 DMA_CCR(imxdmac->channel));
572 572
573 imx_dmav1_writel(imxdma, d->len, DMA_CNTR(imxdmac->channel)); 573 imx_dmav1_writel(imxdma, d->len, DMA_CNTR(imxdmac->channel));
574 574
575 dev_dbg(imxdma->dev, "%s channel: %d dest=0x%08x src=0x%08x " 575 dev_dbg(imxdma->dev, "%s channel: %d dest=0x%08x src=0x%08x "
576 "dma_length=%d\n", __func__, imxdmac->channel, 576 "dma_length=%d\n", __func__, imxdmac->channel,
577 d->dest, d->src, d->len); 577 d->dest, d->src, d->len);
578 578
579 break; 579 break;
580 /* Cyclic transfer is the same as slave_sg with special sg configuration. */ 580 /* Cyclic transfer is the same as slave_sg with special sg configuration. */
581 case IMXDMA_DESC_CYCLIC: 581 case IMXDMA_DESC_CYCLIC:
582 case IMXDMA_DESC_SLAVE_SG: 582 case IMXDMA_DESC_SLAVE_SG:
583 if (d->direction == DMA_DEV_TO_MEM) { 583 if (d->direction == DMA_DEV_TO_MEM) {
584 imx_dmav1_writel(imxdma, imxdmac->per_address, 584 imx_dmav1_writel(imxdma, imxdmac->per_address,
585 DMA_SAR(imxdmac->channel)); 585 DMA_SAR(imxdmac->channel));
586 imx_dmav1_writel(imxdma, imxdmac->ccr_from_device, 586 imx_dmav1_writel(imxdma, imxdmac->ccr_from_device,
587 DMA_CCR(imxdmac->channel)); 587 DMA_CCR(imxdmac->channel));
588 588
589 dev_dbg(imxdma->dev, "%s channel: %d sg=%p sgcount=%d " 589 dev_dbg(imxdma->dev, "%s channel: %d sg=%p sgcount=%d "
590 "total length=%d dev_addr=0x%08x (dev2mem)\n", 590 "total length=%d dev_addr=0x%08x (dev2mem)\n",
591 __func__, imxdmac->channel, d->sg, d->sgcount, 591 __func__, imxdmac->channel, d->sg, d->sgcount,
592 d->len, imxdmac->per_address); 592 d->len, imxdmac->per_address);
593 } else if (d->direction == DMA_MEM_TO_DEV) { 593 } else if (d->direction == DMA_MEM_TO_DEV) {
594 imx_dmav1_writel(imxdma, imxdmac->per_address, 594 imx_dmav1_writel(imxdma, imxdmac->per_address,
595 DMA_DAR(imxdmac->channel)); 595 DMA_DAR(imxdmac->channel));
596 imx_dmav1_writel(imxdma, imxdmac->ccr_to_device, 596 imx_dmav1_writel(imxdma, imxdmac->ccr_to_device,
597 DMA_CCR(imxdmac->channel)); 597 DMA_CCR(imxdmac->channel));
598 598
599 dev_dbg(imxdma->dev, "%s channel: %d sg=%p sgcount=%d " 599 dev_dbg(imxdma->dev, "%s channel: %d sg=%p sgcount=%d "
600 "total length=%d dev_addr=0x%08x (mem2dev)\n", 600 "total length=%d dev_addr=0x%08x (mem2dev)\n",
601 __func__, imxdmac->channel, d->sg, d->sgcount, 601 __func__, imxdmac->channel, d->sg, d->sgcount,
602 d->len, imxdmac->per_address); 602 d->len, imxdmac->per_address);
603 } else { 603 } else {
604 dev_err(imxdma->dev, "%s channel: %d bad dma mode\n", 604 dev_err(imxdma->dev, "%s channel: %d bad dma mode\n",
605 __func__, imxdmac->channel); 605 __func__, imxdmac->channel);
606 return -EINVAL; 606 return -EINVAL;
607 } 607 }
608 608
609 imxdma_sg_next(d); 609 imxdma_sg_next(d);
610 610
611 break; 611 break;
612 default: 612 default:
613 return -EINVAL; 613 return -EINVAL;
614 } 614 }
615 imxdma_enable_hw(d); 615 imxdma_enable_hw(d);
616 return 0; 616 return 0;
617 } 617 }
618 618
619 static void imxdma_tasklet(unsigned long data) 619 static void imxdma_tasklet(unsigned long data)
620 { 620 {
621 struct imxdma_channel *imxdmac = (void *)data; 621 struct imxdma_channel *imxdmac = (void *)data;
622 struct imxdma_engine *imxdma = imxdmac->imxdma; 622 struct imxdma_engine *imxdma = imxdmac->imxdma;
623 struct imxdma_desc *desc; 623 struct imxdma_desc *desc;
624 unsigned long flags; 624 unsigned long flags;
625 625
626 spin_lock_irqsave(&imxdma->lock, flags); 626 spin_lock_irqsave(&imxdma->lock, flags);
627 627
628 if (list_empty(&imxdmac->ld_active)) { 628 if (list_empty(&imxdmac->ld_active)) {
629 /* Someone might have called terminate all */ 629 /* Someone might have called terminate all */
630 spin_unlock_irqrestore(&imxdma->lock, flags); 630 spin_unlock_irqrestore(&imxdma->lock, flags);
631 return; 631 return;
632 } 632 }
633 desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc, node); 633 desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc, node);
634 634
635 /* If we are dealing with a cyclic descriptor, keep it on ld_active 635 /* If we are dealing with a cyclic descriptor, keep it on ld_active
636 * and don't mark the descriptor as complete. 636 * and don't mark the descriptor as complete.
637 * Only in the non-cyclic case is it marked as complete. 637 * Only in the non-cyclic case is it marked as complete.
638 */ 638 */
639 if (imxdma_chan_is_doing_cyclic(imxdmac)) 639 if (imxdma_chan_is_doing_cyclic(imxdmac))
640 goto out; 640 goto out;
641 else 641 else
642 dma_cookie_complete(&desc->desc); 642 dma_cookie_complete(&desc->desc);
643 643
644 /* Free 2D slot if it was an interleaved transfer */ 644 /* Free 2D slot if it was an interleaved transfer */
645 if (imxdmac->enabled_2d) { 645 if (imxdmac->enabled_2d) {
646 imxdma->slots_2d[imxdmac->slot_2d].count--; 646 imxdma->slots_2d[imxdmac->slot_2d].count--;
647 imxdmac->enabled_2d = false; 647 imxdmac->enabled_2d = false;
648 } 648 }
649 649
650 list_move_tail(imxdmac->ld_active.next, &imxdmac->ld_free); 650 list_move_tail(imxdmac->ld_active.next, &imxdmac->ld_free);
651 651
652 if (!list_empty(&imxdmac->ld_queue)) { 652 if (!list_empty(&imxdmac->ld_queue)) {
653 desc = list_first_entry(&imxdmac->ld_queue, struct imxdma_desc, 653 desc = list_first_entry(&imxdmac->ld_queue, struct imxdma_desc,
654 node); 654 node);
655 list_move_tail(imxdmac->ld_queue.next, &imxdmac->ld_active); 655 list_move_tail(imxdmac->ld_queue.next, &imxdmac->ld_active);
656 if (imxdma_xfer_desc(desc) < 0) 656 if (imxdma_xfer_desc(desc) < 0)
657 dev_warn(imxdma->dev, "%s: channel: %d couldn't xfer desc\n", 657 dev_warn(imxdma->dev, "%s: channel: %d couldn't xfer desc\n",
658 __func__, imxdmac->channel); 658 __func__, imxdmac->channel);
659 } 659 }
660 out: 660 out:
661 spin_unlock_irqrestore(&imxdma->lock, flags); 661 spin_unlock_irqrestore(&imxdma->lock, flags);
662 662
663 if (desc->desc.callback) 663 if (desc->desc.callback)
664 desc->desc.callback(desc->desc.callback_param); 664 desc->desc.callback(desc->desc.callback_param);
665 665
666 } 666 }
667 667
668 static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, 668 static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
669 unsigned long arg) 669 unsigned long arg)
670 { 670 {
671 struct imxdma_channel *imxdmac = to_imxdma_chan(chan); 671 struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
672 struct dma_slave_config *dmaengine_cfg = (void *)arg; 672 struct dma_slave_config *dmaengine_cfg = (void *)arg;
673 struct imxdma_engine *imxdma = imxdmac->imxdma; 673 struct imxdma_engine *imxdma = imxdmac->imxdma;
674 unsigned long flags; 674 unsigned long flags;
675 unsigned int mode = 0; 675 unsigned int mode = 0;
676 676
677 switch (cmd) { 677 switch (cmd) {
678 case DMA_TERMINATE_ALL: 678 case DMA_TERMINATE_ALL:
679 imxdma_disable_hw(imxdmac); 679 imxdma_disable_hw(imxdmac);
680 680
681 spin_lock_irqsave(&imxdma->lock, flags); 681 spin_lock_irqsave(&imxdma->lock, flags);
682 list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free); 682 list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free);
683 list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free); 683 list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free);
684 spin_unlock_irqrestore(&imxdma->lock, flags); 684 spin_unlock_irqrestore(&imxdma->lock, flags);
685 return 0; 685 return 0;
686 case DMA_SLAVE_CONFIG: 686 case DMA_SLAVE_CONFIG:
687 if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) { 687 if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
688 imxdmac->per_address = dmaengine_cfg->src_addr; 688 imxdmac->per_address = dmaengine_cfg->src_addr;
689 imxdmac->watermark_level = dmaengine_cfg->src_maxburst; 689 imxdmac->watermark_level = dmaengine_cfg->src_maxburst;
690 imxdmac->word_size = dmaengine_cfg->src_addr_width; 690 imxdmac->word_size = dmaengine_cfg->src_addr_width;
691 } else { 691 } else {
692 imxdmac->per_address = dmaengine_cfg->dst_addr; 692 imxdmac->per_address = dmaengine_cfg->dst_addr;
693 imxdmac->watermark_level = dmaengine_cfg->dst_maxburst; 693 imxdmac->watermark_level = dmaengine_cfg->dst_maxburst;
694 imxdmac->word_size = dmaengine_cfg->dst_addr_width; 694 imxdmac->word_size = dmaengine_cfg->dst_addr_width;
695 } 695 }
696 696
697 switch (imxdmac->word_size) { 697 switch (imxdmac->word_size) {
698 case DMA_SLAVE_BUSWIDTH_1_BYTE: 698 case DMA_SLAVE_BUSWIDTH_1_BYTE:
699 mode = IMX_DMA_MEMSIZE_8; 699 mode = IMX_DMA_MEMSIZE_8;
700 break; 700 break;
701 case DMA_SLAVE_BUSWIDTH_2_BYTES: 701 case DMA_SLAVE_BUSWIDTH_2_BYTES:
702 mode = IMX_DMA_MEMSIZE_16; 702 mode = IMX_DMA_MEMSIZE_16;
703 break; 703 break;
704 default: 704 default:
705 case DMA_SLAVE_BUSWIDTH_4_BYTES: 705 case DMA_SLAVE_BUSWIDTH_4_BYTES:
706 mode = IMX_DMA_MEMSIZE_32; 706 mode = IMX_DMA_MEMSIZE_32;
707 break; 707 break;
708 } 708 }
709 709
710 imxdmac->hw_chaining = 0; 710 imxdmac->hw_chaining = 0;
711 711
712 imxdmac->ccr_from_device = (mode | IMX_DMA_TYPE_FIFO) | 712 imxdmac->ccr_from_device = (mode | IMX_DMA_TYPE_FIFO) |
713 ((IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) << 2) | 713 ((IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) << 2) |
714 CCR_REN; 714 CCR_REN;
715 imxdmac->ccr_to_device = 715 imxdmac->ccr_to_device =
716 (IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) | 716 (IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) |
717 ((mode | IMX_DMA_TYPE_FIFO) << 2) | CCR_REN; 717 ((mode | IMX_DMA_TYPE_FIFO) << 2) | CCR_REN;
718 imx_dmav1_writel(imxdma, imxdmac->dma_request, 718 imx_dmav1_writel(imxdma, imxdmac->dma_request,
719 DMA_RSSR(imxdmac->channel)); 719 DMA_RSSR(imxdmac->channel));
720 720
721 /* Set burst length */ 721 /* Set burst length */
722 imx_dmav1_writel(imxdma, imxdmac->watermark_level * 722 imx_dmav1_writel(imxdma, imxdmac->watermark_level *
723 imxdmac->word_size, DMA_BLR(imxdmac->channel)); 723 imxdmac->word_size, DMA_BLR(imxdmac->channel));
724 724
725 return 0; 725 return 0;
726 default: 726 default:
727 return -ENOSYS; 727 return -ENOSYS;
728 } 728 }
729 729
730 return -EINVAL; 730 return -EINVAL;
731 } 731 }
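A hedged illustration (not part of this driver, assumes <linux/dmaengine.h>): peripheral drivers normally reach the device_control hook above through the generic dmaengine wrappers, so dmaengine_slave_config() lands in the DMA_SLAVE_CONFIG case and dmaengine_terminate_all() in the DMA_TERMINATE_ALL case. The FIFO address and burst size below are made-up example values.

/* Client-side sketch: configure a DEV_TO_MEM channel so the switch above
 * picks IMX_DMA_MEMSIZE_32 and programs DMA_BLR/DMA_RSSR. */
static int example_configure_rx(struct dma_chan *chan, dma_addr_t fifo_addr)
{
	struct dma_slave_config cfg = {
		.direction      = DMA_DEV_TO_MEM,
		.src_addr       = fifo_addr,		/* peripheral FIFO (example) */
		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst   = 16,			/* becomes watermark_level */
	};

	/* Expands to imxdma_control(chan, DMA_SLAVE_CONFIG, &cfg);
	 * dmaengine_terminate_all(chan) would hit DMA_TERMINATE_ALL. */
	return dmaengine_slave_config(chan, &cfg);
}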
732 732
733 static enum dma_status imxdma_tx_status(struct dma_chan *chan, 733 static enum dma_status imxdma_tx_status(struct dma_chan *chan,
734 dma_cookie_t cookie, 734 dma_cookie_t cookie,
735 struct dma_tx_state *txstate) 735 struct dma_tx_state *txstate)
736 { 736 {
737 return dma_cookie_status(chan, cookie, txstate); 737 return dma_cookie_status(chan, cookie, txstate);
738 } 738 }
739 739
740 static dma_cookie_t imxdma_tx_submit(struct dma_async_tx_descriptor *tx) 740 static dma_cookie_t imxdma_tx_submit(struct dma_async_tx_descriptor *tx)
741 { 741 {
742 struct imxdma_channel *imxdmac = to_imxdma_chan(tx->chan); 742 struct imxdma_channel *imxdmac = to_imxdma_chan(tx->chan);
743 struct imxdma_engine *imxdma = imxdmac->imxdma; 743 struct imxdma_engine *imxdma = imxdmac->imxdma;
744 dma_cookie_t cookie; 744 dma_cookie_t cookie;
745 unsigned long flags; 745 unsigned long flags;
746 746
747 spin_lock_irqsave(&imxdma->lock, flags); 747 spin_lock_irqsave(&imxdma->lock, flags);
748 list_move_tail(imxdmac->ld_free.next, &imxdmac->ld_queue); 748 list_move_tail(imxdmac->ld_free.next, &imxdmac->ld_queue);
749 cookie = dma_cookie_assign(tx); 749 cookie = dma_cookie_assign(tx);
750 spin_unlock_irqrestore(&imxdma->lock, flags); 750 spin_unlock_irqrestore(&imxdma->lock, flags);
751 751
752 return cookie; 752 return cookie;
753 } 753 }
754 754
755 static int imxdma_alloc_chan_resources(struct dma_chan *chan) 755 static int imxdma_alloc_chan_resources(struct dma_chan *chan)
756 { 756 {
757 struct imxdma_channel *imxdmac = to_imxdma_chan(chan); 757 struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
758 struct imx_dma_data *data = chan->private; 758 struct imx_dma_data *data = chan->private;
759 759
760 if (data != NULL) 760 if (data != NULL)
761 imxdmac->dma_request = data->dma_request; 761 imxdmac->dma_request = data->dma_request;
762 762
763 while (imxdmac->descs_allocated < IMXDMA_MAX_CHAN_DESCRIPTORS) { 763 while (imxdmac->descs_allocated < IMXDMA_MAX_CHAN_DESCRIPTORS) {
764 struct imxdma_desc *desc; 764 struct imxdma_desc *desc;
765 765
766 desc = kzalloc(sizeof(*desc), GFP_KERNEL); 766 desc = kzalloc(sizeof(*desc), GFP_KERNEL);
767 if (!desc) 767 if (!desc)
768 break; 768 break;
769 __memzero(&desc->desc, sizeof(struct dma_async_tx_descriptor)); 769 __memzero(&desc->desc, sizeof(struct dma_async_tx_descriptor));
770 dma_async_tx_descriptor_init(&desc->desc, chan); 770 dma_async_tx_descriptor_init(&desc->desc, chan);
771 desc->desc.tx_submit = imxdma_tx_submit; 771 desc->desc.tx_submit = imxdma_tx_submit;
772 /* txd.flags will be overwritten in prep funcs */ 772 /* txd.flags will be overwritten in prep funcs */
773 desc->desc.flags = DMA_CTRL_ACK; 773 desc->desc.flags = DMA_CTRL_ACK;
774 desc->status = DMA_SUCCESS; 774 desc->status = DMA_COMPLETE;
775 775
776 list_add_tail(&desc->node, &imxdmac->ld_free); 776 list_add_tail(&desc->node, &imxdmac->ld_free);
777 imxdmac->descs_allocated++; 777 imxdmac->descs_allocated++;
778 } 778 }
779 779
780 if (!imxdmac->descs_allocated) 780 if (!imxdmac->descs_allocated)
781 return -ENOMEM; 781 return -ENOMEM;
782 782
783 return imxdmac->descs_allocated; 783 return imxdmac->descs_allocated;
784 } 784 }
785 785
786 static void imxdma_free_chan_resources(struct dma_chan *chan) 786 static void imxdma_free_chan_resources(struct dma_chan *chan)
787 { 787 {
788 struct imxdma_channel *imxdmac = to_imxdma_chan(chan); 788 struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
789 struct imxdma_engine *imxdma = imxdmac->imxdma; 789 struct imxdma_engine *imxdma = imxdmac->imxdma;
790 struct imxdma_desc *desc, *_desc; 790 struct imxdma_desc *desc, *_desc;
791 unsigned long flags; 791 unsigned long flags;
792 792
793 spin_lock_irqsave(&imxdma->lock, flags); 793 spin_lock_irqsave(&imxdma->lock, flags);
794 794
795 imxdma_disable_hw(imxdmac); 795 imxdma_disable_hw(imxdmac);
796 list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free); 796 list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free);
797 list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free); 797 list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free);
798 798
799 spin_unlock_irqrestore(&imxdma->lock, flags); 799 spin_unlock_irqrestore(&imxdma->lock, flags);
800 800
801 list_for_each_entry_safe(desc, _desc, &imxdmac->ld_free, node) { 801 list_for_each_entry_safe(desc, _desc, &imxdmac->ld_free, node) {
802 kfree(desc); 802 kfree(desc);
803 imxdmac->descs_allocated--; 803 imxdmac->descs_allocated--;
804 } 804 }
805 INIT_LIST_HEAD(&imxdmac->ld_free); 805 INIT_LIST_HEAD(&imxdmac->ld_free);
806 806
807 kfree(imxdmac->sg_list); 807 kfree(imxdmac->sg_list);
808 imxdmac->sg_list = NULL; 808 imxdmac->sg_list = NULL;
809 } 809 }
810 810
811 static struct dma_async_tx_descriptor *imxdma_prep_slave_sg( 811 static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
812 struct dma_chan *chan, struct scatterlist *sgl, 812 struct dma_chan *chan, struct scatterlist *sgl,
813 unsigned int sg_len, enum dma_transfer_direction direction, 813 unsigned int sg_len, enum dma_transfer_direction direction,
814 unsigned long flags, void *context) 814 unsigned long flags, void *context)
815 { 815 {
816 struct imxdma_channel *imxdmac = to_imxdma_chan(chan); 816 struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
817 struct scatterlist *sg; 817 struct scatterlist *sg;
818 int i, dma_length = 0; 818 int i, dma_length = 0;
819 struct imxdma_desc *desc; 819 struct imxdma_desc *desc;
820 820
821 if (list_empty(&imxdmac->ld_free) || 821 if (list_empty(&imxdmac->ld_free) ||
822 imxdma_chan_is_doing_cyclic(imxdmac)) 822 imxdma_chan_is_doing_cyclic(imxdmac))
823 return NULL; 823 return NULL;
824 824
825 desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node); 825 desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);
826 826
827 for_each_sg(sgl, sg, sg_len, i) { 827 for_each_sg(sgl, sg, sg_len, i) {
828 dma_length += sg_dma_len(sg); 828 dma_length += sg_dma_len(sg);
829 } 829 }
830 830
831 switch (imxdmac->word_size) { 831 switch (imxdmac->word_size) {
832 case DMA_SLAVE_BUSWIDTH_4_BYTES: 832 case DMA_SLAVE_BUSWIDTH_4_BYTES:
833 if (sg_dma_len(sgl) & 3 || sgl->dma_address & 3) 833 if (sg_dma_len(sgl) & 3 || sgl->dma_address & 3)
834 return NULL; 834 return NULL;
835 break; 835 break;
836 case DMA_SLAVE_BUSWIDTH_2_BYTES: 836 case DMA_SLAVE_BUSWIDTH_2_BYTES:
837 if (sg_dma_len(sgl) & 1 || sgl->dma_address & 1) 837 if (sg_dma_len(sgl) & 1 || sgl->dma_address & 1)
838 return NULL; 838 return NULL;
839 break; 839 break;
840 case DMA_SLAVE_BUSWIDTH_1_BYTE: 840 case DMA_SLAVE_BUSWIDTH_1_BYTE:
841 break; 841 break;
842 default: 842 default:
843 return NULL; 843 return NULL;
844 } 844 }
845 845
846 desc->type = IMXDMA_DESC_SLAVE_SG; 846 desc->type = IMXDMA_DESC_SLAVE_SG;
847 desc->sg = sgl; 847 desc->sg = sgl;
848 desc->sgcount = sg_len; 848 desc->sgcount = sg_len;
849 desc->len = dma_length; 849 desc->len = dma_length;
850 desc->direction = direction; 850 desc->direction = direction;
851 if (direction == DMA_DEV_TO_MEM) { 851 if (direction == DMA_DEV_TO_MEM) {
852 desc->src = imxdmac->per_address; 852 desc->src = imxdmac->per_address;
853 } else { 853 } else {
854 desc->dest = imxdmac->per_address; 854 desc->dest = imxdmac->per_address;
855 } 855 }
856 desc->desc.callback = NULL; 856 desc->desc.callback = NULL;
857 desc->desc.callback_param = NULL; 857 desc->desc.callback_param = NULL;
858 858
859 return &desc->desc; 859 return &desc->desc;
860 } 860 }
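As a hedged usage note (consumer code, assumes <linux/dmaengine.h>): the prep routine above is reached through dmaengine_prep_slave_sg(), after which the descriptor is submitted and the channel kicked. The callback name and its argument are illustrative.

/* Consumer-side sketch: prep, submit and start a slave SG read. */
static int example_start_sg_rx(struct dma_chan *chan, struct scatterlist *sgl,
			       unsigned int sg_len,
			       void (*done)(void *), void *arg)
{
	struct dma_async_tx_descriptor *txd;

	txd = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_DEV_TO_MEM,
				      DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!txd)	/* no free descriptor, or width/alignment rejected above */
		return -ENOMEM;

	txd->callback = done;			/* runs from imxdma_tasklet() */
	txd->callback_param = arg;

	dmaengine_submit(txd);			/* -> imxdma_tx_submit() */
	dma_async_issue_pending(chan);		/* -> imxdma_issue_pending() */
	return 0;
}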
861 861
862 static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic( 862 static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
863 struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len, 863 struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
864 size_t period_len, enum dma_transfer_direction direction, 864 size_t period_len, enum dma_transfer_direction direction,
865 unsigned long flags, void *context) 865 unsigned long flags, void *context)
866 { 866 {
867 struct imxdma_channel *imxdmac = to_imxdma_chan(chan); 867 struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
868 struct imxdma_engine *imxdma = imxdmac->imxdma; 868 struct imxdma_engine *imxdma = imxdmac->imxdma;
869 struct imxdma_desc *desc; 869 struct imxdma_desc *desc;
870 int i; 870 int i;
871 unsigned int periods = buf_len / period_len; 871 unsigned int periods = buf_len / period_len;
872 872
873 dev_dbg(imxdma->dev, "%s channel: %d buf_len=%d period_len=%d\n", 873 dev_dbg(imxdma->dev, "%s channel: %d buf_len=%d period_len=%d\n",
874 __func__, imxdmac->channel, buf_len, period_len); 874 __func__, imxdmac->channel, buf_len, period_len);
875 875
876 if (list_empty(&imxdmac->ld_free) || 876 if (list_empty(&imxdmac->ld_free) ||
877 imxdma_chan_is_doing_cyclic(imxdmac)) 877 imxdma_chan_is_doing_cyclic(imxdmac))
878 return NULL; 878 return NULL;
879 879
880 desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node); 880 desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);
881 881
882 kfree(imxdmac->sg_list); 882 kfree(imxdmac->sg_list);
883 883
884 imxdmac->sg_list = kcalloc(periods + 1, 884 imxdmac->sg_list = kcalloc(periods + 1,
885 sizeof(struct scatterlist), GFP_ATOMIC); 885 sizeof(struct scatterlist), GFP_ATOMIC);
886 if (!imxdmac->sg_list) 886 if (!imxdmac->sg_list)
887 return NULL; 887 return NULL;
888 888
889 sg_init_table(imxdmac->sg_list, periods); 889 sg_init_table(imxdmac->sg_list, periods);
890 890
891 for (i = 0; i < periods; i++) { 891 for (i = 0; i < periods; i++) {
892 imxdmac->sg_list[i].page_link = 0; 892 imxdmac->sg_list[i].page_link = 0;
893 imxdmac->sg_list[i].offset = 0; 893 imxdmac->sg_list[i].offset = 0;
894 imxdmac->sg_list[i].dma_address = dma_addr; 894 imxdmac->sg_list[i].dma_address = dma_addr;
895 sg_dma_len(&imxdmac->sg_list[i]) = period_len; 895 sg_dma_len(&imxdmac->sg_list[i]) = period_len;
896 dma_addr += period_len; 896 dma_addr += period_len;
897 } 897 }
898 898
899 /* close the loop */ 899 /* close the loop */
900 imxdmac->sg_list[periods].offset = 0; 900 imxdmac->sg_list[periods].offset = 0;
901 sg_dma_len(&imxdmac->sg_list[periods]) = 0; 901 sg_dma_len(&imxdmac->sg_list[periods]) = 0;
902 imxdmac->sg_list[periods].page_link = 902 imxdmac->sg_list[periods].page_link =
903 ((unsigned long)imxdmac->sg_list | 0x01) & ~0x02; 903 ((unsigned long)imxdmac->sg_list | 0x01) & ~0x02;
904 904
905 desc->type = IMXDMA_DESC_CYCLIC; 905 desc->type = IMXDMA_DESC_CYCLIC;
906 desc->sg = imxdmac->sg_list; 906 desc->sg = imxdmac->sg_list;
907 desc->sgcount = periods; 907 desc->sgcount = periods;
908 desc->len = IMX_DMA_LENGTH_LOOP; 908 desc->len = IMX_DMA_LENGTH_LOOP;
909 desc->direction = direction; 909 desc->direction = direction;
910 if (direction == DMA_DEV_TO_MEM) { 910 if (direction == DMA_DEV_TO_MEM) {
911 desc->src = imxdmac->per_address; 911 desc->src = imxdmac->per_address;
912 } else { 912 } else {
913 desc->dest = imxdmac->per_address; 913 desc->dest = imxdmac->per_address;
914 } 914 }
915 desc->desc.callback = NULL; 915 desc->desc.callback = NULL;
916 desc->desc.callback_param = NULL; 916 desc->desc.callback_param = NULL;
917 917
918 return &desc->desc; 918 return &desc->desc;
919 } 919 }
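A hedged consumer sketch for the cyclic path above: a four-period ring whose callback fires once per completed period and keeps running until the channel is terminated. The wrapper signature is assumed from this kernel generation's dmaengine.h; buffer and period sizes are arbitrary examples.

static int example_start_cyclic(struct dma_chan *chan, dma_addr_t buf,
				void (*period_done)(void *), void *arg)
{
	size_t period_len = 4096;
	size_t buf_len = 4 * period_len;  /* periods = buf_len / period_len = 4 */
	struct dma_async_tx_descriptor *txd;

	txd = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
					DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
	if (!txd)	/* e.g. the channel is already running a cyclic transfer */
		return -EBUSY;

	txd->callback = period_done;
	txd->callback_param = arg;
	dmaengine_submit(txd);
	dma_async_issue_pending(chan);
	return 0;	/* stop later with dmaengine_terminate_all(chan) */
}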
920 920
921 static struct dma_async_tx_descriptor *imxdma_prep_dma_memcpy( 921 static struct dma_async_tx_descriptor *imxdma_prep_dma_memcpy(
922 struct dma_chan *chan, dma_addr_t dest, 922 struct dma_chan *chan, dma_addr_t dest,
923 dma_addr_t src, size_t len, unsigned long flags) 923 dma_addr_t src, size_t len, unsigned long flags)
924 { 924 {
925 struct imxdma_channel *imxdmac = to_imxdma_chan(chan); 925 struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
926 struct imxdma_engine *imxdma = imxdmac->imxdma; 926 struct imxdma_engine *imxdma = imxdmac->imxdma;
927 struct imxdma_desc *desc; 927 struct imxdma_desc *desc;
928 928
929 dev_dbg(imxdma->dev, "%s channel: %d src=0x%x dst=0x%x len=%d\n", 929 dev_dbg(imxdma->dev, "%s channel: %d src=0x%x dst=0x%x len=%d\n",
930 __func__, imxdmac->channel, src, dest, len); 930 __func__, imxdmac->channel, src, dest, len);
931 931
932 if (list_empty(&imxdmac->ld_free) || 932 if (list_empty(&imxdmac->ld_free) ||
933 imxdma_chan_is_doing_cyclic(imxdmac)) 933 imxdma_chan_is_doing_cyclic(imxdmac))
934 return NULL; 934 return NULL;
935 935
936 desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node); 936 desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);
937 937
938 desc->type = IMXDMA_DESC_MEMCPY; 938 desc->type = IMXDMA_DESC_MEMCPY;
939 desc->src = src; 939 desc->src = src;
940 desc->dest = dest; 940 desc->dest = dest;
941 desc->len = len; 941 desc->len = len;
942 desc->direction = DMA_MEM_TO_MEM; 942 desc->direction = DMA_MEM_TO_MEM;
943 desc->config_port = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR; 943 desc->config_port = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR;
944 desc->config_mem = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR; 944 desc->config_mem = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR;
945 desc->desc.callback = NULL; 945 desc->desc.callback = NULL;
946 desc->desc.callback_param = NULL; 946 desc->desc.callback_param = NULL;
947 947
948 return &desc->desc; 948 return &desc->desc;
949 } 949 }
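A hedged sketch of driving the memcpy prep above directly through the device op (no generic memcpy wrapper is assumed here). Addresses and length would need to honour the 4-byte copy_align advertised in probe.

static dma_cookie_t example_memcpy(struct dma_chan *chan, dma_addr_t dst,
				   dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *txd;
	dma_cookie_t cookie;

	txd = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
						    DMA_CTRL_ACK);
	if (!txd)
		return -EBUSY;

	cookie = dmaengine_submit(txd);
	dma_async_issue_pending(chan);
	return cookie;		/* poll later via imxdma_tx_status() */
}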
950 950
951 static struct dma_async_tx_descriptor *imxdma_prep_dma_interleaved( 951 static struct dma_async_tx_descriptor *imxdma_prep_dma_interleaved(
952 struct dma_chan *chan, struct dma_interleaved_template *xt, 952 struct dma_chan *chan, struct dma_interleaved_template *xt,
953 unsigned long flags) 953 unsigned long flags)
954 { 954 {
955 struct imxdma_channel *imxdmac = to_imxdma_chan(chan); 955 struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
956 struct imxdma_engine *imxdma = imxdmac->imxdma; 956 struct imxdma_engine *imxdma = imxdmac->imxdma;
957 struct imxdma_desc *desc; 957 struct imxdma_desc *desc;
958 958
959 dev_dbg(imxdma->dev, "%s channel: %d src_start=0x%x dst_start=0x%x\n" 959 dev_dbg(imxdma->dev, "%s channel: %d src_start=0x%x dst_start=0x%x\n"
960 " src_sgl=%s dst_sgl=%s numf=%d frame_size=%d\n", __func__, 960 " src_sgl=%s dst_sgl=%s numf=%d frame_size=%d\n", __func__,
961 imxdmac->channel, xt->src_start, xt->dst_start, 961 imxdmac->channel, xt->src_start, xt->dst_start,
962 xt->src_sgl ? "true" : "false", xt->dst_sgl ? "true" : "false", 962 xt->src_sgl ? "true" : "false", xt->dst_sgl ? "true" : "false",
963 xt->numf, xt->frame_size); 963 xt->numf, xt->frame_size);
964 964
965 if (list_empty(&imxdmac->ld_free) || 965 if (list_empty(&imxdmac->ld_free) ||
966 imxdma_chan_is_doing_cyclic(imxdmac)) 966 imxdma_chan_is_doing_cyclic(imxdmac))
967 return NULL; 967 return NULL;
968 968
969 if (xt->frame_size != 1 || xt->numf <= 0 || xt->dir != DMA_MEM_TO_MEM) 969 if (xt->frame_size != 1 || xt->numf <= 0 || xt->dir != DMA_MEM_TO_MEM)
970 return NULL; 970 return NULL;
971 971
972 desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node); 972 desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);
973 973
974 desc->type = IMXDMA_DESC_INTERLEAVED; 974 desc->type = IMXDMA_DESC_INTERLEAVED;
975 desc->src = xt->src_start; 975 desc->src = xt->src_start;
976 desc->dest = xt->dst_start; 976 desc->dest = xt->dst_start;
977 desc->x = xt->sgl[0].size; 977 desc->x = xt->sgl[0].size;
978 desc->y = xt->numf; 978 desc->y = xt->numf;
979 desc->w = xt->sgl[0].icg + desc->x; 979 desc->w = xt->sgl[0].icg + desc->x;
980 desc->len = desc->x * desc->y; 980 desc->len = desc->x * desc->y;
981 desc->direction = DMA_MEM_TO_MEM; 981 desc->direction = DMA_MEM_TO_MEM;
982 desc->config_port = IMX_DMA_MEMSIZE_32; 982 desc->config_port = IMX_DMA_MEMSIZE_32;
983 desc->config_mem = IMX_DMA_MEMSIZE_32; 983 desc->config_mem = IMX_DMA_MEMSIZE_32;
984 if (xt->src_sgl) 984 if (xt->src_sgl)
985 desc->config_mem |= IMX_DMA_TYPE_2D; 985 desc->config_mem |= IMX_DMA_TYPE_2D;
986 if (xt->dst_sgl) 986 if (xt->dst_sgl)
987 desc->config_port |= IMX_DMA_TYPE_2D; 987 desc->config_port |= IMX_DMA_TYPE_2D;
988 desc->desc.callback = NULL; 988 desc->desc.callback = NULL;
989 desc->desc.callback_param = NULL; 989 desc->desc.callback_param = NULL;
990 990
991 return &desc->desc; 991 return &desc->desc;
992 } 992 }
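A hedged sketch of building the dma_interleaved_template the routine above accepts: one chunk per frame (frame_size == 1), MEM_TO_MEM, numf lines of `width' bytes with a `stride - width' gap between lines. Since the prep copies the template fields into the descriptor, the template can be freed right after the call; all names here are illustrative.

static struct dma_async_tx_descriptor *
example_prep_2d_copy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
		     size_t width, size_t stride, size_t numf)
{
	struct dma_interleaved_template *xt;
	struct dma_async_tx_descriptor *txd;

	xt = kzalloc(sizeof(*xt) + sizeof(struct data_chunk), GFP_KERNEL);
	if (!xt)
		return NULL;

	xt->src_start = src;
	xt->dst_start = dst;
	xt->dir = DMA_MEM_TO_MEM;
	xt->src_inc = true;
	xt->dst_inc = true;
	xt->src_sgl = true;		/* gaps on the source side (IMX_DMA_TYPE_2D) */
	xt->dst_sgl = true;		/* gaps on the destination side */
	xt->numf = numf;		/* lines -> desc->y */
	xt->frame_size = 1;		/* exactly one chunk per frame */
	xt->sgl[0].size = width;	/* bytes per line -> desc->x */
	xt->sgl[0].icg = stride - width; /* inter-line gap -> desc->w - desc->x */

	txd = dmaengine_prep_interleaved_dma(chan, xt, DMA_PREP_INTERRUPT);
	kfree(xt);			/* the prep above copied what it needs */
	return txd;
}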
993 993
994 static void imxdma_issue_pending(struct dma_chan *chan) 994 static void imxdma_issue_pending(struct dma_chan *chan)
995 { 995 {
996 struct imxdma_channel *imxdmac = to_imxdma_chan(chan); 996 struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
997 struct imxdma_engine *imxdma = imxdmac->imxdma; 997 struct imxdma_engine *imxdma = imxdmac->imxdma;
998 struct imxdma_desc *desc; 998 struct imxdma_desc *desc;
999 unsigned long flags; 999 unsigned long flags;
1000 1000
1001 spin_lock_irqsave(&imxdma->lock, flags); 1001 spin_lock_irqsave(&imxdma->lock, flags);
1002 if (list_empty(&imxdmac->ld_active) && 1002 if (list_empty(&imxdmac->ld_active) &&
1003 !list_empty(&imxdmac->ld_queue)) { 1003 !list_empty(&imxdmac->ld_queue)) {
1004 desc = list_first_entry(&imxdmac->ld_queue, 1004 desc = list_first_entry(&imxdmac->ld_queue,
1005 struct imxdma_desc, node); 1005 struct imxdma_desc, node);
1006 1006
1007 if (imxdma_xfer_desc(desc) < 0) { 1007 if (imxdma_xfer_desc(desc) < 0) {
1008 dev_warn(imxdma->dev, 1008 dev_warn(imxdma->dev,
1009 "%s: channel: %d couldn't issue DMA xfer\n", 1009 "%s: channel: %d couldn't issue DMA xfer\n",
1010 __func__, imxdmac->channel); 1010 __func__, imxdmac->channel);
1011 } else { 1011 } else {
1012 list_move_tail(imxdmac->ld_queue.next, 1012 list_move_tail(imxdmac->ld_queue.next,
1013 &imxdmac->ld_active); 1013 &imxdmac->ld_active);
1014 } 1014 }
1015 } 1015 }
1016 spin_unlock_irqrestore(&imxdma->lock, flags); 1016 spin_unlock_irqrestore(&imxdma->lock, flags);
1017 } 1017 }
1018 1018
1019 static bool imxdma_filter_fn(struct dma_chan *chan, void *param) 1019 static bool imxdma_filter_fn(struct dma_chan *chan, void *param)
1020 { 1020 {
1021 struct imxdma_filter_data *fdata = param; 1021 struct imxdma_filter_data *fdata = param;
1022 struct imxdma_channel *imxdma_chan = to_imxdma_chan(chan); 1022 struct imxdma_channel *imxdma_chan = to_imxdma_chan(chan);
1023 1023
1024 if (chan->device->dev != fdata->imxdma->dev) 1024 if (chan->device->dev != fdata->imxdma->dev)
1025 return false; 1025 return false;
1026 1026
1027 imxdma_chan->dma_request = fdata->request; 1027 imxdma_chan->dma_request = fdata->request;
1028 chan->private = NULL; 1028 chan->private = NULL;
1029 1029
1030 return true; 1030 return true;
1031 } 1031 }
1032 1032
1033 static struct dma_chan *imxdma_xlate(struct of_phandle_args *dma_spec, 1033 static struct dma_chan *imxdma_xlate(struct of_phandle_args *dma_spec,
1034 struct of_dma *ofdma) 1034 struct of_dma *ofdma)
1035 { 1035 {
1036 int count = dma_spec->args_count; 1036 int count = dma_spec->args_count;
1037 struct imxdma_engine *imxdma = ofdma->of_dma_data; 1037 struct imxdma_engine *imxdma = ofdma->of_dma_data;
1038 struct imxdma_filter_data fdata = { 1038 struct imxdma_filter_data fdata = {
1039 .imxdma = imxdma, 1039 .imxdma = imxdma,
1040 }; 1040 };
1041 1041
1042 if (count != 1) 1042 if (count != 1)
1043 return NULL; 1043 return NULL;
1044 1044
1045 fdata.request = dma_spec->args[0]; 1045 fdata.request = dma_spec->args[0];
1046 1046
1047 return dma_request_channel(imxdma->dma_device.cap_mask, 1047 return dma_request_channel(imxdma->dma_device.cap_mask,
1048 imxdma_filter_fn, &fdata); 1048 imxdma_filter_fn, &fdata);
1049 } 1049 }
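For orientation (hedged, consumer-side): a device-tree client ends up in imxdma_xlate() above via the standard dmas/dma-names lookup; the single cell becomes fdata.request and, eventually, imxdmac->dma_request. The property values shown are purely illustrative.

/* Client node (illustrative):  dmas = <&dma 26>;  dma-names = "rx"; */
static struct dma_chan *example_get_channel(struct device *dev)
{
	return dma_request_slave_channel(dev, "rx");	/* NULL on failure */
}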
1050 1050
1051 static int __init imxdma_probe(struct platform_device *pdev) 1051 static int __init imxdma_probe(struct platform_device *pdev)
1052 { 1052 {
1053 struct imxdma_engine *imxdma; 1053 struct imxdma_engine *imxdma;
1054 struct resource *res; 1054 struct resource *res;
1055 const struct of_device_id *of_id; 1055 const struct of_device_id *of_id;
1056 int ret, i; 1056 int ret, i;
1057 int irq, irq_err; 1057 int irq, irq_err;
1058 1058
1059 of_id = of_match_device(imx_dma_of_dev_id, &pdev->dev); 1059 of_id = of_match_device(imx_dma_of_dev_id, &pdev->dev);
1060 if (of_id) 1060 if (of_id)
1061 pdev->id_entry = of_id->data; 1061 pdev->id_entry = of_id->data;
1062 1062
1063 imxdma = devm_kzalloc(&pdev->dev, sizeof(*imxdma), GFP_KERNEL); 1063 imxdma = devm_kzalloc(&pdev->dev, sizeof(*imxdma), GFP_KERNEL);
1064 if (!imxdma) 1064 if (!imxdma)
1065 return -ENOMEM; 1065 return -ENOMEM;
1066 1066
1067 imxdma->dev = &pdev->dev; 1067 imxdma->dev = &pdev->dev;
1068 imxdma->devtype = pdev->id_entry->driver_data; 1068 imxdma->devtype = pdev->id_entry->driver_data;
1069 1069
1070 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1070 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1071 imxdma->base = devm_ioremap_resource(&pdev->dev, res); 1071 imxdma->base = devm_ioremap_resource(&pdev->dev, res);
1072 if (IS_ERR(imxdma->base)) 1072 if (IS_ERR(imxdma->base))
1073 return PTR_ERR(imxdma->base); 1073 return PTR_ERR(imxdma->base);
1074 1074
1075 irq = platform_get_irq(pdev, 0); 1075 irq = platform_get_irq(pdev, 0);
1076 if (irq < 0) 1076 if (irq < 0)
1077 return irq; 1077 return irq;
1078 1078
1079 imxdma->dma_ipg = devm_clk_get(&pdev->dev, "ipg"); 1079 imxdma->dma_ipg = devm_clk_get(&pdev->dev, "ipg");
1080 if (IS_ERR(imxdma->dma_ipg)) 1080 if (IS_ERR(imxdma->dma_ipg))
1081 return PTR_ERR(imxdma->dma_ipg); 1081 return PTR_ERR(imxdma->dma_ipg);
1082 1082
1083 imxdma->dma_ahb = devm_clk_get(&pdev->dev, "ahb"); 1083 imxdma->dma_ahb = devm_clk_get(&pdev->dev, "ahb");
1084 if (IS_ERR(imxdma->dma_ahb)) 1084 if (IS_ERR(imxdma->dma_ahb))
1085 return PTR_ERR(imxdma->dma_ahb); 1085 return PTR_ERR(imxdma->dma_ahb);
1086 1086
1087 clk_prepare_enable(imxdma->dma_ipg); 1087 clk_prepare_enable(imxdma->dma_ipg);
1088 clk_prepare_enable(imxdma->dma_ahb); 1088 clk_prepare_enable(imxdma->dma_ahb);
1089 1089
1090 /* reset DMA module */ 1090 /* reset DMA module */
1091 imx_dmav1_writel(imxdma, DCR_DRST, DMA_DCR); 1091 imx_dmav1_writel(imxdma, DCR_DRST, DMA_DCR);
1092 1092
1093 if (is_imx1_dma(imxdma)) { 1093 if (is_imx1_dma(imxdma)) {
1094 ret = devm_request_irq(&pdev->dev, irq, 1094 ret = devm_request_irq(&pdev->dev, irq,
1095 dma_irq_handler, 0, "DMA", imxdma); 1095 dma_irq_handler, 0, "DMA", imxdma);
1096 if (ret) { 1096 if (ret) {
1097 dev_warn(imxdma->dev, "Can't register IRQ for DMA\n"); 1097 dev_warn(imxdma->dev, "Can't register IRQ for DMA\n");
1098 goto err; 1098 goto err;
1099 } 1099 }
1100 1100
1101 irq_err = platform_get_irq(pdev, 1); 1101 irq_err = platform_get_irq(pdev, 1);
1102 if (irq_err < 0) { 1102 if (irq_err < 0) {
1103 ret = irq_err; 1103 ret = irq_err;
1104 goto err; 1104 goto err;
1105 } 1105 }
1106 1106
1107 ret = devm_request_irq(&pdev->dev, irq_err, 1107 ret = devm_request_irq(&pdev->dev, irq_err,
1108 imxdma_err_handler, 0, "DMA", imxdma); 1108 imxdma_err_handler, 0, "DMA", imxdma);
1109 if (ret) { 1109 if (ret) {
1110 dev_warn(imxdma->dev, "Can't register ERRIRQ for DMA\n"); 1110 dev_warn(imxdma->dev, "Can't register ERRIRQ for DMA\n");
1111 goto err; 1111 goto err;
1112 } 1112 }
1113 } 1113 }
1114 1114
1115 /* enable DMA module */ 1115 /* enable DMA module */
1116 imx_dmav1_writel(imxdma, DCR_DEN, DMA_DCR); 1116 imx_dmav1_writel(imxdma, DCR_DEN, DMA_DCR);
1117 1117
1118 /* clear all interrupts */ 1118 /* clear all interrupts */
1119 imx_dmav1_writel(imxdma, (1 << IMX_DMA_CHANNELS) - 1, DMA_DISR); 1119 imx_dmav1_writel(imxdma, (1 << IMX_DMA_CHANNELS) - 1, DMA_DISR);
1120 1120
1121 /* disable interrupts */ 1121 /* disable interrupts */
1122 imx_dmav1_writel(imxdma, (1 << IMX_DMA_CHANNELS) - 1, DMA_DIMR); 1122 imx_dmav1_writel(imxdma, (1 << IMX_DMA_CHANNELS) - 1, DMA_DIMR);
1123 1123
1124 INIT_LIST_HEAD(&imxdma->dma_device.channels); 1124 INIT_LIST_HEAD(&imxdma->dma_device.channels);
1125 1125
1126 dma_cap_set(DMA_SLAVE, imxdma->dma_device.cap_mask); 1126 dma_cap_set(DMA_SLAVE, imxdma->dma_device.cap_mask);
1127 dma_cap_set(DMA_CYCLIC, imxdma->dma_device.cap_mask); 1127 dma_cap_set(DMA_CYCLIC, imxdma->dma_device.cap_mask);
1128 dma_cap_set(DMA_MEMCPY, imxdma->dma_device.cap_mask); 1128 dma_cap_set(DMA_MEMCPY, imxdma->dma_device.cap_mask);
1129 dma_cap_set(DMA_INTERLEAVE, imxdma->dma_device.cap_mask); 1129 dma_cap_set(DMA_INTERLEAVE, imxdma->dma_device.cap_mask);
1130 1130
1131 /* Initialize 2D global parameters */ 1131 /* Initialize 2D global parameters */
1132 for (i = 0; i < IMX_DMA_2D_SLOTS; i++) 1132 for (i = 0; i < IMX_DMA_2D_SLOTS; i++)
1133 imxdma->slots_2d[i].count = 0; 1133 imxdma->slots_2d[i].count = 0;
1134 1134
1135 spin_lock_init(&imxdma->lock); 1135 spin_lock_init(&imxdma->lock);
1136 1136
1137 /* Initialize channel parameters */ 1137 /* Initialize channel parameters */
1138 for (i = 0; i < IMX_DMA_CHANNELS; i++) { 1138 for (i = 0; i < IMX_DMA_CHANNELS; i++) {
1139 struct imxdma_channel *imxdmac = &imxdma->channel[i]; 1139 struct imxdma_channel *imxdmac = &imxdma->channel[i];
1140 1140
1141 if (!is_imx1_dma(imxdma)) { 1141 if (!is_imx1_dma(imxdma)) {
1142 ret = devm_request_irq(&pdev->dev, irq + i, 1142 ret = devm_request_irq(&pdev->dev, irq + i,
1143 dma_irq_handler, 0, "DMA", imxdma); 1143 dma_irq_handler, 0, "DMA", imxdma);
1144 if (ret) { 1144 if (ret) {
1145 dev_warn(imxdma->dev, "Can't register IRQ %d " 1145 dev_warn(imxdma->dev, "Can't register IRQ %d "
1146 "for DMA channel %d\n", 1146 "for DMA channel %d\n",
1147 irq + i, i); 1147 irq + i, i);
1148 goto err; 1148 goto err;
1149 } 1149 }
1150 init_timer(&imxdmac->watchdog); 1150 init_timer(&imxdmac->watchdog);
1151 imxdmac->watchdog.function = &imxdma_watchdog; 1151 imxdmac->watchdog.function = &imxdma_watchdog;
1152 imxdmac->watchdog.data = (unsigned long)imxdmac; 1152 imxdmac->watchdog.data = (unsigned long)imxdmac;
1153 } 1153 }
1154 1154
1155 imxdmac->imxdma = imxdma; 1155 imxdmac->imxdma = imxdma;
1156 1156
1157 INIT_LIST_HEAD(&imxdmac->ld_queue); 1157 INIT_LIST_HEAD(&imxdmac->ld_queue);
1158 INIT_LIST_HEAD(&imxdmac->ld_free); 1158 INIT_LIST_HEAD(&imxdmac->ld_free);
1159 INIT_LIST_HEAD(&imxdmac->ld_active); 1159 INIT_LIST_HEAD(&imxdmac->ld_active);
1160 1160
1161 tasklet_init(&imxdmac->dma_tasklet, imxdma_tasklet, 1161 tasklet_init(&imxdmac->dma_tasklet, imxdma_tasklet,
1162 (unsigned long)imxdmac); 1162 (unsigned long)imxdmac);
1163 imxdmac->chan.device = &imxdma->dma_device; 1163 imxdmac->chan.device = &imxdma->dma_device;
1164 dma_cookie_init(&imxdmac->chan); 1164 dma_cookie_init(&imxdmac->chan);
1165 imxdmac->channel = i; 1165 imxdmac->channel = i;
1166 1166
1167 /* Add the channel to the DMAC list */ 1167 /* Add the channel to the DMAC list */
1168 list_add_tail(&imxdmac->chan.device_node, 1168 list_add_tail(&imxdmac->chan.device_node,
1169 &imxdma->dma_device.channels); 1169 &imxdma->dma_device.channels);
1170 } 1170 }
1171 1171
1172 imxdma->dma_device.dev = &pdev->dev; 1172 imxdma->dma_device.dev = &pdev->dev;
1173 1173
1174 imxdma->dma_device.device_alloc_chan_resources = imxdma_alloc_chan_resources; 1174 imxdma->dma_device.device_alloc_chan_resources = imxdma_alloc_chan_resources;
1175 imxdma->dma_device.device_free_chan_resources = imxdma_free_chan_resources; 1175 imxdma->dma_device.device_free_chan_resources = imxdma_free_chan_resources;
1176 imxdma->dma_device.device_tx_status = imxdma_tx_status; 1176 imxdma->dma_device.device_tx_status = imxdma_tx_status;
1177 imxdma->dma_device.device_prep_slave_sg = imxdma_prep_slave_sg; 1177 imxdma->dma_device.device_prep_slave_sg = imxdma_prep_slave_sg;
1178 imxdma->dma_device.device_prep_dma_cyclic = imxdma_prep_dma_cyclic; 1178 imxdma->dma_device.device_prep_dma_cyclic = imxdma_prep_dma_cyclic;
1179 imxdma->dma_device.device_prep_dma_memcpy = imxdma_prep_dma_memcpy; 1179 imxdma->dma_device.device_prep_dma_memcpy = imxdma_prep_dma_memcpy;
1180 imxdma->dma_device.device_prep_interleaved_dma = imxdma_prep_dma_interleaved; 1180 imxdma->dma_device.device_prep_interleaved_dma = imxdma_prep_dma_interleaved;
1181 imxdma->dma_device.device_control = imxdma_control; 1181 imxdma->dma_device.device_control = imxdma_control;
1182 imxdma->dma_device.device_issue_pending = imxdma_issue_pending; 1182 imxdma->dma_device.device_issue_pending = imxdma_issue_pending;
1183 1183
1184 platform_set_drvdata(pdev, imxdma); 1184 platform_set_drvdata(pdev, imxdma);
1185 1185
1186 imxdma->dma_device.copy_align = 2; /* 2^2 = 4 bytes alignment */ 1186 imxdma->dma_device.copy_align = 2; /* 2^2 = 4 bytes alignment */
1187 imxdma->dma_device.dev->dma_parms = &imxdma->dma_parms; 1187 imxdma->dma_device.dev->dma_parms = &imxdma->dma_parms;
1188 dma_set_max_seg_size(imxdma->dma_device.dev, 0xffffff); 1188 dma_set_max_seg_size(imxdma->dma_device.dev, 0xffffff);
1189 1189
1190 ret = dma_async_device_register(&imxdma->dma_device); 1190 ret = dma_async_device_register(&imxdma->dma_device);
1191 if (ret) { 1191 if (ret) {
1192 dev_err(&pdev->dev, "unable to register\n"); 1192 dev_err(&pdev->dev, "unable to register\n");
1193 goto err; 1193 goto err;
1194 } 1194 }
1195 1195
1196 if (pdev->dev.of_node) { 1196 if (pdev->dev.of_node) {
1197 ret = of_dma_controller_register(pdev->dev.of_node, 1197 ret = of_dma_controller_register(pdev->dev.of_node,
1198 imxdma_xlate, imxdma); 1198 imxdma_xlate, imxdma);
1199 if (ret) { 1199 if (ret) {
1200 dev_err(&pdev->dev, "unable to register of_dma_controller\n"); 1200 dev_err(&pdev->dev, "unable to register of_dma_controller\n");
1201 goto err_of_dma_controller; 1201 goto err_of_dma_controller;
1202 } 1202 }
1203 } 1203 }
1204 1204
1205 return 0; 1205 return 0;
1206 1206
1207 err_of_dma_controller: 1207 err_of_dma_controller:
1208 dma_async_device_unregister(&imxdma->dma_device); 1208 dma_async_device_unregister(&imxdma->dma_device);
1209 err: 1209 err:
1210 clk_disable_unprepare(imxdma->dma_ipg); 1210 clk_disable_unprepare(imxdma->dma_ipg);
1211 clk_disable_unprepare(imxdma->dma_ahb); 1211 clk_disable_unprepare(imxdma->dma_ahb);
1212 return ret; 1212 return ret;
1213 } 1213 }
1214 1214
1215 static int imxdma_remove(struct platform_device *pdev) 1215 static int imxdma_remove(struct platform_device *pdev)
1216 { 1216 {
1217 struct imxdma_engine *imxdma = platform_get_drvdata(pdev); 1217 struct imxdma_engine *imxdma = platform_get_drvdata(pdev);
1218 1218
1219 dma_async_device_unregister(&imxdma->dma_device); 1219 dma_async_device_unregister(&imxdma->dma_device);
1220 1220
1221 if (pdev->dev.of_node) 1221 if (pdev->dev.of_node)
1222 of_dma_controller_free(pdev->dev.of_node); 1222 of_dma_controller_free(pdev->dev.of_node);
1223 1223
1224 clk_disable_unprepare(imxdma->dma_ipg); 1224 clk_disable_unprepare(imxdma->dma_ipg);
1225 clk_disable_unprepare(imxdma->dma_ahb); 1225 clk_disable_unprepare(imxdma->dma_ahb);
1226 1226
1227 return 0; 1227 return 0;
1228 } 1228 }
1229 1229
1230 static struct platform_driver imxdma_driver = { 1230 static struct platform_driver imxdma_driver = {
1231 .driver = { 1231 .driver = {
1232 .name = "imx-dma", 1232 .name = "imx-dma",
1233 .of_match_table = imx_dma_of_dev_id, 1233 .of_match_table = imx_dma_of_dev_id,
1234 }, 1234 },
1235 .id_table = imx_dma_devtype, 1235 .id_table = imx_dma_devtype,
1236 .remove = imxdma_remove, 1236 .remove = imxdma_remove,
1237 }; 1237 };
1238 1238
1239 static int __init imxdma_module_init(void) 1239 static int __init imxdma_module_init(void)
1240 { 1240 {
1241 return platform_driver_probe(&imxdma_driver, imxdma_probe); 1241 return platform_driver_probe(&imxdma_driver, imxdma_probe);
1242 } 1242 }
1243 subsys_initcall(imxdma_module_init); 1243 subsys_initcall(imxdma_module_init);
1244 1244
1245 MODULE_AUTHOR("Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>"); 1245 MODULE_AUTHOR("Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>");
1246 MODULE_DESCRIPTION("i.MX dma driver"); 1246 MODULE_DESCRIPTION("i.MX dma driver");
1247 MODULE_LICENSE("GPL"); 1247 MODULE_LICENSE("GPL");
1248 1248
drivers/dma/imx-sdma.c
1 /* 1 /*
2 * drivers/dma/imx-sdma.c 2 * drivers/dma/imx-sdma.c
3 * 3 *
4 * This file contains a driver for the Freescale Smart DMA engine 4 * This file contains a driver for the Freescale Smart DMA engine
5 * 5 *
6 * Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de> 6 * Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
7 * 7 *
8 * Based on code from Freescale: 8 * Based on code from Freescale:
9 * 9 *
10 * Copyright 2004-2009 Freescale Semiconductor, Inc. All Rights Reserved. 10 * Copyright 2004-2009 Freescale Semiconductor, Inc. All Rights Reserved.
11 * 11 *
12 * The code contained herein is licensed under the GNU General Public 12 * The code contained herein is licensed under the GNU General Public
13 * License. You may obtain a copy of the GNU General Public License 13 * License. You may obtain a copy of the GNU General Public License
14 * Version 2 or later at the following locations: 14 * Version 2 or later at the following locations:
15 * 15 *
16 * http://www.opensource.org/licenses/gpl-license.html 16 * http://www.opensource.org/licenses/gpl-license.html
17 * http://www.gnu.org/copyleft/gpl.html 17 * http://www.gnu.org/copyleft/gpl.html
18 */ 18 */
19 19
20 #include <linux/init.h> 20 #include <linux/init.h>
21 #include <linux/module.h> 21 #include <linux/module.h>
22 #include <linux/types.h> 22 #include <linux/types.h>
23 #include <linux/bitops.h> 23 #include <linux/bitops.h>
24 #include <linux/mm.h> 24 #include <linux/mm.h>
25 #include <linux/interrupt.h> 25 #include <linux/interrupt.h>
26 #include <linux/clk.h> 26 #include <linux/clk.h>
27 #include <linux/delay.h> 27 #include <linux/delay.h>
28 #include <linux/sched.h> 28 #include <linux/sched.h>
29 #include <linux/semaphore.h> 29 #include <linux/semaphore.h>
30 #include <linux/spinlock.h> 30 #include <linux/spinlock.h>
31 #include <linux/device.h> 31 #include <linux/device.h>
32 #include <linux/dma-mapping.h> 32 #include <linux/dma-mapping.h>
33 #include <linux/firmware.h> 33 #include <linux/firmware.h>
34 #include <linux/slab.h> 34 #include <linux/slab.h>
35 #include <linux/platform_device.h> 35 #include <linux/platform_device.h>
36 #include <linux/dmaengine.h> 36 #include <linux/dmaengine.h>
37 #include <linux/of.h> 37 #include <linux/of.h>
38 #include <linux/of_device.h> 38 #include <linux/of_device.h>
39 #include <linux/of_dma.h> 39 #include <linux/of_dma.h>
40 40
41 #include <asm/irq.h> 41 #include <asm/irq.h>
42 #include <linux/platform_data/dma-imx-sdma.h> 42 #include <linux/platform_data/dma-imx-sdma.h>
43 #include <linux/platform_data/dma-imx.h> 43 #include <linux/platform_data/dma-imx.h>
44 44
45 #include "dmaengine.h" 45 #include "dmaengine.h"
46 46
47 /* SDMA registers */ 47 /* SDMA registers */
48 #define SDMA_H_C0PTR 0x000 48 #define SDMA_H_C0PTR 0x000
49 #define SDMA_H_INTR 0x004 49 #define SDMA_H_INTR 0x004
50 #define SDMA_H_STATSTOP 0x008 50 #define SDMA_H_STATSTOP 0x008
51 #define SDMA_H_START 0x00c 51 #define SDMA_H_START 0x00c
52 #define SDMA_H_EVTOVR 0x010 52 #define SDMA_H_EVTOVR 0x010
53 #define SDMA_H_DSPOVR 0x014 53 #define SDMA_H_DSPOVR 0x014
54 #define SDMA_H_HOSTOVR 0x018 54 #define SDMA_H_HOSTOVR 0x018
55 #define SDMA_H_EVTPEND 0x01c 55 #define SDMA_H_EVTPEND 0x01c
56 #define SDMA_H_DSPENBL 0x020 56 #define SDMA_H_DSPENBL 0x020
57 #define SDMA_H_RESET 0x024 57 #define SDMA_H_RESET 0x024
58 #define SDMA_H_EVTERR 0x028 58 #define SDMA_H_EVTERR 0x028
59 #define SDMA_H_INTRMSK 0x02c 59 #define SDMA_H_INTRMSK 0x02c
60 #define SDMA_H_PSW 0x030 60 #define SDMA_H_PSW 0x030
61 #define SDMA_H_EVTERRDBG 0x034 61 #define SDMA_H_EVTERRDBG 0x034
62 #define SDMA_H_CONFIG 0x038 62 #define SDMA_H_CONFIG 0x038
63 #define SDMA_ONCE_ENB 0x040 63 #define SDMA_ONCE_ENB 0x040
64 #define SDMA_ONCE_DATA 0x044 64 #define SDMA_ONCE_DATA 0x044
65 #define SDMA_ONCE_INSTR 0x048 65 #define SDMA_ONCE_INSTR 0x048
66 #define SDMA_ONCE_STAT 0x04c 66 #define SDMA_ONCE_STAT 0x04c
67 #define SDMA_ONCE_CMD 0x050 67 #define SDMA_ONCE_CMD 0x050
68 #define SDMA_EVT_MIRROR 0x054 68 #define SDMA_EVT_MIRROR 0x054
69 #define SDMA_ILLINSTADDR 0x058 69 #define SDMA_ILLINSTADDR 0x058
70 #define SDMA_CHN0ADDR 0x05c 70 #define SDMA_CHN0ADDR 0x05c
71 #define SDMA_ONCE_RTB 0x060 71 #define SDMA_ONCE_RTB 0x060
72 #define SDMA_XTRIG_CONF1 0x070 72 #define SDMA_XTRIG_CONF1 0x070
73 #define SDMA_XTRIG_CONF2 0x074 73 #define SDMA_XTRIG_CONF2 0x074
74 #define SDMA_CHNENBL0_IMX35 0x200 74 #define SDMA_CHNENBL0_IMX35 0x200
75 #define SDMA_CHNENBL0_IMX31 0x080 75 #define SDMA_CHNENBL0_IMX31 0x080
76 #define SDMA_CHNPRI_0 0x100 76 #define SDMA_CHNPRI_0 0x100
77 77
78 /* 78 /*
79 * Buffer descriptor status values. 79 * Buffer descriptor status values.
80 */ 80 */
81 #define BD_DONE 0x01 81 #define BD_DONE 0x01
82 #define BD_WRAP 0x02 82 #define BD_WRAP 0x02
83 #define BD_CONT 0x04 83 #define BD_CONT 0x04
84 #define BD_INTR 0x08 84 #define BD_INTR 0x08
85 #define BD_RROR 0x10 85 #define BD_RROR 0x10
86 #define BD_LAST 0x20 86 #define BD_LAST 0x20
87 #define BD_EXTD 0x80 87 #define BD_EXTD 0x80
88 88
89 /* 89 /*
90 * Data Node descriptor status values. 90 * Data Node descriptor status values.
91 */ 91 */
92 #define DND_END_OF_FRAME 0x80 92 #define DND_END_OF_FRAME 0x80
93 #define DND_END_OF_XFER 0x40 93 #define DND_END_OF_XFER 0x40
94 #define DND_DONE 0x20 94 #define DND_DONE 0x20
95 #define DND_UNUSED 0x01 95 #define DND_UNUSED 0x01
96 96
97 /* 97 /*
98 * IPCV2 descriptor status values. 98 * IPCV2 descriptor status values.
99 */ 99 */
100 #define BD_IPCV2_END_OF_FRAME 0x40 100 #define BD_IPCV2_END_OF_FRAME 0x40
101 101
102 #define IPCV2_MAX_NODES 50 102 #define IPCV2_MAX_NODES 50
103 /* 103 /*
104 * Error bit set in the CCB status field by the SDMA, 104 * Error bit set in the CCB status field by the SDMA,
105 * in setbd routine, in case of a transfer error 105 * in setbd routine, in case of a transfer error
106 */ 106 */
107 #define DATA_ERROR 0x10000000 107 #define DATA_ERROR 0x10000000
108 108
109 /* 109 /*
110 * Buffer descriptor commands. 110 * Buffer descriptor commands.
111 */ 111 */
112 #define C0_ADDR 0x01 112 #define C0_ADDR 0x01
113 #define C0_LOAD 0x02 113 #define C0_LOAD 0x02
114 #define C0_DUMP 0x03 114 #define C0_DUMP 0x03
115 #define C0_SETCTX 0x07 115 #define C0_SETCTX 0x07
116 #define C0_GETCTX 0x03 116 #define C0_GETCTX 0x03
117 #define C0_SETDM 0x01 117 #define C0_SETDM 0x01
118 #define C0_SETPM 0x04 118 #define C0_SETPM 0x04
119 #define C0_GETDM 0x02 119 #define C0_GETDM 0x02
120 #define C0_GETPM 0x08 120 #define C0_GETPM 0x08
121 /* 121 /*
122 * Change endianness indicator in the BD command field 122 * Change endianness indicator in the BD command field
123 */ 123 */
124 #define CHANGE_ENDIANNESS 0x80 124 #define CHANGE_ENDIANNESS 0x80
125 125
126 /* 126 /*
127 * Mode/Count of data node descriptors - IPCv2 127 * Mode/Count of data node descriptors - IPCv2
128 */ 128 */
129 struct sdma_mode_count { 129 struct sdma_mode_count {
130 u32 count : 16; /* size of the buffer pointed by this BD */ 130 u32 count : 16; /* size of the buffer pointed by this BD */
131 u32 status : 8; /* E,R,I,C,W,D status bits stored here */ 131 u32 status : 8; /* E,R,I,C,W,D status bits stored here */
132 u32 command : 8; /* command mostly used for channel 0 */ 132 u32 command : 8; /* command mostly used for channel 0 */
133 }; 133 };
134 134
135 /* 135 /*
136 * Buffer descriptor 136 * Buffer descriptor
137 */ 137 */
138 struct sdma_buffer_descriptor { 138 struct sdma_buffer_descriptor {
139 struct sdma_mode_count mode; 139 struct sdma_mode_count mode;
140 u32 buffer_addr; /* address of the buffer described */ 140 u32 buffer_addr; /* address of the buffer described */
141 u32 ext_buffer_addr; /* extended buffer address */ 141 u32 ext_buffer_addr; /* extended buffer address */
142 } __attribute__ ((packed)); 142 } __attribute__ ((packed));
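An illustrative, non-driver sketch of how one entry of a buffer-descriptor ring could be filled using the layout and BD_* status bits defined above; the command byte is left 0 here, whereas the driver proper derives it from the peripheral access width.

static void example_fill_bd(struct sdma_buffer_descriptor *bd,
			    dma_addr_t buf, unsigned int len, bool last)
{
	bd->buffer_addr = buf;
	bd->mode.count = len;				/* bytes covered by this BD */
	bd->mode.status = BD_DONE | BD_INTR |		/* hand to SDMA, irq on completion */
			  (last ? BD_WRAP : BD_CONT);	/* BD_WRAP closes the ring */
	bd->mode.command = 0;				/* illustrative only */
}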
143 143
144 /** 144 /**
145 * struct sdma_channel_control - Channel control Block 145 * struct sdma_channel_control - Channel control Block
146 * 146 *
147 * @current_bd_ptr current buffer descriptor processed 147 * @current_bd_ptr current buffer descriptor processed
148 * @base_bd_ptr first element of buffer descriptor array 148 * @base_bd_ptr first element of buffer descriptor array
149 * @unused padding. The SDMA engine expects an array of 128 byte 149 * @unused padding. The SDMA engine expects an array of 128 byte
150 * control blocks 150 * control blocks
151 */ 151 */
152 struct sdma_channel_control { 152 struct sdma_channel_control {
153 u32 current_bd_ptr; 153 u32 current_bd_ptr;
154 u32 base_bd_ptr; 154 u32 base_bd_ptr;
155 u32 unused[2]; 155 u32 unused[2];
156 } __attribute__ ((packed)); 156 } __attribute__ ((packed));
157 157
158 /** 158 /**
159 * struct sdma_state_registers - SDMA context for a channel 159 * struct sdma_state_registers - SDMA context for a channel
160 * 160 *
161 * @pc: program counter 161 * @pc: program counter
162 * @t: test bit: status of arithmetic & test instruction 162 * @t: test bit: status of arithmetic & test instruction
163 * @rpc: return program counter 163 * @rpc: return program counter
164 * @sf: source fault while loading data 164 * @sf: source fault while loading data
165 * @spc: loop start program counter 165 * @spc: loop start program counter
166 * @df: destination fault while storing data 166 * @df: destination fault while storing data
167 * @epc: loop end program counter 167 * @epc: loop end program counter
168 * @lm: loop mode 168 * @lm: loop mode
169 */ 169 */
170 struct sdma_state_registers { 170 struct sdma_state_registers {
171 u32 pc :14; 171 u32 pc :14;
172 u32 unused1: 1; 172 u32 unused1: 1;
173 u32 t : 1; 173 u32 t : 1;
174 u32 rpc :14; 174 u32 rpc :14;
175 u32 unused0: 1; 175 u32 unused0: 1;
176 u32 sf : 1; 176 u32 sf : 1;
177 u32 spc :14; 177 u32 spc :14;
178 u32 unused2: 1; 178 u32 unused2: 1;
179 u32 df : 1; 179 u32 df : 1;
180 u32 epc :14; 180 u32 epc :14;
181 u32 lm : 2; 181 u32 lm : 2;
182 } __attribute__ ((packed)); 182 } __attribute__ ((packed));
183 183
184 /** 184 /**
185 * struct sdma_context_data - sdma context specific to a channel 185 * struct sdma_context_data - sdma context specific to a channel
186 * 186 *
187 * @channel_state: channel state bits 187 * @channel_state: channel state bits
188 * @gReg: general registers 188 * @gReg: general registers
189 * @mda: burst dma destination address register 189 * @mda: burst dma destination address register
190 * @msa: burst dma source address register 190 * @msa: burst dma source address register
191 * @ms: burst dma status register 191 * @ms: burst dma status register
192 * @md: burst dma data register 192 * @md: burst dma data register
193 * @pda: peripheral dma destination address register 193 * @pda: peripheral dma destination address register
194 * @psa: peripheral dma source address register 194 * @psa: peripheral dma source address register
195 * @ps: peripheral dma status register 195 * @ps: peripheral dma status register
196 * @pd: peripheral dma data register 196 * @pd: peripheral dma data register
197 * @ca: CRC polynomial register 197 * @ca: CRC polynomial register
198 * @cs: CRC accumulator register 198 * @cs: CRC accumulator register
199 * @dda: dedicated core destination address register 199 * @dda: dedicated core destination address register
200 * @dsa: dedicated core source address register 200 * @dsa: dedicated core source address register
201 * @ds: dedicated core status register 201 * @ds: dedicated core status register
202 * @dd: dedicated core data register 202 * @dd: dedicated core data register
203 */ 203 */
204 struct sdma_context_data { 204 struct sdma_context_data {
205 struct sdma_state_registers channel_state; 205 struct sdma_state_registers channel_state;
206 u32 gReg[8]; 206 u32 gReg[8];
207 u32 mda; 207 u32 mda;
208 u32 msa; 208 u32 msa;
209 u32 ms; 209 u32 ms;
210 u32 md; 210 u32 md;
211 u32 pda; 211 u32 pda;
212 u32 psa; 212 u32 psa;
213 u32 ps; 213 u32 ps;
214 u32 pd; 214 u32 pd;
215 u32 ca; 215 u32 ca;
216 u32 cs; 216 u32 cs;
217 u32 dda; 217 u32 dda;
218 u32 dsa; 218 u32 dsa;
219 u32 ds; 219 u32 ds;
220 u32 dd; 220 u32 dd;
221 u32 scratch0; 221 u32 scratch0;
222 u32 scratch1; 222 u32 scratch1;
223 u32 scratch2; 223 u32 scratch2;
224 u32 scratch3; 224 u32 scratch3;
225 u32 scratch4; 225 u32 scratch4;
226 u32 scratch5; 226 u32 scratch5;
227 u32 scratch6; 227 u32 scratch6;
228 u32 scratch7; 228 u32 scratch7;
229 } __attribute__ ((packed)); 229 } __attribute__ ((packed));
230 230
231 #define NUM_BD (int)(PAGE_SIZE / sizeof(struct sdma_buffer_descriptor)) 231 #define NUM_BD (int)(PAGE_SIZE / sizeof(struct sdma_buffer_descriptor))
232 232
233 struct sdma_engine; 233 struct sdma_engine;
234 234
235 /** 235 /**
236 * struct sdma_channel - housekeeping for a SDMA channel 236 * struct sdma_channel - housekeeping for a SDMA channel
237 * 237 *
238 * @sdma pointer to the SDMA engine for this channel 238 * @sdma pointer to the SDMA engine for this channel
239 * @channel the channel number, matches dmaengine chan_id + 1 239 * @channel the channel number, matches dmaengine chan_id + 1
240 * @direction transfer type. Needed for setting SDMA script 240 * @direction transfer type. Needed for setting SDMA script
241 * @peripheral_type Peripheral type. Needed for setting SDMA script 241 * @peripheral_type Peripheral type. Needed for setting SDMA script
242 * @event_id0 aka dma request line 242 * @event_id0 aka dma request line
243 * @event_id1 for channels that use 2 events 243 * @event_id1 for channels that use 2 events
244 * @word_size peripheral access size 244 * @word_size peripheral access size
245 * @buf_tail ID of the buffer that was processed 245 * @buf_tail ID of the buffer that was processed
246 * @num_bd max NUM_BD. number of descriptors currently being handled 246 * @num_bd max NUM_BD. number of descriptors currently being handled
247 */ 247 */
248 struct sdma_channel { 248 struct sdma_channel {
249 struct sdma_engine *sdma; 249 struct sdma_engine *sdma;
250 unsigned int channel; 250 unsigned int channel;
251 enum dma_transfer_direction direction; 251 enum dma_transfer_direction direction;
252 enum sdma_peripheral_type peripheral_type; 252 enum sdma_peripheral_type peripheral_type;
253 unsigned int event_id0; 253 unsigned int event_id0;
254 unsigned int event_id1; 254 unsigned int event_id1;
255 enum dma_slave_buswidth word_size; 255 enum dma_slave_buswidth word_size;
256 unsigned int buf_tail; 256 unsigned int buf_tail;
257 unsigned int num_bd; 257 unsigned int num_bd;
258 struct sdma_buffer_descriptor *bd; 258 struct sdma_buffer_descriptor *bd;
259 dma_addr_t bd_phys; 259 dma_addr_t bd_phys;
260 unsigned int pc_from_device, pc_to_device; 260 unsigned int pc_from_device, pc_to_device;
261 unsigned long flags; 261 unsigned long flags;
262 dma_addr_t per_address; 262 dma_addr_t per_address;
263 unsigned long event_mask[2]; 263 unsigned long event_mask[2];
264 unsigned long watermark_level; 264 unsigned long watermark_level;
265 u32 shp_addr, per_addr; 265 u32 shp_addr, per_addr;
266 struct dma_chan chan; 266 struct dma_chan chan;
267 spinlock_t lock; 267 spinlock_t lock;
268 struct dma_async_tx_descriptor desc; 268 struct dma_async_tx_descriptor desc;
269 enum dma_status status; 269 enum dma_status status;
270 unsigned int chn_count; 270 unsigned int chn_count;
271 unsigned int chn_real_count; 271 unsigned int chn_real_count;
272 struct tasklet_struct tasklet; 272 struct tasklet_struct tasklet;
273 }; 273 };
274 274
275 #define IMX_DMA_SG_LOOP BIT(0) 275 #define IMX_DMA_SG_LOOP BIT(0)
276 276
277 #define MAX_DMA_CHANNELS 32 277 #define MAX_DMA_CHANNELS 32
278 #define MXC_SDMA_DEFAULT_PRIORITY 1 278 #define MXC_SDMA_DEFAULT_PRIORITY 1
279 #define MXC_SDMA_MIN_PRIORITY 1 279 #define MXC_SDMA_MIN_PRIORITY 1
280 #define MXC_SDMA_MAX_PRIORITY 7 280 #define MXC_SDMA_MAX_PRIORITY 7
281 281
282 #define SDMA_FIRMWARE_MAGIC 0x414d4453 282 #define SDMA_FIRMWARE_MAGIC 0x414d4453
283 283
284 /** 284 /**
285 * struct sdma_firmware_header - Layout of the firmware image 285 * struct sdma_firmware_header - Layout of the firmware image
286 * 286 *
287 * @magic "SDMA" 287 * @magic "SDMA"
288 * @version_major increased whenever layout of struct sdma_script_start_addrs 288 * @version_major increased whenever layout of struct sdma_script_start_addrs
289 * changes. 289 * changes.
290 * @version_minor firmware minor version (for binary compatible changes) 290 * @version_minor firmware minor version (for binary compatible changes)
291 * @script_addrs_start offset of struct sdma_script_start_addrs in this image 291 * @script_addrs_start offset of struct sdma_script_start_addrs in this image
292 * @num_script_addrs Number of script addresses in this image 292 * @num_script_addrs Number of script addresses in this image
293 * @ram_code_start offset of SDMA ram image in this firmware image 293 * @ram_code_start offset of SDMA ram image in this firmware image
294 * @ram_code_size size of SDMA ram image 294 * @ram_code_size size of SDMA ram image
295 * @script_addrs Stores the start address of the SDMA scripts 295 * @script_addrs Stores the start address of the SDMA scripts
296 * (in SDMA memory space) 296 * (in SDMA memory space)
297 */ 297 */
298 struct sdma_firmware_header { 298 struct sdma_firmware_header {
299 u32 magic; 299 u32 magic;
300 u32 version_major; 300 u32 version_major;
301 u32 version_minor; 301 u32 version_minor;
302 u32 script_addrs_start; 302 u32 script_addrs_start;
303 u32 num_script_addrs; 303 u32 num_script_addrs;
304 u32 ram_code_start; 304 u32 ram_code_start;
305 u32 ram_code_size; 305 u32 ram_code_size;
306 }; 306 };
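A minimal sketch, assuming `fw' comes from request_firmware(), of sanity-checking an image against the header layout above before trusting its embedded offsets; the driver's own firmware handling appears later in this file.

static bool example_sdma_fw_ok(const struct firmware *fw)
{
	const struct sdma_firmware_header *header;

	if (fw->size < sizeof(*header))
		return false;

	header = (const struct sdma_firmware_header *)fw->data;
	if (header->magic != SDMA_FIRMWARE_MAGIC)	/* "SDMA" */
		return false;

	/* the RAM image must lie entirely inside the blob */
	return (u64)header->ram_code_start + header->ram_code_size <= fw->size;
}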
307 307
308 struct sdma_driver_data { 308 struct sdma_driver_data {
309 int chnenbl0; 309 int chnenbl0;
310 int num_events; 310 int num_events;
311 struct sdma_script_start_addrs *script_addrs; 311 struct sdma_script_start_addrs *script_addrs;
312 }; 312 };
313 313
314 struct sdma_engine { 314 struct sdma_engine {
315 struct device *dev; 315 struct device *dev;
316 struct device_dma_parameters dma_parms; 316 struct device_dma_parameters dma_parms;
317 struct sdma_channel channel[MAX_DMA_CHANNELS]; 317 struct sdma_channel channel[MAX_DMA_CHANNELS];
318 struct sdma_channel_control *channel_control; 318 struct sdma_channel_control *channel_control;
319 void __iomem *regs; 319 void __iomem *regs;
320 struct sdma_context_data *context; 320 struct sdma_context_data *context;
321 dma_addr_t context_phys; 321 dma_addr_t context_phys;
322 struct dma_device dma_device; 322 struct dma_device dma_device;
323 struct clk *clk_ipg; 323 struct clk *clk_ipg;
324 struct clk *clk_ahb; 324 struct clk *clk_ahb;
325 spinlock_t channel_0_lock; 325 spinlock_t channel_0_lock;
326 struct sdma_script_start_addrs *script_addrs; 326 struct sdma_script_start_addrs *script_addrs;
327 const struct sdma_driver_data *drvdata; 327 const struct sdma_driver_data *drvdata;
328 }; 328 };
329 329
330 static struct sdma_driver_data sdma_imx31 = { 330 static struct sdma_driver_data sdma_imx31 = {
331 .chnenbl0 = SDMA_CHNENBL0_IMX31, 331 .chnenbl0 = SDMA_CHNENBL0_IMX31,
332 .num_events = 32, 332 .num_events = 32,
333 }; 333 };
334 334
335 static struct sdma_script_start_addrs sdma_script_imx25 = { 335 static struct sdma_script_start_addrs sdma_script_imx25 = {
336 .ap_2_ap_addr = 729, 336 .ap_2_ap_addr = 729,
337 .uart_2_mcu_addr = 904, 337 .uart_2_mcu_addr = 904,
338 .per_2_app_addr = 1255, 338 .per_2_app_addr = 1255,
339 .mcu_2_app_addr = 834, 339 .mcu_2_app_addr = 834,
340 .uartsh_2_mcu_addr = 1120, 340 .uartsh_2_mcu_addr = 1120,
341 .per_2_shp_addr = 1329, 341 .per_2_shp_addr = 1329,
342 .mcu_2_shp_addr = 1048, 342 .mcu_2_shp_addr = 1048,
343 .ata_2_mcu_addr = 1560, 343 .ata_2_mcu_addr = 1560,
344 .mcu_2_ata_addr = 1479, 344 .mcu_2_ata_addr = 1479,
345 .app_2_per_addr = 1189, 345 .app_2_per_addr = 1189,
346 .app_2_mcu_addr = 770, 346 .app_2_mcu_addr = 770,
347 .shp_2_per_addr = 1407, 347 .shp_2_per_addr = 1407,
348 .shp_2_mcu_addr = 979, 348 .shp_2_mcu_addr = 979,
349 }; 349 };
350 350
351 static struct sdma_driver_data sdma_imx25 = { 351 static struct sdma_driver_data sdma_imx25 = {
352 .chnenbl0 = SDMA_CHNENBL0_IMX35, 352 .chnenbl0 = SDMA_CHNENBL0_IMX35,
353 .num_events = 48, 353 .num_events = 48,
354 .script_addrs = &sdma_script_imx25, 354 .script_addrs = &sdma_script_imx25,
355 }; 355 };
356 356
357 static struct sdma_driver_data sdma_imx35 = { 357 static struct sdma_driver_data sdma_imx35 = {
358 .chnenbl0 = SDMA_CHNENBL0_IMX35, 358 .chnenbl0 = SDMA_CHNENBL0_IMX35,
359 .num_events = 48, 359 .num_events = 48,
360 }; 360 };
361 361
362 static struct sdma_script_start_addrs sdma_script_imx51 = { 362 static struct sdma_script_start_addrs sdma_script_imx51 = {
363 .ap_2_ap_addr = 642, 363 .ap_2_ap_addr = 642,
364 .uart_2_mcu_addr = 817, 364 .uart_2_mcu_addr = 817,
365 .mcu_2_app_addr = 747, 365 .mcu_2_app_addr = 747,
366 .mcu_2_shp_addr = 961, 366 .mcu_2_shp_addr = 961,
367 .ata_2_mcu_addr = 1473, 367 .ata_2_mcu_addr = 1473,
368 .mcu_2_ata_addr = 1392, 368 .mcu_2_ata_addr = 1392,
369 .app_2_per_addr = 1033, 369 .app_2_per_addr = 1033,
370 .app_2_mcu_addr = 683, 370 .app_2_mcu_addr = 683,
371 .shp_2_per_addr = 1251, 371 .shp_2_per_addr = 1251,
372 .shp_2_mcu_addr = 892, 372 .shp_2_mcu_addr = 892,
373 }; 373 };
374 374
375 static struct sdma_driver_data sdma_imx51 = { 375 static struct sdma_driver_data sdma_imx51 = {
376 .chnenbl0 = SDMA_CHNENBL0_IMX35, 376 .chnenbl0 = SDMA_CHNENBL0_IMX35,
377 .num_events = 48, 377 .num_events = 48,
378 .script_addrs = &sdma_script_imx51, 378 .script_addrs = &sdma_script_imx51,
379 }; 379 };
380 380
381 static struct sdma_script_start_addrs sdma_script_imx53 = { 381 static struct sdma_script_start_addrs sdma_script_imx53 = {
382 .ap_2_ap_addr = 642, 382 .ap_2_ap_addr = 642,
383 .app_2_mcu_addr = 683, 383 .app_2_mcu_addr = 683,
384 .mcu_2_app_addr = 747, 384 .mcu_2_app_addr = 747,
385 .uart_2_mcu_addr = 817, 385 .uart_2_mcu_addr = 817,
386 .shp_2_mcu_addr = 891, 386 .shp_2_mcu_addr = 891,
387 .mcu_2_shp_addr = 960, 387 .mcu_2_shp_addr = 960,
388 .uartsh_2_mcu_addr = 1032, 388 .uartsh_2_mcu_addr = 1032,
389 .spdif_2_mcu_addr = 1100, 389 .spdif_2_mcu_addr = 1100,
390 .mcu_2_spdif_addr = 1134, 390 .mcu_2_spdif_addr = 1134,
391 .firi_2_mcu_addr = 1193, 391 .firi_2_mcu_addr = 1193,
392 .mcu_2_firi_addr = 1290, 392 .mcu_2_firi_addr = 1290,
393 }; 393 };
394 394
395 static struct sdma_driver_data sdma_imx53 = { 395 static struct sdma_driver_data sdma_imx53 = {
396 .chnenbl0 = SDMA_CHNENBL0_IMX35, 396 .chnenbl0 = SDMA_CHNENBL0_IMX35,
397 .num_events = 48, 397 .num_events = 48,
398 .script_addrs = &sdma_script_imx53, 398 .script_addrs = &sdma_script_imx53,
399 }; 399 };
400 400
401 static struct sdma_script_start_addrs sdma_script_imx6q = { 401 static struct sdma_script_start_addrs sdma_script_imx6q = {
402 .ap_2_ap_addr = 642, 402 .ap_2_ap_addr = 642,
403 .uart_2_mcu_addr = 817, 403 .uart_2_mcu_addr = 817,
404 .mcu_2_app_addr = 747, 404 .mcu_2_app_addr = 747,
405 .per_2_per_addr = 6331, 405 .per_2_per_addr = 6331,
406 .uartsh_2_mcu_addr = 1032, 406 .uartsh_2_mcu_addr = 1032,
407 .mcu_2_shp_addr = 960, 407 .mcu_2_shp_addr = 960,
408 .app_2_mcu_addr = 683, 408 .app_2_mcu_addr = 683,
409 .shp_2_mcu_addr = 891, 409 .shp_2_mcu_addr = 891,
410 .spdif_2_mcu_addr = 1100, 410 .spdif_2_mcu_addr = 1100,
411 .mcu_2_spdif_addr = 1134, 411 .mcu_2_spdif_addr = 1134,
412 }; 412 };
413 413
414 static struct sdma_driver_data sdma_imx6q = { 414 static struct sdma_driver_data sdma_imx6q = {
415 .chnenbl0 = SDMA_CHNENBL0_IMX35, 415 .chnenbl0 = SDMA_CHNENBL0_IMX35,
416 .num_events = 48, 416 .num_events = 48,
417 .script_addrs = &sdma_script_imx6q, 417 .script_addrs = &sdma_script_imx6q,
418 }; 418 };
419 419
420 static struct platform_device_id sdma_devtypes[] = { 420 static struct platform_device_id sdma_devtypes[] = {
421 { 421 {
422 .name = "imx25-sdma", 422 .name = "imx25-sdma",
423 .driver_data = (unsigned long)&sdma_imx25, 423 .driver_data = (unsigned long)&sdma_imx25,
424 }, { 424 }, {
425 .name = "imx31-sdma", 425 .name = "imx31-sdma",
426 .driver_data = (unsigned long)&sdma_imx31, 426 .driver_data = (unsigned long)&sdma_imx31,
427 }, { 427 }, {
428 .name = "imx35-sdma", 428 .name = "imx35-sdma",
429 .driver_data = (unsigned long)&sdma_imx35, 429 .driver_data = (unsigned long)&sdma_imx35,
430 }, { 430 }, {
431 .name = "imx51-sdma", 431 .name = "imx51-sdma",
432 .driver_data = (unsigned long)&sdma_imx51, 432 .driver_data = (unsigned long)&sdma_imx51,
433 }, { 433 }, {
434 .name = "imx53-sdma", 434 .name = "imx53-sdma",
435 .driver_data = (unsigned long)&sdma_imx53, 435 .driver_data = (unsigned long)&sdma_imx53,
436 }, { 436 }, {
437 .name = "imx6q-sdma", 437 .name = "imx6q-sdma",
438 .driver_data = (unsigned long)&sdma_imx6q, 438 .driver_data = (unsigned long)&sdma_imx6q,
439 }, { 439 }, {
440 /* sentinel */ 440 /* sentinel */
441 } 441 }
442 }; 442 };
443 MODULE_DEVICE_TABLE(platform, sdma_devtypes); 443 MODULE_DEVICE_TABLE(platform, sdma_devtypes);
444 444
445 static const struct of_device_id sdma_dt_ids[] = { 445 static const struct of_device_id sdma_dt_ids[] = {
446 { .compatible = "fsl,imx6q-sdma", .data = &sdma_imx6q, }, 446 { .compatible = "fsl,imx6q-sdma", .data = &sdma_imx6q, },
447 { .compatible = "fsl,imx53-sdma", .data = &sdma_imx53, }, 447 { .compatible = "fsl,imx53-sdma", .data = &sdma_imx53, },
448 { .compatible = "fsl,imx51-sdma", .data = &sdma_imx51, }, 448 { .compatible = "fsl,imx51-sdma", .data = &sdma_imx51, },
449 { .compatible = "fsl,imx35-sdma", .data = &sdma_imx35, }, 449 { .compatible = "fsl,imx35-sdma", .data = &sdma_imx35, },
450 { .compatible = "fsl,imx31-sdma", .data = &sdma_imx31, }, 450 { .compatible = "fsl,imx31-sdma", .data = &sdma_imx31, },
451 { /* sentinel */ } 451 { /* sentinel */ }
452 }; 452 };
453 MODULE_DEVICE_TABLE(of, sdma_dt_ids); 453 MODULE_DEVICE_TABLE(of, sdma_dt_ids);
454 454
455 #define SDMA_H_CONFIG_DSPDMA BIT(12) /* indicates if the DSPDMA is used */ 455 #define SDMA_H_CONFIG_DSPDMA BIT(12) /* indicates if the DSPDMA is used */
456 #define SDMA_H_CONFIG_RTD_PINS BIT(11) /* indicates if Real-Time Debug pins are enabled */ 456 #define SDMA_H_CONFIG_RTD_PINS BIT(11) /* indicates if Real-Time Debug pins are enabled */
457 #define SDMA_H_CONFIG_ACR BIT(4) /* indicates if AHB freq /core freq = 2 or 1 */ 457 #define SDMA_H_CONFIG_ACR BIT(4) /* indicates if AHB freq /core freq = 2 or 1 */
458 #define SDMA_H_CONFIG_CSM (3) /* indicates which context switch mode is selected */ 458 #define SDMA_H_CONFIG_CSM (3) /* indicates which context switch mode is selected */
459 459
460 static inline u32 chnenbl_ofs(struct sdma_engine *sdma, unsigned int event) 460 static inline u32 chnenbl_ofs(struct sdma_engine *sdma, unsigned int event)
461 { 461 {
462 u32 chnenbl0 = sdma->drvdata->chnenbl0; 462 u32 chnenbl0 = sdma->drvdata->chnenbl0;
463 return chnenbl0 + event * 4; 463 return chnenbl0 + event * 4;
464 } 464 }
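/*
 * Each DMA request event has its own 32-bit channel-enable register, one
 * bit per channel, laid out contiguously starting at chnenbl0; hence the
 * event * 4 byte offset computed above.
 */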
465 465
466 static int sdma_config_ownership(struct sdma_channel *sdmac, 466 static int sdma_config_ownership(struct sdma_channel *sdmac,
467 bool event_override, bool mcu_override, bool dsp_override) 467 bool event_override, bool mcu_override, bool dsp_override)
468 { 468 {
469 struct sdma_engine *sdma = sdmac->sdma; 469 struct sdma_engine *sdma = sdmac->sdma;
470 int channel = sdmac->channel; 470 int channel = sdmac->channel;
471 unsigned long evt, mcu, dsp; 471 unsigned long evt, mcu, dsp;
472 472
473 if (event_override && mcu_override && dsp_override) 473 if (event_override && mcu_override && dsp_override)
474 return -EINVAL; 474 return -EINVAL;
475 475
476 evt = readl_relaxed(sdma->regs + SDMA_H_EVTOVR); 476 evt = readl_relaxed(sdma->regs + SDMA_H_EVTOVR);
477 mcu = readl_relaxed(sdma->regs + SDMA_H_HOSTOVR); 477 mcu = readl_relaxed(sdma->regs + SDMA_H_HOSTOVR);
478 dsp = readl_relaxed(sdma->regs + SDMA_H_DSPOVR); 478 dsp = readl_relaxed(sdma->regs + SDMA_H_DSPOVR);
479 479
480 if (dsp_override) 480 if (dsp_override)
481 __clear_bit(channel, &dsp); 481 __clear_bit(channel, &dsp);
482 else 482 else
483 __set_bit(channel, &dsp); 483 __set_bit(channel, &dsp);
484 484
485 if (event_override) 485 if (event_override)
486 __clear_bit(channel, &evt); 486 __clear_bit(channel, &evt);
487 else 487 else
488 __set_bit(channel, &evt); 488 __set_bit(channel, &evt);
489 489
490 if (mcu_override) 490 if (mcu_override)
491 __clear_bit(channel, &mcu); 491 __clear_bit(channel, &mcu);
492 else 492 else
493 __set_bit(channel, &mcu); 493 __set_bit(channel, &mcu);
494 494
495 writel_relaxed(evt, sdma->regs + SDMA_H_EVTOVR); 495 writel_relaxed(evt, sdma->regs + SDMA_H_EVTOVR);
496 writel_relaxed(mcu, sdma->regs + SDMA_H_HOSTOVR); 496 writel_relaxed(mcu, sdma->regs + SDMA_H_HOSTOVR);
497 writel_relaxed(dsp, sdma->regs + SDMA_H_DSPOVR); 497 writel_relaxed(dsp, sdma->regs + SDMA_H_DSPOVR);
498 498
499 return 0; 499 return 0;
500 } 500 }
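/*
 * Each override (event, host/MCU, DSP) is controlled by one bit per
 * channel in EVTOVR, HOSTOVR and DSPOVR respectively: a requested
 * override clears the channel's bit, otherwise the bit is set, and
 * requesting all three overrides at once is rejected as invalid.
 */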
501 501
502 static void sdma_enable_channel(struct sdma_engine *sdma, int channel) 502 static void sdma_enable_channel(struct sdma_engine *sdma, int channel)
503 { 503 {
504 writel(BIT(channel), sdma->regs + SDMA_H_START); 504 writel(BIT(channel), sdma->regs + SDMA_H_START);
505 } 505 }
506 506
507 /* 507 /*
508 * sdma_run_channel0 - run a channel and wait till it's done 508 * sdma_run_channel0 - run a channel and wait till it's done
509 */ 509 */
510 static int sdma_run_channel0(struct sdma_engine *sdma) 510 static int sdma_run_channel0(struct sdma_engine *sdma)
511 { 511 {
512 int ret; 512 int ret;
513 unsigned long timeout = 500; 513 unsigned long timeout = 500;
514 514
515 sdma_enable_channel(sdma, 0); 515 sdma_enable_channel(sdma, 0);
516 516
517 while (!(ret = readl_relaxed(sdma->regs + SDMA_H_INTR) & 1)) { 517 while (!(ret = readl_relaxed(sdma->regs + SDMA_H_INTR) & 1)) {
518 if (timeout-- <= 0) 518 if (timeout-- <= 0)
519 break; 519 break;
520 udelay(1); 520 udelay(1);
521 } 521 }
522 522
523 if (ret) { 523 if (ret) {
524 /* Clear the interrupt status */ 524 /* Clear the interrupt status */
525 writel_relaxed(ret, sdma->regs + SDMA_H_INTR); 525 writel_relaxed(ret, sdma->regs + SDMA_H_INTR);
526 } else { 526 } else {
527 dev_err(sdma->dev, "Timeout waiting for CH0 ready\n"); 527 dev_err(sdma->dev, "Timeout waiting for CH0 ready\n");
528 } 528 }
529 529
530 return ret ? 0 : -ETIMEDOUT; 530 return ret ? 0 : -ETIMEDOUT;
531 } 531 }
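/*
 * Channel 0 is the SDMA command channel: script images and channel
 * contexts are transferred through it via C0_SETPM/C0_SETDM buffer
 * descriptors. The helper above simply starts channel 0 and polls bit 0
 * of SDMA_H_INTR for roughly 500us before giving up with -ETIMEDOUT.
 */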
532 532
533 static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size, 533 static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size,
534 u32 address) 534 u32 address)
535 { 535 {
536 struct sdma_buffer_descriptor *bd0 = sdma->channel[0].bd; 536 struct sdma_buffer_descriptor *bd0 = sdma->channel[0].bd;
537 void *buf_virt; 537 void *buf_virt;
538 dma_addr_t buf_phys; 538 dma_addr_t buf_phys;
539 int ret; 539 int ret;
540 unsigned long flags; 540 unsigned long flags;
541 541
542 buf_virt = dma_alloc_coherent(NULL, 542 buf_virt = dma_alloc_coherent(NULL,
543 size, 543 size,
544 &buf_phys, GFP_KERNEL); 544 &buf_phys, GFP_KERNEL);
545 if (!buf_virt) { 545 if (!buf_virt) {
546 return -ENOMEM; 546 return -ENOMEM;
547 } 547 }
548 548
549 spin_lock_irqsave(&sdma->channel_0_lock, flags); 549 spin_lock_irqsave(&sdma->channel_0_lock, flags);
550 550
551 bd0->mode.command = C0_SETPM; 551 bd0->mode.command = C0_SETPM;
552 bd0->mode.status = BD_DONE | BD_INTR | BD_WRAP | BD_EXTD; 552 bd0->mode.status = BD_DONE | BD_INTR | BD_WRAP | BD_EXTD;
553 bd0->mode.count = size / 2; 553 bd0->mode.count = size / 2;
554 bd0->buffer_addr = buf_phys; 554 bd0->buffer_addr = buf_phys;
555 bd0->ext_buffer_addr = address; 555 bd0->ext_buffer_addr = address;
556 556
557 memcpy(buf_virt, buf, size); 557 memcpy(buf_virt, buf, size);
558 558
559 ret = sdma_run_channel0(sdma); 559 ret = sdma_run_channel0(sdma);
560 560
561 spin_unlock_irqrestore(&sdma->channel_0_lock, flags); 561 spin_unlock_irqrestore(&sdma->channel_0_lock, flags);
562 562
563 dma_free_coherent(NULL, size, buf_virt, buf_phys); 563 dma_free_coherent(NULL, size, buf_virt, buf_phys);
564 564
565 return ret; 565 return ret;
566 } 566 }
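/*
 * Note on the size / 2 above: the C0_SETPM command counts in 16-bit SDMA
 * program-memory words rather than bytes (the firmware RAM image is
 * likewise handled as an array of unsigned short), so the byte size of
 * the script is halved before it is handed to channel 0.
 */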
567 567
568 static void sdma_event_enable(struct sdma_channel *sdmac, unsigned int event) 568 static void sdma_event_enable(struct sdma_channel *sdmac, unsigned int event)
569 { 569 {
570 struct sdma_engine *sdma = sdmac->sdma; 570 struct sdma_engine *sdma = sdmac->sdma;
571 int channel = sdmac->channel; 571 int channel = sdmac->channel;
572 unsigned long val; 572 unsigned long val;
573 u32 chnenbl = chnenbl_ofs(sdma, event); 573 u32 chnenbl = chnenbl_ofs(sdma, event);
574 574
575 val = readl_relaxed(sdma->regs + chnenbl); 575 val = readl_relaxed(sdma->regs + chnenbl);
576 __set_bit(channel, &val); 576 __set_bit(channel, &val);
577 writel_relaxed(val, sdma->regs + chnenbl); 577 writel_relaxed(val, sdma->regs + chnenbl);
578 } 578 }
579 579
580 static void sdma_event_disable(struct sdma_channel *sdmac, unsigned int event) 580 static void sdma_event_disable(struct sdma_channel *sdmac, unsigned int event)
581 { 581 {
582 struct sdma_engine *sdma = sdmac->sdma; 582 struct sdma_engine *sdma = sdmac->sdma;
583 int channel = sdmac->channel; 583 int channel = sdmac->channel;
584 u32 chnenbl = chnenbl_ofs(sdma, event); 584 u32 chnenbl = chnenbl_ofs(sdma, event);
585 unsigned long val; 585 unsigned long val;
586 586
587 val = readl_relaxed(sdma->regs + chnenbl); 587 val = readl_relaxed(sdma->regs + chnenbl);
588 __clear_bit(channel, &val); 588 __clear_bit(channel, &val);
589 writel_relaxed(val, sdma->regs + chnenbl); 589 writel_relaxed(val, sdma->regs + chnenbl);
590 } 590 }
591 591
592 static void sdma_handle_channel_loop(struct sdma_channel *sdmac) 592 static void sdma_handle_channel_loop(struct sdma_channel *sdmac)
593 { 593 {
594 struct sdma_buffer_descriptor *bd; 594 struct sdma_buffer_descriptor *bd;
595 595
596 /* 596 /*
597 * Loop (cyclic) mode: iterate over the completed descriptors, 597 * Loop (cyclic) mode: iterate over the completed descriptors,
598 * re-arm each one and call the callback function. 598 * re-arm each one and call the callback function.
599 */ 599 */
600 while (1) { 600 while (1) {
601 bd = &sdmac->bd[sdmac->buf_tail]; 601 bd = &sdmac->bd[sdmac->buf_tail];
602 602
603 if (bd->mode.status & BD_DONE) 603 if (bd->mode.status & BD_DONE)
604 break; 604 break;
605 605
606 if (bd->mode.status & BD_RROR) 606 if (bd->mode.status & BD_RROR)
607 sdmac->status = DMA_ERROR; 607 sdmac->status = DMA_ERROR;
608 else 608 else
609 sdmac->status = DMA_IN_PROGRESS; 609 sdmac->status = DMA_IN_PROGRESS;
610 610
611 bd->mode.status |= BD_DONE; 611 bd->mode.status |= BD_DONE;
612 sdmac->buf_tail++; 612 sdmac->buf_tail++;
613 sdmac->buf_tail %= sdmac->num_bd; 613 sdmac->buf_tail %= sdmac->num_bd;
614 614
615 if (sdmac->desc.callback) 615 if (sdmac->desc.callback)
616 sdmac->desc.callback(sdmac->desc.callback_param); 616 sdmac->desc.callback(sdmac->desc.callback_param);
617 } 617 }
618 } 618 }
619 619
620 static void mxc_sdma_handle_channel_normal(struct sdma_channel *sdmac) 620 static void mxc_sdma_handle_channel_normal(struct sdma_channel *sdmac)
621 { 621 {
622 struct sdma_buffer_descriptor *bd; 622 struct sdma_buffer_descriptor *bd;
623 int i, error = 0; 623 int i, error = 0;
624 624
625 sdmac->chn_real_count = 0; 625 sdmac->chn_real_count = 0;
626 /* 626 /*
627 * Non-loop mode: iterate over all descriptors, collect 627 * Non-loop mode: iterate over all descriptors, collect
628 * errors and call the callback function 628 * errors and call the callback function
629 */ 629 */
630 for (i = 0; i < sdmac->num_bd; i++) { 630 for (i = 0; i < sdmac->num_bd; i++) {
631 bd = &sdmac->bd[i]; 631 bd = &sdmac->bd[i];
632 632
633 if (bd->mode.status & (BD_DONE | BD_RROR)) 633 if (bd->mode.status & (BD_DONE | BD_RROR))
634 error = -EIO; 634 error = -EIO;
635 sdmac->chn_real_count += bd->mode.count; 635 sdmac->chn_real_count += bd->mode.count;
636 } 636 }
637 637
638 if (error) 638 if (error)
639 sdmac->status = DMA_ERROR; 639 sdmac->status = DMA_ERROR;
640 else 640 else
641 sdmac->status = DMA_SUCCESS; 641 sdmac->status = DMA_COMPLETE;
642 642
643 dma_cookie_complete(&sdmac->desc); 643 dma_cookie_complete(&sdmac->desc);
644 if (sdmac->desc.callback) 644 if (sdmac->desc.callback)
645 sdmac->desc.callback(sdmac->desc.callback_param); 645 sdmac->desc.callback(sdmac->desc.callback_param);
646 } 646 }
647 647
648 static void sdma_tasklet(unsigned long data) 648 static void sdma_tasklet(unsigned long data)
649 { 649 {
650 struct sdma_channel *sdmac = (struct sdma_channel *) data; 650 struct sdma_channel *sdmac = (struct sdma_channel *) data;
651 651
652 if (sdmac->flags & IMX_DMA_SG_LOOP) 652 if (sdmac->flags & IMX_DMA_SG_LOOP)
653 sdma_handle_channel_loop(sdmac); 653 sdma_handle_channel_loop(sdmac);
654 else 654 else
655 mxc_sdma_handle_channel_normal(sdmac); 655 mxc_sdma_handle_channel_normal(sdmac);
656 } 656 }
657 657
658 static irqreturn_t sdma_int_handler(int irq, void *dev_id) 658 static irqreturn_t sdma_int_handler(int irq, void *dev_id)
659 { 659 {
660 struct sdma_engine *sdma = dev_id; 660 struct sdma_engine *sdma = dev_id;
661 unsigned long stat; 661 unsigned long stat;
662 662
663 stat = readl_relaxed(sdma->regs + SDMA_H_INTR); 663 stat = readl_relaxed(sdma->regs + SDMA_H_INTR);
664 /* not interested in channel 0 interrupts */ 664 /* not interested in channel 0 interrupts */
665 stat &= ~1; 665 stat &= ~1;
666 writel_relaxed(stat, sdma->regs + SDMA_H_INTR); 666 writel_relaxed(stat, sdma->regs + SDMA_H_INTR);
667 667
668 while (stat) { 668 while (stat) {
669 int channel = fls(stat) - 1; 669 int channel = fls(stat) - 1;
670 struct sdma_channel *sdmac = &sdma->channel[channel]; 670 struct sdma_channel *sdmac = &sdma->channel[channel];
671 671
672 tasklet_schedule(&sdmac->tasklet); 672 tasklet_schedule(&sdmac->tasklet);
673 673
674 __clear_bit(channel, &stat); 674 __clear_bit(channel, &stat);
675 } 675 }
676 676
677 return IRQ_HANDLED; 677 return IRQ_HANDLED;
678 } 678 }
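/*
 * Channel 0 completion is polled synchronously in sdma_run_channel0(),
 * so its status bit is masked off here; every remaining set bit simply
 * schedules the corresponding channel's tasklet for deferred handling.
 */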
679 679
680 /* 680 /*
681 * Set the SDMA script start address (PC) according to the peripheral type 681 * Set the SDMA script start address (PC) according to the peripheral type
682 */ 682 */
683 static void sdma_get_pc(struct sdma_channel *sdmac, 683 static void sdma_get_pc(struct sdma_channel *sdmac,
684 enum sdma_peripheral_type peripheral_type) 684 enum sdma_peripheral_type peripheral_type)
685 { 685 {
686 struct sdma_engine *sdma = sdmac->sdma; 686 struct sdma_engine *sdma = sdmac->sdma;
687 int per_2_emi = 0, emi_2_per = 0; 687 int per_2_emi = 0, emi_2_per = 0;
688 /* 688 /*
689 * These are needed once we start to support transfers between 689 * These are needed once we start to support transfers between
690 * two peripherals or memory-to-memory transfers 690 * two peripherals or memory-to-memory transfers
691 */ 691 */
692 int per_2_per = 0, emi_2_emi = 0; 692 int per_2_per = 0, emi_2_emi = 0;
693 693
694 sdmac->pc_from_device = 0; 694 sdmac->pc_from_device = 0;
695 sdmac->pc_to_device = 0; 695 sdmac->pc_to_device = 0;
696 696
697 switch (peripheral_type) { 697 switch (peripheral_type) {
698 case IMX_DMATYPE_MEMORY: 698 case IMX_DMATYPE_MEMORY:
699 emi_2_emi = sdma->script_addrs->ap_2_ap_addr; 699 emi_2_emi = sdma->script_addrs->ap_2_ap_addr;
700 break; 700 break;
701 case IMX_DMATYPE_DSP: 701 case IMX_DMATYPE_DSP:
702 emi_2_per = sdma->script_addrs->bp_2_ap_addr; 702 emi_2_per = sdma->script_addrs->bp_2_ap_addr;
703 per_2_emi = sdma->script_addrs->ap_2_bp_addr; 703 per_2_emi = sdma->script_addrs->ap_2_bp_addr;
704 break; 704 break;
705 case IMX_DMATYPE_FIRI: 705 case IMX_DMATYPE_FIRI:
706 per_2_emi = sdma->script_addrs->firi_2_mcu_addr; 706 per_2_emi = sdma->script_addrs->firi_2_mcu_addr;
707 emi_2_per = sdma->script_addrs->mcu_2_firi_addr; 707 emi_2_per = sdma->script_addrs->mcu_2_firi_addr;
708 break; 708 break;
709 case IMX_DMATYPE_UART: 709 case IMX_DMATYPE_UART:
710 per_2_emi = sdma->script_addrs->uart_2_mcu_addr; 710 per_2_emi = sdma->script_addrs->uart_2_mcu_addr;
711 emi_2_per = sdma->script_addrs->mcu_2_app_addr; 711 emi_2_per = sdma->script_addrs->mcu_2_app_addr;
712 break; 712 break;
713 case IMX_DMATYPE_UART_SP: 713 case IMX_DMATYPE_UART_SP:
714 per_2_emi = sdma->script_addrs->uartsh_2_mcu_addr; 714 per_2_emi = sdma->script_addrs->uartsh_2_mcu_addr;
715 emi_2_per = sdma->script_addrs->mcu_2_shp_addr; 715 emi_2_per = sdma->script_addrs->mcu_2_shp_addr;
716 break; 716 break;
717 case IMX_DMATYPE_ATA: 717 case IMX_DMATYPE_ATA:
718 per_2_emi = sdma->script_addrs->ata_2_mcu_addr; 718 per_2_emi = sdma->script_addrs->ata_2_mcu_addr;
719 emi_2_per = sdma->script_addrs->mcu_2_ata_addr; 719 emi_2_per = sdma->script_addrs->mcu_2_ata_addr;
720 break; 720 break;
721 case IMX_DMATYPE_CSPI: 721 case IMX_DMATYPE_CSPI:
722 case IMX_DMATYPE_EXT: 722 case IMX_DMATYPE_EXT:
723 case IMX_DMATYPE_SSI: 723 case IMX_DMATYPE_SSI:
724 per_2_emi = sdma->script_addrs->app_2_mcu_addr; 724 per_2_emi = sdma->script_addrs->app_2_mcu_addr;
725 emi_2_per = sdma->script_addrs->mcu_2_app_addr; 725 emi_2_per = sdma->script_addrs->mcu_2_app_addr;
726 break; 726 break;
727 case IMX_DMATYPE_SSI_SP: 727 case IMX_DMATYPE_SSI_SP:
728 case IMX_DMATYPE_MMC: 728 case IMX_DMATYPE_MMC:
729 case IMX_DMATYPE_SDHC: 729 case IMX_DMATYPE_SDHC:
730 case IMX_DMATYPE_CSPI_SP: 730 case IMX_DMATYPE_CSPI_SP:
731 case IMX_DMATYPE_ESAI: 731 case IMX_DMATYPE_ESAI:
732 case IMX_DMATYPE_MSHC_SP: 732 case IMX_DMATYPE_MSHC_SP:
733 per_2_emi = sdma->script_addrs->shp_2_mcu_addr; 733 per_2_emi = sdma->script_addrs->shp_2_mcu_addr;
734 emi_2_per = sdma->script_addrs->mcu_2_shp_addr; 734 emi_2_per = sdma->script_addrs->mcu_2_shp_addr;
735 break; 735 break;
736 case IMX_DMATYPE_ASRC: 736 case IMX_DMATYPE_ASRC:
737 per_2_emi = sdma->script_addrs->asrc_2_mcu_addr; 737 per_2_emi = sdma->script_addrs->asrc_2_mcu_addr;
738 emi_2_per = sdma->script_addrs->asrc_2_mcu_addr; 738 emi_2_per = sdma->script_addrs->asrc_2_mcu_addr;
739 per_2_per = sdma->script_addrs->per_2_per_addr; 739 per_2_per = sdma->script_addrs->per_2_per_addr;
740 break; 740 break;
741 case IMX_DMATYPE_MSHC: 741 case IMX_DMATYPE_MSHC:
742 per_2_emi = sdma->script_addrs->mshc_2_mcu_addr; 742 per_2_emi = sdma->script_addrs->mshc_2_mcu_addr;
743 emi_2_per = sdma->script_addrs->mcu_2_mshc_addr; 743 emi_2_per = sdma->script_addrs->mcu_2_mshc_addr;
744 break; 744 break;
745 case IMX_DMATYPE_CCM: 745 case IMX_DMATYPE_CCM:
746 per_2_emi = sdma->script_addrs->dptc_dvfs_addr; 746 per_2_emi = sdma->script_addrs->dptc_dvfs_addr;
747 break; 747 break;
748 case IMX_DMATYPE_SPDIF: 748 case IMX_DMATYPE_SPDIF:
749 per_2_emi = sdma->script_addrs->spdif_2_mcu_addr; 749 per_2_emi = sdma->script_addrs->spdif_2_mcu_addr;
750 emi_2_per = sdma->script_addrs->mcu_2_spdif_addr; 750 emi_2_per = sdma->script_addrs->mcu_2_spdif_addr;
751 break; 751 break;
752 case IMX_DMATYPE_IPU_MEMORY: 752 case IMX_DMATYPE_IPU_MEMORY:
753 emi_2_per = sdma->script_addrs->ext_mem_2_ipu_addr; 753 emi_2_per = sdma->script_addrs->ext_mem_2_ipu_addr;
754 break; 754 break;
755 default: 755 default:
756 break; 756 break;
757 } 757 }
758 758
759 sdmac->pc_from_device = per_2_emi; 759 sdmac->pc_from_device = per_2_emi;
760 sdmac->pc_to_device = emi_2_per; 760 sdmac->pc_to_device = emi_2_per;
761 } 761 }
762 762
763 static int sdma_load_context(struct sdma_channel *sdmac) 763 static int sdma_load_context(struct sdma_channel *sdmac)
764 { 764 {
765 struct sdma_engine *sdma = sdmac->sdma; 765 struct sdma_engine *sdma = sdmac->sdma;
766 int channel = sdmac->channel; 766 int channel = sdmac->channel;
767 int load_address; 767 int load_address;
768 struct sdma_context_data *context = sdma->context; 768 struct sdma_context_data *context = sdma->context;
769 struct sdma_buffer_descriptor *bd0 = sdma->channel[0].bd; 769 struct sdma_buffer_descriptor *bd0 = sdma->channel[0].bd;
770 int ret; 770 int ret;
771 unsigned long flags; 771 unsigned long flags;
772 772
773 if (sdmac->direction == DMA_DEV_TO_MEM) { 773 if (sdmac->direction == DMA_DEV_TO_MEM) {
774 load_address = sdmac->pc_from_device; 774 load_address = sdmac->pc_from_device;
775 } else { 775 } else {
776 load_address = sdmac->pc_to_device; 776 load_address = sdmac->pc_to_device;
777 } 777 }
778 778
779 if (load_address < 0) 779 if (load_address < 0)
780 return load_address; 780 return load_address;
781 781
782 dev_dbg(sdma->dev, "load_address = %d\n", load_address); 782 dev_dbg(sdma->dev, "load_address = %d\n", load_address);
783 dev_dbg(sdma->dev, "wml = 0x%08x\n", (u32)sdmac->watermark_level); 783 dev_dbg(sdma->dev, "wml = 0x%08x\n", (u32)sdmac->watermark_level);
784 dev_dbg(sdma->dev, "shp_addr = 0x%08x\n", sdmac->shp_addr); 784 dev_dbg(sdma->dev, "shp_addr = 0x%08x\n", sdmac->shp_addr);
785 dev_dbg(sdma->dev, "per_addr = 0x%08x\n", sdmac->per_addr); 785 dev_dbg(sdma->dev, "per_addr = 0x%08x\n", sdmac->per_addr);
786 dev_dbg(sdma->dev, "event_mask0 = 0x%08x\n", (u32)sdmac->event_mask[0]); 786 dev_dbg(sdma->dev, "event_mask0 = 0x%08x\n", (u32)sdmac->event_mask[0]);
787 dev_dbg(sdma->dev, "event_mask1 = 0x%08x\n", (u32)sdmac->event_mask[1]); 787 dev_dbg(sdma->dev, "event_mask1 = 0x%08x\n", (u32)sdmac->event_mask[1]);
788 788
789 spin_lock_irqsave(&sdma->channel_0_lock, flags); 789 spin_lock_irqsave(&sdma->channel_0_lock, flags);
790 790
791 memset(context, 0, sizeof(*context)); 791 memset(context, 0, sizeof(*context));
792 context->channel_state.pc = load_address; 792 context->channel_state.pc = load_address;
793 793
794 /* Pass the event mask, peripheral base address and watermark 794 /* Pass the event mask, peripheral base address and watermark
795 * level to the script through the channel context 795 * level to the script through the channel context
796 */ 796 */
797 context->gReg[0] = sdmac->event_mask[1]; 797 context->gReg[0] = sdmac->event_mask[1];
798 context->gReg[1] = sdmac->event_mask[0]; 798 context->gReg[1] = sdmac->event_mask[0];
799 context->gReg[2] = sdmac->per_addr; 799 context->gReg[2] = sdmac->per_addr;
800 context->gReg[6] = sdmac->shp_addr; 800 context->gReg[6] = sdmac->shp_addr;
801 context->gReg[7] = sdmac->watermark_level; 801 context->gReg[7] = sdmac->watermark_level;
802 802
803 bd0->mode.command = C0_SETDM; 803 bd0->mode.command = C0_SETDM;
804 bd0->mode.status = BD_DONE | BD_INTR | BD_WRAP | BD_EXTD; 804 bd0->mode.status = BD_DONE | BD_INTR | BD_WRAP | BD_EXTD;
805 bd0->mode.count = sizeof(*context) / 4; 805 bd0->mode.count = sizeof(*context) / 4;
806 bd0->buffer_addr = sdma->context_phys; 806 bd0->buffer_addr = sdma->context_phys;
807 bd0->ext_buffer_addr = 2048 + (sizeof(*context) / 4) * channel; 807 bd0->ext_buffer_addr = 2048 + (sizeof(*context) / 4) * channel;
808 ret = sdma_run_channel0(sdma); 808 ret = sdma_run_channel0(sdma);
809 809
810 spin_unlock_irqrestore(&sdma->channel_0_lock, flags); 810 spin_unlock_irqrestore(&sdma->channel_0_lock, flags);
811 811
812 return ret; 812 return ret;
813 } 813 }
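/*
 * The context image is copied into SDMA data RAM with C0_SETDM, which
 * counts in 32-bit words; the destination 2048 + (context size / 4) *
 * channel is the per-channel context area in the engine's internal RAM.
 */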
814 814
815 static void sdma_disable_channel(struct sdma_channel *sdmac) 815 static void sdma_disable_channel(struct sdma_channel *sdmac)
816 { 816 {
817 struct sdma_engine *sdma = sdmac->sdma; 817 struct sdma_engine *sdma = sdmac->sdma;
818 int channel = sdmac->channel; 818 int channel = sdmac->channel;
819 819
820 writel_relaxed(BIT(channel), sdma->regs + SDMA_H_STATSTOP); 820 writel_relaxed(BIT(channel), sdma->regs + SDMA_H_STATSTOP);
821 sdmac->status = DMA_ERROR; 821 sdmac->status = DMA_ERROR;
822 } 822 }
823 823
824 static int sdma_config_channel(struct sdma_channel *sdmac) 824 static int sdma_config_channel(struct sdma_channel *sdmac)
825 { 825 {
826 int ret; 826 int ret;
827 827
828 sdma_disable_channel(sdmac); 828 sdma_disable_channel(sdmac);
829 829
830 sdmac->event_mask[0] = 0; 830 sdmac->event_mask[0] = 0;
831 sdmac->event_mask[1] = 0; 831 sdmac->event_mask[1] = 0;
832 sdmac->shp_addr = 0; 832 sdmac->shp_addr = 0;
833 sdmac->per_addr = 0; 833 sdmac->per_addr = 0;
834 834
835 if (sdmac->event_id0) { 835 if (sdmac->event_id0) {
836 if (sdmac->event_id0 >= sdmac->sdma->drvdata->num_events) 836 if (sdmac->event_id0 >= sdmac->sdma->drvdata->num_events)
837 return -EINVAL; 837 return -EINVAL;
838 sdma_event_enable(sdmac, sdmac->event_id0); 838 sdma_event_enable(sdmac, sdmac->event_id0);
839 } 839 }
840 840
841 switch (sdmac->peripheral_type) { 841 switch (sdmac->peripheral_type) {
842 case IMX_DMATYPE_DSP: 842 case IMX_DMATYPE_DSP:
843 sdma_config_ownership(sdmac, false, true, true); 843 sdma_config_ownership(sdmac, false, true, true);
844 break; 844 break;
845 case IMX_DMATYPE_MEMORY: 845 case IMX_DMATYPE_MEMORY:
846 sdma_config_ownership(sdmac, false, true, false); 846 sdma_config_ownership(sdmac, false, true, false);
847 break; 847 break;
848 default: 848 default:
849 sdma_config_ownership(sdmac, true, true, false); 849 sdma_config_ownership(sdmac, true, true, false);
850 break; 850 break;
851 } 851 }
852 852
853 sdma_get_pc(sdmac, sdmac->peripheral_type); 853 sdma_get_pc(sdmac, sdmac->peripheral_type);
854 854
855 if ((sdmac->peripheral_type != IMX_DMATYPE_MEMORY) && 855 if ((sdmac->peripheral_type != IMX_DMATYPE_MEMORY) &&
856 (sdmac->peripheral_type != IMX_DMATYPE_DSP)) { 856 (sdmac->peripheral_type != IMX_DMATYPE_DSP)) {
857 /* Handle multiple event channels differently */ 857 /* Handle multiple event channels differently */
858 if (sdmac->event_id1) { 858 if (sdmac->event_id1) {
859 sdmac->event_mask[1] = BIT(sdmac->event_id1 % 32); 859 sdmac->event_mask[1] = BIT(sdmac->event_id1 % 32);
860 if (sdmac->event_id1 > 31) 860 if (sdmac->event_id1 > 31)
861 __set_bit(31, &sdmac->watermark_level); 861 __set_bit(31, &sdmac->watermark_level);
862 sdmac->event_mask[0] = BIT(sdmac->event_id0 % 32); 862 sdmac->event_mask[0] = BIT(sdmac->event_id0 % 32);
863 if (sdmac->event_id0 > 31) 863 if (sdmac->event_id0 > 31)
864 __set_bit(30, &sdmac->watermark_level); 864 __set_bit(30, &sdmac->watermark_level);
865 } else { 865 } else {
866 __set_bit(sdmac->event_id0, sdmac->event_mask); 866 __set_bit(sdmac->event_id0, sdmac->event_mask);
867 } 867 }
868 /* Watermark Level */ 868 /* Watermark Level */
869 sdmac->watermark_level |= sdmac->watermark_level; 869 sdmac->watermark_level |= sdmac->watermark_level;
870 /* Address */ 870 /* Address */
871 sdmac->shp_addr = sdmac->per_address; 871 sdmac->shp_addr = sdmac->per_address;
872 } else { 872 } else {
873 sdmac->watermark_level = 0; /* FIXME: M3_BASE_ADDRESS */ 873 sdmac->watermark_level = 0; /* FIXME: M3_BASE_ADDRESS */
874 } 874 }
875 875
876 ret = sdma_load_context(sdmac); 876 ret = sdma_load_context(sdmac);
877 877
878 return ret; 878 return ret;
879 } 879 }
880 880
881 static int sdma_set_channel_priority(struct sdma_channel *sdmac, 881 static int sdma_set_channel_priority(struct sdma_channel *sdmac,
882 unsigned int priority) 882 unsigned int priority)
883 { 883 {
884 struct sdma_engine *sdma = sdmac->sdma; 884 struct sdma_engine *sdma = sdmac->sdma;
885 int channel = sdmac->channel; 885 int channel = sdmac->channel;
886 886
887 if (priority < MXC_SDMA_MIN_PRIORITY 887 if (priority < MXC_SDMA_MIN_PRIORITY
888 || priority > MXC_SDMA_MAX_PRIORITY) { 888 || priority > MXC_SDMA_MAX_PRIORITY) {
889 return -EINVAL; 889 return -EINVAL;
890 } 890 }
891 891
892 writel_relaxed(priority, sdma->regs + SDMA_CHNPRI_0 + 4 * channel); 892 writel_relaxed(priority, sdma->regs + SDMA_CHNPRI_0 + 4 * channel);
893 893
894 return 0; 894 return 0;
895 } 895 }
896 896
897 static int sdma_request_channel(struct sdma_channel *sdmac) 897 static int sdma_request_channel(struct sdma_channel *sdmac)
898 { 898 {
899 struct sdma_engine *sdma = sdmac->sdma; 899 struct sdma_engine *sdma = sdmac->sdma;
900 int channel = sdmac->channel; 900 int channel = sdmac->channel;
901 int ret = -EBUSY; 901 int ret = -EBUSY;
902 902
903 sdmac->bd = dma_alloc_coherent(NULL, PAGE_SIZE, &sdmac->bd_phys, GFP_KERNEL); 903 sdmac->bd = dma_alloc_coherent(NULL, PAGE_SIZE, &sdmac->bd_phys, GFP_KERNEL);
904 if (!sdmac->bd) { 904 if (!sdmac->bd) {
905 ret = -ENOMEM; 905 ret = -ENOMEM;
906 goto out; 906 goto out;
907 } 907 }
908 908
909 memset(sdmac->bd, 0, PAGE_SIZE); 909 memset(sdmac->bd, 0, PAGE_SIZE);
910 910
911 sdma->channel_control[channel].base_bd_ptr = sdmac->bd_phys; 911 sdma->channel_control[channel].base_bd_ptr = sdmac->bd_phys;
912 sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys; 912 sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys;
913 913
914 sdma_set_channel_priority(sdmac, MXC_SDMA_DEFAULT_PRIORITY); 914 sdma_set_channel_priority(sdmac, MXC_SDMA_DEFAULT_PRIORITY);
915 return 0; 915 return 0;
916 out: 916 out:
917 917
918 return ret; 918 return ret;
919 } 919 }
920 920
921 static struct sdma_channel *to_sdma_chan(struct dma_chan *chan) 921 static struct sdma_channel *to_sdma_chan(struct dma_chan *chan)
922 { 922 {
923 return container_of(chan, struct sdma_channel, chan); 923 return container_of(chan, struct sdma_channel, chan);
924 } 924 }
925 925
926 static dma_cookie_t sdma_tx_submit(struct dma_async_tx_descriptor *tx) 926 static dma_cookie_t sdma_tx_submit(struct dma_async_tx_descriptor *tx)
927 { 927 {
928 unsigned long flags; 928 unsigned long flags;
929 struct sdma_channel *sdmac = to_sdma_chan(tx->chan); 929 struct sdma_channel *sdmac = to_sdma_chan(tx->chan);
930 dma_cookie_t cookie; 930 dma_cookie_t cookie;
931 931
932 spin_lock_irqsave(&sdmac->lock, flags); 932 spin_lock_irqsave(&sdmac->lock, flags);
933 933
934 cookie = dma_cookie_assign(tx); 934 cookie = dma_cookie_assign(tx);
935 935
936 spin_unlock_irqrestore(&sdmac->lock, flags); 936 spin_unlock_irqrestore(&sdmac->lock, flags);
937 937
938 return cookie; 938 return cookie;
939 } 939 }
940 940
941 static int sdma_alloc_chan_resources(struct dma_chan *chan) 941 static int sdma_alloc_chan_resources(struct dma_chan *chan)
942 { 942 {
943 struct sdma_channel *sdmac = to_sdma_chan(chan); 943 struct sdma_channel *sdmac = to_sdma_chan(chan);
944 struct imx_dma_data *data = chan->private; 944 struct imx_dma_data *data = chan->private;
945 int prio, ret; 945 int prio, ret;
946 946
947 if (!data) 947 if (!data)
948 return -EINVAL; 948 return -EINVAL;
949 949
950 switch (data->priority) { 950 switch (data->priority) {
951 case DMA_PRIO_HIGH: 951 case DMA_PRIO_HIGH:
952 prio = 3; 952 prio = 3;
953 break; 953 break;
954 case DMA_PRIO_MEDIUM: 954 case DMA_PRIO_MEDIUM:
955 prio = 2; 955 prio = 2;
956 break; 956 break;
957 case DMA_PRIO_LOW: 957 case DMA_PRIO_LOW:
958 default: 958 default:
959 prio = 1; 959 prio = 1;
960 break; 960 break;
961 } 961 }
962 962
963 sdmac->peripheral_type = data->peripheral_type; 963 sdmac->peripheral_type = data->peripheral_type;
964 sdmac->event_id0 = data->dma_request; 964 sdmac->event_id0 = data->dma_request;
965 965
966 clk_enable(sdmac->sdma->clk_ipg); 966 clk_enable(sdmac->sdma->clk_ipg);
967 clk_enable(sdmac->sdma->clk_ahb); 967 clk_enable(sdmac->sdma->clk_ahb);
968 968
969 ret = sdma_request_channel(sdmac); 969 ret = sdma_request_channel(sdmac);
970 if (ret) 970 if (ret)
971 return ret; 971 return ret;
972 972
973 ret = sdma_set_channel_priority(sdmac, prio); 973 ret = sdma_set_channel_priority(sdmac, prio);
974 if (ret) 974 if (ret)
975 return ret; 975 return ret;
976 976
977 dma_async_tx_descriptor_init(&sdmac->desc, chan); 977 dma_async_tx_descriptor_init(&sdmac->desc, chan);
978 sdmac->desc.tx_submit = sdma_tx_submit; 978 sdmac->desc.tx_submit = sdma_tx_submit;
979 /* txd.flags will be overwritten in prep funcs */ 979 /* txd.flags will be overwritten in prep funcs */
980 sdmac->desc.flags = DMA_CTRL_ACK; 980 sdmac->desc.flags = DMA_CTRL_ACK;
981 981
982 return 0; 982 return 0;
983 } 983 }
984 984
985 static void sdma_free_chan_resources(struct dma_chan *chan) 985 static void sdma_free_chan_resources(struct dma_chan *chan)
986 { 986 {
987 struct sdma_channel *sdmac = to_sdma_chan(chan); 987 struct sdma_channel *sdmac = to_sdma_chan(chan);
988 struct sdma_engine *sdma = sdmac->sdma; 988 struct sdma_engine *sdma = sdmac->sdma;
989 989
990 sdma_disable_channel(sdmac); 990 sdma_disable_channel(sdmac);
991 991
992 if (sdmac->event_id0) 992 if (sdmac->event_id0)
993 sdma_event_disable(sdmac, sdmac->event_id0); 993 sdma_event_disable(sdmac, sdmac->event_id0);
994 if (sdmac->event_id1) 994 if (sdmac->event_id1)
995 sdma_event_disable(sdmac, sdmac->event_id1); 995 sdma_event_disable(sdmac, sdmac->event_id1);
996 996
997 sdmac->event_id0 = 0; 997 sdmac->event_id0 = 0;
998 sdmac->event_id1 = 0; 998 sdmac->event_id1 = 0;
999 999
1000 sdma_set_channel_priority(sdmac, 0); 1000 sdma_set_channel_priority(sdmac, 0);
1001 1001
1002 dma_free_coherent(NULL, PAGE_SIZE, sdmac->bd, sdmac->bd_phys); 1002 dma_free_coherent(NULL, PAGE_SIZE, sdmac->bd, sdmac->bd_phys);
1003 1003
1004 clk_disable(sdma->clk_ipg); 1004 clk_disable(sdma->clk_ipg);
1005 clk_disable(sdma->clk_ahb); 1005 clk_disable(sdma->clk_ahb);
1006 } 1006 }
1007 1007
1008 static struct dma_async_tx_descriptor *sdma_prep_slave_sg( 1008 static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
1009 struct dma_chan *chan, struct scatterlist *sgl, 1009 struct dma_chan *chan, struct scatterlist *sgl,
1010 unsigned int sg_len, enum dma_transfer_direction direction, 1010 unsigned int sg_len, enum dma_transfer_direction direction,
1011 unsigned long flags, void *context) 1011 unsigned long flags, void *context)
1012 { 1012 {
1013 struct sdma_channel *sdmac = to_sdma_chan(chan); 1013 struct sdma_channel *sdmac = to_sdma_chan(chan);
1014 struct sdma_engine *sdma = sdmac->sdma; 1014 struct sdma_engine *sdma = sdmac->sdma;
1015 int ret, i, count; 1015 int ret, i, count;
1016 int channel = sdmac->channel; 1016 int channel = sdmac->channel;
1017 struct scatterlist *sg; 1017 struct scatterlist *sg;
1018 1018
1019 if (sdmac->status == DMA_IN_PROGRESS) 1019 if (sdmac->status == DMA_IN_PROGRESS)
1020 return NULL; 1020 return NULL;
1021 sdmac->status = DMA_IN_PROGRESS; 1021 sdmac->status = DMA_IN_PROGRESS;
1022 1022
1023 sdmac->flags = 0; 1023 sdmac->flags = 0;
1024 1024
1025 sdmac->buf_tail = 0; 1025 sdmac->buf_tail = 0;
1026 1026
1027 dev_dbg(sdma->dev, "setting up %d entries for channel %d.\n", 1027 dev_dbg(sdma->dev, "setting up %d entries for channel %d.\n",
1028 sg_len, channel); 1028 sg_len, channel);
1029 1029
1030 sdmac->direction = direction; 1030 sdmac->direction = direction;
1031 ret = sdma_load_context(sdmac); 1031 ret = sdma_load_context(sdmac);
1032 if (ret) 1032 if (ret)
1033 goto err_out; 1033 goto err_out;
1034 1034
1035 if (sg_len > NUM_BD) { 1035 if (sg_len > NUM_BD) {
1036 dev_err(sdma->dev, "SDMA channel %d: maximum number of sg exceeded: %d > %d\n", 1036 dev_err(sdma->dev, "SDMA channel %d: maximum number of sg exceeded: %d > %d\n",
1037 channel, sg_len, NUM_BD); 1037 channel, sg_len, NUM_BD);
1038 ret = -EINVAL; 1038 ret = -EINVAL;
1039 goto err_out; 1039 goto err_out;
1040 } 1040 }
1041 1041
1042 sdmac->chn_count = 0; 1042 sdmac->chn_count = 0;
1043 for_each_sg(sgl, sg, sg_len, i) { 1043 for_each_sg(sgl, sg, sg_len, i) {
1044 struct sdma_buffer_descriptor *bd = &sdmac->bd[i]; 1044 struct sdma_buffer_descriptor *bd = &sdmac->bd[i];
1045 int param; 1045 int param;
1046 1046
1047 bd->buffer_addr = sg->dma_address; 1047 bd->buffer_addr = sg->dma_address;
1048 1048
1049 count = sg_dma_len(sg); 1049 count = sg_dma_len(sg);
1050 1050
1051 if (count > 0xffff) { 1051 if (count > 0xffff) {
1052 dev_err(sdma->dev, "SDMA channel %d: maximum bytes for sg entry exceeded: %d > %d\n", 1052 dev_err(sdma->dev, "SDMA channel %d: maximum bytes for sg entry exceeded: %d > %d\n",
1053 channel, count, 0xffff); 1053 channel, count, 0xffff);
1054 ret = -EINVAL; 1054 ret = -EINVAL;
1055 goto err_out; 1055 goto err_out;
1056 } 1056 }
1057 1057
1058 bd->mode.count = count; 1058 bd->mode.count = count;
1059 sdmac->chn_count += count; 1059 sdmac->chn_count += count;
1060 1060
1061 if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES) { 1061 if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES) {
1062 ret = -EINVAL; 1062 ret = -EINVAL;
1063 goto err_out; 1063 goto err_out;
1064 } 1064 }
1065 1065
1066 switch (sdmac->word_size) { 1066 switch (sdmac->word_size) {
1067 case DMA_SLAVE_BUSWIDTH_4_BYTES: 1067 case DMA_SLAVE_BUSWIDTH_4_BYTES:
1068 bd->mode.command = 0; 1068 bd->mode.command = 0;
1069 if (count & 3 || sg->dma_address & 3) 1069 if (count & 3 || sg->dma_address & 3)
1070 return NULL; 1070 return NULL;
1071 break; 1071 break;
1072 case DMA_SLAVE_BUSWIDTH_2_BYTES: 1072 case DMA_SLAVE_BUSWIDTH_2_BYTES:
1073 bd->mode.command = 2; 1073 bd->mode.command = 2;
1074 if (count & 1 || sg->dma_address & 1) 1074 if (count & 1 || sg->dma_address & 1)
1075 return NULL; 1075 return NULL;
1076 break; 1076 break;
1077 case DMA_SLAVE_BUSWIDTH_1_BYTE: 1077 case DMA_SLAVE_BUSWIDTH_1_BYTE:
1078 bd->mode.command = 1; 1078 bd->mode.command = 1;
1079 break; 1079 break;
1080 default: 1080 default:
1081 return NULL; 1081 return NULL;
1082 } 1082 }
1083 1083
1084 param = BD_DONE | BD_EXTD | BD_CONT; 1084 param = BD_DONE | BD_EXTD | BD_CONT;
1085 1085
1086 if (i + 1 == sg_len) { 1086 if (i + 1 == sg_len) {
1087 param |= BD_INTR; 1087 param |= BD_INTR;
1088 param |= BD_LAST; 1088 param |= BD_LAST;
1089 param &= ~BD_CONT; 1089 param &= ~BD_CONT;
1090 } 1090 }
1091 1091
1092 dev_dbg(sdma->dev, "entry %d: count: %d dma: 0x%08x %s%s\n", 1092 dev_dbg(sdma->dev, "entry %d: count: %d dma: 0x%08x %s%s\n",
1093 i, count, sg->dma_address, 1093 i, count, sg->dma_address,
1094 param & BD_WRAP ? "wrap" : "", 1094 param & BD_WRAP ? "wrap" : "",
1095 param & BD_INTR ? " intr" : ""); 1095 param & BD_INTR ? " intr" : "");
1096 1096
1097 bd->mode.status = param; 1097 bd->mode.status = param;
1098 } 1098 }
1099 1099
1100 sdmac->num_bd = sg_len; 1100 sdmac->num_bd = sg_len;
1101 sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys; 1101 sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys;
1102 1102
1103 return &sdmac->desc; 1103 return &sdmac->desc;
1104 err_out: 1104 err_out:
1105 sdmac->status = DMA_ERROR; 1105 sdmac->status = DMA_ERROR;
1106 return NULL; 1106 return NULL;
1107 } 1107 }
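/*
 * For each scatterlist entry the transfer width is encoded in
 * bd->mode.command (0 = 32-bit, 2 = 16-bit, 1 = 8-bit), with 32- and
 * 16-bit transfers additionally required to have naturally aligned
 * addresses and lengths; the final descriptor drops BD_CONT and raises
 * BD_INTR | BD_LAST so the engine stops and signals completion.
 */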
1108 1108
1109 static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic( 1109 static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
1110 struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len, 1110 struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
1111 size_t period_len, enum dma_transfer_direction direction, 1111 size_t period_len, enum dma_transfer_direction direction,
1112 unsigned long flags, void *context) 1112 unsigned long flags, void *context)
1113 { 1113 {
1114 struct sdma_channel *sdmac = to_sdma_chan(chan); 1114 struct sdma_channel *sdmac = to_sdma_chan(chan);
1115 struct sdma_engine *sdma = sdmac->sdma; 1115 struct sdma_engine *sdma = sdmac->sdma;
1116 int num_periods = buf_len / period_len; 1116 int num_periods = buf_len / period_len;
1117 int channel = sdmac->channel; 1117 int channel = sdmac->channel;
1118 int ret, i = 0, buf = 0; 1118 int ret, i = 0, buf = 0;
1119 1119
1120 dev_dbg(sdma->dev, "%s channel: %d\n", __func__, channel); 1120 dev_dbg(sdma->dev, "%s channel: %d\n", __func__, channel);
1121 1121
1122 if (sdmac->status == DMA_IN_PROGRESS) 1122 if (sdmac->status == DMA_IN_PROGRESS)
1123 return NULL; 1123 return NULL;
1124 1124
1125 sdmac->status = DMA_IN_PROGRESS; 1125 sdmac->status = DMA_IN_PROGRESS;
1126 1126
1127 sdmac->buf_tail = 0; 1127 sdmac->buf_tail = 0;
1128 1128
1129 sdmac->flags |= IMX_DMA_SG_LOOP; 1129 sdmac->flags |= IMX_DMA_SG_LOOP;
1130 sdmac->direction = direction; 1130 sdmac->direction = direction;
1131 ret = sdma_load_context(sdmac); 1131 ret = sdma_load_context(sdmac);
1132 if (ret) 1132 if (ret)
1133 goto err_out; 1133 goto err_out;
1134 1134
1135 if (num_periods > NUM_BD) { 1135 if (num_periods > NUM_BD) {
1136 dev_err(sdma->dev, "SDMA channel %d: maximum number of sg exceeded: %d > %d\n", 1136 dev_err(sdma->dev, "SDMA channel %d: maximum number of sg exceeded: %d > %d\n",
1137 channel, num_periods, NUM_BD); 1137 channel, num_periods, NUM_BD);
1138 goto err_out; 1138 goto err_out;
1139 } 1139 }
1140 1140
1141 if (period_len > 0xffff) { 1141 if (period_len > 0xffff) {
1142 dev_err(sdma->dev, "SDMA channel %d: maximum period size exceeded: %d > %d\n", 1142 dev_err(sdma->dev, "SDMA channel %d: maximum period size exceeded: %d > %d\n",
1143 channel, period_len, 0xffff); 1143 channel, period_len, 0xffff);
1144 goto err_out; 1144 goto err_out;
1145 } 1145 }
1146 1146
1147 while (buf < buf_len) { 1147 while (buf < buf_len) {
1148 struct sdma_buffer_descriptor *bd = &sdmac->bd[i]; 1148 struct sdma_buffer_descriptor *bd = &sdmac->bd[i];
1149 int param; 1149 int param;
1150 1150
1151 bd->buffer_addr = dma_addr; 1151 bd->buffer_addr = dma_addr;
1152 1152
1153 bd->mode.count = period_len; 1153 bd->mode.count = period_len;
1154 1154
1155 if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES) 1155 if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES)
1156 goto err_out; 1156 goto err_out;
1157 if (sdmac->word_size == DMA_SLAVE_BUSWIDTH_4_BYTES) 1157 if (sdmac->word_size == DMA_SLAVE_BUSWIDTH_4_BYTES)
1158 bd->mode.command = 0; 1158 bd->mode.command = 0;
1159 else 1159 else
1160 bd->mode.command = sdmac->word_size; 1160 bd->mode.command = sdmac->word_size;
1161 1161
1162 param = BD_DONE | BD_EXTD | BD_CONT | BD_INTR; 1162 param = BD_DONE | BD_EXTD | BD_CONT | BD_INTR;
1163 if (i + 1 == num_periods) 1163 if (i + 1 == num_periods)
1164 param |= BD_WRAP; 1164 param |= BD_WRAP;
1165 1165
1166 dev_dbg(sdma->dev, "entry %d: count: %d dma: 0x%08x %s%s\n", 1166 dev_dbg(sdma->dev, "entry %d: count: %d dma: 0x%08x %s%s\n",
1167 i, period_len, dma_addr, 1167 i, period_len, dma_addr,
1168 param & BD_WRAP ? "wrap" : "", 1168 param & BD_WRAP ? "wrap" : "",
1169 param & BD_INTR ? " intr" : ""); 1169 param & BD_INTR ? " intr" : "");
1170 1170
1171 bd->mode.status = param; 1171 bd->mode.status = param;
1172 1172
1173 dma_addr += period_len; 1173 dma_addr += period_len;
1174 buf += period_len; 1174 buf += period_len;
1175 1175
1176 i++; 1176 i++;
1177 } 1177 }
1178 1178
1179 sdmac->num_bd = num_periods; 1179 sdmac->num_bd = num_periods;
1180 sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys; 1180 sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys;
1181 1181
1182 return &sdmac->desc; 1182 return &sdmac->desc;
1183 err_out: 1183 err_out:
1184 sdmac->status = DMA_ERROR; 1184 sdmac->status = DMA_ERROR;
1185 return NULL; 1185 return NULL;
1186 } 1186 }
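/*
 * In cyclic mode every period gets its own buffer descriptor with
 * BD_INTR set, so the tasklet fires once per period; the last descriptor
 * also carries BD_WRAP, turning the descriptor list into a ring that the
 * engine traverses indefinitely until the channel is terminated.
 */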
1187 1187
1188 static int sdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, 1188 static int sdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
1189 unsigned long arg) 1189 unsigned long arg)
1190 { 1190 {
1191 struct sdma_channel *sdmac = to_sdma_chan(chan); 1191 struct sdma_channel *sdmac = to_sdma_chan(chan);
1192 struct dma_slave_config *dmaengine_cfg = (void *)arg; 1192 struct dma_slave_config *dmaengine_cfg = (void *)arg;
1193 1193
1194 switch (cmd) { 1194 switch (cmd) {
1195 case DMA_TERMINATE_ALL: 1195 case DMA_TERMINATE_ALL:
1196 sdma_disable_channel(sdmac); 1196 sdma_disable_channel(sdmac);
1197 return 0; 1197 return 0;
1198 case DMA_SLAVE_CONFIG: 1198 case DMA_SLAVE_CONFIG:
1199 if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) { 1199 if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
1200 sdmac->per_address = dmaengine_cfg->src_addr; 1200 sdmac->per_address = dmaengine_cfg->src_addr;
1201 sdmac->watermark_level = dmaengine_cfg->src_maxburst * 1201 sdmac->watermark_level = dmaengine_cfg->src_maxburst *
1202 dmaengine_cfg->src_addr_width; 1202 dmaengine_cfg->src_addr_width;
1203 sdmac->word_size = dmaengine_cfg->src_addr_width; 1203 sdmac->word_size = dmaengine_cfg->src_addr_width;
1204 } else { 1204 } else {
1205 sdmac->per_address = dmaengine_cfg->dst_addr; 1205 sdmac->per_address = dmaengine_cfg->dst_addr;
1206 sdmac->watermark_level = dmaengine_cfg->dst_maxburst * 1206 sdmac->watermark_level = dmaengine_cfg->dst_maxburst *
1207 dmaengine_cfg->dst_addr_width; 1207 dmaengine_cfg->dst_addr_width;
1208 sdmac->word_size = dmaengine_cfg->dst_addr_width; 1208 sdmac->word_size = dmaengine_cfg->dst_addr_width;
1209 } 1209 }
1210 sdmac->direction = dmaengine_cfg->direction; 1210 sdmac->direction = dmaengine_cfg->direction;
1211 return sdma_config_channel(sdmac); 1211 return sdma_config_channel(sdmac);
1212 default: 1212 default:
1213 return -ENOSYS; 1213 return -ENOSYS;
1214 } 1214 }
1215 1215
1216 return -EINVAL; 1216 return -EINVAL;
1217 } 1217 }
1218 1218
1219 static enum dma_status sdma_tx_status(struct dma_chan *chan, 1219 static enum dma_status sdma_tx_status(struct dma_chan *chan,
1220 dma_cookie_t cookie, 1220 dma_cookie_t cookie,
1221 struct dma_tx_state *txstate) 1221 struct dma_tx_state *txstate)
1222 { 1222 {
1223 struct sdma_channel *sdmac = to_sdma_chan(chan); 1223 struct sdma_channel *sdmac = to_sdma_chan(chan);
1224 1224
1225 dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie, 1225 dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie,
1226 sdmac->chn_count - sdmac->chn_real_count); 1226 sdmac->chn_count - sdmac->chn_real_count);
1227 1227
1228 return sdmac->status; 1228 return sdmac->status;
1229 } 1229 }
1230 1230
1231 static void sdma_issue_pending(struct dma_chan *chan) 1231 static void sdma_issue_pending(struct dma_chan *chan)
1232 { 1232 {
1233 struct sdma_channel *sdmac = to_sdma_chan(chan); 1233 struct sdma_channel *sdmac = to_sdma_chan(chan);
1234 struct sdma_engine *sdma = sdmac->sdma; 1234 struct sdma_engine *sdma = sdmac->sdma;
1235 1235
1236 if (sdmac->status == DMA_IN_PROGRESS) 1236 if (sdmac->status == DMA_IN_PROGRESS)
1237 sdma_enable_channel(sdma, sdmac->channel); 1237 sdma_enable_channel(sdma, sdmac->channel);
1238 } 1238 }
1239 1239
1240 #define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1 34 1240 #define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1 34
1241 1241
1242 static void sdma_add_scripts(struct sdma_engine *sdma, 1242 static void sdma_add_scripts(struct sdma_engine *sdma,
1243 const struct sdma_script_start_addrs *addr) 1243 const struct sdma_script_start_addrs *addr)
1244 { 1244 {
1245 s32 *addr_arr = (u32 *)addr; 1245 s32 *addr_arr = (u32 *)addr;
1246 s32 *saddr_arr = (u32 *)sdma->script_addrs; 1246 s32 *saddr_arr = (u32 *)sdma->script_addrs;
1247 int i; 1247 int i;
1248 1248
1249 for (i = 0; i < SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1; i++) 1249 for (i = 0; i < SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1; i++)
1250 if (addr_arr[i] > 0) 1250 if (addr_arr[i] > 0)
1251 saddr_arr[i] = addr_arr[i]; 1251 saddr_arr[i] = addr_arr[i];
1252 } 1252 }
1253 1253
1254 static void sdma_load_firmware(const struct firmware *fw, void *context) 1254 static void sdma_load_firmware(const struct firmware *fw, void *context)
1255 { 1255 {
1256 struct sdma_engine *sdma = context; 1256 struct sdma_engine *sdma = context;
1257 const struct sdma_firmware_header *header; 1257 const struct sdma_firmware_header *header;
1258 const struct sdma_script_start_addrs *addr; 1258 const struct sdma_script_start_addrs *addr;
1259 unsigned short *ram_code; 1259 unsigned short *ram_code;
1260 1260
1261 if (!fw) { 1261 if (!fw) {
1262 dev_err(sdma->dev, "firmware not found\n"); 1262 dev_err(sdma->dev, "firmware not found\n");
1263 return; 1263 return;
1264 } 1264 }
1265 1265
1266 if (fw->size < sizeof(*header)) 1266 if (fw->size < sizeof(*header))
1267 goto err_firmware; 1267 goto err_firmware;
1268 1268
1269 header = (struct sdma_firmware_header *)fw->data; 1269 header = (struct sdma_firmware_header *)fw->data;
1270 1270
1271 if (header->magic != SDMA_FIRMWARE_MAGIC) 1271 if (header->magic != SDMA_FIRMWARE_MAGIC)
1272 goto err_firmware; 1272 goto err_firmware;
1273 if (header->ram_code_start + header->ram_code_size > fw->size) 1273 if (header->ram_code_start + header->ram_code_size > fw->size)
1274 goto err_firmware; 1274 goto err_firmware;
1275 1275
1276 addr = (void *)header + header->script_addrs_start; 1276 addr = (void *)header + header->script_addrs_start;
1277 ram_code = (void *)header + header->ram_code_start; 1277 ram_code = (void *)header + header->ram_code_start;
1278 1278
1279 clk_enable(sdma->clk_ipg); 1279 clk_enable(sdma->clk_ipg);
1280 clk_enable(sdma->clk_ahb); 1280 clk_enable(sdma->clk_ahb);
1281 /* download the RAM image for SDMA */ 1281 /* download the RAM image for SDMA */
1282 sdma_load_script(sdma, ram_code, 1282 sdma_load_script(sdma, ram_code,
1283 header->ram_code_size, 1283 header->ram_code_size,
1284 addr->ram_code_start_addr); 1284 addr->ram_code_start_addr);
1285 clk_disable(sdma->clk_ipg); 1285 clk_disable(sdma->clk_ipg);
1286 clk_disable(sdma->clk_ahb); 1286 clk_disable(sdma->clk_ahb);
1287 1287
1288 sdma_add_scripts(sdma, addr); 1288 sdma_add_scripts(sdma, addr);
1289 1289
1290 dev_info(sdma->dev, "loaded firmware %d.%d\n", 1290 dev_info(sdma->dev, "loaded firmware %d.%d\n",
1291 header->version_major, 1291 header->version_major,
1292 header->version_minor); 1292 header->version_minor);
1293 1293
1294 err_firmware: 1294 err_firmware:
1295 release_firmware(fw); 1295 release_firmware(fw);
1296 } 1296 }
1297 1297
1298 static int __init sdma_get_firmware(struct sdma_engine *sdma, 1298 static int __init sdma_get_firmware(struct sdma_engine *sdma,
1299 const char *fw_name) 1299 const char *fw_name)
1300 { 1300 {
1301 int ret; 1301 int ret;
1302 1302
1303 ret = request_firmware_nowait(THIS_MODULE, 1303 ret = request_firmware_nowait(THIS_MODULE,
1304 FW_ACTION_HOTPLUG, fw_name, sdma->dev, 1304 FW_ACTION_HOTPLUG, fw_name, sdma->dev,
1305 GFP_KERNEL, sdma, sdma_load_firmware); 1305 GFP_KERNEL, sdma, sdma_load_firmware);
1306 1306
1307 return ret; 1307 return ret;
1308 } 1308 }
1309 1309
1310 static int __init sdma_init(struct sdma_engine *sdma) 1310 static int __init sdma_init(struct sdma_engine *sdma)
1311 { 1311 {
1312 int i, ret; 1312 int i, ret;
1313 dma_addr_t ccb_phys; 1313 dma_addr_t ccb_phys;
1314 1314
1315 clk_enable(sdma->clk_ipg); 1315 clk_enable(sdma->clk_ipg);
1316 clk_enable(sdma->clk_ahb); 1316 clk_enable(sdma->clk_ahb);
1317 1317
1318 /* Be sure SDMA has not started yet */ 1318 /* Be sure SDMA has not started yet */
1319 writel_relaxed(0, sdma->regs + SDMA_H_C0PTR); 1319 writel_relaxed(0, sdma->regs + SDMA_H_C0PTR);
1320 1320
1321 sdma->channel_control = dma_alloc_coherent(NULL, 1321 sdma->channel_control = dma_alloc_coherent(NULL,
1322 MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control) + 1322 MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control) +
1323 sizeof(struct sdma_context_data), 1323 sizeof(struct sdma_context_data),
1324 &ccb_phys, GFP_KERNEL); 1324 &ccb_phys, GFP_KERNEL);
1325 1325
1326 if (!sdma->channel_control) { 1326 if (!sdma->channel_control) {
1327 ret = -ENOMEM; 1327 ret = -ENOMEM;
1328 goto err_dma_alloc; 1328 goto err_dma_alloc;
1329 } 1329 }
1330 1330
1331 sdma->context = (void *)sdma->channel_control + 1331 sdma->context = (void *)sdma->channel_control +
1332 MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control); 1332 MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control);
1333 sdma->context_phys = ccb_phys + 1333 sdma->context_phys = ccb_phys +
1334 MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control); 1334 MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control);
1335 1335
1336 /* Zero-out the CCB structures array just allocated */ 1336 /* Zero-out the CCB structures array just allocated */
1337 memset(sdma->channel_control, 0, 1337 memset(sdma->channel_control, 0,
1338 MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control)); 1338 MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control));
1339 1339
1340 /* disable all channels */ 1340 /* disable all channels */
1341 for (i = 0; i < sdma->drvdata->num_events; i++) 1341 for (i = 0; i < sdma->drvdata->num_events; i++)
1342 writel_relaxed(0, sdma->regs + chnenbl_ofs(sdma, i)); 1342 writel_relaxed(0, sdma->regs + chnenbl_ofs(sdma, i));
1343 1343
1344 /* All channels have priority 0 */ 1344 /* All channels have priority 0 */
1345 for (i = 0; i < MAX_DMA_CHANNELS; i++) 1345 for (i = 0; i < MAX_DMA_CHANNELS; i++)
1346 writel_relaxed(0, sdma->regs + SDMA_CHNPRI_0 + i * 4); 1346 writel_relaxed(0, sdma->regs + SDMA_CHNPRI_0 + i * 4);
1347 1347
1348 ret = sdma_request_channel(&sdma->channel[0]); 1348 ret = sdma_request_channel(&sdma->channel[0]);
1349 if (ret) 1349 if (ret)
1350 goto err_dma_alloc; 1350 goto err_dma_alloc;
1351 1351
1352 sdma_config_ownership(&sdma->channel[0], false, true, false); 1352 sdma_config_ownership(&sdma->channel[0], false, true, false);
1353 1353
1354 /* Set Command Channel (Channel Zero) */ 1354 /* Set Command Channel (Channel Zero) */
1355 writel_relaxed(0x4050, sdma->regs + SDMA_CHN0ADDR); 1355 writel_relaxed(0x4050, sdma->regs + SDMA_CHN0ADDR);
1356 1356
1357 /* Set bits of CONFIG register but with static context switching */ 1357 /* Set bits of CONFIG register but with static context switching */
1358 /* FIXME: Check whether to set ACR bit depending on clock ratios */ 1358 /* FIXME: Check whether to set ACR bit depending on clock ratios */
1359 writel_relaxed(0, sdma->regs + SDMA_H_CONFIG); 1359 writel_relaxed(0, sdma->regs + SDMA_H_CONFIG);
1360 1360
1361 writel_relaxed(ccb_phys, sdma->regs + SDMA_H_C0PTR); 1361 writel_relaxed(ccb_phys, sdma->regs + SDMA_H_C0PTR);
1362 1362
1363 /* Set bits of CONFIG register with given context switching mode */ 1363 /* Set bits of CONFIG register with given context switching mode */
1364 writel_relaxed(SDMA_H_CONFIG_CSM, sdma->regs + SDMA_H_CONFIG); 1364 writel_relaxed(SDMA_H_CONFIG_CSM, sdma->regs + SDMA_H_CONFIG);
1365 1365
1366 /* Initializes channel's priorities */ 1366 /* Initializes channel's priorities */
1367 sdma_set_channel_priority(&sdma->channel[0], 7); 1367 sdma_set_channel_priority(&sdma->channel[0], 7);
1368 1368
1369 clk_disable(sdma->clk_ipg); 1369 clk_disable(sdma->clk_ipg);
1370 clk_disable(sdma->clk_ahb); 1370 clk_disable(sdma->clk_ahb);
1371 1371
1372 return 0; 1372 return 0;
1373 1373
1374 err_dma_alloc: 1374 err_dma_alloc:
1375 clk_disable(sdma->clk_ipg); 1375 clk_disable(sdma->clk_ipg);
1376 clk_disable(sdma->clk_ahb); 1376 clk_disable(sdma->clk_ahb);
1377 dev_err(sdma->dev, "initialisation failed with %d\n", ret); 1377 dev_err(sdma->dev, "initialisation failed with %d\n", ret);
1378 return ret; 1378 return ret;
1379 } 1379 }
1380 1380
1381 static bool sdma_filter_fn(struct dma_chan *chan, void *fn_param) 1381 static bool sdma_filter_fn(struct dma_chan *chan, void *fn_param)
1382 { 1382 {
1383 struct imx_dma_data *data = fn_param; 1383 struct imx_dma_data *data = fn_param;
1384 1384
1385 if (!imx_dma_is_general_purpose(chan)) 1385 if (!imx_dma_is_general_purpose(chan))
1386 return false; 1386 return false;
1387 1387
1388 chan->private = data; 1388 chan->private = data;
1389 1389
1390 return true; 1390 return true;
1391 } 1391 }
1392 1392
1393 static struct dma_chan *sdma_xlate(struct of_phandle_args *dma_spec, 1393 static struct dma_chan *sdma_xlate(struct of_phandle_args *dma_spec,
1394 struct of_dma *ofdma) 1394 struct of_dma *ofdma)
1395 { 1395 {
1396 struct sdma_engine *sdma = ofdma->of_dma_data; 1396 struct sdma_engine *sdma = ofdma->of_dma_data;
1397 dma_cap_mask_t mask = sdma->dma_device.cap_mask; 1397 dma_cap_mask_t mask = sdma->dma_device.cap_mask;
1398 struct imx_dma_data data; 1398 struct imx_dma_data data;
1399 1399
1400 if (dma_spec->args_count != 3) 1400 if (dma_spec->args_count != 3)
1401 return NULL; 1401 return NULL;
1402 1402
1403 data.dma_request = dma_spec->args[0]; 1403 data.dma_request = dma_spec->args[0];
1404 data.peripheral_type = dma_spec->args[1]; 1404 data.peripheral_type = dma_spec->args[1];
1405 data.priority = dma_spec->args[2]; 1405 data.priority = dma_spec->args[2];
1406 1406
1407 return dma_request_channel(mask, sdma_filter_fn, &data); 1407 return dma_request_channel(mask, sdma_filter_fn, &data);
1408 } 1408 }
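/*
 * The three cells of the DT dma specifier map, in order, to the SDMA
 * request (event) number, the sdma_peripheral_type value and the channel
 * priority, as parsed above. A purely illustrative (hypothetical) client
 * node, with placeholder cell values:
 *
 *	uart1: serial {
 *		dmas = <&sdma 25 4 0>, <&sdma 26 4 0>;
 *		dma-names = "rx", "tx";
 *	};
 */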
1409 1409
1410 static int __init sdma_probe(struct platform_device *pdev) 1410 static int __init sdma_probe(struct platform_device *pdev)
1411 { 1411 {
1412 const struct of_device_id *of_id = 1412 const struct of_device_id *of_id =
1413 of_match_device(sdma_dt_ids, &pdev->dev); 1413 of_match_device(sdma_dt_ids, &pdev->dev);
1414 struct device_node *np = pdev->dev.of_node; 1414 struct device_node *np = pdev->dev.of_node;
1415 const char *fw_name; 1415 const char *fw_name;
1416 int ret; 1416 int ret;
1417 int irq; 1417 int irq;
1418 struct resource *iores; 1418 struct resource *iores;
1419 struct sdma_platform_data *pdata = dev_get_platdata(&pdev->dev); 1419 struct sdma_platform_data *pdata = dev_get_platdata(&pdev->dev);
1420 int i; 1420 int i;
1421 struct sdma_engine *sdma; 1421 struct sdma_engine *sdma;
1422 s32 *saddr_arr; 1422 s32 *saddr_arr;
1423 const struct sdma_driver_data *drvdata = NULL; 1423 const struct sdma_driver_data *drvdata = NULL;
1424 1424
1425 if (of_id) 1425 if (of_id)
1426 drvdata = of_id->data; 1426 drvdata = of_id->data;
1427 else if (pdev->id_entry) 1427 else if (pdev->id_entry)
1428 drvdata = (void *)pdev->id_entry->driver_data; 1428 drvdata = (void *)pdev->id_entry->driver_data;
1429 1429
1430 if (!drvdata) { 1430 if (!drvdata) {
1431 dev_err(&pdev->dev, "unable to find driver data\n"); 1431 dev_err(&pdev->dev, "unable to find driver data\n");
1432 return -EINVAL; 1432 return -EINVAL;
1433 } 1433 }
1434 1434
1435 sdma = kzalloc(sizeof(*sdma), GFP_KERNEL); 1435 sdma = kzalloc(sizeof(*sdma), GFP_KERNEL);
1436 if (!sdma) 1436 if (!sdma)
1437 return -ENOMEM; 1437 return -ENOMEM;
1438 1438
1439 spin_lock_init(&sdma->channel_0_lock); 1439 spin_lock_init(&sdma->channel_0_lock);
1440 1440
1441 sdma->dev = &pdev->dev; 1441 sdma->dev = &pdev->dev;
1442 sdma->drvdata = drvdata; 1442 sdma->drvdata = drvdata;
1443 1443
1444 iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1444 iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1445 irq = platform_get_irq(pdev, 0); 1445 irq = platform_get_irq(pdev, 0);
1446 if (!iores || irq < 0) { 1446 if (!iores || irq < 0) {
1447 ret = -EINVAL; 1447 ret = -EINVAL;
1448 goto err_irq; 1448 goto err_irq;
1449 } 1449 }
1450 1450
1451 if (!request_mem_region(iores->start, resource_size(iores), pdev->name)) { 1451 if (!request_mem_region(iores->start, resource_size(iores), pdev->name)) {
1452 ret = -EBUSY; 1452 ret = -EBUSY;
1453 goto err_request_region; 1453 goto err_request_region;
1454 } 1454 }
1455 1455
1456 sdma->clk_ipg = devm_clk_get(&pdev->dev, "ipg"); 1456 sdma->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
1457 if (IS_ERR(sdma->clk_ipg)) { 1457 if (IS_ERR(sdma->clk_ipg)) {
1458 ret = PTR_ERR(sdma->clk_ipg); 1458 ret = PTR_ERR(sdma->clk_ipg);
1459 goto err_clk; 1459 goto err_clk;
1460 } 1460 }
1461 1461
1462 sdma->clk_ahb = devm_clk_get(&pdev->dev, "ahb"); 1462 sdma->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
1463 if (IS_ERR(sdma->clk_ahb)) { 1463 if (IS_ERR(sdma->clk_ahb)) {
1464 ret = PTR_ERR(sdma->clk_ahb); 1464 ret = PTR_ERR(sdma->clk_ahb);
1465 goto err_clk; 1465 goto err_clk;
1466 } 1466 }
1467 1467
1468 clk_prepare(sdma->clk_ipg); 1468 clk_prepare(sdma->clk_ipg);
1469 clk_prepare(sdma->clk_ahb); 1469 clk_prepare(sdma->clk_ahb);
1470 1470
1471 sdma->regs = ioremap(iores->start, resource_size(iores)); 1471 sdma->regs = ioremap(iores->start, resource_size(iores));
1472 if (!sdma->regs) { 1472 if (!sdma->regs) {
1473 ret = -ENOMEM; 1473 ret = -ENOMEM;
1474 goto err_ioremap; 1474 goto err_ioremap;
1475 } 1475 }
1476 1476
1477 ret = request_irq(irq, sdma_int_handler, 0, "sdma", sdma); 1477 ret = request_irq(irq, sdma_int_handler, 0, "sdma", sdma);
1478 if (ret) 1478 if (ret)
1479 goto err_request_irq; 1479 goto err_request_irq;
1480 1480
1481 sdma->script_addrs = kzalloc(sizeof(*sdma->script_addrs), GFP_KERNEL); 1481 sdma->script_addrs = kzalloc(sizeof(*sdma->script_addrs), GFP_KERNEL);
1482 if (!sdma->script_addrs) { 1482 if (!sdma->script_addrs) {
1483 ret = -ENOMEM; 1483 ret = -ENOMEM;
1484 goto err_alloc; 1484 goto err_alloc;
1485 } 1485 }
1486 1486
1487 /* initially no scripts available */ 1487 /* initially no scripts available */
1488 saddr_arr = (s32 *)sdma->script_addrs; 1488 saddr_arr = (s32 *)sdma->script_addrs;
1489 for (i = 0; i < SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1; i++) 1489 for (i = 0; i < SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1; i++)
1490 saddr_arr[i] = -EINVAL; 1490 saddr_arr[i] = -EINVAL;
1491 1491
1492 dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask); 1492 dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask);
1493 dma_cap_set(DMA_CYCLIC, sdma->dma_device.cap_mask); 1493 dma_cap_set(DMA_CYCLIC, sdma->dma_device.cap_mask);
1494 1494
1495 INIT_LIST_HEAD(&sdma->dma_device.channels); 1495 INIT_LIST_HEAD(&sdma->dma_device.channels);
1496 /* Initialize channel parameters */ 1496 /* Initialize channel parameters */
1497 for (i = 0; i < MAX_DMA_CHANNELS; i++) { 1497 for (i = 0; i < MAX_DMA_CHANNELS; i++) {
1498 struct sdma_channel *sdmac = &sdma->channel[i]; 1498 struct sdma_channel *sdmac = &sdma->channel[i];
1499 1499
1500 sdmac->sdma = sdma; 1500 sdmac->sdma = sdma;
1501 spin_lock_init(&sdmac->lock); 1501 spin_lock_init(&sdmac->lock);
1502 1502
1503 sdmac->chan.device = &sdma->dma_device; 1503 sdmac->chan.device = &sdma->dma_device;
1504 dma_cookie_init(&sdmac->chan); 1504 dma_cookie_init(&sdmac->chan);
1505 sdmac->channel = i; 1505 sdmac->channel = i;
1506 1506
1507 tasklet_init(&sdmac->tasklet, sdma_tasklet, 1507 tasklet_init(&sdmac->tasklet, sdma_tasklet,
1508 (unsigned long) sdmac); 1508 (unsigned long) sdmac);
1509 /* 1509 /*
1510 * Add the channel to the DMAC list. Do not add channel 0 though 1510 * Add the channel to the DMAC list. Do not add channel 0 though
1511 * because we need it internally in the SDMA driver. This also means 1511 * because we need it internally in the SDMA driver. This also means
1512 * that channel 0 in dmaengine counting matches sdma channel 1. 1512 * that channel 0 in dmaengine counting matches sdma channel 1.
1513 */ 1513 */
1514 if (i) 1514 if (i)
1515 list_add_tail(&sdmac->chan.device_node, 1515 list_add_tail(&sdmac->chan.device_node,
1516 &sdma->dma_device.channels); 1516 &sdma->dma_device.channels);
1517 } 1517 }
1518 1518
1519 ret = sdma_init(sdma); 1519 ret = sdma_init(sdma);
1520 if (ret) 1520 if (ret)
1521 goto err_init; 1521 goto err_init;
1522 1522
1523 if (sdma->drvdata->script_addrs) 1523 if (sdma->drvdata->script_addrs)
1524 sdma_add_scripts(sdma, sdma->drvdata->script_addrs); 1524 sdma_add_scripts(sdma, sdma->drvdata->script_addrs);
1525 if (pdata && pdata->script_addrs) 1525 if (pdata && pdata->script_addrs)
1526 sdma_add_scripts(sdma, pdata->script_addrs); 1526 sdma_add_scripts(sdma, pdata->script_addrs);
1527 1527
1528 if (pdata) { 1528 if (pdata) {
1529 ret = sdma_get_firmware(sdma, pdata->fw_name); 1529 ret = sdma_get_firmware(sdma, pdata->fw_name);
1530 if (ret) 1530 if (ret)
1531 dev_warn(&pdev->dev, "failed to get firmware from platform data\n"); 1531 dev_warn(&pdev->dev, "failed to get firmware from platform data\n");
1532 } else { 1532 } else {
1533 /* 1533 /*
1534 * Because the device tree does not encode ROM script addresses, 1534 * Because the device tree does not encode ROM script addresses,
1535 * the RAM script in the firmware is mandatory for a device tree 1535 * the RAM script in the firmware is mandatory for a device tree
1536 * probe; otherwise probing fails. 1536 * probe; otherwise probing fails.
1537 */ 1537 */
1538 ret = of_property_read_string(np, "fsl,sdma-ram-script-name", 1538 ret = of_property_read_string(np, "fsl,sdma-ram-script-name",
1539 &fw_name); 1539 &fw_name);
1540 if (ret) 1540 if (ret)
1541 dev_warn(&pdev->dev, "failed to get firmware name\n"); 1541 dev_warn(&pdev->dev, "failed to get firmware name\n");
1542 else { 1542 else {
1543 ret = sdma_get_firmware(sdma, fw_name); 1543 ret = sdma_get_firmware(sdma, fw_name);
1544 if (ret) 1544 if (ret)
1545 dev_warn(&pdev->dev, "failed to get firmware from device tree\n"); 1545 dev_warn(&pdev->dev, "failed to get firmware from device tree\n");
1546 } 1546 }
1547 } 1547 }
1548 1548
1549 sdma->dma_device.dev = &pdev->dev; 1549 sdma->dma_device.dev = &pdev->dev;
1550 1550
1551 sdma->dma_device.device_alloc_chan_resources = sdma_alloc_chan_resources; 1551 sdma->dma_device.device_alloc_chan_resources = sdma_alloc_chan_resources;
1552 sdma->dma_device.device_free_chan_resources = sdma_free_chan_resources; 1552 sdma->dma_device.device_free_chan_resources = sdma_free_chan_resources;
1553 sdma->dma_device.device_tx_status = sdma_tx_status; 1553 sdma->dma_device.device_tx_status = sdma_tx_status;
1554 sdma->dma_device.device_prep_slave_sg = sdma_prep_slave_sg; 1554 sdma->dma_device.device_prep_slave_sg = sdma_prep_slave_sg;
1555 sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic; 1555 sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic;
1556 sdma->dma_device.device_control = sdma_control; 1556 sdma->dma_device.device_control = sdma_control;
1557 sdma->dma_device.device_issue_pending = sdma_issue_pending; 1557 sdma->dma_device.device_issue_pending = sdma_issue_pending;
1558 sdma->dma_device.dev->dma_parms = &sdma->dma_parms; 1558 sdma->dma_device.dev->dma_parms = &sdma->dma_parms;
1559 dma_set_max_seg_size(sdma->dma_device.dev, 65535); 1559 dma_set_max_seg_size(sdma->dma_device.dev, 65535);
1560 1560
1561 ret = dma_async_device_register(&sdma->dma_device); 1561 ret = dma_async_device_register(&sdma->dma_device);
1562 if (ret) { 1562 if (ret) {
1563 dev_err(&pdev->dev, "unable to register\n"); 1563 dev_err(&pdev->dev, "unable to register\n");
1564 goto err_init; 1564 goto err_init;
1565 } 1565 }
1566 1566
1567 if (np) { 1567 if (np) {
1568 ret = of_dma_controller_register(np, sdma_xlate, sdma); 1568 ret = of_dma_controller_register(np, sdma_xlate, sdma);
1569 if (ret) { 1569 if (ret) {
1570 dev_err(&pdev->dev, "failed to register controller\n"); 1570 dev_err(&pdev->dev, "failed to register controller\n");
1571 goto err_register; 1571 goto err_register;
1572 } 1572 }
1573 } 1573 }
1574 1574
1575 dev_info(sdma->dev, "initialized\n"); 1575 dev_info(sdma->dev, "initialized\n");
1576 1576
1577 return 0; 1577 return 0;
1578 1578
1579 err_register: 1579 err_register:
1580 dma_async_device_unregister(&sdma->dma_device); 1580 dma_async_device_unregister(&sdma->dma_device);
1581 err_init: 1581 err_init:
1582 kfree(sdma->script_addrs); 1582 kfree(sdma->script_addrs);
1583 err_alloc: 1583 err_alloc:
1584 free_irq(irq, sdma); 1584 free_irq(irq, sdma);
1585 err_request_irq: 1585 err_request_irq:
1586 iounmap(sdma->regs); 1586 iounmap(sdma->regs);
1587 err_ioremap: 1587 err_ioremap:
1588 err_clk: 1588 err_clk:
1589 release_mem_region(iores->start, resource_size(iores)); 1589 release_mem_region(iores->start, resource_size(iores));
1590 err_request_region: 1590 err_request_region:
1591 err_irq: 1591 err_irq:
1592 kfree(sdma); 1592 kfree(sdma);
1593 return ret; 1593 return ret;
1594 } 1594 }
1595 1595
1596 static int sdma_remove(struct platform_device *pdev) 1596 static int sdma_remove(struct platform_device *pdev)
1597 { 1597 {
1598 return -EBUSY; 1598 return -EBUSY;
1599 } 1599 }
1600 1600
1601 static struct platform_driver sdma_driver = { 1601 static struct platform_driver sdma_driver = {
1602 .driver = { 1602 .driver = {
1603 .name = "imx-sdma", 1603 .name = "imx-sdma",
1604 .of_match_table = sdma_dt_ids, 1604 .of_match_table = sdma_dt_ids,
1605 }, 1605 },
1606 .id_table = sdma_devtypes, 1606 .id_table = sdma_devtypes,
1607 .remove = sdma_remove, 1607 .remove = sdma_remove,
1608 }; 1608 };
1609 1609
1610 static int __init sdma_module_init(void) 1610 static int __init sdma_module_init(void)
1611 { 1611 {
1612 return platform_driver_probe(&sdma_driver, sdma_probe); 1612 return platform_driver_probe(&sdma_driver, sdma_probe);
1613 } 1613 }
1614 module_init(sdma_module_init); 1614 module_init(sdma_module_init);
1615 1615
1616 MODULE_AUTHOR("Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>"); 1616 MODULE_AUTHOR("Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>");
1617 MODULE_DESCRIPTION("i.MX SDMA driver"); 1617 MODULE_DESCRIPTION("i.MX SDMA driver");
1618 MODULE_LICENSE("GPL"); 1618 MODULE_LICENSE("GPL");
1619 1619
drivers/dma/intel_mid_dma.c
1 /* 1 /*
2 * intel_mid_dma.c - Intel Langwell DMA Drivers 2 * intel_mid_dma.c - Intel Langwell DMA Drivers
3 * 3 *
4 * Copyright (C) 2008-10 Intel Corp 4 * Copyright (C) 2008-10 Intel Corp
5 * Author: Vinod Koul <vinod.koul@intel.com> 5 * Author: Vinod Koul <vinod.koul@intel.com>
6 * The driver design is based on dw_dmac driver 6 * The driver design is based on dw_dmac driver
7 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 7 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
8 * 8 *
9 * This program is free software; you can redistribute it and/or modify 9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by 10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; version 2 of the License. 11 * the Free Software Foundation; version 2 of the License.
12 * 12 *
13 * This program is distributed in the hope that it will be useful, but 13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of 14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details. 16 * General Public License for more details.
17 * 17 *
18 * You should have received a copy of the GNU General Public License along 18 * You should have received a copy of the GNU General Public License along
19 * with this program; if not, write to the Free Software Foundation, Inc., 19 * with this program; if not, write to the Free Software Foundation, Inc.,
20 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 20 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
21 * 21 *
22 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 22 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
23 * 23 *
24 * 24 *
25 */ 25 */
26 #include <linux/pci.h> 26 #include <linux/pci.h>
27 #include <linux/interrupt.h> 27 #include <linux/interrupt.h>
28 #include <linux/pm_runtime.h> 28 #include <linux/pm_runtime.h>
29 #include <linux/intel_mid_dma.h> 29 #include <linux/intel_mid_dma.h>
30 #include <linux/module.h> 30 #include <linux/module.h>
31 31
32 #include "dmaengine.h" 32 #include "dmaengine.h"
33 33
34 #define MAX_CHAN 4 /*max ch across controllers*/ 34 #define MAX_CHAN 4 /*max ch across controllers*/
35 #include "intel_mid_dma_regs.h" 35 #include "intel_mid_dma_regs.h"
36 36
37 #define INTEL_MID_DMAC1_ID 0x0814 37 #define INTEL_MID_DMAC1_ID 0x0814
38 #define INTEL_MID_DMAC2_ID 0x0813 38 #define INTEL_MID_DMAC2_ID 0x0813
39 #define INTEL_MID_GP_DMAC2_ID 0x0827 39 #define INTEL_MID_GP_DMAC2_ID 0x0827
40 #define INTEL_MFLD_DMAC1_ID 0x0830 40 #define INTEL_MFLD_DMAC1_ID 0x0830
41 #define LNW_PERIPHRAL_MASK_BASE 0xFFAE8008 41 #define LNW_PERIPHRAL_MASK_BASE 0xFFAE8008
42 #define LNW_PERIPHRAL_MASK_SIZE 0x10 42 #define LNW_PERIPHRAL_MASK_SIZE 0x10
43 #define LNW_PERIPHRAL_STATUS 0x0 43 #define LNW_PERIPHRAL_STATUS 0x0
44 #define LNW_PERIPHRAL_MASK 0x8 44 #define LNW_PERIPHRAL_MASK 0x8
45 45
46 struct intel_mid_dma_probe_info { 46 struct intel_mid_dma_probe_info {
47 u8 max_chan; 47 u8 max_chan;
48 u8 ch_base; 48 u8 ch_base;
49 u16 block_size; 49 u16 block_size;
50 u32 pimr_mask; 50 u32 pimr_mask;
51 }; 51 };
52 52
53 #define INFO(_max_chan, _ch_base, _block_size, _pimr_mask) \ 53 #define INFO(_max_chan, _ch_base, _block_size, _pimr_mask) \
54 ((kernel_ulong_t)&(struct intel_mid_dma_probe_info) { \ 54 ((kernel_ulong_t)&(struct intel_mid_dma_probe_info) { \
55 .max_chan = (_max_chan), \ 55 .max_chan = (_max_chan), \
56 .ch_base = (_ch_base), \ 56 .ch_base = (_ch_base), \
57 .block_size = (_block_size), \ 57 .block_size = (_block_size), \
58 .pimr_mask = (_pimr_mask), \ 58 .pimr_mask = (_pimr_mask), \
59 }) 59 })
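/*
 * Illustrative sketch (editor's addition): INFO() packs the per-controller
 * limits into the driver_data of a PCI id-table entry, e.g. a hypothetical
 * line such as
 *
 *	{ PCI_VDEVICE(INTEL, INTEL_MID_DMAC1_ID), INFO(2, 6, 4095, 0x200020) },
 *
 * so that probe can recover max_chan, ch_base, block_size and pimr_mask
 * from id->driver_data. The numeric values above are for illustration only.
 */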
60 60
61 /***************************************************************************** 61 /*****************************************************************************
62 Utility Functions*/ 62 Utility Functions*/
63 /** 63 /**
64 * get_ch_index - convert status to channel 64 * get_ch_index - convert status to channel
65 * @status: status mask 65 * @status: status mask
66 * @base: dma ch base value 66 * @base: dma ch base value
67 * 67 *
68 * Modify the status mask and return the channel index needing 68 * Modify the status mask and return the channel index needing
69 * attention (or -1 if none) 69 * attention (or -1 if none)
70 */ 70 */
71 static int get_ch_index(int *status, unsigned int base) 71 static int get_ch_index(int *status, unsigned int base)
72 { 72 {
73 int i; 73 int i;
74 for (i = 0; i < MAX_CHAN; i++) { 74 for (i = 0; i < MAX_CHAN; i++) {
75 if (*status & (1 << (i + base))) { 75 if (*status & (1 << (i + base))) {
76 *status = *status & ~(1 << (i + base)); 76 *status = *status & ~(1 << (i + base));
77 pr_debug("MDMA: index %d New status %x\n", i, *status); 77 pr_debug("MDMA: index %d New status %x\n", i, *status);
78 return i; 78 return i;
79 } 79 }
80 } 80 }
81 return -1; 81 return -1;
82 } 82 }
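/*
 * Worked example (editor's addition): with base = 0 and a raw status of
 * 0b0110, the first call to get_ch_index() returns 1 and clears that bit
 * (status becomes 0b0100); the next call returns 2 and leaves status at 0,
 * after which further calls return -1.
 */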
83 83
84 /** 84 /**
85 * get_block_ts - calculates dma transaction length 85 * get_block_ts - calculates dma transaction length
86 * @len: dma transfer length 86 * @len: dma transfer length
87 * @tx_width: dma transfer src width 87 * @tx_width: dma transfer src width
88 * @block_size: dma controller max block size 88 * @block_size: dma controller max block size
89 * 89 *
90 * Based on the src width, calculate the DMA transaction length in data items; 90 * Based on the src width, calculate the DMA transaction length in data items;
91 * return the data item count, or 0xFFFF if it exceeds the max block length 91 * return the data item count, or 0xFFFF if it exceeds the max block length
92 */ 92 */
93 static int get_block_ts(int len, int tx_width, int block_size) 93 static int get_block_ts(int len, int tx_width, int block_size)
94 { 94 {
95 int byte_width = 0, block_ts = 0; 95 int byte_width = 0, block_ts = 0;
96 96
97 switch (tx_width) { 97 switch (tx_width) {
98 case DMA_SLAVE_BUSWIDTH_1_BYTE: 98 case DMA_SLAVE_BUSWIDTH_1_BYTE:
99 byte_width = 1; 99 byte_width = 1;
100 break; 100 break;
101 case DMA_SLAVE_BUSWIDTH_2_BYTES: 101 case DMA_SLAVE_BUSWIDTH_2_BYTES:
102 byte_width = 2; 102 byte_width = 2;
103 break; 103 break;
104 case DMA_SLAVE_BUSWIDTH_4_BYTES: 104 case DMA_SLAVE_BUSWIDTH_4_BYTES:
105 default: 105 default:
106 byte_width = 4; 106 byte_width = 4;
107 break; 107 break;
108 } 108 }
109 109
110 block_ts = len/byte_width; 110 block_ts = len/byte_width;
111 if (block_ts > block_size) 111 if (block_ts > block_size)
112 block_ts = 0xFFFF; 112 block_ts = 0xFFFF;
113 return block_ts; 113 return block_ts;
114 } 114 }
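/*
 * Worked example (editor's addition): for len = 4096 bytes with a 4-byte
 * bus width, get_block_ts() returns 4096 / 4 = 1024 data items; had that
 * count exceeded the controller's block_size it would have been clamped to
 * 0xFFFF, which callers treat as "too long for one block". The values
 * below are purely illustrative.
 *
 *	int ts = get_block_ts(4096, DMA_SLAVE_BUSWIDTH_4_BYTES, 4095);
 *	// ts == 1024
 */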
115 115
116 /***************************************************************************** 116 /*****************************************************************************
117 DMAC1 interrupt Functions*/ 117 DMAC1 interrupt Functions*/
118 118
119 /** 119 /**
120 * dmac1_mask_periphral_intr - mask the peripheral interrupt 120 * dmac1_mask_periphral_intr - mask the peripheral interrupt
121 * @mid: dma device for which masking is required 121 * @mid: dma device for which masking is required
122 * 122 *
123 * Masks the DMA peripheral interrupt; 123 * Masks the DMA peripheral interrupt;
124 * this is valid for DMAC1 family controllers only. 124 * this is valid for DMAC1 family controllers only.
125 * This controller should have the peripheral mask registers already mapped. 125 * This controller should have the peripheral mask registers already mapped.
126 */ 126 */
127 static void dmac1_mask_periphral_intr(struct middma_device *mid) 127 static void dmac1_mask_periphral_intr(struct middma_device *mid)
128 { 128 {
129 u32 pimr; 129 u32 pimr;
130 130
131 if (mid->pimr_mask) { 131 if (mid->pimr_mask) {
132 pimr = readl(mid->mask_reg + LNW_PERIPHRAL_MASK); 132 pimr = readl(mid->mask_reg + LNW_PERIPHRAL_MASK);
133 pimr |= mid->pimr_mask; 133 pimr |= mid->pimr_mask;
134 writel(pimr, mid->mask_reg + LNW_PERIPHRAL_MASK); 134 writel(pimr, mid->mask_reg + LNW_PERIPHRAL_MASK);
135 } 135 }
136 return; 136 return;
137 } 137 }
138 138
139 /** 139 /**
140 * dmac1_unmask_periphral_intr - unmask the peripheral interrupt 140 * dmac1_unmask_periphral_intr - unmask the peripheral interrupt
141 * @midc: dma channel for which unmasking is required 141 * @midc: dma channel for which unmasking is required
142 * 142 *
143 * Unmasks the DMA peripheral interrupt; 143 * Unmasks the DMA peripheral interrupt;
144 * this is valid for DMAC1 family controllers only. 144 * this is valid for DMAC1 family controllers only.
145 * This controller should have the peripheral mask registers already mapped. 145 * This controller should have the peripheral mask registers already mapped.
146 */ 146 */
147 static void dmac1_unmask_periphral_intr(struct intel_mid_dma_chan *midc) 147 static void dmac1_unmask_periphral_intr(struct intel_mid_dma_chan *midc)
148 { 148 {
149 u32 pimr; 149 u32 pimr;
150 struct middma_device *mid = to_middma_device(midc->chan.device); 150 struct middma_device *mid = to_middma_device(midc->chan.device);
151 151
152 if (mid->pimr_mask) { 152 if (mid->pimr_mask) {
153 pimr = readl(mid->mask_reg + LNW_PERIPHRAL_MASK); 153 pimr = readl(mid->mask_reg + LNW_PERIPHRAL_MASK);
154 pimr &= ~mid->pimr_mask; 154 pimr &= ~mid->pimr_mask;
155 writel(pimr, mid->mask_reg + LNW_PERIPHRAL_MASK); 155 writel(pimr, mid->mask_reg + LNW_PERIPHRAL_MASK);
156 } 156 }
157 return; 157 return;
158 } 158 }
159 159
160 /** 160 /**
161 * enable_dma_interrupt - enable the peripheral interrupt 161 * enable_dma_interrupt - enable the peripheral interrupt
162 * @midc: dma channel for which enable interrupt is required 162 * @midc: dma channel for which enable interrupt is required
163 * 163 *
164 * Enables the DMA peripheral interrupt; 164 * Enables the DMA peripheral interrupt;
165 * this is valid for DMAC1 family controllers only. 165 * this is valid for DMAC1 family controllers only.
166 * This controller should have the peripheral mask registers already mapped. 166 * This controller should have the peripheral mask registers already mapped.
167 */ 167 */
168 static void enable_dma_interrupt(struct intel_mid_dma_chan *midc) 168 static void enable_dma_interrupt(struct intel_mid_dma_chan *midc)
169 { 169 {
170 dmac1_unmask_periphral_intr(midc); 170 dmac1_unmask_periphral_intr(midc);
171 171
172 /*en ch interrupts*/ 172 /*en ch interrupts*/
173 iowrite32(UNMASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_TFR); 173 iowrite32(UNMASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_TFR);
174 iowrite32(UNMASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_ERR); 174 iowrite32(UNMASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_ERR);
175 return; 175 return;
176 } 176 }
177 177
178 /** 178 /**
179 * disable_dma_interrupt - disable the peripheral interrupt 179 * disable_dma_interrupt - disable the peripheral interrupt
180 * @midc: dma channel for which disable interrupt is required 180 * @midc: dma channel for which disable interrupt is required
181 * 181 *
182 * Disables the DMA peripheral interrupt; 182 * Disables the DMA peripheral interrupt;
183 * this is valid for DMAC1 family controllers only. 183 * this is valid for DMAC1 family controllers only.
184 * This controller should have the peripheral mask registers already mapped. 184 * This controller should have the peripheral mask registers already mapped.
185 */ 185 */
186 static void disable_dma_interrupt(struct intel_mid_dma_chan *midc) 186 static void disable_dma_interrupt(struct intel_mid_dma_chan *midc)
187 { 187 {
188 /*Check LPE PISR, make sure fwd is disabled*/ 188 /*Check LPE PISR, make sure fwd is disabled*/
189 iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_BLOCK); 189 iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_BLOCK);
190 iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_TFR); 190 iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_TFR);
191 iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_ERR); 191 iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_ERR);
192 return; 192 return;
193 } 193 }
194 194
195 /***************************************************************************** 195 /*****************************************************************************
196 DMA channel helper Functions*/ 196 DMA channel helper Functions*/
197 /** 197 /**
198 * midc_desc_get - get a descriptor 198 * midc_desc_get - get a descriptor
199 * @midc: dma channel for which descriptor is required 199 * @midc: dma channel for which descriptor is required
200 * 200 *
201 * Obtain a descriptor for the channel. Returns NULL if none are free. 201 * Obtain a descriptor for the channel. Returns NULL if none are free.
202 * Once the descriptor is returned it is private until put on another 202 * Once the descriptor is returned it is private until put on another
203 * list or freed 203 * list or freed
204 */ 204 */
205 static struct intel_mid_dma_desc *midc_desc_get(struct intel_mid_dma_chan *midc) 205 static struct intel_mid_dma_desc *midc_desc_get(struct intel_mid_dma_chan *midc)
206 { 206 {
207 struct intel_mid_dma_desc *desc, *_desc; 207 struct intel_mid_dma_desc *desc, *_desc;
208 struct intel_mid_dma_desc *ret = NULL; 208 struct intel_mid_dma_desc *ret = NULL;
209 209
210 spin_lock_bh(&midc->lock); 210 spin_lock_bh(&midc->lock);
211 list_for_each_entry_safe(desc, _desc, &midc->free_list, desc_node) { 211 list_for_each_entry_safe(desc, _desc, &midc->free_list, desc_node) {
212 if (async_tx_test_ack(&desc->txd)) { 212 if (async_tx_test_ack(&desc->txd)) {
213 list_del(&desc->desc_node); 213 list_del(&desc->desc_node);
214 ret = desc; 214 ret = desc;
215 break; 215 break;
216 } 216 }
217 } 217 }
218 spin_unlock_bh(&midc->lock); 218 spin_unlock_bh(&midc->lock);
219 return ret; 219 return ret;
220 } 220 }
221 221
222 /** 222 /**
223 * midc_desc_put - put a descriptor 223 * midc_desc_put - put a descriptor
224 * @midc: dma channel to which the descriptor is returned 224 * @midc: dma channel to which the descriptor is returned
225 * @desc: descriptor to put 225 * @desc: descriptor to put
226 * 226 *
227 * Return a descriptor from midc_desc_get() back to the free pool 227 * Return a descriptor from midc_desc_get() back to the free pool
228 */ 228 */
229 static void midc_desc_put(struct intel_mid_dma_chan *midc, 229 static void midc_desc_put(struct intel_mid_dma_chan *midc,
230 struct intel_mid_dma_desc *desc) 230 struct intel_mid_dma_desc *desc)
231 { 231 {
232 if (desc) { 232 if (desc) {
233 spin_lock_bh(&midc->lock); 233 spin_lock_bh(&midc->lock);
234 list_add_tail(&desc->desc_node, &midc->free_list); 234 list_add_tail(&desc->desc_node, &midc->free_list);
235 spin_unlock_bh(&midc->lock); 235 spin_unlock_bh(&midc->lock);
236 } 236 }
237 } 237 }
238 /** 238 /**
239 * midc_dostart - begin a DMA transaction 239 * midc_dostart - begin a DMA transaction
240 * @midc: channel for which txn is to be started 240 * @midc: channel for which txn is to be started
241 * @first: first descriptor of series 241 * @first: first descriptor of series
242 * 242 *
243 * Load a transaction into the engine. This must be called with midc->lock 243 * Load a transaction into the engine. This must be called with midc->lock
244 * held and bh disabled. 244 * held and bh disabled.
245 */ 245 */
246 static void midc_dostart(struct intel_mid_dma_chan *midc, 246 static void midc_dostart(struct intel_mid_dma_chan *midc,
247 struct intel_mid_dma_desc *first) 247 struct intel_mid_dma_desc *first)
248 { 248 {
249 struct middma_device *mid = to_middma_device(midc->chan.device); 249 struct middma_device *mid = to_middma_device(midc->chan.device);
250 250
251 /* the channel should be idle here; bail out if it is still busy */ 251 /* the channel should be idle here; bail out if it is still busy */
252 if (midc->busy && test_ch_en(midc->dma_base, midc->ch_id)) { 252 if (midc->busy && test_ch_en(midc->dma_base, midc->ch_id)) {
253 /*error*/ 253 /*error*/
254 pr_err("ERR_MDMA: channel is busy in start\n"); 254 pr_err("ERR_MDMA: channel is busy in start\n");
255 /* The tasklet will hopefully advance the queue... */ 255 /* The tasklet will hopefully advance the queue... */
256 return; 256 return;
257 } 257 }
258 midc->busy = true; 258 midc->busy = true;
259 /*write registers and en*/ 259 /*write registers and en*/
260 iowrite32(first->sar, midc->ch_regs + SAR); 260 iowrite32(first->sar, midc->ch_regs + SAR);
261 iowrite32(first->dar, midc->ch_regs + DAR); 261 iowrite32(first->dar, midc->ch_regs + DAR);
262 iowrite32(first->lli_phys, midc->ch_regs + LLP); 262 iowrite32(first->lli_phys, midc->ch_regs + LLP);
263 iowrite32(first->cfg_hi, midc->ch_regs + CFG_HIGH); 263 iowrite32(first->cfg_hi, midc->ch_regs + CFG_HIGH);
264 iowrite32(first->cfg_lo, midc->ch_regs + CFG_LOW); 264 iowrite32(first->cfg_lo, midc->ch_regs + CFG_LOW);
265 iowrite32(first->ctl_lo, midc->ch_regs + CTL_LOW); 265 iowrite32(first->ctl_lo, midc->ch_regs + CTL_LOW);
266 iowrite32(first->ctl_hi, midc->ch_regs + CTL_HIGH); 266 iowrite32(first->ctl_hi, midc->ch_regs + CTL_HIGH);
267 pr_debug("MDMA:TX SAR %x,DAR %x,CFGL %x,CFGH %x,CTLH %x, CTLL %x\n", 267 pr_debug("MDMA:TX SAR %x,DAR %x,CFGL %x,CFGH %x,CTLH %x, CTLL %x\n",
268 (int)first->sar, (int)first->dar, first->cfg_hi, 268 (int)first->sar, (int)first->dar, first->cfg_hi,
269 first->cfg_lo, first->ctl_hi, first->ctl_lo); 269 first->cfg_lo, first->ctl_hi, first->ctl_lo);
270 first->status = DMA_IN_PROGRESS; 270 first->status = DMA_IN_PROGRESS;
271 271
272 iowrite32(ENABLE_CHANNEL(midc->ch_id), mid->dma_base + DMA_CHAN_EN); 272 iowrite32(ENABLE_CHANNEL(midc->ch_id), mid->dma_base + DMA_CHAN_EN);
273 } 273 }
274 274
275 /** 275 /**
276 * midc_descriptor_complete - process completed descriptor 276 * midc_descriptor_complete - process completed descriptor
277 * @midc: channel owning the descriptor 277 * @midc: channel owning the descriptor
278 * @desc: the descriptor itself 278 * @desc: the descriptor itself
279 * 279 *
280 * Process a completed descriptor and perform any callbacks upon 280 * Process a completed descriptor and perform any callbacks upon
281 * the completion. The completion handling drops the lock during the 281 * the completion. The completion handling drops the lock during the
282 * callbacks but must be called with the lock held. 282 * callbacks but must be called with the lock held.
283 */ 283 */
284 static void midc_descriptor_complete(struct intel_mid_dma_chan *midc, 284 static void midc_descriptor_complete(struct intel_mid_dma_chan *midc,
285 struct intel_mid_dma_desc *desc) 285 struct intel_mid_dma_desc *desc)
286 __releases(&midc->lock) __acquires(&midc->lock) 286 __releases(&midc->lock) __acquires(&midc->lock)
287 { 287 {
288 struct dma_async_tx_descriptor *txd = &desc->txd; 288 struct dma_async_tx_descriptor *txd = &desc->txd;
289 dma_async_tx_callback callback_txd = NULL; 289 dma_async_tx_callback callback_txd = NULL;
290 struct intel_mid_dma_lli *llitem; 290 struct intel_mid_dma_lli *llitem;
291 void *param_txd = NULL; 291 void *param_txd = NULL;
292 292
293 dma_cookie_complete(txd); 293 dma_cookie_complete(txd);
294 callback_txd = txd->callback; 294 callback_txd = txd->callback;
295 param_txd = txd->callback_param; 295 param_txd = txd->callback_param;
296 296
297 if (desc->lli != NULL) { 297 if (desc->lli != NULL) {
298 /*clear the DONE bit of completed LLI in memory*/ 298 /*clear the DONE bit of completed LLI in memory*/
299 llitem = desc->lli + desc->current_lli; 299 llitem = desc->lli + desc->current_lli;
300 llitem->ctl_hi &= CLEAR_DONE; 300 llitem->ctl_hi &= CLEAR_DONE;
301 if (desc->current_lli < desc->lli_length-1) 301 if (desc->current_lli < desc->lli_length-1)
302 (desc->current_lli)++; 302 (desc->current_lli)++;
303 else 303 else
304 desc->current_lli = 0; 304 desc->current_lli = 0;
305 } 305 }
306 spin_unlock_bh(&midc->lock); 306 spin_unlock_bh(&midc->lock);
307 if (callback_txd) { 307 if (callback_txd) {
308 pr_debug("MDMA: TXD callback set ... calling\n"); 308 pr_debug("MDMA: TXD callback set ... calling\n");
309 callback_txd(param_txd); 309 callback_txd(param_txd);
310 } 310 }
311 if (midc->raw_tfr) { 311 if (midc->raw_tfr) {
312 desc->status = DMA_SUCCESS; 312 desc->status = DMA_COMPLETE;
313 if (desc->lli != NULL) { 313 if (desc->lli != NULL) {
314 pci_pool_free(desc->lli_pool, desc->lli, 314 pci_pool_free(desc->lli_pool, desc->lli,
315 desc->lli_phys); 315 desc->lli_phys);
316 pci_pool_destroy(desc->lli_pool); 316 pci_pool_destroy(desc->lli_pool);
317 desc->lli = NULL; 317 desc->lli = NULL;
318 } 318 }
319 list_move(&desc->desc_node, &midc->free_list); 319 list_move(&desc->desc_node, &midc->free_list);
320 midc->busy = false; 320 midc->busy = false;
321 } 321 }
322 spin_lock_bh(&midc->lock); 322 spin_lock_bh(&midc->lock);
323 323
324 } 324 }
325 /** 325 /**
326 * midc_scan_descriptors - check the descriptors in the channel and 326 * midc_scan_descriptors - check the descriptors in the channel and
327 * mark them completed when the tx is complete 327 * mark them completed when the tx is complete
328 * @mid: device 328 * @mid: device
329 * @midc: channel to scan 329 * @midc: channel to scan
330 * 330 *
331 * Walk the descriptor chain for the device and process any entries 331 * Walk the descriptor chain for the device and process any entries
332 * that are complete. 332 * that are complete.
333 */ 333 */
334 static void midc_scan_descriptors(struct middma_device *mid, 334 static void midc_scan_descriptors(struct middma_device *mid,
335 struct intel_mid_dma_chan *midc) 335 struct intel_mid_dma_chan *midc)
336 { 336 {
337 struct intel_mid_dma_desc *desc = NULL, *_desc = NULL; 337 struct intel_mid_dma_desc *desc = NULL, *_desc = NULL;
338 338
339 /*tx is complete*/ 339 /*tx is complete*/
340 list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) { 340 list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) {
341 if (desc->status == DMA_IN_PROGRESS) 341 if (desc->status == DMA_IN_PROGRESS)
342 midc_descriptor_complete(midc, desc); 342 midc_descriptor_complete(midc, desc);
343 } 343 }
344 return; 344 return;
345 } 345 }
346 /** 346 /**
347 * midc_lli_fill_sg - Helper function to convert 347 * midc_lli_fill_sg - Helper function to convert
348 * SG list to Linked List Items. 348 * SG list to Linked List Items.
349 *@midc: Channel 349 *@midc: Channel
350 *@desc: DMA descriptor 350 *@desc: DMA descriptor
351 *@sglist: Pointer to SG list 351 *@sglist: Pointer to SG list
352 *@sglen: SG list length 352 *@sglen: SG list length
353 *@flags: DMA transaction flags 353 *@flags: DMA transaction flags
354 * 354 *
355 * Walk through the SG list and convert the SG list into Linked 355 * Walk through the SG list and convert the SG list into Linked
356 * List Items (LLI). 356 * List Items (LLI).
357 */ 357 */
358 static int midc_lli_fill_sg(struct intel_mid_dma_chan *midc, 358 static int midc_lli_fill_sg(struct intel_mid_dma_chan *midc,
359 struct intel_mid_dma_desc *desc, 359 struct intel_mid_dma_desc *desc,
360 struct scatterlist *sglist, 360 struct scatterlist *sglist,
361 unsigned int sglen, 361 unsigned int sglen,
362 unsigned int flags) 362 unsigned int flags)
363 { 363 {
364 struct intel_mid_dma_slave *mids; 364 struct intel_mid_dma_slave *mids;
365 struct scatterlist *sg; 365 struct scatterlist *sg;
366 dma_addr_t lli_next, sg_phy_addr; 366 dma_addr_t lli_next, sg_phy_addr;
367 struct intel_mid_dma_lli *lli_bloc_desc; 367 struct intel_mid_dma_lli *lli_bloc_desc;
368 union intel_mid_dma_ctl_lo ctl_lo; 368 union intel_mid_dma_ctl_lo ctl_lo;
369 union intel_mid_dma_ctl_hi ctl_hi; 369 union intel_mid_dma_ctl_hi ctl_hi;
370 int i; 370 int i;
371 371
372 pr_debug("MDMA: Entered midc_lli_fill_sg\n"); 372 pr_debug("MDMA: Entered midc_lli_fill_sg\n");
373 mids = midc->mid_slave; 373 mids = midc->mid_slave;
374 374
375 lli_bloc_desc = desc->lli; 375 lli_bloc_desc = desc->lli;
376 lli_next = desc->lli_phys; 376 lli_next = desc->lli_phys;
377 377
378 ctl_lo.ctl_lo = desc->ctl_lo; 378 ctl_lo.ctl_lo = desc->ctl_lo;
379 ctl_hi.ctl_hi = desc->ctl_hi; 379 ctl_hi.ctl_hi = desc->ctl_hi;
380 for_each_sg(sglist, sg, sglen, i) { 380 for_each_sg(sglist, sg, sglen, i) {
381 /*Populate CTL_LOW and LLI values*/ 381 /*Populate CTL_LOW and LLI values*/
382 if (i != sglen - 1) { 382 if (i != sglen - 1) {
383 lli_next = lli_next + 383 lli_next = lli_next +
384 sizeof(struct intel_mid_dma_lli); 384 sizeof(struct intel_mid_dma_lli);
385 } else { 385 } else {
386 /*Check for circular list, otherwise terminate LLI to ZERO*/ 386 /*Check for circular list, otherwise terminate LLI to ZERO*/
387 if (flags & DMA_PREP_CIRCULAR_LIST) { 387 if (flags & DMA_PREP_CIRCULAR_LIST) {
388 pr_debug("MDMA: LLI is configured in circular mode\n"); 388 pr_debug("MDMA: LLI is configured in circular mode\n");
389 lli_next = desc->lli_phys; 389 lli_next = desc->lli_phys;
390 } else { 390 } else {
391 lli_next = 0; 391 lli_next = 0;
392 ctl_lo.ctlx.llp_dst_en = 0; 392 ctl_lo.ctlx.llp_dst_en = 0;
393 ctl_lo.ctlx.llp_src_en = 0; 393 ctl_lo.ctlx.llp_src_en = 0;
394 } 394 }
395 } 395 }
396 /*Populate CTL_HI values*/ 396 /*Populate CTL_HI values*/
397 ctl_hi.ctlx.block_ts = get_block_ts(sg_dma_len(sg), 397 ctl_hi.ctlx.block_ts = get_block_ts(sg_dma_len(sg),
398 desc->width, 398 desc->width,
399 midc->dma->block_size); 399 midc->dma->block_size);
400 /*Populate SAR and DAR values*/ 400 /*Populate SAR and DAR values*/
401 sg_phy_addr = sg_dma_address(sg); 401 sg_phy_addr = sg_dma_address(sg);
402 if (desc->dirn == DMA_MEM_TO_DEV) { 402 if (desc->dirn == DMA_MEM_TO_DEV) {
403 lli_bloc_desc->sar = sg_phy_addr; 403 lli_bloc_desc->sar = sg_phy_addr;
404 lli_bloc_desc->dar = mids->dma_slave.dst_addr; 404 lli_bloc_desc->dar = mids->dma_slave.dst_addr;
405 } else if (desc->dirn == DMA_DEV_TO_MEM) { 405 } else if (desc->dirn == DMA_DEV_TO_MEM) {
406 lli_bloc_desc->sar = mids->dma_slave.src_addr; 406 lli_bloc_desc->sar = mids->dma_slave.src_addr;
407 lli_bloc_desc->dar = sg_phy_addr; 407 lli_bloc_desc->dar = sg_phy_addr;
408 } 408 }
409 /*Copy values into the block descriptor in system memory*/ 409 /*Copy values into the block descriptor in system memory*/
410 lli_bloc_desc->llp = lli_next; 410 lli_bloc_desc->llp = lli_next;
411 lli_bloc_desc->ctl_lo = ctl_lo.ctl_lo; 411 lli_bloc_desc->ctl_lo = ctl_lo.ctl_lo;
412 lli_bloc_desc->ctl_hi = ctl_hi.ctl_hi; 412 lli_bloc_desc->ctl_hi = ctl_hi.ctl_hi;
413 413
414 lli_bloc_desc++; 414 lli_bloc_desc++;
415 } 415 }
416 /*Copy very first LLI values to descriptor*/ 416 /*Copy very first LLI values to descriptor*/
417 desc->ctl_lo = desc->lli->ctl_lo; 417 desc->ctl_lo = desc->lli->ctl_lo;
418 desc->ctl_hi = desc->lli->ctl_hi; 418 desc->ctl_hi = desc->lli->ctl_hi;
419 desc->sar = desc->lli->sar; 419 desc->sar = desc->lli->sar;
420 desc->dar = desc->lli->dar; 420 desc->dar = desc->lli->dar;
421 421
422 return 0; 422 return 0;
423 } 423 }
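/*
 * Editor's note: each LLI's llp field holds the physical address of the
 * next LLI, so the hardware walks the chain on its own. The last entry
 * either points back to desc->lli_phys (DMA_PREP_CIRCULAR_LIST) or
 * terminates with llp = 0 and the llp_src_en/llp_dst_en control bits
 * cleared, which is how the controller knows the block list has ended.
 */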
424 /***************************************************************************** 424 /*****************************************************************************
425 DMA engine callback Functions*/ 425 DMA engine callback Functions*/
426 /** 426 /**
427 * intel_mid_dma_tx_submit - callback to submit DMA transaction 427 * intel_mid_dma_tx_submit - callback to submit DMA transaction
428 * @tx: dma engine descriptor 428 * @tx: dma engine descriptor
429 * 429 *
430 * Submit the DMA transaction for this descriptor, start if ch idle 430 * Submit the DMA transaction for this descriptor, start if ch idle
431 */ 431 */
432 static dma_cookie_t intel_mid_dma_tx_submit(struct dma_async_tx_descriptor *tx) 432 static dma_cookie_t intel_mid_dma_tx_submit(struct dma_async_tx_descriptor *tx)
433 { 433 {
434 struct intel_mid_dma_desc *desc = to_intel_mid_dma_desc(tx); 434 struct intel_mid_dma_desc *desc = to_intel_mid_dma_desc(tx);
435 struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(tx->chan); 435 struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(tx->chan);
436 dma_cookie_t cookie; 436 dma_cookie_t cookie;
437 437
438 spin_lock_bh(&midc->lock); 438 spin_lock_bh(&midc->lock);
439 cookie = dma_cookie_assign(tx); 439 cookie = dma_cookie_assign(tx);
440 440
441 if (list_empty(&midc->active_list)) 441 if (list_empty(&midc->active_list))
442 list_add_tail(&desc->desc_node, &midc->active_list); 442 list_add_tail(&desc->desc_node, &midc->active_list);
443 else 443 else
444 list_add_tail(&desc->desc_node, &midc->queue); 444 list_add_tail(&desc->desc_node, &midc->queue);
445 445
446 midc_dostart(midc, desc); 446 midc_dostart(midc, desc);
447 spin_unlock_bh(&midc->lock); 447 spin_unlock_bh(&midc->lock);
448 448
449 return cookie; 449 return cookie;
450 } 450 }
451 451
452 /** 452 /**
453 * intel_mid_dma_issue_pending - callback to issue pending txn 453 * intel_mid_dma_issue_pending - callback to issue pending txn
454 * @chan: chan where pending transactions need to be checked and submitted 454 * @chan: chan where pending transactions need to be checked and submitted
455 * 455 *
456 * Call for scan to issue pending descriptors 456 * Call for scan to issue pending descriptors
457 */ 457 */
458 static void intel_mid_dma_issue_pending(struct dma_chan *chan) 458 static void intel_mid_dma_issue_pending(struct dma_chan *chan)
459 { 459 {
460 struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan); 460 struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan);
461 461
462 spin_lock_bh(&midc->lock); 462 spin_lock_bh(&midc->lock);
463 if (!list_empty(&midc->queue)) 463 if (!list_empty(&midc->queue))
464 midc_scan_descriptors(to_middma_device(chan->device), midc); 464 midc_scan_descriptors(to_middma_device(chan->device), midc);
465 spin_unlock_bh(&midc->lock); 465 spin_unlock_bh(&midc->lock);
466 } 466 }
467 467
468 /** 468 /**
469 * intel_mid_dma_tx_status - Return status of txn 469 * intel_mid_dma_tx_status - Return status of txn
470 * @chan: chan whose status needs to be checked 470 * @chan: chan whose status needs to be checked
471 * @cookie: cookie for txn 471 * @cookie: cookie for txn
472 * @txstate: DMA txn state 472 * @txstate: DMA txn state
473 * 473 *
474 * Return status of DMA txn 474 * Return status of DMA txn
475 */ 475 */
476 static enum dma_status intel_mid_dma_tx_status(struct dma_chan *chan, 476 static enum dma_status intel_mid_dma_tx_status(struct dma_chan *chan,
477 dma_cookie_t cookie, 477 dma_cookie_t cookie,
478 struct dma_tx_state *txstate) 478 struct dma_tx_state *txstate)
479 { 479 {
480 struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan); 480 struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan);
481 enum dma_status ret; 481 enum dma_status ret;
482 482
483 ret = dma_cookie_status(chan, cookie, txstate); 483 ret = dma_cookie_status(chan, cookie, txstate);
484 if (ret != DMA_SUCCESS) { 484 if (ret != DMA_COMPLETE) {
485 spin_lock_bh(&midc->lock); 485 spin_lock_bh(&midc->lock);
486 midc_scan_descriptors(to_middma_device(chan->device), midc); 486 midc_scan_descriptors(to_middma_device(chan->device), midc);
487 spin_unlock_bh(&midc->lock); 487 spin_unlock_bh(&midc->lock);
488 488
489 ret = dma_cookie_status(chan, cookie, txstate); 489 ret = dma_cookie_status(chan, cookie, txstate);
490 } 490 }
491 491
492 return ret; 492 return ret;
493 } 493 }
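/*
 * Illustrative sketch (editor's addition): this is the standard dmaengine
 * completion pattern that the dma_complete branch renames from DMA_SUCCESS
 * to DMA_COMPLETE. A client polling a cookie might use a hypothetical
 * helper like the one below.
 */
static bool example_tx_done(struct dma_chan *chan, dma_cookie_t cookie)
{
	struct dma_tx_state state;

	/* DMA_COMPLETE is the new name for the old DMA_SUCCESS value */
	return dmaengine_tx_status(chan, cookie, &state) == DMA_COMPLETE;
}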
494 494
495 static int dma_slave_control(struct dma_chan *chan, unsigned long arg) 495 static int dma_slave_control(struct dma_chan *chan, unsigned long arg)
496 { 496 {
497 struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan); 497 struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan);
498 struct dma_slave_config *slave = (struct dma_slave_config *)arg; 498 struct dma_slave_config *slave = (struct dma_slave_config *)arg;
499 struct intel_mid_dma_slave *mid_slave; 499 struct intel_mid_dma_slave *mid_slave;
500 500
501 BUG_ON(!midc); 501 BUG_ON(!midc);
502 BUG_ON(!slave); 502 BUG_ON(!slave);
503 pr_debug("MDMA: slave control called\n"); 503 pr_debug("MDMA: slave control called\n");
504 504
505 mid_slave = to_intel_mid_dma_slave(slave); 505 mid_slave = to_intel_mid_dma_slave(slave);
506 506
507 BUG_ON(!mid_slave); 507 BUG_ON(!mid_slave);
508 508
509 midc->mid_slave = mid_slave; 509 midc->mid_slave = mid_slave;
510 return 0; 510 return 0;
511 } 511 }
512 /** 512 /**
513 * intel_mid_dma_device_control - DMA device control 513 * intel_mid_dma_device_control - DMA device control
514 * @chan: chan for DMA control 514 * @chan: chan for DMA control
515 * @cmd: control cmd 515 * @cmd: control cmd
516 * @arg: cmd arg value 516 * @arg: cmd arg value
517 * 517 *
518 * Perform DMA control command 518 * Perform DMA control command
519 */ 519 */
520 static int intel_mid_dma_device_control(struct dma_chan *chan, 520 static int intel_mid_dma_device_control(struct dma_chan *chan,
521 enum dma_ctrl_cmd cmd, unsigned long arg) 521 enum dma_ctrl_cmd cmd, unsigned long arg)
522 { 522 {
523 struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan); 523 struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan);
524 struct middma_device *mid = to_middma_device(chan->device); 524 struct middma_device *mid = to_middma_device(chan->device);
525 struct intel_mid_dma_desc *desc, *_desc; 525 struct intel_mid_dma_desc *desc, *_desc;
526 union intel_mid_dma_cfg_lo cfg_lo; 526 union intel_mid_dma_cfg_lo cfg_lo;
527 527
528 if (cmd == DMA_SLAVE_CONFIG) 528 if (cmd == DMA_SLAVE_CONFIG)
529 return dma_slave_control(chan, arg); 529 return dma_slave_control(chan, arg);
530 530
531 if (cmd != DMA_TERMINATE_ALL) 531 if (cmd != DMA_TERMINATE_ALL)
532 return -ENXIO; 532 return -ENXIO;
533 533
534 spin_lock_bh(&midc->lock); 534 spin_lock_bh(&midc->lock);
535 if (midc->busy == false) { 535 if (midc->busy == false) {
536 spin_unlock_bh(&midc->lock); 536 spin_unlock_bh(&midc->lock);
537 return 0; 537 return 0;
538 } 538 }
539 /*Suspend and disable the channel*/ 539 /*Suspend and disable the channel*/
540 cfg_lo.cfg_lo = ioread32(midc->ch_regs + CFG_LOW); 540 cfg_lo.cfg_lo = ioread32(midc->ch_regs + CFG_LOW);
541 cfg_lo.cfgx.ch_susp = 1; 541 cfg_lo.cfgx.ch_susp = 1;
542 iowrite32(cfg_lo.cfg_lo, midc->ch_regs + CFG_LOW); 542 iowrite32(cfg_lo.cfg_lo, midc->ch_regs + CFG_LOW);
543 iowrite32(DISABLE_CHANNEL(midc->ch_id), mid->dma_base + DMA_CHAN_EN); 543 iowrite32(DISABLE_CHANNEL(midc->ch_id), mid->dma_base + DMA_CHAN_EN);
544 midc->busy = false; 544 midc->busy = false;
545 /* Disable interrupts */ 545 /* Disable interrupts */
546 disable_dma_interrupt(midc); 546 disable_dma_interrupt(midc);
547 midc->descs_allocated = 0; 547 midc->descs_allocated = 0;
548 548
549 spin_unlock_bh(&midc->lock); 549 spin_unlock_bh(&midc->lock);
550 list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) { 550 list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) {
551 if (desc->lli != NULL) { 551 if (desc->lli != NULL) {
552 pci_pool_free(desc->lli_pool, desc->lli, 552 pci_pool_free(desc->lli_pool, desc->lli,
553 desc->lli_phys); 553 desc->lli_phys);
554 pci_pool_destroy(desc->lli_pool); 554 pci_pool_destroy(desc->lli_pool);
555 desc->lli = NULL; 555 desc->lli = NULL;
556 } 556 }
557 list_move(&desc->desc_node, &midc->free_list); 557 list_move(&desc->desc_node, &midc->free_list);
558 } 558 }
559 return 0; 559 return 0;
560 } 560 }
561 561
562 562
563 /** 563 /**
564 * intel_mid_dma_prep_memcpy - Prep memcpy txn 564 * intel_mid_dma_prep_memcpy - Prep memcpy txn
565 * @chan: chan for DMA transfer 565 * @chan: chan for DMA transfer
566 * @dest: destination address 566 * @dest: destination address
567 * @src: source address 567 * @src: source address
568 * @len: DMA transfer len 568 * @len: DMA transfer len
569 * @flags: DMA flags 569 * @flags: DMA flags
570 * 570 *
571 * Perform a DMA memcpy. Note we support slave peripheral DMA transfers only; 571 * Perform a DMA memcpy. Note we support slave peripheral DMA transfers only;
572 * the peripheral txn details should be filled in the slave structure properly. 572 * the peripheral txn details should be filled in the slave structure properly.
573 * Returns the descriptor for this txn 573 * Returns the descriptor for this txn
574 */ 574 */
575 static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy( 575 static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy(
576 struct dma_chan *chan, dma_addr_t dest, 576 struct dma_chan *chan, dma_addr_t dest,
577 dma_addr_t src, size_t len, unsigned long flags) 577 dma_addr_t src, size_t len, unsigned long flags)
578 { 578 {
579 struct intel_mid_dma_chan *midc; 579 struct intel_mid_dma_chan *midc;
580 struct intel_mid_dma_desc *desc = NULL; 580 struct intel_mid_dma_desc *desc = NULL;
581 struct intel_mid_dma_slave *mids; 581 struct intel_mid_dma_slave *mids;
582 union intel_mid_dma_ctl_lo ctl_lo; 582 union intel_mid_dma_ctl_lo ctl_lo;
583 union intel_mid_dma_ctl_hi ctl_hi; 583 union intel_mid_dma_ctl_hi ctl_hi;
584 union intel_mid_dma_cfg_lo cfg_lo; 584 union intel_mid_dma_cfg_lo cfg_lo;
585 union intel_mid_dma_cfg_hi cfg_hi; 585 union intel_mid_dma_cfg_hi cfg_hi;
586 enum dma_slave_buswidth width; 586 enum dma_slave_buswidth width;
587 587
588 pr_debug("MDMA: Prep for memcpy\n"); 588 pr_debug("MDMA: Prep for memcpy\n");
589 BUG_ON(!chan); 589 BUG_ON(!chan);
590 if (!len) 590 if (!len)
591 return NULL; 591 return NULL;
592 592
593 midc = to_intel_mid_dma_chan(chan); 593 midc = to_intel_mid_dma_chan(chan);
594 BUG_ON(!midc); 594 BUG_ON(!midc);
595 595
596 mids = midc->mid_slave; 596 mids = midc->mid_slave;
597 BUG_ON(!mids); 597 BUG_ON(!mids);
598 598
599 pr_debug("MDMA:called for DMA %x CH %d Length %zu\n", 599 pr_debug("MDMA:called for DMA %x CH %d Length %zu\n",
600 midc->dma->pci_id, midc->ch_id, len); 600 midc->dma->pci_id, midc->ch_id, len);
601 pr_debug("MDMA:Cfg passed Mode %x, Dirn %x, HS %x, Width %x\n", 601 pr_debug("MDMA:Cfg passed Mode %x, Dirn %x, HS %x, Width %x\n",
602 mids->cfg_mode, mids->dma_slave.direction, 602 mids->cfg_mode, mids->dma_slave.direction,
603 mids->hs_mode, mids->dma_slave.src_addr_width); 603 mids->hs_mode, mids->dma_slave.src_addr_width);
604 604
605 /*calculate CFG_LO*/ 605 /*calculate CFG_LO*/
606 if (mids->hs_mode == LNW_DMA_SW_HS) { 606 if (mids->hs_mode == LNW_DMA_SW_HS) {
607 cfg_lo.cfg_lo = 0; 607 cfg_lo.cfg_lo = 0;
608 cfg_lo.cfgx.hs_sel_dst = 1; 608 cfg_lo.cfgx.hs_sel_dst = 1;
609 cfg_lo.cfgx.hs_sel_src = 1; 609 cfg_lo.cfgx.hs_sel_src = 1;
610 } else if (mids->hs_mode == LNW_DMA_HW_HS) 610 } else if (mids->hs_mode == LNW_DMA_HW_HS)
611 cfg_lo.cfg_lo = 0x00000; 611 cfg_lo.cfg_lo = 0x00000;
612 612
613 /*calculate CFG_HI*/ 613 /*calculate CFG_HI*/
614 if (mids->cfg_mode == LNW_DMA_MEM_TO_MEM) { 614 if (mids->cfg_mode == LNW_DMA_MEM_TO_MEM) {
615 /*SW HS only*/ 615 /*SW HS only*/
616 cfg_hi.cfg_hi = 0; 616 cfg_hi.cfg_hi = 0;
617 } else { 617 } else {
618 cfg_hi.cfg_hi = 0; 618 cfg_hi.cfg_hi = 0;
619 if (midc->dma->pimr_mask) { 619 if (midc->dma->pimr_mask) {
620 cfg_hi.cfgx.protctl = 0x0; /*default value*/ 620 cfg_hi.cfgx.protctl = 0x0; /*default value*/
621 cfg_hi.cfgx.fifo_mode = 1; 621 cfg_hi.cfgx.fifo_mode = 1;
622 if (mids->dma_slave.direction == DMA_MEM_TO_DEV) { 622 if (mids->dma_slave.direction == DMA_MEM_TO_DEV) {
623 cfg_hi.cfgx.src_per = 0; 623 cfg_hi.cfgx.src_per = 0;
624 if (mids->device_instance == 0) 624 if (mids->device_instance == 0)
625 cfg_hi.cfgx.dst_per = 3; 625 cfg_hi.cfgx.dst_per = 3;
626 if (mids->device_instance == 1) 626 if (mids->device_instance == 1)
627 cfg_hi.cfgx.dst_per = 1; 627 cfg_hi.cfgx.dst_per = 1;
628 } else if (mids->dma_slave.direction == DMA_DEV_TO_MEM) { 628 } else if (mids->dma_slave.direction == DMA_DEV_TO_MEM) {
629 if (mids->device_instance == 0) 629 if (mids->device_instance == 0)
630 cfg_hi.cfgx.src_per = 2; 630 cfg_hi.cfgx.src_per = 2;
631 if (mids->device_instance == 1) 631 if (mids->device_instance == 1)
632 cfg_hi.cfgx.src_per = 0; 632 cfg_hi.cfgx.src_per = 0;
633 cfg_hi.cfgx.dst_per = 0; 633 cfg_hi.cfgx.dst_per = 0;
634 } 634 }
635 } else { 635 } else {
636 cfg_hi.cfgx.protctl = 0x1; /*default value*/ 636 cfg_hi.cfgx.protctl = 0x1; /*default value*/
637 cfg_hi.cfgx.src_per = cfg_hi.cfgx.dst_per = 637 cfg_hi.cfgx.src_per = cfg_hi.cfgx.dst_per =
638 midc->ch_id - midc->dma->chan_base; 638 midc->ch_id - midc->dma->chan_base;
639 } 639 }
640 } 640 }
641 641
642 /*calculate CTL_HI*/ 642 /*calculate CTL_HI*/
643 ctl_hi.ctlx.reser = 0; 643 ctl_hi.ctlx.reser = 0;
644 ctl_hi.ctlx.done = 0; 644 ctl_hi.ctlx.done = 0;
645 width = mids->dma_slave.src_addr_width; 645 width = mids->dma_slave.src_addr_width;
646 646
647 ctl_hi.ctlx.block_ts = get_block_ts(len, width, midc->dma->block_size); 647 ctl_hi.ctlx.block_ts = get_block_ts(len, width, midc->dma->block_size);
648 pr_debug("MDMA:calc len %d for block size %d\n", 648 pr_debug("MDMA:calc len %d for block size %d\n",
649 ctl_hi.ctlx.block_ts, midc->dma->block_size); 649 ctl_hi.ctlx.block_ts, midc->dma->block_size);
650 /*calculate CTL_LO*/ 650 /*calculate CTL_LO*/
651 ctl_lo.ctl_lo = 0; 651 ctl_lo.ctl_lo = 0;
652 ctl_lo.ctlx.int_en = 1; 652 ctl_lo.ctlx.int_en = 1;
653 ctl_lo.ctlx.dst_msize = mids->dma_slave.src_maxburst; 653 ctl_lo.ctlx.dst_msize = mids->dma_slave.src_maxburst;
654 ctl_lo.ctlx.src_msize = mids->dma_slave.dst_maxburst; 654 ctl_lo.ctlx.src_msize = mids->dma_slave.dst_maxburst;
655 655
656 /* 656 /*
657 * Here we need some translation from "enum dma_slave_buswidth" 657 * Here we need some translation from "enum dma_slave_buswidth"
658 * to the format for our dma controller 658 * to the format for our dma controller
659 * standard intel_mid_dmac's format 659 * standard intel_mid_dmac's format
660 * 1 Byte 0b000 660 * 1 Byte 0b000
661 * 2 Bytes 0b001 661 * 2 Bytes 0b001
662 * 4 Bytes 0b010 662 * 4 Bytes 0b010
663 */ 663 */
664 ctl_lo.ctlx.dst_tr_width = mids->dma_slave.dst_addr_width / 2; 664 ctl_lo.ctlx.dst_tr_width = mids->dma_slave.dst_addr_width / 2;
665 ctl_lo.ctlx.src_tr_width = mids->dma_slave.src_addr_width / 2; 665 ctl_lo.ctlx.src_tr_width = mids->dma_slave.src_addr_width / 2;
666 666
667 if (mids->cfg_mode == LNW_DMA_MEM_TO_MEM) { 667 if (mids->cfg_mode == LNW_DMA_MEM_TO_MEM) {
668 ctl_lo.ctlx.tt_fc = 0; 668 ctl_lo.ctlx.tt_fc = 0;
669 ctl_lo.ctlx.sinc = 0; 669 ctl_lo.ctlx.sinc = 0;
670 ctl_lo.ctlx.dinc = 0; 670 ctl_lo.ctlx.dinc = 0;
671 } else { 671 } else {
672 if (mids->dma_slave.direction == DMA_MEM_TO_DEV) { 672 if (mids->dma_slave.direction == DMA_MEM_TO_DEV) {
673 ctl_lo.ctlx.sinc = 0; 673 ctl_lo.ctlx.sinc = 0;
674 ctl_lo.ctlx.dinc = 2; 674 ctl_lo.ctlx.dinc = 2;
675 ctl_lo.ctlx.tt_fc = 1; 675 ctl_lo.ctlx.tt_fc = 1;
676 } else if (mids->dma_slave.direction == DMA_DEV_TO_MEM) { 676 } else if (mids->dma_slave.direction == DMA_DEV_TO_MEM) {
677 ctl_lo.ctlx.sinc = 2; 677 ctl_lo.ctlx.sinc = 2;
678 ctl_lo.ctlx.dinc = 0; 678 ctl_lo.ctlx.dinc = 0;
679 ctl_lo.ctlx.tt_fc = 2; 679 ctl_lo.ctlx.tt_fc = 2;
680 } 680 }
681 } 681 }
682 682
683 pr_debug("MDMA:Calc CTL LO %x, CTL HI %x, CFG LO %x, CFG HI %x\n", 683 pr_debug("MDMA:Calc CTL LO %x, CTL HI %x, CFG LO %x, CFG HI %x\n",
684 ctl_lo.ctl_lo, ctl_hi.ctl_hi, cfg_lo.cfg_lo, cfg_hi.cfg_hi); 684 ctl_lo.ctl_lo, ctl_hi.ctl_hi, cfg_lo.cfg_lo, cfg_hi.cfg_hi);
685 685
686 enable_dma_interrupt(midc); 686 enable_dma_interrupt(midc);
687 687
688 desc = midc_desc_get(midc); 688 desc = midc_desc_get(midc);
689 if (desc == NULL) 689 if (desc == NULL)
690 goto err_desc_get; 690 goto err_desc_get;
691 desc->sar = src; 691 desc->sar = src;
692 desc->dar = dest ; 692 desc->dar = dest ;
693 desc->len = len; 693 desc->len = len;
694 desc->cfg_hi = cfg_hi.cfg_hi; 694 desc->cfg_hi = cfg_hi.cfg_hi;
695 desc->cfg_lo = cfg_lo.cfg_lo; 695 desc->cfg_lo = cfg_lo.cfg_lo;
696 desc->ctl_lo = ctl_lo.ctl_lo; 696 desc->ctl_lo = ctl_lo.ctl_lo;
697 desc->ctl_hi = ctl_hi.ctl_hi; 697 desc->ctl_hi = ctl_hi.ctl_hi;
698 desc->width = width; 698 desc->width = width;
699 desc->dirn = mids->dma_slave.direction; 699 desc->dirn = mids->dma_slave.direction;
700 desc->lli_phys = 0; 700 desc->lli_phys = 0;
701 desc->lli = NULL; 701 desc->lli = NULL;
702 desc->lli_pool = NULL; 702 desc->lli_pool = NULL;
703 return &desc->txd; 703 return &desc->txd;
704 704
705 err_desc_get: 705 err_desc_get:
706 pr_err("ERR_MDMA: Failed to get desc\n"); 706 pr_err("ERR_MDMA: Failed to get desc\n");
707 midc_desc_put(midc, desc); 707 midc_desc_put(midc, desc);
708 return NULL; 708 return NULL;
709 } 709 }
710 /** 710 /**
711 * intel_mid_dma_prep_slave_sg - Prep slave sg txn 711 * intel_mid_dma_prep_slave_sg - Prep slave sg txn
712 * @chan: chan for DMA transfer 712 * @chan: chan for DMA transfer
713 * @sgl: scatter gather list 713 * @sgl: scatter gather list
714 * @sg_len: length of sg txn 714 * @sg_len: length of sg txn
715 * @direction: DMA transfer direction 715 * @direction: DMA transfer direction
716 * @flags: DMA flags 716 * @flags: DMA flags
717 * @context: transfer context (ignored) 717 * @context: transfer context (ignored)
718 * 718 *
719 * Prepares an LLI-based peripheral transfer 719 * Prepares an LLI-based peripheral transfer
720 */ 720 */
721 static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg( 721 static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg(
722 struct dma_chan *chan, struct scatterlist *sgl, 722 struct dma_chan *chan, struct scatterlist *sgl,
723 unsigned int sg_len, enum dma_transfer_direction direction, 723 unsigned int sg_len, enum dma_transfer_direction direction,
724 unsigned long flags, void *context) 724 unsigned long flags, void *context)
725 { 725 {
726 struct intel_mid_dma_chan *midc = NULL; 726 struct intel_mid_dma_chan *midc = NULL;
727 struct intel_mid_dma_slave *mids = NULL; 727 struct intel_mid_dma_slave *mids = NULL;
728 struct intel_mid_dma_desc *desc = NULL; 728 struct intel_mid_dma_desc *desc = NULL;
729 struct dma_async_tx_descriptor *txd = NULL; 729 struct dma_async_tx_descriptor *txd = NULL;
730 union intel_mid_dma_ctl_lo ctl_lo; 730 union intel_mid_dma_ctl_lo ctl_lo;
731 731
732 pr_debug("MDMA: Prep for slave SG\n"); 732 pr_debug("MDMA: Prep for slave SG\n");
733 733
734 if (!sg_len) { 734 if (!sg_len) {
735 pr_err("MDMA: Invalid SG length\n"); 735 pr_err("MDMA: Invalid SG length\n");
736 return NULL; 736 return NULL;
737 } 737 }
738 midc = to_intel_mid_dma_chan(chan); 738 midc = to_intel_mid_dma_chan(chan);
739 BUG_ON(!midc); 739 BUG_ON(!midc);
740 740
741 mids = midc->mid_slave; 741 mids = midc->mid_slave;
742 BUG_ON(!mids); 742 BUG_ON(!mids);
743 743
744 if (!midc->dma->pimr_mask) { 744 if (!midc->dma->pimr_mask) {
745 /* We can still handle sg list with only one item */ 745 /* We can still handle sg list with only one item */
746 if (sg_len == 1) { 746 if (sg_len == 1) {
747 txd = intel_mid_dma_prep_memcpy(chan, 747 txd = intel_mid_dma_prep_memcpy(chan,
748 mids->dma_slave.dst_addr, 748 mids->dma_slave.dst_addr,
749 mids->dma_slave.src_addr, 749 mids->dma_slave.src_addr,
750 sg_dma_len(sgl), 750 sg_dma_len(sgl),
751 flags); 751 flags);
752 return txd; 752 return txd;
753 } else { 753 } else {
754 pr_warn("MDMA: SG list is not supported by this controller\n"); 754 pr_warn("MDMA: SG list is not supported by this controller\n");
755 return NULL; 755 return NULL;
756 } 756 }
757 } 757 }
758 758
759 pr_debug("MDMA: SG Length = %d, direction = %d, Flags = %#lx\n", 759 pr_debug("MDMA: SG Length = %d, direction = %d, Flags = %#lx\n",
760 sg_len, direction, flags); 760 sg_len, direction, flags);
761 761
762 txd = intel_mid_dma_prep_memcpy(chan, 0, 0, sg_dma_len(sgl), flags); 762 txd = intel_mid_dma_prep_memcpy(chan, 0, 0, sg_dma_len(sgl), flags);
763 if (NULL == txd) { 763 if (NULL == txd) {
764 pr_err("MDMA: Prep memcpy failed\n"); 764 pr_err("MDMA: Prep memcpy failed\n");
765 return NULL; 765 return NULL;
766 } 766 }
767 767
768 desc = to_intel_mid_dma_desc(txd); 768 desc = to_intel_mid_dma_desc(txd);
769 desc->dirn = direction; 769 desc->dirn = direction;
770 ctl_lo.ctl_lo = desc->ctl_lo; 770 ctl_lo.ctl_lo = desc->ctl_lo;
771 ctl_lo.ctlx.llp_dst_en = 1; 771 ctl_lo.ctlx.llp_dst_en = 1;
772 ctl_lo.ctlx.llp_src_en = 1; 772 ctl_lo.ctlx.llp_src_en = 1;
773 desc->ctl_lo = ctl_lo.ctl_lo; 773 desc->ctl_lo = ctl_lo.ctl_lo;
774 desc->lli_length = sg_len; 774 desc->lli_length = sg_len;
775 desc->current_lli = 0; 775 desc->current_lli = 0;
776 	/* DMA coherent memory pool for LLI descriptors */ 776 	/* DMA coherent memory pool for LLI descriptors */
777 desc->lli_pool = pci_pool_create("intel_mid_dma_lli_pool", 777 desc->lli_pool = pci_pool_create("intel_mid_dma_lli_pool",
778 midc->dma->pdev, 778 midc->dma->pdev,
779 (sizeof(struct intel_mid_dma_lli)*sg_len), 779 (sizeof(struct intel_mid_dma_lli)*sg_len),
780 32, 0); 780 32, 0);
781 if (NULL == desc->lli_pool) { 781 if (NULL == desc->lli_pool) {
782 pr_err("MID_DMA:LLI pool create failed\n"); 782 pr_err("MID_DMA:LLI pool create failed\n");
783 return NULL; 783 return NULL;
784 } 784 }
785 785
786 desc->lli = pci_pool_alloc(desc->lli_pool, GFP_KERNEL, &desc->lli_phys); 786 desc->lli = pci_pool_alloc(desc->lli_pool, GFP_KERNEL, &desc->lli_phys);
787 if (!desc->lli) { 787 if (!desc->lli) {
788 pr_err("MID_DMA: LLI alloc failed\n"); 788 pr_err("MID_DMA: LLI alloc failed\n");
789 pci_pool_destroy(desc->lli_pool); 789 pci_pool_destroy(desc->lli_pool);
790 return NULL; 790 return NULL;
791 } 791 }
792 792
793 midc_lli_fill_sg(midc, desc, sgl, sg_len, flags); 793 midc_lli_fill_sg(midc, desc, sgl, sg_len, flags);
794 if (flags & DMA_PREP_INTERRUPT) { 794 if (flags & DMA_PREP_INTERRUPT) {
795 iowrite32(UNMASK_INTR_REG(midc->ch_id), 795 iowrite32(UNMASK_INTR_REG(midc->ch_id),
796 midc->dma_base + MASK_BLOCK); 796 midc->dma_base + MASK_BLOCK);
797 pr_debug("MDMA:Enabled Block interrupt\n"); 797 pr_debug("MDMA:Enabled Block interrupt\n");
798 } 798 }
799 return &desc->txd; 799 return &desc->txd;
800 } 800 }
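
For the slave path, a peripheral driver would normally go through the generic dmaengine wrappers rather than calling this hook directly. A hedged sketch, assuming 'chan' was obtained with dma_request_channel() and the slave addresses were configured beforehand (my_start_tx is an illustrative name):

        #include <linux/dmaengine.h>
        #include <linux/scatterlist.h>

        static int my_start_tx(struct dma_chan *chan, struct scatterlist *sgl,
                               unsigned int sg_len)
        {
                struct dma_async_tx_descriptor *txd;

                txd = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_MEM_TO_DEV,
                                              DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
                if (!txd)
                        return -EBUSY;

                dmaengine_submit(txd);          /* add to the channel's queue */
                dma_async_issue_pending(chan);  /* start (or continue) the transfer */
                return 0;
        }
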
801 801
802 /** 802 /**
803 * intel_mid_dma_free_chan_resources - Frees dma resources 803 * intel_mid_dma_free_chan_resources - Frees dma resources
804 * @chan: chan requiring attention 804 * @chan: chan requiring attention
805 * 805 *
806 * Frees the allocated resources on this DMA chan 806 * Frees the allocated resources on this DMA chan
807 */ 807 */
808 static void intel_mid_dma_free_chan_resources(struct dma_chan *chan) 808 static void intel_mid_dma_free_chan_resources(struct dma_chan *chan)
809 { 809 {
810 struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan); 810 struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan);
811 struct middma_device *mid = to_middma_device(chan->device); 811 struct middma_device *mid = to_middma_device(chan->device);
812 struct intel_mid_dma_desc *desc, *_desc; 812 struct intel_mid_dma_desc *desc, *_desc;
813 813
814 if (true == midc->busy) { 814 if (true == midc->busy) {
815 		/* trying to free a channel that is still in use */ 815 		/* trying to free a channel that is still in use */
816 pr_err("ERR_MDMA: trying to free ch in use\n"); 816 pr_err("ERR_MDMA: trying to free ch in use\n");
817 } 817 }
818 spin_lock_bh(&midc->lock); 818 spin_lock_bh(&midc->lock);
819 midc->descs_allocated = 0; 819 midc->descs_allocated = 0;
820 list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) { 820 list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) {
821 list_del(&desc->desc_node); 821 list_del(&desc->desc_node);
822 pci_pool_free(mid->dma_pool, desc, desc->txd.phys); 822 pci_pool_free(mid->dma_pool, desc, desc->txd.phys);
823 } 823 }
824 list_for_each_entry_safe(desc, _desc, &midc->free_list, desc_node) { 824 list_for_each_entry_safe(desc, _desc, &midc->free_list, desc_node) {
825 list_del(&desc->desc_node); 825 list_del(&desc->desc_node);
826 pci_pool_free(mid->dma_pool, desc, desc->txd.phys); 826 pci_pool_free(mid->dma_pool, desc, desc->txd.phys);
827 } 827 }
828 list_for_each_entry_safe(desc, _desc, &midc->queue, desc_node) { 828 list_for_each_entry_safe(desc, _desc, &midc->queue, desc_node) {
829 list_del(&desc->desc_node); 829 list_del(&desc->desc_node);
830 pci_pool_free(mid->dma_pool, desc, desc->txd.phys); 830 pci_pool_free(mid->dma_pool, desc, desc->txd.phys);
831 } 831 }
832 spin_unlock_bh(&midc->lock); 832 spin_unlock_bh(&midc->lock);
833 midc->in_use = false; 833 midc->in_use = false;
834 midc->busy = false; 834 midc->busy = false;
835 /* Disable CH interrupts */ 835 /* Disable CH interrupts */
836 iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_BLOCK); 836 iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_BLOCK);
837 iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_ERR); 837 iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_ERR);
838 pm_runtime_put(&mid->pdev->dev); 838 pm_runtime_put(&mid->pdev->dev);
839 } 839 }
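
The counterpart from a client's point of view is dropping the channel, which is what eventually lands in the free hook above. A brief sketch with an illustrative helper name:

        #include <linux/dmaengine.h>

        static void my_put_chan(struct dma_chan *chan)
        {
                dmaengine_terminate_all(chan);  /* abort anything still queued */
                dma_release_channel(chan);      /* core ends up in device_free_chan_resources */
        }
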
840 840
841 /** 841 /**
842 * intel_mid_dma_alloc_chan_resources - Allocate dma resources 842 * intel_mid_dma_alloc_chan_resources - Allocate dma resources
843 * @chan: chan requiring attention 843 * @chan: chan requiring attention
844 * 844 *
845 * Allocates DMA resources on this chan 845 * Allocates DMA resources on this chan
846 * Returns the number of descriptors allocated 846 * Returns the number of descriptors allocated
847 */ 847 */
848 static int intel_mid_dma_alloc_chan_resources(struct dma_chan *chan) 848 static int intel_mid_dma_alloc_chan_resources(struct dma_chan *chan)
849 { 849 {
850 struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan); 850 struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan);
851 struct middma_device *mid = to_middma_device(chan->device); 851 struct middma_device *mid = to_middma_device(chan->device);
852 struct intel_mid_dma_desc *desc; 852 struct intel_mid_dma_desc *desc;
853 dma_addr_t phys; 853 dma_addr_t phys;
854 int i = 0; 854 int i = 0;
855 855
856 pm_runtime_get_sync(&mid->pdev->dev); 856 pm_runtime_get_sync(&mid->pdev->dev);
857 857
858 if (mid->state == SUSPENDED) { 858 if (mid->state == SUSPENDED) {
859 if (dma_resume(&mid->pdev->dev)) { 859 if (dma_resume(&mid->pdev->dev)) {
860 			pr_err("ERR_MDMA: resume failed\n"); 860 			pr_err("ERR_MDMA: resume failed\n");
861 return -EFAULT; 861 return -EFAULT;
862 } 862 }
863 } 863 }
864 864
865 /* ASSERT: channel is idle */ 865 /* ASSERT: channel is idle */
866 if (test_ch_en(mid->dma_base, midc->ch_id)) { 866 if (test_ch_en(mid->dma_base, midc->ch_id)) {
867 /*ch is not idle*/ 867 /*ch is not idle*/
868 pr_err("ERR_MDMA: ch not idle\n"); 868 pr_err("ERR_MDMA: ch not idle\n");
869 pm_runtime_put(&mid->pdev->dev); 869 pm_runtime_put(&mid->pdev->dev);
870 return -EIO; 870 return -EIO;
871 } 871 }
872 dma_cookie_init(chan); 872 dma_cookie_init(chan);
873 873
874 spin_lock_bh(&midc->lock); 874 spin_lock_bh(&midc->lock);
875 while (midc->descs_allocated < DESCS_PER_CHANNEL) { 875 while (midc->descs_allocated < DESCS_PER_CHANNEL) {
876 spin_unlock_bh(&midc->lock); 876 spin_unlock_bh(&midc->lock);
877 desc = pci_pool_alloc(mid->dma_pool, GFP_KERNEL, &phys); 877 desc = pci_pool_alloc(mid->dma_pool, GFP_KERNEL, &phys);
878 if (!desc) { 878 if (!desc) {
879 pr_err("ERR_MDMA: desc failed\n"); 879 pr_err("ERR_MDMA: desc failed\n");
880 pm_runtime_put(&mid->pdev->dev); 880 pm_runtime_put(&mid->pdev->dev);
881 return -ENOMEM; 881 return -ENOMEM;
882 /*check*/ 882 /*check*/
883 } 883 }
884 dma_async_tx_descriptor_init(&desc->txd, chan); 884 dma_async_tx_descriptor_init(&desc->txd, chan);
885 desc->txd.tx_submit = intel_mid_dma_tx_submit; 885 desc->txd.tx_submit = intel_mid_dma_tx_submit;
886 desc->txd.flags = DMA_CTRL_ACK; 886 desc->txd.flags = DMA_CTRL_ACK;
887 desc->txd.phys = phys; 887 desc->txd.phys = phys;
888 spin_lock_bh(&midc->lock); 888 spin_lock_bh(&midc->lock);
889 i = ++midc->descs_allocated; 889 i = ++midc->descs_allocated;
890 list_add_tail(&desc->desc_node, &midc->free_list); 890 list_add_tail(&desc->desc_node, &midc->free_list);
891 } 891 }
892 spin_unlock_bh(&midc->lock); 892 spin_unlock_bh(&midc->lock);
893 midc->in_use = true; 893 midc->in_use = true;
894 midc->busy = false; 894 midc->busy = false;
895 pr_debug("MID_DMA: Desc alloc done ret: %d desc\n", i); 895 pr_debug("MID_DMA: Desc alloc done ret: %d desc\n", i);
896 return i; 896 return i;
897 } 897 }
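
alloc_chan_resources above runs when the dmaengine core hands one of these channels to a client. A sketch of the usual request path with a private filter function (my_filter, my_get_chan and the id match are purely illustrative):

        #include <linux/dmaengine.h>

        static bool my_filter(struct dma_chan *chan, void *param)
        {
                /* match on whatever identifies the wanted channel */
                return chan->chan_id == *(int *)param;
        }

        static struct dma_chan *my_get_chan(int wanted_id)
        {
                dma_cap_mask_t mask;

                dma_cap_zero(mask);
                dma_cap_set(DMA_SLAVE, mask);
                /* dma_request_channel() calls the driver's
                 * alloc_chan_resources hook on the matched channel */
                return dma_request_channel(mask, my_filter, &wanted_id);
        }
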
898 898
899 /** 899 /**
900 * midc_handle_error - Handle DMA txn error 900 * midc_handle_error - Handle DMA txn error
901 * @mid: controller where error occurred 901 * @mid: controller where error occurred
902 * @midc: chan where error occurred 902 * @midc: chan where error occurred
903 * 903 *
904 * Scan the descriptor for error 904 * Scan the descriptor for error
905 */ 905 */
906 static void midc_handle_error(struct middma_device *mid, 906 static void midc_handle_error(struct middma_device *mid,
907 struct intel_mid_dma_chan *midc) 907 struct intel_mid_dma_chan *midc)
908 { 908 {
909 midc_scan_descriptors(mid, midc); 909 midc_scan_descriptors(mid, midc);
910 } 910 }
911 911
912 /** 912 /**
913 * dma_tasklet - DMA interrupt tasklet 913 * dma_tasklet - DMA interrupt tasklet
914 * @data: tasklet arg (the controller structure) 914 * @data: tasklet arg (the controller structure)
915 * 915 *
916 * Scan the controller for interrupts for completion/error 916 * Scan the controller for interrupts for completion/error
917 * Clear the interrupt and call for handling completion/error 917 * Clear the interrupt and call for handling completion/error
918 */ 918 */
919 static void dma_tasklet(unsigned long data) 919 static void dma_tasklet(unsigned long data)
920 { 920 {
921 struct middma_device *mid = NULL; 921 struct middma_device *mid = NULL;
922 struct intel_mid_dma_chan *midc = NULL; 922 struct intel_mid_dma_chan *midc = NULL;
923 u32 status, raw_tfr, raw_block; 923 u32 status, raw_tfr, raw_block;
924 int i; 924 int i;
925 925
926 mid = (struct middma_device *)data; 926 mid = (struct middma_device *)data;
927 if (mid == NULL) { 927 if (mid == NULL) {
928 pr_err("ERR_MDMA: tasklet Null param\n"); 928 pr_err("ERR_MDMA: tasklet Null param\n");
929 return; 929 return;
930 } 930 }
931 pr_debug("MDMA: in tasklet for device %x\n", mid->pci_id); 931 pr_debug("MDMA: in tasklet for device %x\n", mid->pci_id);
932 raw_tfr = ioread32(mid->dma_base + RAW_TFR); 932 raw_tfr = ioread32(mid->dma_base + RAW_TFR);
933 raw_block = ioread32(mid->dma_base + RAW_BLOCK); 933 raw_block = ioread32(mid->dma_base + RAW_BLOCK);
934 status = raw_tfr | raw_block; 934 status = raw_tfr | raw_block;
935 status &= mid->intr_mask; 935 status &= mid->intr_mask;
936 while (status) { 936 while (status) {
937 /*txn interrupt*/ 937 /*txn interrupt*/
938 i = get_ch_index(&status, mid->chan_base); 938 i = get_ch_index(&status, mid->chan_base);
939 if (i < 0) { 939 if (i < 0) {
940 			pr_err("ERR_MDMA:Invalid ch index %d\n", i); 940 			pr_err("ERR_MDMA:Invalid ch index %d\n", i);
941 return; 941 return;
942 } 942 }
943 midc = &mid->ch[i]; 943 midc = &mid->ch[i];
944 if (midc == NULL) { 944 if (midc == NULL) {
945 pr_err("ERR_MDMA:Null param midc\n"); 945 pr_err("ERR_MDMA:Null param midc\n");
946 return; 946 return;
947 } 947 }
948 pr_debug("MDMA:Tx complete interrupt %x, Ch No %d Index %d\n", 948 pr_debug("MDMA:Tx complete interrupt %x, Ch No %d Index %d\n",
949 status, midc->ch_id, i); 949 status, midc->ch_id, i);
950 midc->raw_tfr = raw_tfr; 950 midc->raw_tfr = raw_tfr;
951 midc->raw_block = raw_block; 951 midc->raw_block = raw_block;
952 spin_lock_bh(&midc->lock); 952 spin_lock_bh(&midc->lock);
953 		/* clear this interrupt first */ 953 		/* clear this interrupt first */
954 iowrite32((1 << midc->ch_id), mid->dma_base + CLEAR_TFR); 954 iowrite32((1 << midc->ch_id), mid->dma_base + CLEAR_TFR);
955 if (raw_block) { 955 if (raw_block) {
956 iowrite32((1 << midc->ch_id), 956 iowrite32((1 << midc->ch_id),
957 mid->dma_base + CLEAR_BLOCK); 957 mid->dma_base + CLEAR_BLOCK);
958 } 958 }
959 midc_scan_descriptors(mid, midc); 959 midc_scan_descriptors(mid, midc);
960 pr_debug("MDMA:Scan of desc... complete, unmasking\n"); 960 pr_debug("MDMA:Scan of desc... complete, unmasking\n");
961 iowrite32(UNMASK_INTR_REG(midc->ch_id), 961 iowrite32(UNMASK_INTR_REG(midc->ch_id),
962 mid->dma_base + MASK_TFR); 962 mid->dma_base + MASK_TFR);
963 if (raw_block) { 963 if (raw_block) {
964 iowrite32(UNMASK_INTR_REG(midc->ch_id), 964 iowrite32(UNMASK_INTR_REG(midc->ch_id),
965 mid->dma_base + MASK_BLOCK); 965 mid->dma_base + MASK_BLOCK);
966 } 966 }
967 spin_unlock_bh(&midc->lock); 967 spin_unlock_bh(&midc->lock);
968 } 968 }
969 969
970 status = ioread32(mid->dma_base + RAW_ERR); 970 status = ioread32(mid->dma_base + RAW_ERR);
971 status &= mid->intr_mask; 971 status &= mid->intr_mask;
972 while (status) { 972 while (status) {
973 /*err interrupt*/ 973 /*err interrupt*/
974 i = get_ch_index(&status, mid->chan_base); 974 i = get_ch_index(&status, mid->chan_base);
975 if (i < 0) { 975 if (i < 0) {
976 			pr_err("ERR_MDMA:Invalid ch index %d\n", i); 976 			pr_err("ERR_MDMA:Invalid ch index %d\n", i);
977 return; 977 return;
978 } 978 }
979 midc = &mid->ch[i]; 979 midc = &mid->ch[i];
980 if (midc == NULL) { 980 if (midc == NULL) {
981 pr_err("ERR_MDMA:Null param midc\n"); 981 pr_err("ERR_MDMA:Null param midc\n");
982 return; 982 return;
983 } 983 }
984 		pr_debug("MDMA:Tx error interrupt %x, Ch No %d Index %d\n", 984 		pr_debug("MDMA:Tx error interrupt %x, Ch No %d Index %d\n",
985 status, midc->ch_id, i); 985 status, midc->ch_id, i);
986 986
987 iowrite32((1 << midc->ch_id), mid->dma_base + CLEAR_ERR); 987 iowrite32((1 << midc->ch_id), mid->dma_base + CLEAR_ERR);
988 spin_lock_bh(&midc->lock); 988 spin_lock_bh(&midc->lock);
989 midc_handle_error(mid, midc); 989 midc_handle_error(mid, midc);
990 iowrite32(UNMASK_INTR_REG(midc->ch_id), 990 iowrite32(UNMASK_INTR_REG(midc->ch_id),
991 mid->dma_base + MASK_ERR); 991 mid->dma_base + MASK_ERR);
992 spin_unlock_bh(&midc->lock); 992 spin_unlock_bh(&midc->lock);
993 } 993 }
994 	pr_debug("MDMA:Exiting tasklet...\n"); 994 	pr_debug("MDMA:Exiting tasklet...\n");
995 return; 995 return;
996 } 996 }
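
The hard-IRQ/tasklet split used above (the ISR masks and defers, the tasklet scans descriptors and unmasks) is the standard deferred-work pattern. A minimal sketch under that assumption; all names here are illustrative, not part of this driver:

        #include <linux/interrupt.h>

        static struct tasklet_struct my_tasklet;

        static void my_bottom_half(unsigned long data)
        {
                /* scan status registers, complete descriptors, unmask IRQs */
        }

        static irqreturn_t my_isr(int irq, void *dev_id)
        {
                /* mask/acknowledge the interrupt source here ... */
                tasklet_schedule(&my_tasklet);  /* defer the heavy work */
                return IRQ_HANDLED;
        }

        static void my_init(void *dev)
        {
                tasklet_init(&my_tasklet, my_bottom_half, (unsigned long)dev);
        }
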
997 997
998 static void dma_tasklet1(unsigned long data) 998 static void dma_tasklet1(unsigned long data)
999 { 999 {
1000 	pr_debug("MDMA:in tasklet1...\n"); 1000 	pr_debug("MDMA:in tasklet1...\n");
1001 return dma_tasklet(data); 1001 return dma_tasklet(data);
1002 } 1002 }
1003 1003
1004 static void dma_tasklet2(unsigned long data) 1004 static void dma_tasklet2(unsigned long data)
1005 { 1005 {
1006 	pr_debug("MDMA:in tasklet2...\n"); 1006 	pr_debug("MDMA:in tasklet2...\n");
1007 return dma_tasklet(data); 1007 return dma_tasklet(data);
1008 } 1008 }
1009 1009
1010 /** 1010 /**
1011 * intel_mid_dma_interrupt - DMA ISR 1011 * intel_mid_dma_interrupt - DMA ISR
1012 * @irq: IRQ where interrupt occurred 1012 * @irq: IRQ where interrupt occurred
1013 * @data: ISR callback data (the controller structure) 1013 * @data: ISR callback data (the controller structure)
1014 * 1014 *
1015 * Check whether this is our interrupt; if so, schedule the tasklet, 1015 * Check whether this is our interrupt; if so, schedule the tasklet,
1016 * otherwise ignore it. 1016 * otherwise ignore it.
1017 */ 1017 */
1018 static irqreturn_t intel_mid_dma_interrupt(int irq, void *data) 1018 static irqreturn_t intel_mid_dma_interrupt(int irq, void *data)
1019 { 1019 {
1020 struct middma_device *mid = data; 1020 struct middma_device *mid = data;
1021 u32 tfr_status, err_status; 1021 u32 tfr_status, err_status;
1022 int call_tasklet = 0; 1022 int call_tasklet = 0;
1023 1023
1024 tfr_status = ioread32(mid->dma_base + RAW_TFR); 1024 tfr_status = ioread32(mid->dma_base + RAW_TFR);
1025 err_status = ioread32(mid->dma_base + RAW_ERR); 1025 err_status = ioread32(mid->dma_base + RAW_ERR);
1026 if (!tfr_status && !err_status) 1026 if (!tfr_status && !err_status)
1027 return IRQ_NONE; 1027 return IRQ_NONE;
1028 1028
1029 /*DMA Interrupt*/ 1029 /*DMA Interrupt*/
1030 pr_debug("MDMA:Got an interrupt on irq %d\n", irq); 1030 pr_debug("MDMA:Got an interrupt on irq %d\n", irq);
1031 pr_debug("MDMA: Status %x, Mask %x\n", tfr_status, mid->intr_mask); 1031 pr_debug("MDMA: Status %x, Mask %x\n", tfr_status, mid->intr_mask);
1032 tfr_status &= mid->intr_mask; 1032 tfr_status &= mid->intr_mask;
1033 if (tfr_status) { 1033 if (tfr_status) {
1034 /*need to disable intr*/ 1034 /*need to disable intr*/
1035 iowrite32((tfr_status << INT_MASK_WE), mid->dma_base + MASK_TFR); 1035 iowrite32((tfr_status << INT_MASK_WE), mid->dma_base + MASK_TFR);
1036 iowrite32((tfr_status << INT_MASK_WE), mid->dma_base + MASK_BLOCK); 1036 iowrite32((tfr_status << INT_MASK_WE), mid->dma_base + MASK_BLOCK);
1037 pr_debug("MDMA: Calling tasklet %x\n", tfr_status); 1037 pr_debug("MDMA: Calling tasklet %x\n", tfr_status);
1038 call_tasklet = 1; 1038 call_tasklet = 1;
1039 } 1039 }
1040 err_status &= mid->intr_mask; 1040 err_status &= mid->intr_mask;
1041 if (err_status) { 1041 if (err_status) {
1042 iowrite32((err_status << INT_MASK_WE), 1042 iowrite32((err_status << INT_MASK_WE),
1043 mid->dma_base + MASK_ERR); 1043 mid->dma_base + MASK_ERR);
1044 call_tasklet = 1; 1044 call_tasklet = 1;
1045 } 1045 }
1046 if (call_tasklet) 1046 if (call_tasklet)
1047 tasklet_schedule(&mid->tasklet); 1047 tasklet_schedule(&mid->tasklet);
1048 1048
1049 return IRQ_HANDLED; 1049 return IRQ_HANDLED;
1050 } 1050 }
1051 1051
1052 static irqreturn_t intel_mid_dma_interrupt1(int irq, void *data) 1052 static irqreturn_t intel_mid_dma_interrupt1(int irq, void *data)
1053 { 1053 {
1054 return intel_mid_dma_interrupt(irq, data); 1054 return intel_mid_dma_interrupt(irq, data);
1055 } 1055 }
1056 1056
1057 static irqreturn_t intel_mid_dma_interrupt2(int irq, void *data) 1057 static irqreturn_t intel_mid_dma_interrupt2(int irq, void *data)
1058 { 1058 {
1059 return intel_mid_dma_interrupt(irq, data); 1059 return intel_mid_dma_interrupt(irq, data);
1060 } 1060 }
1061 1061
1062 /** 1062 /**
1063 * mid_setup_dma - Setup the DMA controller 1063 * mid_setup_dma - Setup the DMA controller
1064 * @pdev: Controller PCI device structure 1064 * @pdev: Controller PCI device structure
1065 * 1065 *
1066 * Initialize the DMA controller and its channels, register with the 1066 * Initialize the DMA controller and its channels, register with the
1067 * DMA engine and set up the ISR. 1067 * DMA engine and set up the ISR.
1068 */ 1068 */
1069 static int mid_setup_dma(struct pci_dev *pdev) 1069 static int mid_setup_dma(struct pci_dev *pdev)
1070 { 1070 {
1071 struct middma_device *dma = pci_get_drvdata(pdev); 1071 struct middma_device *dma = pci_get_drvdata(pdev);
1072 int err, i; 1072 int err, i;
1073 1073
1074 /* DMA coherent memory pool for DMA descriptor allocations */ 1074 /* DMA coherent memory pool for DMA descriptor allocations */
1075 dma->dma_pool = pci_pool_create("intel_mid_dma_desc_pool", pdev, 1075 dma->dma_pool = pci_pool_create("intel_mid_dma_desc_pool", pdev,
1076 sizeof(struct intel_mid_dma_desc), 1076 sizeof(struct intel_mid_dma_desc),
1077 32, 0); 1077 32, 0);
1078 if (NULL == dma->dma_pool) { 1078 if (NULL == dma->dma_pool) {
1079 pr_err("ERR_MDMA:pci_pool_create failed\n"); 1079 pr_err("ERR_MDMA:pci_pool_create failed\n");
1080 err = -ENOMEM; 1080 err = -ENOMEM;
1081 goto err_dma_pool; 1081 goto err_dma_pool;
1082 } 1082 }
1083 1083
1084 INIT_LIST_HEAD(&dma->common.channels); 1084 INIT_LIST_HEAD(&dma->common.channels);
1085 dma->pci_id = pdev->device; 1085 dma->pci_id = pdev->device;
1086 if (dma->pimr_mask) { 1086 if (dma->pimr_mask) {
1087 dma->mask_reg = ioremap(LNW_PERIPHRAL_MASK_BASE, 1087 dma->mask_reg = ioremap(LNW_PERIPHRAL_MASK_BASE,
1088 LNW_PERIPHRAL_MASK_SIZE); 1088 LNW_PERIPHRAL_MASK_SIZE);
1089 if (dma->mask_reg == NULL) { 1089 if (dma->mask_reg == NULL) {
1090 			pr_err("ERR_MDMA:Can't map peripheral intr space\n"); 1090 			pr_err("ERR_MDMA:Can't map peripheral intr space\n");
1091 err = -ENOMEM; 1091 err = -ENOMEM;
1092 goto err_ioremap; 1092 goto err_ioremap;
1093 } 1093 }
1094 } else 1094 } else
1095 dma->mask_reg = NULL; 1095 dma->mask_reg = NULL;
1096 1096
1097 	pr_debug("MDMA:Adding %d channels for this controller\n", dma->max_chan); 1097 	pr_debug("MDMA:Adding %d channels for this controller\n", dma->max_chan);
1098 /*init CH structures*/ 1098 /*init CH structures*/
1099 dma->intr_mask = 0; 1099 dma->intr_mask = 0;
1100 dma->state = RUNNING; 1100 dma->state = RUNNING;
1101 for (i = 0; i < dma->max_chan; i++) { 1101 for (i = 0; i < dma->max_chan; i++) {
1102 struct intel_mid_dma_chan *midch = &dma->ch[i]; 1102 struct intel_mid_dma_chan *midch = &dma->ch[i];
1103 1103
1104 midch->chan.device = &dma->common; 1104 midch->chan.device = &dma->common;
1105 dma_cookie_init(&midch->chan); 1105 dma_cookie_init(&midch->chan);
1106 midch->ch_id = dma->chan_base + i; 1106 midch->ch_id = dma->chan_base + i;
1107 pr_debug("MDMA:Init CH %d, ID %d\n", i, midch->ch_id); 1107 pr_debug("MDMA:Init CH %d, ID %d\n", i, midch->ch_id);
1108 1108
1109 midch->dma_base = dma->dma_base; 1109 midch->dma_base = dma->dma_base;
1110 midch->ch_regs = dma->dma_base + DMA_CH_SIZE * midch->ch_id; 1110 midch->ch_regs = dma->dma_base + DMA_CH_SIZE * midch->ch_id;
1111 midch->dma = dma; 1111 midch->dma = dma;
1112 dma->intr_mask |= 1 << (dma->chan_base + i); 1112 dma->intr_mask |= 1 << (dma->chan_base + i);
1113 spin_lock_init(&midch->lock); 1113 spin_lock_init(&midch->lock);
1114 1114
1115 INIT_LIST_HEAD(&midch->active_list); 1115 INIT_LIST_HEAD(&midch->active_list);
1116 INIT_LIST_HEAD(&midch->queue); 1116 INIT_LIST_HEAD(&midch->queue);
1117 INIT_LIST_HEAD(&midch->free_list); 1117 INIT_LIST_HEAD(&midch->free_list);
1118 /*mask interrupts*/ 1118 /*mask interrupts*/
1119 iowrite32(MASK_INTR_REG(midch->ch_id), 1119 iowrite32(MASK_INTR_REG(midch->ch_id),
1120 dma->dma_base + MASK_BLOCK); 1120 dma->dma_base + MASK_BLOCK);
1121 iowrite32(MASK_INTR_REG(midch->ch_id), 1121 iowrite32(MASK_INTR_REG(midch->ch_id),
1122 dma->dma_base + MASK_SRC_TRAN); 1122 dma->dma_base + MASK_SRC_TRAN);
1123 iowrite32(MASK_INTR_REG(midch->ch_id), 1123 iowrite32(MASK_INTR_REG(midch->ch_id),
1124 dma->dma_base + MASK_DST_TRAN); 1124 dma->dma_base + MASK_DST_TRAN);
1125 iowrite32(MASK_INTR_REG(midch->ch_id), 1125 iowrite32(MASK_INTR_REG(midch->ch_id),
1126 dma->dma_base + MASK_ERR); 1126 dma->dma_base + MASK_ERR);
1127 iowrite32(MASK_INTR_REG(midch->ch_id), 1127 iowrite32(MASK_INTR_REG(midch->ch_id),
1128 dma->dma_base + MASK_TFR); 1128 dma->dma_base + MASK_TFR);
1129 1129
1130 disable_dma_interrupt(midch); 1130 disable_dma_interrupt(midch);
1131 list_add_tail(&midch->chan.device_node, &dma->common.channels); 1131 list_add_tail(&midch->chan.device_node, &dma->common.channels);
1132 } 1132 }
1133 pr_debug("MDMA: Calc Mask as %x for this controller\n", dma->intr_mask); 1133 pr_debug("MDMA: Calc Mask as %x for this controller\n", dma->intr_mask);
1134 1134
1135 /*init dma structure*/ 1135 /*init dma structure*/
1136 dma_cap_zero(dma->common.cap_mask); 1136 dma_cap_zero(dma->common.cap_mask);
1137 dma_cap_set(DMA_MEMCPY, dma->common.cap_mask); 1137 dma_cap_set(DMA_MEMCPY, dma->common.cap_mask);
1138 dma_cap_set(DMA_SLAVE, dma->common.cap_mask); 1138 dma_cap_set(DMA_SLAVE, dma->common.cap_mask);
1139 dma_cap_set(DMA_PRIVATE, dma->common.cap_mask); 1139 dma_cap_set(DMA_PRIVATE, dma->common.cap_mask);
1140 dma->common.dev = &pdev->dev; 1140 dma->common.dev = &pdev->dev;
1141 1141
1142 dma->common.device_alloc_chan_resources = 1142 dma->common.device_alloc_chan_resources =
1143 intel_mid_dma_alloc_chan_resources; 1143 intel_mid_dma_alloc_chan_resources;
1144 dma->common.device_free_chan_resources = 1144 dma->common.device_free_chan_resources =
1145 intel_mid_dma_free_chan_resources; 1145 intel_mid_dma_free_chan_resources;
1146 1146
1147 dma->common.device_tx_status = intel_mid_dma_tx_status; 1147 dma->common.device_tx_status = intel_mid_dma_tx_status;
1148 dma->common.device_prep_dma_memcpy = intel_mid_dma_prep_memcpy; 1148 dma->common.device_prep_dma_memcpy = intel_mid_dma_prep_memcpy;
1149 dma->common.device_issue_pending = intel_mid_dma_issue_pending; 1149 dma->common.device_issue_pending = intel_mid_dma_issue_pending;
1150 dma->common.device_prep_slave_sg = intel_mid_dma_prep_slave_sg; 1150 dma->common.device_prep_slave_sg = intel_mid_dma_prep_slave_sg;
1151 dma->common.device_control = intel_mid_dma_device_control; 1151 dma->common.device_control = intel_mid_dma_device_control;
1152 1152
1153 /*enable dma cntrl*/ 1153 /*enable dma cntrl*/
1154 iowrite32(REG_BIT0, dma->dma_base + DMA_CFG); 1154 iowrite32(REG_BIT0, dma->dma_base + DMA_CFG);
1155 1155
1156 /*register irq */ 1156 /*register irq */
1157 if (dma->pimr_mask) { 1157 if (dma->pimr_mask) {
1158 pr_debug("MDMA:Requesting irq shared for DMAC1\n"); 1158 pr_debug("MDMA:Requesting irq shared for DMAC1\n");
1159 err = request_irq(pdev->irq, intel_mid_dma_interrupt1, 1159 err = request_irq(pdev->irq, intel_mid_dma_interrupt1,
1160 IRQF_SHARED, "INTEL_MID_DMAC1", dma); 1160 IRQF_SHARED, "INTEL_MID_DMAC1", dma);
1161 if (0 != err) 1161 if (0 != err)
1162 goto err_irq; 1162 goto err_irq;
1163 } else { 1163 } else {
1164 dma->intr_mask = 0x03; 1164 dma->intr_mask = 0x03;
1165 pr_debug("MDMA:Requesting irq for DMAC2\n"); 1165 pr_debug("MDMA:Requesting irq for DMAC2\n");
1166 err = request_irq(pdev->irq, intel_mid_dma_interrupt2, 1166 err = request_irq(pdev->irq, intel_mid_dma_interrupt2,
1167 IRQF_SHARED, "INTEL_MID_DMAC2", dma); 1167 IRQF_SHARED, "INTEL_MID_DMAC2", dma);
1168 if (0 != err) 1168 if (0 != err)
1169 goto err_irq; 1169 goto err_irq;
1170 } 1170 }
1171 /*register device w/ engine*/ 1171 /*register device w/ engine*/
1172 err = dma_async_device_register(&dma->common); 1172 err = dma_async_device_register(&dma->common);
1173 if (0 != err) { 1173 if (0 != err) {
1174 pr_err("ERR_MDMA:device_register failed: %d\n", err); 1174 pr_err("ERR_MDMA:device_register failed: %d\n", err);
1175 goto err_engine; 1175 goto err_engine;
1176 } 1176 }
1177 if (dma->pimr_mask) { 1177 if (dma->pimr_mask) {
1178 pr_debug("setting up tasklet1 for DMAC1\n"); 1178 pr_debug("setting up tasklet1 for DMAC1\n");
1179 tasklet_init(&dma->tasklet, dma_tasklet1, (unsigned long)dma); 1179 tasklet_init(&dma->tasklet, dma_tasklet1, (unsigned long)dma);
1180 } else { 1180 } else {
1181 pr_debug("setting up tasklet2 for DMAC2\n"); 1181 pr_debug("setting up tasklet2 for DMAC2\n");
1182 tasklet_init(&dma->tasklet, dma_tasklet2, (unsigned long)dma); 1182 tasklet_init(&dma->tasklet, dma_tasklet2, (unsigned long)dma);
1183 } 1183 }
1184 return 0; 1184 return 0;
1185 1185
1186 err_engine: 1186 err_engine:
1187 free_irq(pdev->irq, dma); 1187 free_irq(pdev->irq, dma);
1188 err_irq: 1188 err_irq:
1189 if (dma->mask_reg) 1189 if (dma->mask_reg)
1190 iounmap(dma->mask_reg); 1190 iounmap(dma->mask_reg);
1191 err_ioremap: 1191 err_ioremap:
1192 pci_pool_destroy(dma->dma_pool); 1192 pci_pool_destroy(dma->dma_pool);
1193 err_dma_pool: 1193 err_dma_pool:
1194 pr_err("ERR_MDMA:setup_dma failed: %d\n", err); 1194 pr_err("ERR_MDMA:setup_dma failed: %d\n", err);
1195 return err; 1195 return err;
1196 1196
1197 } 1197 }
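
mid_setup_dma() above wires intel_mid_dma_tx_status into device_tx_status; on the client side that hook is reached through the generic dmaengine helpers. A hedged polling sketch (my_wait_for_cookie is an illustrative name, the call sleeps so it only fits process context, and polling is only sensible where a completion callback cannot be used):

        #include <linux/dmaengine.h>
        #include <linux/delay.h>

        static int my_wait_for_cookie(struct dma_chan *chan, dma_cookie_t cookie)
        {
                enum dma_status status;
                int retries = 1000;

                do {
                        status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
                        if (status != DMA_IN_PROGRESS)
                                break;
                        usleep_range(100, 200); /* back off between polls */
                } while (--retries);

                if (status == DMA_IN_PROGRESS)
                        return -ETIMEDOUT;
                return status == DMA_ERROR ? -EIO : 0;
        }
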
1198 1198
1199 /** 1199 /**
1200 * middma_shutdown - Shutdown the DMA controller 1200 * middma_shutdown - Shutdown the DMA controller
1201 * @pdev: Controller PCI device structure 1201 * @pdev: Controller PCI device structure
1202 * 1202 *
1203 * Called by remove 1203 * Called by remove
1204 * Unregister the DMA controller, clear all structures and free the interrupt 1204 * Unregister the DMA controller, clear all structures and free the interrupt
1205 */ 1205 */
1206 static void middma_shutdown(struct pci_dev *pdev) 1206 static void middma_shutdown(struct pci_dev *pdev)
1207 { 1207 {
1208 struct middma_device *device = pci_get_drvdata(pdev); 1208 struct middma_device *device = pci_get_drvdata(pdev);
1209 1209
1210 dma_async_device_unregister(&device->common); 1210 dma_async_device_unregister(&device->common);
1211 pci_pool_destroy(device->dma_pool); 1211 pci_pool_destroy(device->dma_pool);
1212 if (device->mask_reg) 1212 if (device->mask_reg)
1213 iounmap(device->mask_reg); 1213 iounmap(device->mask_reg);
1214 if (device->dma_base) 1214 if (device->dma_base)
1215 iounmap(device->dma_base); 1215 iounmap(device->dma_base);
1216 free_irq(pdev->irq, device); 1216 free_irq(pdev->irq, device);
1217 return; 1217 return;
1218 } 1218 }
1219 1219
1220 /** 1220 /**
1221 * intel_mid_dma_probe - PCI Probe 1221 * intel_mid_dma_probe - PCI Probe
1222 * @pdev: Controller PCI device structure 1222 * @pdev: Controller PCI device structure
1223 * @id: pci device id structure 1223 * @id: pci device id structure
1224 * 1224 *
1225 * Initialize the PCI device, map BARs, query driver data. 1225 * Initialize the PCI device, map BARs, query driver data.
1226 * Call setup_dma to complete controller and channel initialization 1226 * Call setup_dma to complete controller and channel initialization
1227 */ 1227 */
1228 static int intel_mid_dma_probe(struct pci_dev *pdev, 1228 static int intel_mid_dma_probe(struct pci_dev *pdev,
1229 const struct pci_device_id *id) 1229 const struct pci_device_id *id)
1230 { 1230 {
1231 struct middma_device *device; 1231 struct middma_device *device;
1232 u32 base_addr, bar_size; 1232 u32 base_addr, bar_size;
1233 struct intel_mid_dma_probe_info *info; 1233 struct intel_mid_dma_probe_info *info;
1234 int err; 1234 int err;
1235 1235
1236 pr_debug("MDMA: probe for %x\n", pdev->device); 1236 pr_debug("MDMA: probe for %x\n", pdev->device);
1237 info = (void *)id->driver_data; 1237 info = (void *)id->driver_data;
1238 	pr_debug("MDMA: CH %d, base %d, block len %d, Peripheral mask %x\n", 1238 	pr_debug("MDMA: CH %d, base %d, block len %d, Peripheral mask %x\n",
1239 info->max_chan, info->ch_base, 1239 info->max_chan, info->ch_base,
1240 info->block_size, info->pimr_mask); 1240 info->block_size, info->pimr_mask);
1241 1241
1242 err = pci_enable_device(pdev); 1242 err = pci_enable_device(pdev);
1243 if (err) 1243 if (err)
1244 goto err_enable_device; 1244 goto err_enable_device;
1245 1245
1246 err = pci_request_regions(pdev, "intel_mid_dmac"); 1246 err = pci_request_regions(pdev, "intel_mid_dmac");
1247 if (err) 1247 if (err)
1248 goto err_request_regions; 1248 goto err_request_regions;
1249 1249
1250 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 1250 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
1251 if (err) 1251 if (err)
1252 goto err_set_dma_mask; 1252 goto err_set_dma_mask;
1253 1253
1254 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 1254 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
1255 if (err) 1255 if (err)
1256 goto err_set_dma_mask; 1256 goto err_set_dma_mask;
1257 1257
1258 device = kzalloc(sizeof(*device), GFP_KERNEL); 1258 device = kzalloc(sizeof(*device), GFP_KERNEL);
1259 if (!device) { 1259 if (!device) {
1260 pr_err("ERR_MDMA:kzalloc failed probe\n"); 1260 pr_err("ERR_MDMA:kzalloc failed probe\n");
1261 err = -ENOMEM; 1261 err = -ENOMEM;
1262 goto err_kzalloc; 1262 goto err_kzalloc;
1263 } 1263 }
1264 device->pdev = pci_dev_get(pdev); 1264 device->pdev = pci_dev_get(pdev);
1265 1265
1266 base_addr = pci_resource_start(pdev, 0); 1266 base_addr = pci_resource_start(pdev, 0);
1267 bar_size = pci_resource_len(pdev, 0); 1267 bar_size = pci_resource_len(pdev, 0);
1268 device->dma_base = ioremap_nocache(base_addr, DMA_REG_SIZE); 1268 device->dma_base = ioremap_nocache(base_addr, DMA_REG_SIZE);
1269 if (!device->dma_base) { 1269 if (!device->dma_base) {
1270 pr_err("ERR_MDMA:ioremap failed\n"); 1270 pr_err("ERR_MDMA:ioremap failed\n");
1271 err = -ENOMEM; 1271 err = -ENOMEM;
1272 goto err_ioremap; 1272 goto err_ioremap;
1273 } 1273 }
1274 pci_set_drvdata(pdev, device); 1274 pci_set_drvdata(pdev, device);
1275 pci_set_master(pdev); 1275 pci_set_master(pdev);
1276 device->max_chan = info->max_chan; 1276 device->max_chan = info->max_chan;
1277 device->chan_base = info->ch_base; 1277 device->chan_base = info->ch_base;
1278 device->block_size = info->block_size; 1278 device->block_size = info->block_size;
1279 device->pimr_mask = info->pimr_mask; 1279 device->pimr_mask = info->pimr_mask;
1280 1280
1281 err = mid_setup_dma(pdev); 1281 err = mid_setup_dma(pdev);
1282 if (err) 1282 if (err)
1283 goto err_dma; 1283 goto err_dma;
1284 1284
1285 pm_runtime_put_noidle(&pdev->dev); 1285 pm_runtime_put_noidle(&pdev->dev);
1286 pm_runtime_allow(&pdev->dev); 1286 pm_runtime_allow(&pdev->dev);
1287 return 0; 1287 return 0;
1288 1288
1289 err_dma: 1289 err_dma:
1290 iounmap(device->dma_base); 1290 iounmap(device->dma_base);
1291 err_ioremap: 1291 err_ioremap:
1292 pci_dev_put(pdev); 1292 pci_dev_put(pdev);
1293 kfree(device); 1293 kfree(device);
1294 err_kzalloc: 1294 err_kzalloc:
1295 err_set_dma_mask: 1295 err_set_dma_mask:
1296 pci_release_regions(pdev); 1296 pci_release_regions(pdev);
1297 pci_disable_device(pdev); 1297 pci_disable_device(pdev);
1298 err_request_regions: 1298 err_request_regions:
1299 err_enable_device: 1299 err_enable_device:
1300 pr_err("ERR_MDMA:Probe failed %d\n", err); 1300 pr_err("ERR_MDMA:Probe failed %d\n", err);
1301 return err; 1301 return err;
1302 } 1302 }
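
The probe above sets the streaming and coherent DMA masks in two separate calls; with the combined helper available in more recent kernels the same request can be expressed in one. A hedged equivalent fragment (my_set_masks is an illustrative name):

        #include <linux/dma-mapping.h>
        #include <linux/pci.h>

        static int my_set_masks(struct pci_dev *pdev)
        {
                /* covers both pci_set_dma_mask() and pci_set_consistent_dma_mask() */
                return dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
        }
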
1303 1303
1304 /** 1304 /**
1305 * intel_mid_dma_remove - PCI remove 1305 * intel_mid_dma_remove - PCI remove
1306 * @pdev: Controller PCI device structure 1306 * @pdev: Controller PCI device structure
1307 * 1307 *
1308 * Free up all resources and data 1308 * Free up all resources and data
1309 * Call shutdown_dma to complete controller and channel cleanup 1309 * Call shutdown_dma to complete controller and channel cleanup
1310 */ 1310 */
1311 static void intel_mid_dma_remove(struct pci_dev *pdev) 1311 static void intel_mid_dma_remove(struct pci_dev *pdev)
1312 { 1312 {
1313 struct middma_device *device = pci_get_drvdata(pdev); 1313 struct middma_device *device = pci_get_drvdata(pdev);
1314 1314
1315 pm_runtime_get_noresume(&pdev->dev); 1315 pm_runtime_get_noresume(&pdev->dev);
1316 pm_runtime_forbid(&pdev->dev); 1316 pm_runtime_forbid(&pdev->dev);
1317 middma_shutdown(pdev); 1317 middma_shutdown(pdev);
1318 pci_dev_put(pdev); 1318 pci_dev_put(pdev);
1319 kfree(device); 1319 kfree(device);
1320 pci_release_regions(pdev); 1320 pci_release_regions(pdev);
1321 pci_disable_device(pdev); 1321 pci_disable_device(pdev);
1322 } 1322 }
1323 1323
1324 /* Power Management */ 1324 /* Power Management */
1325 /* 1325 /*
1326 * dma_suspend - PCI suspend function 1326 * dma_suspend - PCI suspend function
1327 * 1327 *
1328 * @dev: device structure of the DMA controller 1328 * @dev: device structure of the DMA controller
1329 * 1329 *
1330 * 1330 *
1331 * This function is called by the OS when a power event occurs 1331 * This function is called by the OS when a power event occurs
1332 */ 1332 */
1333 static int dma_suspend(struct device *dev) 1333 static int dma_suspend(struct device *dev)
1334 { 1334 {
1335 struct pci_dev *pci = to_pci_dev(dev); 1335 struct pci_dev *pci = to_pci_dev(dev);
1336 int i; 1336 int i;
1337 struct middma_device *device = pci_get_drvdata(pci); 1337 struct middma_device *device = pci_get_drvdata(pci);
1338 pr_debug("MDMA: dma_suspend called\n"); 1338 pr_debug("MDMA: dma_suspend called\n");
1339 1339
1340 for (i = 0; i < device->max_chan; i++) { 1340 for (i = 0; i < device->max_chan; i++) {
1341 if (device->ch[i].in_use) 1341 if (device->ch[i].in_use)
1342 return -EAGAIN; 1342 return -EAGAIN;
1343 } 1343 }
1344 dmac1_mask_periphral_intr(device); 1344 dmac1_mask_periphral_intr(device);
1345 device->state = SUSPENDED; 1345 device->state = SUSPENDED;
1346 pci_save_state(pci); 1346 pci_save_state(pci);
1347 pci_disable_device(pci); 1347 pci_disable_device(pci);
1348 pci_set_power_state(pci, PCI_D3hot); 1348 pci_set_power_state(pci, PCI_D3hot);
1349 return 0; 1349 return 0;
1350 } 1350 }
1351 1351
1352 /** 1352 /**
1353 * dma_resume - PCI resume function 1353 * dma_resume - PCI resume function
1354 * 1354 *
1355 * @dev: device structure of the DMA controller 1355 * @dev: device structure of the DMA controller
1356 * 1356 *
1357 * This function is called by the OS when a power event occurs 1357 * This function is called by the OS when a power event occurs
1358 */ 1358 */
1359 int dma_resume(struct device *dev) 1359 int dma_resume(struct device *dev)
1360 { 1360 {
1361 struct pci_dev *pci = to_pci_dev(dev); 1361 struct pci_dev *pci = to_pci_dev(dev);
1362 int ret; 1362 int ret;
1363 struct middma_device *device = pci_get_drvdata(pci); 1363 struct middma_device *device = pci_get_drvdata(pci);
1364 1364
1365 pr_debug("MDMA: dma_resume called\n"); 1365 pr_debug("MDMA: dma_resume called\n");
1366 pci_set_power_state(pci, PCI_D0); 1366 pci_set_power_state(pci, PCI_D0);
1367 pci_restore_state(pci); 1367 pci_restore_state(pci);
1368 ret = pci_enable_device(pci); 1368 ret = pci_enable_device(pci);
1369 if (ret) { 1369 if (ret) {
1370 pr_err("MDMA: device can't be enabled for %x\n", pci->device); 1370 pr_err("MDMA: device can't be enabled for %x\n", pci->device);
1371 return ret; 1371 return ret;
1372 } 1372 }
1373 device->state = RUNNING; 1373 device->state = RUNNING;
1374 iowrite32(REG_BIT0, device->dma_base + DMA_CFG); 1374 iowrite32(REG_BIT0, device->dma_base + DMA_CFG);
1375 return 0; 1375 return 0;
1376 } 1376 }
1377 1377
1378 static int dma_runtime_suspend(struct device *dev) 1378 static int dma_runtime_suspend(struct device *dev)
1379 { 1379 {
1380 struct pci_dev *pci_dev = to_pci_dev(dev); 1380 struct pci_dev *pci_dev = to_pci_dev(dev);
1381 struct middma_device *device = pci_get_drvdata(pci_dev); 1381 struct middma_device *device = pci_get_drvdata(pci_dev);
1382 1382
1383 device->state = SUSPENDED; 1383 device->state = SUSPENDED;
1384 return 0; 1384 return 0;
1385 } 1385 }
1386 1386
1387 static int dma_runtime_resume(struct device *dev) 1387 static int dma_runtime_resume(struct device *dev)
1388 { 1388 {
1389 struct pci_dev *pci_dev = to_pci_dev(dev); 1389 struct pci_dev *pci_dev = to_pci_dev(dev);
1390 struct middma_device *device = pci_get_drvdata(pci_dev); 1390 struct middma_device *device = pci_get_drvdata(pci_dev);
1391 1391
1392 device->state = RUNNING; 1392 device->state = RUNNING;
1393 iowrite32(REG_BIT0, device->dma_base + DMA_CFG); 1393 iowrite32(REG_BIT0, device->dma_base + DMA_CFG);
1394 return 0; 1394 return 0;
1395 } 1395 }
1396 1396
1397 static int dma_runtime_idle(struct device *dev) 1397 static int dma_runtime_idle(struct device *dev)
1398 { 1398 {
1399 struct pci_dev *pdev = to_pci_dev(dev); 1399 struct pci_dev *pdev = to_pci_dev(dev);
1400 struct middma_device *device = pci_get_drvdata(pdev); 1400 struct middma_device *device = pci_get_drvdata(pdev);
1401 int i; 1401 int i;
1402 1402
1403 for (i = 0; i < device->max_chan; i++) { 1403 for (i = 0; i < device->max_chan; i++) {
1404 if (device->ch[i].in_use) 1404 if (device->ch[i].in_use)
1405 return -EAGAIN; 1405 return -EAGAIN;
1406 } 1406 }
1407 1407
1408 return 0; 1408 return 0;
1409 } 1409 }
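
The runtime-PM hooks above pair with the pm_runtime_get_sync()/pm_runtime_put() calls in the channel alloc/free paths earlier in this file. A minimal sketch of that pairing on the consumer side (function names are illustrative):

        #include <linux/pm_runtime.h>

        static int my_channel_open(struct device *dev)
        {
                int ret = pm_runtime_get_sync(dev);     /* resume if suspended */

                if (ret < 0) {
                        pm_runtime_put_noidle(dev);     /* balance the failed get */
                        return ret;
                }
                return 0;
        }

        static void my_channel_close(struct device *dev)
        {
                pm_runtime_put(dev);                    /* allow runtime suspend again */
        }
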
1410 1410
1411 /****************************************************************************** 1411 /******************************************************************************
1412 * PCI stuff 1412 * PCI stuff
1413 */ 1413 */
1414 static struct pci_device_id intel_mid_dma_ids[] = { 1414 static struct pci_device_id intel_mid_dma_ids[] = {
1415 { PCI_VDEVICE(INTEL, INTEL_MID_DMAC1_ID), INFO(2, 6, 4095, 0x200020)}, 1415 { PCI_VDEVICE(INTEL, INTEL_MID_DMAC1_ID), INFO(2, 6, 4095, 0x200020)},
1416 { PCI_VDEVICE(INTEL, INTEL_MID_DMAC2_ID), INFO(2, 0, 2047, 0)}, 1416 { PCI_VDEVICE(INTEL, INTEL_MID_DMAC2_ID), INFO(2, 0, 2047, 0)},
1417 { PCI_VDEVICE(INTEL, INTEL_MID_GP_DMAC2_ID), INFO(2, 0, 2047, 0)}, 1417 { PCI_VDEVICE(INTEL, INTEL_MID_GP_DMAC2_ID), INFO(2, 0, 2047, 0)},
1418 { PCI_VDEVICE(INTEL, INTEL_MFLD_DMAC1_ID), INFO(4, 0, 4095, 0x400040)}, 1418 { PCI_VDEVICE(INTEL, INTEL_MFLD_DMAC1_ID), INFO(4, 0, 4095, 0x400040)},
1419 { 0, } 1419 { 0, }
1420 }; 1420 };
1421 MODULE_DEVICE_TABLE(pci, intel_mid_dma_ids); 1421 MODULE_DEVICE_TABLE(pci, intel_mid_dma_ids);
1422 1422
1423 static const struct dev_pm_ops intel_mid_dma_pm = { 1423 static const struct dev_pm_ops intel_mid_dma_pm = {
1424 .runtime_suspend = dma_runtime_suspend, 1424 .runtime_suspend = dma_runtime_suspend,
1425 .runtime_resume = dma_runtime_resume, 1425 .runtime_resume = dma_runtime_resume,
1426 .runtime_idle = dma_runtime_idle, 1426 .runtime_idle = dma_runtime_idle,
1427 .suspend = dma_suspend, 1427 .suspend = dma_suspend,
1428 .resume = dma_resume, 1428 .resume = dma_resume,
1429 }; 1429 };
1430 1430
1431 static struct pci_driver intel_mid_dma_pci_driver = { 1431 static struct pci_driver intel_mid_dma_pci_driver = {
1432 .name = "Intel MID DMA", 1432 .name = "Intel MID DMA",
1433 .id_table = intel_mid_dma_ids, 1433 .id_table = intel_mid_dma_ids,
1434 .probe = intel_mid_dma_probe, 1434 .probe = intel_mid_dma_probe,
1435 .remove = intel_mid_dma_remove, 1435 .remove = intel_mid_dma_remove,
1436 #ifdef CONFIG_PM 1436 #ifdef CONFIG_PM
1437 .driver = { 1437 .driver = {
1438 .pm = &intel_mid_dma_pm, 1438 .pm = &intel_mid_dma_pm,
1439 }, 1439 },
1440 #endif 1440 #endif
1441 }; 1441 };
1442 1442
1443 static int __init intel_mid_dma_init(void) 1443 static int __init intel_mid_dma_init(void)
1444 { 1444 {
1445 pr_debug("INFO_MDMA: LNW DMA Driver Version %s\n", 1445 pr_debug("INFO_MDMA: LNW DMA Driver Version %s\n",
1446 INTEL_MID_DMA_DRIVER_VERSION); 1446 INTEL_MID_DMA_DRIVER_VERSION);
1447 return pci_register_driver(&intel_mid_dma_pci_driver); 1447 return pci_register_driver(&intel_mid_dma_pci_driver);
1448 } 1448 }
1449 fs_initcall(intel_mid_dma_init); 1449 fs_initcall(intel_mid_dma_init);
1450 1450
1451 static void __exit intel_mid_dma_exit(void) 1451 static void __exit intel_mid_dma_exit(void)
1452 { 1452 {
1453 pci_unregister_driver(&intel_mid_dma_pci_driver); 1453 pci_unregister_driver(&intel_mid_dma_pci_driver);
1454 } 1454 }
1455 module_exit(intel_mid_dma_exit); 1455 module_exit(intel_mid_dma_exit);
1456 1456
1457 MODULE_AUTHOR("Vinod Koul <vinod.koul@intel.com>"); 1457 MODULE_AUTHOR("Vinod Koul <vinod.koul@intel.com>");
1458 MODULE_DESCRIPTION("Intel (R) MID DMAC Driver"); 1458 MODULE_DESCRIPTION("Intel (R) MID DMAC Driver");
1459 MODULE_LICENSE("GPL v2"); 1459 MODULE_LICENSE("GPL v2");
1460 MODULE_VERSION(INTEL_MID_DMA_DRIVER_VERSION); 1460 MODULE_VERSION(INTEL_MID_DMA_DRIVER_VERSION);
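
A side note on the init/exit boilerplate above: fs_initcall() is used to register the driver early in boot. If that ordering were not required, the init/exit pair could be collapsed with the standard helper; a one-line hedged equivalent:

        module_pci_driver(intel_mid_dma_pci_driver);
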
1461 1461
drivers/dma/ioat/dma.c
1 /* 1 /*
2 * Intel I/OAT DMA Linux driver 2 * Intel I/OAT DMA Linux driver
3 * Copyright(c) 2004 - 2009 Intel Corporation. 3 * Copyright(c) 2004 - 2009 Intel Corporation.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License, 6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation. 7 * version 2, as published by the Free Software Foundation.
8 * 8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT 9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details. 12 * more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License along with 14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc., 15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17 * 17 *
18 * The full GNU General Public License is included in this distribution in 18 * The full GNU General Public License is included in this distribution in
19 * the file called "COPYING". 19 * the file called "COPYING".
20 * 20 *
21 */ 21 */
22 22
23 /* 23 /*
24 * This driver supports an Intel I/OAT DMA engine, which does asynchronous 24 * This driver supports an Intel I/OAT DMA engine, which does asynchronous
25 * copy operations. 25 * copy operations.
26 */ 26 */
27 27
28 #include <linux/init.h> 28 #include <linux/init.h>
29 #include <linux/module.h> 29 #include <linux/module.h>
30 #include <linux/slab.h> 30 #include <linux/slab.h>
31 #include <linux/pci.h> 31 #include <linux/pci.h>
32 #include <linux/interrupt.h> 32 #include <linux/interrupt.h>
33 #include <linux/dmaengine.h> 33 #include <linux/dmaengine.h>
34 #include <linux/delay.h> 34 #include <linux/delay.h>
35 #include <linux/dma-mapping.h> 35 #include <linux/dma-mapping.h>
36 #include <linux/workqueue.h> 36 #include <linux/workqueue.h>
37 #include <linux/prefetch.h> 37 #include <linux/prefetch.h>
38 #include <linux/i7300_idle.h> 38 #include <linux/i7300_idle.h>
39 #include "dma.h" 39 #include "dma.h"
40 #include "registers.h" 40 #include "registers.h"
41 #include "hw.h" 41 #include "hw.h"
42 42
43 #include "../dmaengine.h" 43 #include "../dmaengine.h"
44 44
45 int ioat_pending_level = 4; 45 int ioat_pending_level = 4;
46 module_param(ioat_pending_level, int, 0644); 46 module_param(ioat_pending_level, int, 0644);
47 MODULE_PARM_DESC(ioat_pending_level, 47 MODULE_PARM_DESC(ioat_pending_level,
48 "high-water mark for pushing ioat descriptors (default: 4)"); 48 "high-water mark for pushing ioat descriptors (default: 4)");
49 49
50 /* internal functions */ 50 /* internal functions */
51 static void ioat1_cleanup(struct ioat_dma_chan *ioat); 51 static void ioat1_cleanup(struct ioat_dma_chan *ioat);
52 static void ioat1_dma_start_null_desc(struct ioat_dma_chan *ioat); 52 static void ioat1_dma_start_null_desc(struct ioat_dma_chan *ioat);
53 53
54 /** 54 /**
55 * ioat_dma_do_interrupt - handler used for single vector interrupt mode 55 * ioat_dma_do_interrupt - handler used for single vector interrupt mode
56 * @irq: interrupt id 56 * @irq: interrupt id
57 * @data: interrupt data 57 * @data: interrupt data
58 */ 58 */
59 static irqreturn_t ioat_dma_do_interrupt(int irq, void *data) 59 static irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
60 { 60 {
61 struct ioatdma_device *instance = data; 61 struct ioatdma_device *instance = data;
62 struct ioat_chan_common *chan; 62 struct ioat_chan_common *chan;
63 unsigned long attnstatus; 63 unsigned long attnstatus;
64 int bit; 64 int bit;
65 u8 intrctrl; 65 u8 intrctrl;
66 66
67 intrctrl = readb(instance->reg_base + IOAT_INTRCTRL_OFFSET); 67 intrctrl = readb(instance->reg_base + IOAT_INTRCTRL_OFFSET);
68 68
69 if (!(intrctrl & IOAT_INTRCTRL_MASTER_INT_EN)) 69 if (!(intrctrl & IOAT_INTRCTRL_MASTER_INT_EN))
70 return IRQ_NONE; 70 return IRQ_NONE;
71 71
72 if (!(intrctrl & IOAT_INTRCTRL_INT_STATUS)) { 72 if (!(intrctrl & IOAT_INTRCTRL_INT_STATUS)) {
73 writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET); 73 writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
74 return IRQ_NONE; 74 return IRQ_NONE;
75 } 75 }
76 76
77 attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET); 77 attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);
78 for_each_set_bit(bit, &attnstatus, BITS_PER_LONG) { 78 for_each_set_bit(bit, &attnstatus, BITS_PER_LONG) {
79 chan = ioat_chan_by_index(instance, bit); 79 chan = ioat_chan_by_index(instance, bit);
80 tasklet_schedule(&chan->cleanup_task); 80 tasklet_schedule(&chan->cleanup_task);
81 } 81 }
82 82
83 writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET); 83 writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
84 return IRQ_HANDLED; 84 return IRQ_HANDLED;
85 } 85 }
86 86
87 /** 87 /**
88 * ioat_dma_do_interrupt_msix - handler used for vector-per-channel interrupt mode 88 * ioat_dma_do_interrupt_msix - handler used for vector-per-channel interrupt mode
89 * @irq: interrupt id 89 * @irq: interrupt id
90 * @data: interrupt data 90 * @data: interrupt data
91 */ 91 */
92 static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data) 92 static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
93 { 93 {
94 struct ioat_chan_common *chan = data; 94 struct ioat_chan_common *chan = data;
95 95
96 tasklet_schedule(&chan->cleanup_task); 96 tasklet_schedule(&chan->cleanup_task);
97 97
98 return IRQ_HANDLED; 98 return IRQ_HANDLED;
99 } 99 }
100 100
101 /* common channel initialization */ 101 /* common channel initialization */
102 void ioat_init_channel(struct ioatdma_device *device, struct ioat_chan_common *chan, int idx) 102 void ioat_init_channel(struct ioatdma_device *device, struct ioat_chan_common *chan, int idx)
103 { 103 {
104 struct dma_device *dma = &device->common; 104 struct dma_device *dma = &device->common;
105 struct dma_chan *c = &chan->common; 105 struct dma_chan *c = &chan->common;
106 unsigned long data = (unsigned long) c; 106 unsigned long data = (unsigned long) c;
107 107
108 chan->device = device; 108 chan->device = device;
109 chan->reg_base = device->reg_base + (0x80 * (idx + 1)); 109 chan->reg_base = device->reg_base + (0x80 * (idx + 1));
110 spin_lock_init(&chan->cleanup_lock); 110 spin_lock_init(&chan->cleanup_lock);
111 chan->common.device = dma; 111 chan->common.device = dma;
112 dma_cookie_init(&chan->common); 112 dma_cookie_init(&chan->common);
113 list_add_tail(&chan->common.device_node, &dma->channels); 113 list_add_tail(&chan->common.device_node, &dma->channels);
114 device->idx[idx] = chan; 114 device->idx[idx] = chan;
115 init_timer(&chan->timer); 115 init_timer(&chan->timer);
116 chan->timer.function = device->timer_fn; 116 chan->timer.function = device->timer_fn;
117 chan->timer.data = data; 117 chan->timer.data = data;
118 tasklet_init(&chan->cleanup_task, device->cleanup_fn, data); 118 tasklet_init(&chan->cleanup_task, device->cleanup_fn, data);
119 tasklet_disable(&chan->cleanup_task); 119 tasklet_disable(&chan->cleanup_task);
120 } 120 }
121 121
122 /** 122 /**
123 * ioat1_enumerate_channels - find and initialize the device's channels 123 * ioat1_enumerate_channels - find and initialize the device's channels
124 * @device: the device to be enumerated 124 * @device: the device to be enumerated
125 */ 125 */
126 static int ioat1_enumerate_channels(struct ioatdma_device *device) 126 static int ioat1_enumerate_channels(struct ioatdma_device *device)
127 { 127 {
128 u8 xfercap_scale; 128 u8 xfercap_scale;
129 u32 xfercap; 129 u32 xfercap;
130 int i; 130 int i;
131 struct ioat_dma_chan *ioat; 131 struct ioat_dma_chan *ioat;
132 struct device *dev = &device->pdev->dev; 132 struct device *dev = &device->pdev->dev;
133 struct dma_device *dma = &device->common; 133 struct dma_device *dma = &device->common;
134 134
135 INIT_LIST_HEAD(&dma->channels); 135 INIT_LIST_HEAD(&dma->channels);
136 dma->chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET); 136 dma->chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET);
137 dma->chancnt &= 0x1f; /* bits [4:0] valid */ 137 dma->chancnt &= 0x1f; /* bits [4:0] valid */
138 if (dma->chancnt > ARRAY_SIZE(device->idx)) { 138 if (dma->chancnt > ARRAY_SIZE(device->idx)) {
139 dev_warn(dev, "(%d) exceeds max supported channels (%zu)\n", 139 dev_warn(dev, "(%d) exceeds max supported channels (%zu)\n",
140 dma->chancnt, ARRAY_SIZE(device->idx)); 140 dma->chancnt, ARRAY_SIZE(device->idx));
141 dma->chancnt = ARRAY_SIZE(device->idx); 141 dma->chancnt = ARRAY_SIZE(device->idx);
142 } 142 }
143 xfercap_scale = readb(device->reg_base + IOAT_XFERCAP_OFFSET); 143 xfercap_scale = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
144 xfercap_scale &= 0x1f; /* bits [4:0] valid */ 144 xfercap_scale &= 0x1f; /* bits [4:0] valid */
145 xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale)); 145 xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale));
146 dev_dbg(dev, "%s: xfercap = %d\n", __func__, xfercap); 146 dev_dbg(dev, "%s: xfercap = %d\n", __func__, xfercap);
147 147
148 #ifdef CONFIG_I7300_IDLE_IOAT_CHANNEL 148 #ifdef CONFIG_I7300_IDLE_IOAT_CHANNEL
149 if (i7300_idle_platform_probe(NULL, NULL, 1) == 0) 149 if (i7300_idle_platform_probe(NULL, NULL, 1) == 0)
150 dma->chancnt--; 150 dma->chancnt--;
151 #endif 151 #endif
152 for (i = 0; i < dma->chancnt; i++) { 152 for (i = 0; i < dma->chancnt; i++) {
153 ioat = devm_kzalloc(dev, sizeof(*ioat), GFP_KERNEL); 153 ioat = devm_kzalloc(dev, sizeof(*ioat), GFP_KERNEL);
154 if (!ioat) 154 if (!ioat)
155 break; 155 break;
156 156
157 ioat_init_channel(device, &ioat->base, i); 157 ioat_init_channel(device, &ioat->base, i);
158 ioat->xfercap = xfercap; 158 ioat->xfercap = xfercap;
159 spin_lock_init(&ioat->desc_lock); 159 spin_lock_init(&ioat->desc_lock);
160 INIT_LIST_HEAD(&ioat->free_desc); 160 INIT_LIST_HEAD(&ioat->free_desc);
161 INIT_LIST_HEAD(&ioat->used_desc); 161 INIT_LIST_HEAD(&ioat->used_desc);
162 } 162 }
163 dma->chancnt = i; 163 dma->chancnt = i;
164 return i; 164 return i;
165 } 165 }
166 166
167 /** 167 /**
168 * ioat1_dma_memcpy_issue_pending - push potentially unrecognized appended 168 * ioat1_dma_memcpy_issue_pending - push potentially unrecognized appended
169 * descriptors to hw 169 * descriptors to hw
170 * @chan: DMA channel handle 170 * @chan: DMA channel handle
171 */ 171 */
172 static inline void 172 static inline void
173 __ioat1_dma_memcpy_issue_pending(struct ioat_dma_chan *ioat) 173 __ioat1_dma_memcpy_issue_pending(struct ioat_dma_chan *ioat)
174 { 174 {
175 void __iomem *reg_base = ioat->base.reg_base; 175 void __iomem *reg_base = ioat->base.reg_base;
176 176
177 dev_dbg(to_dev(&ioat->base), "%s: pending: %d\n", 177 dev_dbg(to_dev(&ioat->base), "%s: pending: %d\n",
178 __func__, ioat->pending); 178 __func__, ioat->pending);
179 ioat->pending = 0; 179 ioat->pending = 0;
180 writeb(IOAT_CHANCMD_APPEND, reg_base + IOAT1_CHANCMD_OFFSET); 180 writeb(IOAT_CHANCMD_APPEND, reg_base + IOAT1_CHANCMD_OFFSET);
181 } 181 }
182 182
183 static void ioat1_dma_memcpy_issue_pending(struct dma_chan *chan) 183 static void ioat1_dma_memcpy_issue_pending(struct dma_chan *chan)
184 { 184 {
185 struct ioat_dma_chan *ioat = to_ioat_chan(chan); 185 struct ioat_dma_chan *ioat = to_ioat_chan(chan);
186 186
187 if (ioat->pending > 0) { 187 if (ioat->pending > 0) {
188 spin_lock_bh(&ioat->desc_lock); 188 spin_lock_bh(&ioat->desc_lock);
189 __ioat1_dma_memcpy_issue_pending(ioat); 189 __ioat1_dma_memcpy_issue_pending(ioat);
190 spin_unlock_bh(&ioat->desc_lock); 190 spin_unlock_bh(&ioat->desc_lock);
191 } 191 }
192 } 192 }
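
ioat1_dma_memcpy_issue_pending() above only kicks the hardware once descriptors have been batched past ioat_pending_level; from a client's perspective the contract is simply that nothing runs until issue_pending is called. A small illustrative fragment using the generic wrappers (my_flush is a hypothetical helper):

        #include <linux/dmaengine.h>

        static void my_flush(struct dma_chan *chan,
                             struct dma_async_tx_descriptor *txd)
        {
                dmaengine_submit(txd);          /* appends to the software chain */
                dma_async_issue_pending(chan);  /* for ioat1 this ends in the
                                                 * IOAT_CHANCMD_APPEND write above */
        }
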
193 193
194 /** 194 /**
195 * ioat1_reset_channel - restart a channel 195 * ioat1_reset_channel - restart a channel
196 * @ioat: IOAT DMA channel handle 196 * @ioat: IOAT DMA channel handle
197 */ 197 */
198 static void ioat1_reset_channel(struct ioat_dma_chan *ioat) 198 static void ioat1_reset_channel(struct ioat_dma_chan *ioat)
199 { 199 {
200 struct ioat_chan_common *chan = &ioat->base; 200 struct ioat_chan_common *chan = &ioat->base;
201 void __iomem *reg_base = chan->reg_base; 201 void __iomem *reg_base = chan->reg_base;
202 u32 chansts, chanerr; 202 u32 chansts, chanerr;
203 203
204 dev_warn(to_dev(chan), "reset\n"); 204 dev_warn(to_dev(chan), "reset\n");
205 chanerr = readl(reg_base + IOAT_CHANERR_OFFSET); 205 chanerr = readl(reg_base + IOAT_CHANERR_OFFSET);
206 chansts = *chan->completion & IOAT_CHANSTS_STATUS; 206 chansts = *chan->completion & IOAT_CHANSTS_STATUS;
207 if (chanerr) { 207 if (chanerr) {
208 dev_err(to_dev(chan), 208 dev_err(to_dev(chan),
209 "chan%d, CHANSTS = 0x%08x CHANERR = 0x%04x, clearing\n", 209 "chan%d, CHANSTS = 0x%08x CHANERR = 0x%04x, clearing\n",
210 chan_num(chan), chansts, chanerr); 210 chan_num(chan), chansts, chanerr);
211 writel(chanerr, reg_base + IOAT_CHANERR_OFFSET); 211 writel(chanerr, reg_base + IOAT_CHANERR_OFFSET);
212 } 212 }
213 213
214 /* 214 /*
215 * whack it upside the head with a reset 215 * whack it upside the head with a reset
216 * and wait for things to settle out. 216 * and wait for things to settle out.
217 * force the pending count to a really big negative 217 * force the pending count to a really big negative
218 * to make sure no one forces an issue_pending 218 * to make sure no one forces an issue_pending
219 * while we're waiting. 219 * while we're waiting.
220 */ 220 */
221 221
222 ioat->pending = INT_MIN; 222 ioat->pending = INT_MIN;
223 writeb(IOAT_CHANCMD_RESET, 223 writeb(IOAT_CHANCMD_RESET,
224 reg_base + IOAT_CHANCMD_OFFSET(chan->device->version)); 224 reg_base + IOAT_CHANCMD_OFFSET(chan->device->version));
225 set_bit(IOAT_RESET_PENDING, &chan->state); 225 set_bit(IOAT_RESET_PENDING, &chan->state);
226 mod_timer(&chan->timer, jiffies + RESET_DELAY); 226 mod_timer(&chan->timer, jiffies + RESET_DELAY);
227 } 227 }
228 228
229 static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx) 229 static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
230 { 230 {
231 struct dma_chan *c = tx->chan; 231 struct dma_chan *c = tx->chan;
232 struct ioat_dma_chan *ioat = to_ioat_chan(c); 232 struct ioat_dma_chan *ioat = to_ioat_chan(c);
233 struct ioat_desc_sw *desc = tx_to_ioat_desc(tx); 233 struct ioat_desc_sw *desc = tx_to_ioat_desc(tx);
234 struct ioat_chan_common *chan = &ioat->base; 234 struct ioat_chan_common *chan = &ioat->base;
235 struct ioat_desc_sw *first; 235 struct ioat_desc_sw *first;
236 struct ioat_desc_sw *chain_tail; 236 struct ioat_desc_sw *chain_tail;
237 dma_cookie_t cookie; 237 dma_cookie_t cookie;
238 238
239 spin_lock_bh(&ioat->desc_lock); 239 spin_lock_bh(&ioat->desc_lock);
240 /* cookie incr and addition to used_list must be atomic */ 240 /* cookie incr and addition to used_list must be atomic */
241 cookie = dma_cookie_assign(tx); 241 cookie = dma_cookie_assign(tx);
242 dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie); 242 dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie);
243 243
244 /* write address into NextDescriptor field of last desc in chain */ 244 /* write address into NextDescriptor field of last desc in chain */
245 first = to_ioat_desc(desc->tx_list.next); 245 first = to_ioat_desc(desc->tx_list.next);
246 chain_tail = to_ioat_desc(ioat->used_desc.prev); 246 chain_tail = to_ioat_desc(ioat->used_desc.prev);
247 /* make descriptor updates globally visible before chaining */ 247 /* make descriptor updates globally visible before chaining */
248 wmb(); 248 wmb();
249 chain_tail->hw->next = first->txd.phys; 249 chain_tail->hw->next = first->txd.phys;
250 list_splice_tail_init(&desc->tx_list, &ioat->used_desc); 250 list_splice_tail_init(&desc->tx_list, &ioat->used_desc);
251 dump_desc_dbg(ioat, chain_tail); 251 dump_desc_dbg(ioat, chain_tail);
252 dump_desc_dbg(ioat, first); 252 dump_desc_dbg(ioat, first);
253 253
254 if (!test_and_set_bit(IOAT_COMPLETION_PENDING, &chan->state)) 254 if (!test_and_set_bit(IOAT_COMPLETION_PENDING, &chan->state))
255 mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); 255 mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
256 256
257 ioat->active += desc->hw->tx_cnt; 257 ioat->active += desc->hw->tx_cnt;
258 ioat->pending += desc->hw->tx_cnt; 258 ioat->pending += desc->hw->tx_cnt;
259 if (ioat->pending >= ioat_pending_level) 259 if (ioat->pending >= ioat_pending_level)
260 __ioat1_dma_memcpy_issue_pending(ioat); 260 __ioat1_dma_memcpy_issue_pending(ioat);
261 spin_unlock_bh(&ioat->desc_lock); 261 spin_unlock_bh(&ioat->desc_lock);
262 262
263 return cookie; 263 return cookie;
264 } 264 }
265 265
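The submit path above batches doorbell writes: ioat1_tx_submit() only rings the hardware (via __ioat1_dma_memcpy_issue_pending()) once ioat->pending reaches ioat_pending_level, and ioat1_dma_memcpy_issue_pending() lets the client flush whatever is left. The following is a toy user-space model of that batching policy only, with hypothetical names and none of the locking or descriptor bookkeeping:

#include <stdio.h>

#define PENDING_LEVEL 4			/* stands in for ioat_pending_level */

static int pending;

/* Stands in for the IOAT_CHANCMD_APPEND register write. */
static void ring_doorbell(void)
{
	printf("doorbell after %d queued submissions\n", pending);
	pending = 0;
}

/* Stands in for ioat1_tx_submit(): queue work, ring only past the threshold. */
static void submit_one(void)
{
	if (++pending >= PENDING_LEVEL)
		ring_doorbell();
}

/* Stands in for ioat1_dma_memcpy_issue_pending(): flush anything queued. */
static void issue_pending(void)
{
	if (pending > 0)
		ring_doorbell();
}

int main(void)
{
	for (int i = 0; i < 6; i++)
		submit_one();		/* doorbell fires once, at the 4th call */
	issue_pending();		/* flushes the remaining 2 */
	return 0;
}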
266 /** 266 /**
267 * ioat_dma_alloc_descriptor - allocate and return a sw and hw descriptor pair 267 * ioat_dma_alloc_descriptor - allocate and return a sw and hw descriptor pair
268 * @ioat: the channel supplying the memory pool for the descriptors 268 * @ioat: the channel supplying the memory pool for the descriptors
269 * @flags: allocation flags 269 * @flags: allocation flags
270 */ 270 */
271 static struct ioat_desc_sw * 271 static struct ioat_desc_sw *
272 ioat_dma_alloc_descriptor(struct ioat_dma_chan *ioat, gfp_t flags) 272 ioat_dma_alloc_descriptor(struct ioat_dma_chan *ioat, gfp_t flags)
273 { 273 {
274 struct ioat_dma_descriptor *desc; 274 struct ioat_dma_descriptor *desc;
275 struct ioat_desc_sw *desc_sw; 275 struct ioat_desc_sw *desc_sw;
276 struct ioatdma_device *ioatdma_device; 276 struct ioatdma_device *ioatdma_device;
277 dma_addr_t phys; 277 dma_addr_t phys;
278 278
279 ioatdma_device = ioat->base.device; 279 ioatdma_device = ioat->base.device;
280 desc = pci_pool_alloc(ioatdma_device->dma_pool, flags, &phys); 280 desc = pci_pool_alloc(ioatdma_device->dma_pool, flags, &phys);
281 if (unlikely(!desc)) 281 if (unlikely(!desc))
282 return NULL; 282 return NULL;
283 283
284 desc_sw = kzalloc(sizeof(*desc_sw), flags); 284 desc_sw = kzalloc(sizeof(*desc_sw), flags);
285 if (unlikely(!desc_sw)) { 285 if (unlikely(!desc_sw)) {
286 pci_pool_free(ioatdma_device->dma_pool, desc, phys); 286 pci_pool_free(ioatdma_device->dma_pool, desc, phys);
287 return NULL; 287 return NULL;
288 } 288 }
289 289
290 memset(desc, 0, sizeof(*desc)); 290 memset(desc, 0, sizeof(*desc));
291 291
292 INIT_LIST_HEAD(&desc_sw->tx_list); 292 INIT_LIST_HEAD(&desc_sw->tx_list);
293 dma_async_tx_descriptor_init(&desc_sw->txd, &ioat->base.common); 293 dma_async_tx_descriptor_init(&desc_sw->txd, &ioat->base.common);
294 desc_sw->txd.tx_submit = ioat1_tx_submit; 294 desc_sw->txd.tx_submit = ioat1_tx_submit;
295 desc_sw->hw = desc; 295 desc_sw->hw = desc;
296 desc_sw->txd.phys = phys; 296 desc_sw->txd.phys = phys;
297 set_desc_id(desc_sw, -1); 297 set_desc_id(desc_sw, -1);
298 298
299 return desc_sw; 299 return desc_sw;
300 } 300 }
301 301
302 static int ioat_initial_desc_count = 256; 302 static int ioat_initial_desc_count = 256;
303 module_param(ioat_initial_desc_count, int, 0644); 303 module_param(ioat_initial_desc_count, int, 0644);
304 MODULE_PARM_DESC(ioat_initial_desc_count, 304 MODULE_PARM_DESC(ioat_initial_desc_count,
305 "ioat1: initial descriptors per channel (default: 256)"); 305 "ioat1: initial descriptors per channel (default: 256)");
306 /** 306 /**
307 * ioat1_dma_alloc_chan_resources - returns the number of allocated descriptors 307 * ioat1_dma_alloc_chan_resources - returns the number of allocated descriptors
308 * @chan: the channel to be filled out 308 * @chan: the channel to be filled out
309 */ 309 */
310 static int ioat1_dma_alloc_chan_resources(struct dma_chan *c) 310 static int ioat1_dma_alloc_chan_resources(struct dma_chan *c)
311 { 311 {
312 struct ioat_dma_chan *ioat = to_ioat_chan(c); 312 struct ioat_dma_chan *ioat = to_ioat_chan(c);
313 struct ioat_chan_common *chan = &ioat->base; 313 struct ioat_chan_common *chan = &ioat->base;
314 struct ioat_desc_sw *desc; 314 struct ioat_desc_sw *desc;
315 u32 chanerr; 315 u32 chanerr;
316 int i; 316 int i;
317 LIST_HEAD(tmp_list); 317 LIST_HEAD(tmp_list);
318 318
319 /* have we already been set up? */ 319 /* have we already been set up? */
320 if (!list_empty(&ioat->free_desc)) 320 if (!list_empty(&ioat->free_desc))
321 return ioat->desccount; 321 return ioat->desccount;
322 322
323 /* Setup register to interrupt and write completion status on error */ 323 /* Setup register to interrupt and write completion status on error */
324 writew(IOAT_CHANCTRL_RUN, chan->reg_base + IOAT_CHANCTRL_OFFSET); 324 writew(IOAT_CHANCTRL_RUN, chan->reg_base + IOAT_CHANCTRL_OFFSET);
325 325
326 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET); 326 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
327 if (chanerr) { 327 if (chanerr) {
328 dev_err(to_dev(chan), "CHANERR = %x, clearing\n", chanerr); 328 dev_err(to_dev(chan), "CHANERR = %x, clearing\n", chanerr);
329 writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET); 329 writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
330 } 330 }
331 331
332 /* Allocate descriptors */ 332 /* Allocate descriptors */
333 for (i = 0; i < ioat_initial_desc_count; i++) { 333 for (i = 0; i < ioat_initial_desc_count; i++) {
334 desc = ioat_dma_alloc_descriptor(ioat, GFP_KERNEL); 334 desc = ioat_dma_alloc_descriptor(ioat, GFP_KERNEL);
335 if (!desc) { 335 if (!desc) {
336 dev_err(to_dev(chan), "Only %d initial descriptors\n", i); 336 dev_err(to_dev(chan), "Only %d initial descriptors\n", i);
337 break; 337 break;
338 } 338 }
339 set_desc_id(desc, i); 339 set_desc_id(desc, i);
340 list_add_tail(&desc->node, &tmp_list); 340 list_add_tail(&desc->node, &tmp_list);
341 } 341 }
342 spin_lock_bh(&ioat->desc_lock); 342 spin_lock_bh(&ioat->desc_lock);
343 ioat->desccount = i; 343 ioat->desccount = i;
344 list_splice(&tmp_list, &ioat->free_desc); 344 list_splice(&tmp_list, &ioat->free_desc);
345 spin_unlock_bh(&ioat->desc_lock); 345 spin_unlock_bh(&ioat->desc_lock);
346 346
347 /* allocate a completion writeback area */ 347 /* allocate a completion writeback area */
348 /* doing 2 32bit writes to mmio since 1 64b write doesn't work */ 348 /* doing 2 32bit writes to mmio since 1 64b write doesn't work */
349 chan->completion = pci_pool_alloc(chan->device->completion_pool, 349 chan->completion = pci_pool_alloc(chan->device->completion_pool,
350 GFP_KERNEL, &chan->completion_dma); 350 GFP_KERNEL, &chan->completion_dma);
351 memset(chan->completion, 0, sizeof(*chan->completion)); 351 memset(chan->completion, 0, sizeof(*chan->completion));
352 writel(((u64) chan->completion_dma) & 0x00000000FFFFFFFF, 352 writel(((u64) chan->completion_dma) & 0x00000000FFFFFFFF,
353 chan->reg_base + IOAT_CHANCMP_OFFSET_LOW); 353 chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
354 writel(((u64) chan->completion_dma) >> 32, 354 writel(((u64) chan->completion_dma) >> 32,
355 chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH); 355 chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);
356 356
357 tasklet_enable(&chan->cleanup_task); 357 tasklet_enable(&chan->cleanup_task);
358 ioat1_dma_start_null_desc(ioat); /* give chain to dma device */ 358 ioat1_dma_start_null_desc(ioat); /* give chain to dma device */
359 dev_dbg(to_dev(chan), "%s: allocated %d descriptors\n", 359 dev_dbg(to_dev(chan), "%s: allocated %d descriptors\n",
360 __func__, ioat->desccount); 360 __func__, ioat->desccount);
361 return ioat->desccount; 361 return ioat->desccount;
362 } 362 }
363 363
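ioat1_dma_alloc_chan_resources() above programs the 64-bit completion writeback address with two 32-bit MMIO writes because, per its comment, a single 64-bit write does not work. A minimal user-space sketch of the split (hypothetical helper, not driver code; in kernel code the lower_32_bits()/upper_32_bits() helpers express the same idea):

#include <stdio.h>
#include <stdint.h>

/* Split a 64-bit bus address into the halves written to
 * IOAT_CHANCMP_OFFSET_LOW and IOAT_CHANCMP_OFFSET_HIGH above. */
static void split_completion_addr(uint64_t addr, uint32_t *lo, uint32_t *hi)
{
	*lo = (uint32_t)(addr & 0xffffffffULL);
	*hi = (uint32_t)(addr >> 32);
}

int main(void)
{
	uint32_t lo, hi;

	split_completion_addr(0x0000001234abcd00ULL, &lo, &hi);
	printf("low=0x%08x high=0x%08x\n", lo, hi);	/* low=0x34abcd00 high=0x00000012 */
	return 0;
}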
364 /** 364 /**
365 * ioat1_dma_free_chan_resources - release all the descriptors 365 * ioat1_dma_free_chan_resources - release all the descriptors
366 * @chan: the channel to be cleaned 366 * @chan: the channel to be cleaned
367 */ 367 */
368 static void ioat1_dma_free_chan_resources(struct dma_chan *c) 368 static void ioat1_dma_free_chan_resources(struct dma_chan *c)
369 { 369 {
370 struct ioat_dma_chan *ioat = to_ioat_chan(c); 370 struct ioat_dma_chan *ioat = to_ioat_chan(c);
371 struct ioat_chan_common *chan = &ioat->base; 371 struct ioat_chan_common *chan = &ioat->base;
372 struct ioatdma_device *ioatdma_device = chan->device; 372 struct ioatdma_device *ioatdma_device = chan->device;
373 struct ioat_desc_sw *desc, *_desc; 373 struct ioat_desc_sw *desc, *_desc;
374 int in_use_descs = 0; 374 int in_use_descs = 0;
375 375
376 /* Before freeing channel resources first check 376 /* Before freeing channel resources first check
377 * if they have been previously allocated for this channel. 377 * if they have been previously allocated for this channel.
378 */ 378 */
379 if (ioat->desccount == 0) 379 if (ioat->desccount == 0)
380 return; 380 return;
381 381
382 tasklet_disable(&chan->cleanup_task); 382 tasklet_disable(&chan->cleanup_task);
383 del_timer_sync(&chan->timer); 383 del_timer_sync(&chan->timer);
384 ioat1_cleanup(ioat); 384 ioat1_cleanup(ioat);
385 385
386 /* Delay 100ms after reset to allow internal DMA logic to quiesce 386 /* Delay 100ms after reset to allow internal DMA logic to quiesce
387 * before removing DMA descriptor resources. 387 * before removing DMA descriptor resources.
388 */ 388 */
389 writeb(IOAT_CHANCMD_RESET, 389 writeb(IOAT_CHANCMD_RESET,
390 chan->reg_base + IOAT_CHANCMD_OFFSET(chan->device->version)); 390 chan->reg_base + IOAT_CHANCMD_OFFSET(chan->device->version));
391 mdelay(100); 391 mdelay(100);
392 392
393 spin_lock_bh(&ioat->desc_lock); 393 spin_lock_bh(&ioat->desc_lock);
394 list_for_each_entry_safe(desc, _desc, &ioat->used_desc, node) { 394 list_for_each_entry_safe(desc, _desc, &ioat->used_desc, node) {
395 dev_dbg(to_dev(chan), "%s: freeing %d from used list\n", 395 dev_dbg(to_dev(chan), "%s: freeing %d from used list\n",
396 __func__, desc_id(desc)); 396 __func__, desc_id(desc));
397 dump_desc_dbg(ioat, desc); 397 dump_desc_dbg(ioat, desc);
398 in_use_descs++; 398 in_use_descs++;
399 list_del(&desc->node); 399 list_del(&desc->node);
400 pci_pool_free(ioatdma_device->dma_pool, desc->hw, 400 pci_pool_free(ioatdma_device->dma_pool, desc->hw,
401 desc->txd.phys); 401 desc->txd.phys);
402 kfree(desc); 402 kfree(desc);
403 } 403 }
404 list_for_each_entry_safe(desc, _desc, 404 list_for_each_entry_safe(desc, _desc,
405 &ioat->free_desc, node) { 405 &ioat->free_desc, node) {
406 list_del(&desc->node); 406 list_del(&desc->node);
407 pci_pool_free(ioatdma_device->dma_pool, desc->hw, 407 pci_pool_free(ioatdma_device->dma_pool, desc->hw,
408 desc->txd.phys); 408 desc->txd.phys);
409 kfree(desc); 409 kfree(desc);
410 } 410 }
411 spin_unlock_bh(&ioat->desc_lock); 411 spin_unlock_bh(&ioat->desc_lock);
412 412
413 pci_pool_free(ioatdma_device->completion_pool, 413 pci_pool_free(ioatdma_device->completion_pool,
414 chan->completion, 414 chan->completion,
415 chan->completion_dma); 415 chan->completion_dma);
416 416
417 /* one is ok since we left it on there on purpose */ 417 /* one is ok since we left it on there on purpose */
418 if (in_use_descs > 1) 418 if (in_use_descs > 1)
419 dev_err(to_dev(chan), "Freeing %d in use descriptors!\n", 419 dev_err(to_dev(chan), "Freeing %d in use descriptors!\n",
420 in_use_descs - 1); 420 in_use_descs - 1);
421 421
422 chan->last_completion = 0; 422 chan->last_completion = 0;
423 chan->completion_dma = 0; 423 chan->completion_dma = 0;
424 ioat->pending = 0; 424 ioat->pending = 0;
425 ioat->desccount = 0; 425 ioat->desccount = 0;
426 } 426 }
427 427
428 /** 428 /**
429 * ioat1_dma_get_next_descriptor - return the next available descriptor 429 * ioat1_dma_get_next_descriptor - return the next available descriptor
430 * @ioat: IOAT DMA channel handle 430 * @ioat: IOAT DMA channel handle
431 * 431 *
432 * Gets the next descriptor from the chain, and must be called with the 432 * Gets the next descriptor from the chain, and must be called with the
433 * channel's desc_lock held. Allocates more descriptors if the channel 433 * channel's desc_lock held. Allocates more descriptors if the channel
434 * has run out. 434 * has run out.
435 */ 435 */
436 static struct ioat_desc_sw * 436 static struct ioat_desc_sw *
437 ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat) 437 ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat)
438 { 438 {
439 struct ioat_desc_sw *new; 439 struct ioat_desc_sw *new;
440 440
441 if (!list_empty(&ioat->free_desc)) { 441 if (!list_empty(&ioat->free_desc)) {
442 new = to_ioat_desc(ioat->free_desc.next); 442 new = to_ioat_desc(ioat->free_desc.next);
443 list_del(&new->node); 443 list_del(&new->node);
444 } else { 444 } else {
445 /* try to get another desc */ 445 /* try to get another desc */
446 new = ioat_dma_alloc_descriptor(ioat, GFP_ATOMIC); 446 new = ioat_dma_alloc_descriptor(ioat, GFP_ATOMIC);
447 if (!new) { 447 if (!new) {
448 dev_err(to_dev(&ioat->base), "alloc failed\n"); 448 dev_err(to_dev(&ioat->base), "alloc failed\n");
449 return NULL; 449 return NULL;
450 } 450 }
451 } 451 }
452 dev_dbg(to_dev(&ioat->base), "%s: allocated: %d\n", 452 dev_dbg(to_dev(&ioat->base), "%s: allocated: %d\n",
453 __func__, desc_id(new)); 453 __func__, desc_id(new));
454 prefetch(new->hw); 454 prefetch(new->hw);
455 return new; 455 return new;
456 } 456 }
457 457
458 static struct dma_async_tx_descriptor * 458 static struct dma_async_tx_descriptor *
459 ioat1_dma_prep_memcpy(struct dma_chan *c, dma_addr_t dma_dest, 459 ioat1_dma_prep_memcpy(struct dma_chan *c, dma_addr_t dma_dest,
460 dma_addr_t dma_src, size_t len, unsigned long flags) 460 dma_addr_t dma_src, size_t len, unsigned long flags)
461 { 461 {
462 struct ioat_dma_chan *ioat = to_ioat_chan(c); 462 struct ioat_dma_chan *ioat = to_ioat_chan(c);
463 struct ioat_desc_sw *desc; 463 struct ioat_desc_sw *desc;
464 size_t copy; 464 size_t copy;
465 LIST_HEAD(chain); 465 LIST_HEAD(chain);
466 dma_addr_t src = dma_src; 466 dma_addr_t src = dma_src;
467 dma_addr_t dest = dma_dest; 467 dma_addr_t dest = dma_dest;
468 size_t total_len = len; 468 size_t total_len = len;
469 struct ioat_dma_descriptor *hw = NULL; 469 struct ioat_dma_descriptor *hw = NULL;
470 int tx_cnt = 0; 470 int tx_cnt = 0;
471 471
472 spin_lock_bh(&ioat->desc_lock); 472 spin_lock_bh(&ioat->desc_lock);
473 desc = ioat1_dma_get_next_descriptor(ioat); 473 desc = ioat1_dma_get_next_descriptor(ioat);
474 do { 474 do {
475 if (!desc) 475 if (!desc)
476 break; 476 break;
477 477
478 tx_cnt++; 478 tx_cnt++;
479 copy = min_t(size_t, len, ioat->xfercap); 479 copy = min_t(size_t, len, ioat->xfercap);
480 480
481 hw = desc->hw; 481 hw = desc->hw;
482 hw->size = copy; 482 hw->size = copy;
483 hw->ctl = 0; 483 hw->ctl = 0;
484 hw->src_addr = src; 484 hw->src_addr = src;
485 hw->dst_addr = dest; 485 hw->dst_addr = dest;
486 486
487 list_add_tail(&desc->node, &chain); 487 list_add_tail(&desc->node, &chain);
488 488
489 len -= copy; 489 len -= copy;
490 dest += copy; 490 dest += copy;
491 src += copy; 491 src += copy;
492 if (len) { 492 if (len) {
493 struct ioat_desc_sw *next; 493 struct ioat_desc_sw *next;
494 494
495 async_tx_ack(&desc->txd); 495 async_tx_ack(&desc->txd);
496 next = ioat1_dma_get_next_descriptor(ioat); 496 next = ioat1_dma_get_next_descriptor(ioat);
497 hw->next = next ? next->txd.phys : 0; 497 hw->next = next ? next->txd.phys : 0;
498 dump_desc_dbg(ioat, desc); 498 dump_desc_dbg(ioat, desc);
499 desc = next; 499 desc = next;
500 } else 500 } else
501 hw->next = 0; 501 hw->next = 0;
502 } while (len); 502 } while (len);
503 503
504 if (!desc) { 504 if (!desc) {
505 struct ioat_chan_common *chan = &ioat->base; 505 struct ioat_chan_common *chan = &ioat->base;
506 506
507 dev_err(to_dev(chan), 507 dev_err(to_dev(chan),
508 "chan%d - get_next_desc failed\n", chan_num(chan)); 508 "chan%d - get_next_desc failed\n", chan_num(chan));
509 list_splice(&chain, &ioat->free_desc); 509 list_splice(&chain, &ioat->free_desc);
510 spin_unlock_bh(&ioat->desc_lock); 510 spin_unlock_bh(&ioat->desc_lock);
511 return NULL; 511 return NULL;
512 } 512 }
513 spin_unlock_bh(&ioat->desc_lock); 513 spin_unlock_bh(&ioat->desc_lock);
514 514
515 desc->txd.flags = flags; 515 desc->txd.flags = flags;
516 desc->len = total_len; 516 desc->len = total_len;
517 list_splice(&chain, &desc->tx_list); 517 list_splice(&chain, &desc->tx_list);
518 hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT); 518 hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
519 hw->ctl_f.compl_write = 1; 519 hw->ctl_f.compl_write = 1;
520 hw->tx_cnt = tx_cnt; 520 hw->tx_cnt = tx_cnt;
521 dump_desc_dbg(ioat, desc); 521 dump_desc_dbg(ioat, desc);
522 522
523 return &desc->txd; 523 return &desc->txd;
524 } 524 }
525 525
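ioat1_dma_prep_memcpy() above splits one copy request into a chain of hardware descriptors, each covering at most ioat->xfercap bytes. A quick arithmetic sketch of the descriptor count that loop produces for a given request (illustrative only, hypothetical names; assumes len > 0):

#include <stdio.h>
#include <stddef.h>

/* Each descriptor covers min(len, xfercap) bytes, so a request of len
 * bytes needs ceil(len / xfercap) descriptors. */
static size_t descs_needed(size_t len, size_t xfercap)
{
	return (len + xfercap - 1) / xfercap;
}

int main(void)
{
	/* e.g. a 5 MiB copy with a 1 MiB per-descriptor cap -> 5 descriptors */
	printf("%zu\n", descs_needed(5u * 1024 * 1024, 1u * 1024 * 1024));
	return 0;
}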
526 static void ioat1_cleanup_event(unsigned long data) 526 static void ioat1_cleanup_event(unsigned long data)
527 { 527 {
528 struct ioat_dma_chan *ioat = to_ioat_chan((void *) data); 528 struct ioat_dma_chan *ioat = to_ioat_chan((void *) data);
529 529
530 ioat1_cleanup(ioat); 530 ioat1_cleanup(ioat);
531 writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET); 531 writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
532 } 532 }
533 533
534 void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags, 534 void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags,
535 size_t len, struct ioat_dma_descriptor *hw) 535 size_t len, struct ioat_dma_descriptor *hw)
536 { 536 {
537 struct pci_dev *pdev = chan->device->pdev; 537 struct pci_dev *pdev = chan->device->pdev;
538 size_t offset = len - hw->size; 538 size_t offset = len - hw->size;
539 539
540 if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) 540 if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP))
541 ioat_unmap(pdev, hw->dst_addr - offset, len, 541 ioat_unmap(pdev, hw->dst_addr - offset, len,
542 PCI_DMA_FROMDEVICE, flags, 1); 542 PCI_DMA_FROMDEVICE, flags, 1);
543 543
544 if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) 544 if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP))
545 ioat_unmap(pdev, hw->src_addr - offset, len, 545 ioat_unmap(pdev, hw->src_addr - offset, len,
546 PCI_DMA_TODEVICE, flags, 0); 546 PCI_DMA_TODEVICE, flags, 0);
547 } 547 }
548 548
549 dma_addr_t ioat_get_current_completion(struct ioat_chan_common *chan) 549 dma_addr_t ioat_get_current_completion(struct ioat_chan_common *chan)
550 { 550 {
551 dma_addr_t phys_complete; 551 dma_addr_t phys_complete;
552 u64 completion; 552 u64 completion;
553 553
554 completion = *chan->completion; 554 completion = *chan->completion;
555 phys_complete = ioat_chansts_to_addr(completion); 555 phys_complete = ioat_chansts_to_addr(completion);
556 556
557 dev_dbg(to_dev(chan), "%s: phys_complete: %#llx\n", __func__, 557 dev_dbg(to_dev(chan), "%s: phys_complete: %#llx\n", __func__,
558 (unsigned long long) phys_complete); 558 (unsigned long long) phys_complete);
559 559
560 if (is_ioat_halted(completion)) { 560 if (is_ioat_halted(completion)) {
561 u32 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET); 561 u32 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
562 dev_err(to_dev(chan), "Channel halted, chanerr = %x\n", 562 dev_err(to_dev(chan), "Channel halted, chanerr = %x\n",
563 chanerr); 563 chanerr);
564 564
565 /* TODO do something to salvage the situation */ 565 /* TODO do something to salvage the situation */
566 } 566 }
567 567
568 return phys_complete; 568 return phys_complete;
569 } 569 }
570 570
571 bool ioat_cleanup_preamble(struct ioat_chan_common *chan, 571 bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
572 dma_addr_t *phys_complete) 572 dma_addr_t *phys_complete)
573 { 573 {
574 *phys_complete = ioat_get_current_completion(chan); 574 *phys_complete = ioat_get_current_completion(chan);
575 if (*phys_complete == chan->last_completion) 575 if (*phys_complete == chan->last_completion)
576 return false; 576 return false;
577 clear_bit(IOAT_COMPLETION_ACK, &chan->state); 577 clear_bit(IOAT_COMPLETION_ACK, &chan->state);
578 mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); 578 mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
579 579
580 return true; 580 return true;
581 } 581 }
582 582
583 static void __cleanup(struct ioat_dma_chan *ioat, dma_addr_t phys_complete) 583 static void __cleanup(struct ioat_dma_chan *ioat, dma_addr_t phys_complete)
584 { 584 {
585 struct ioat_chan_common *chan = &ioat->base; 585 struct ioat_chan_common *chan = &ioat->base;
586 struct list_head *_desc, *n; 586 struct list_head *_desc, *n;
587 struct dma_async_tx_descriptor *tx; 587 struct dma_async_tx_descriptor *tx;
588 588
589 dev_dbg(to_dev(chan), "%s: phys_complete: %llx\n", 589 dev_dbg(to_dev(chan), "%s: phys_complete: %llx\n",
590 __func__, (unsigned long long) phys_complete); 590 __func__, (unsigned long long) phys_complete);
591 list_for_each_safe(_desc, n, &ioat->used_desc) { 591 list_for_each_safe(_desc, n, &ioat->used_desc) {
592 struct ioat_desc_sw *desc; 592 struct ioat_desc_sw *desc;
593 593
594 prefetch(n); 594 prefetch(n);
595 desc = list_entry(_desc, typeof(*desc), node); 595 desc = list_entry(_desc, typeof(*desc), node);
596 tx = &desc->txd; 596 tx = &desc->txd;
597 /* 597 /*
598 * Incoming DMA requests may use multiple descriptors, 598 * Incoming DMA requests may use multiple descriptors,
599 * due to exceeding xfercap, perhaps. If so, only the 599 * due to exceeding xfercap, perhaps. If so, only the
600 * last one will have a cookie, and require unmapping. 600 * last one will have a cookie, and require unmapping.
601 */ 601 */
602 dump_desc_dbg(ioat, desc); 602 dump_desc_dbg(ioat, desc);
603 if (tx->cookie) { 603 if (tx->cookie) {
604 dma_cookie_complete(tx); 604 dma_cookie_complete(tx);
605 ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw); 605 ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw);
606 ioat->active -= desc->hw->tx_cnt; 606 ioat->active -= desc->hw->tx_cnt;
607 if (tx->callback) { 607 if (tx->callback) {
608 tx->callback(tx->callback_param); 608 tx->callback(tx->callback_param);
609 tx->callback = NULL; 609 tx->callback = NULL;
610 } 610 }
611 } 611 }
612 612
613 if (tx->phys != phys_complete) { 613 if (tx->phys != phys_complete) {
614 /* 614 /*
615 * a completed entry, but not the last, so clean 615 * a completed entry, but not the last, so clean
616 * up if the client is done with the descriptor 616 * up if the client is done with the descriptor
617 */ 617 */
618 if (async_tx_test_ack(tx)) 618 if (async_tx_test_ack(tx))
619 list_move_tail(&desc->node, &ioat->free_desc); 619 list_move_tail(&desc->node, &ioat->free_desc);
620 } else { 620 } else {
621 /* 621 /*
622 * last used desc. Do not remove, so we can 622 * last used desc. Do not remove, so we can
623 * append from it. 623 * append from it.
624 */ 624 */
625 625
626 /* if nothing else is pending, cancel the 626 /* if nothing else is pending, cancel the
627 * completion timeout 627 * completion timeout
628 */ 628 */
629 if (n == &ioat->used_desc) { 629 if (n == &ioat->used_desc) {
630 dev_dbg(to_dev(chan), 630 dev_dbg(to_dev(chan),
631 "%s cancel completion timeout\n", 631 "%s cancel completion timeout\n",
632 __func__); 632 __func__);
633 clear_bit(IOAT_COMPLETION_PENDING, &chan->state); 633 clear_bit(IOAT_COMPLETION_PENDING, &chan->state);
634 } 634 }
635 635
636 /* TODO check status bits? */ 636 /* TODO check status bits? */
637 break; 637 break;
638 } 638 }
639 } 639 }
640 640
641 chan->last_completion = phys_complete; 641 chan->last_completion = phys_complete;
642 } 642 }
643 643
644 /** 644 /**
645 * ioat1_cleanup - clean up finished descriptors 645 * ioat1_cleanup - clean up finished descriptors
646 * @chan: ioat channel to be cleaned up 646 * @chan: ioat channel to be cleaned up
647 * 647 *
648 * To prevent lock contention, we defer cleanup while the locks are 648 * To prevent lock contention, we defer cleanup while the locks are
649 * contended; a terminal timeout then forces cleanup and catches 649 * contended; a terminal timeout then forces cleanup and catches
650 * completion notification errors. 650 * completion notification errors.
651 */ 651 */
652 static void ioat1_cleanup(struct ioat_dma_chan *ioat) 652 static void ioat1_cleanup(struct ioat_dma_chan *ioat)
653 { 653 {
654 struct ioat_chan_common *chan = &ioat->base; 654 struct ioat_chan_common *chan = &ioat->base;
655 dma_addr_t phys_complete; 655 dma_addr_t phys_complete;
656 656
657 prefetch(chan->completion); 657 prefetch(chan->completion);
658 658
659 if (!spin_trylock_bh(&chan->cleanup_lock)) 659 if (!spin_trylock_bh(&chan->cleanup_lock))
660 return; 660 return;
661 661
662 if (!ioat_cleanup_preamble(chan, &phys_complete)) { 662 if (!ioat_cleanup_preamble(chan, &phys_complete)) {
663 spin_unlock_bh(&chan->cleanup_lock); 663 spin_unlock_bh(&chan->cleanup_lock);
664 return; 664 return;
665 } 665 }
666 666
667 if (!spin_trylock_bh(&ioat->desc_lock)) { 667 if (!spin_trylock_bh(&ioat->desc_lock)) {
668 spin_unlock_bh(&chan->cleanup_lock); 668 spin_unlock_bh(&chan->cleanup_lock);
669 return; 669 return;
670 } 670 }
671 671
672 __cleanup(ioat, phys_complete); 672 __cleanup(ioat, phys_complete);
673 673
674 spin_unlock_bh(&ioat->desc_lock); 674 spin_unlock_bh(&ioat->desc_lock);
675 spin_unlock_bh(&chan->cleanup_lock); 675 spin_unlock_bh(&chan->cleanup_lock);
676 } 676 }
677 677
678 static void ioat1_timer_event(unsigned long data) 678 static void ioat1_timer_event(unsigned long data)
679 { 679 {
680 struct ioat_dma_chan *ioat = to_ioat_chan((void *) data); 680 struct ioat_dma_chan *ioat = to_ioat_chan((void *) data);
681 struct ioat_chan_common *chan = &ioat->base; 681 struct ioat_chan_common *chan = &ioat->base;
682 682
683 dev_dbg(to_dev(chan), "%s: state: %lx\n", __func__, chan->state); 683 dev_dbg(to_dev(chan), "%s: state: %lx\n", __func__, chan->state);
684 684
685 spin_lock_bh(&chan->cleanup_lock); 685 spin_lock_bh(&chan->cleanup_lock);
686 if (test_and_clear_bit(IOAT_RESET_PENDING, &chan->state)) { 686 if (test_and_clear_bit(IOAT_RESET_PENDING, &chan->state)) {
687 struct ioat_desc_sw *desc; 687 struct ioat_desc_sw *desc;
688 688
689 spin_lock_bh(&ioat->desc_lock); 689 spin_lock_bh(&ioat->desc_lock);
690 690
691 /* restart active descriptors */ 691 /* restart active descriptors */
692 desc = to_ioat_desc(ioat->used_desc.prev); 692 desc = to_ioat_desc(ioat->used_desc.prev);
693 ioat_set_chainaddr(ioat, desc->txd.phys); 693 ioat_set_chainaddr(ioat, desc->txd.phys);
694 ioat_start(chan); 694 ioat_start(chan);
695 695
696 ioat->pending = 0; 696 ioat->pending = 0;
697 set_bit(IOAT_COMPLETION_PENDING, &chan->state); 697 set_bit(IOAT_COMPLETION_PENDING, &chan->state);
698 mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); 698 mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
699 spin_unlock_bh(&ioat->desc_lock); 699 spin_unlock_bh(&ioat->desc_lock);
700 } else if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) { 700 } else if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) {
701 dma_addr_t phys_complete; 701 dma_addr_t phys_complete;
702 702
703 spin_lock_bh(&ioat->desc_lock); 703 spin_lock_bh(&ioat->desc_lock);
704 /* if we haven't made progress and we have already 704 /* if we haven't made progress and we have already
705 * acknowledged a pending completion once, then be more 705 * acknowledged a pending completion once, then be more
706 * forceful with a restart 706 * forceful with a restart
707 */ 707 */
708 if (ioat_cleanup_preamble(chan, &phys_complete)) 708 if (ioat_cleanup_preamble(chan, &phys_complete))
709 __cleanup(ioat, phys_complete); 709 __cleanup(ioat, phys_complete);
710 else if (test_bit(IOAT_COMPLETION_ACK, &chan->state)) 710 else if (test_bit(IOAT_COMPLETION_ACK, &chan->state))
711 ioat1_reset_channel(ioat); 711 ioat1_reset_channel(ioat);
712 else { 712 else {
713 u64 status = ioat_chansts(chan); 713 u64 status = ioat_chansts(chan);
714 714
715 /* manually update the last completion address */ 715 /* manually update the last completion address */
716 if (ioat_chansts_to_addr(status) != 0) 716 if (ioat_chansts_to_addr(status) != 0)
717 *chan->completion = status; 717 *chan->completion = status;
718 718
719 set_bit(IOAT_COMPLETION_ACK, &chan->state); 719 set_bit(IOAT_COMPLETION_ACK, &chan->state);
720 mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); 720 mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
721 } 721 }
722 spin_unlock_bh(&ioat->desc_lock); 722 spin_unlock_bh(&ioat->desc_lock);
723 } 723 }
724 spin_unlock_bh(&chan->cleanup_lock); 724 spin_unlock_bh(&chan->cleanup_lock);
725 } 725 }
726 726
727 enum dma_status 727 enum dma_status
728 ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie, 728 ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie,
729 struct dma_tx_state *txstate) 729 struct dma_tx_state *txstate)
730 { 730 {
731 struct ioat_chan_common *chan = to_chan_common(c); 731 struct ioat_chan_common *chan = to_chan_common(c);
732 struct ioatdma_device *device = chan->device; 732 struct ioatdma_device *device = chan->device;
733 enum dma_status ret; 733 enum dma_status ret;
734 734
735 ret = dma_cookie_status(c, cookie, txstate); 735 ret = dma_cookie_status(c, cookie, txstate);
736 if (ret == DMA_SUCCESS) 736 if (ret == DMA_COMPLETE)
737 return ret; 737 return ret;
738 738
739 device->cleanup_fn((unsigned long) c); 739 device->cleanup_fn((unsigned long) c);
740 740
741 return dma_cookie_status(c, cookie, txstate); 741 return dma_cookie_status(c, cookie, txstate);
742 } 742 }
743 743
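ioat_dma_tx_status() above reflects the convention this series standardizes on: a finished transfer reports DMA_COMPLETE (previously DMA_SUCCESS). A hypothetical caller-side check, sketched with the generic dmaengine helper rather than anything ioat-specific:

#include <linux/dmaengine.h>

/* Illustrative only: ask the channel whether the descriptor identified by
 * "cookie" has finished, comparing against DMA_COMPLETE as above. Passing
 * NULL for last/used skips the cookie-progress reporting. */
static bool example_tx_done(struct dma_chan *chan, dma_cookie_t cookie)
{
	return dma_async_is_tx_complete(chan, cookie, NULL, NULL) == DMA_COMPLETE;
}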
744 static void ioat1_dma_start_null_desc(struct ioat_dma_chan *ioat) 744 static void ioat1_dma_start_null_desc(struct ioat_dma_chan *ioat)
745 { 745 {
746 struct ioat_chan_common *chan = &ioat->base; 746 struct ioat_chan_common *chan = &ioat->base;
747 struct ioat_desc_sw *desc; 747 struct ioat_desc_sw *desc;
748 struct ioat_dma_descriptor *hw; 748 struct ioat_dma_descriptor *hw;
749 749
750 spin_lock_bh(&ioat->desc_lock); 750 spin_lock_bh(&ioat->desc_lock);
751 751
752 desc = ioat1_dma_get_next_descriptor(ioat); 752 desc = ioat1_dma_get_next_descriptor(ioat);
753 753
754 if (!desc) { 754 if (!desc) {
755 dev_err(to_dev(chan), 755 dev_err(to_dev(chan),
756 "Unable to start null desc - get next desc failed\n"); 756 "Unable to start null desc - get next desc failed\n");
757 spin_unlock_bh(&ioat->desc_lock); 757 spin_unlock_bh(&ioat->desc_lock);
758 return; 758 return;
759 } 759 }
760 760
761 hw = desc->hw; 761 hw = desc->hw;
762 hw->ctl = 0; 762 hw->ctl = 0;
763 hw->ctl_f.null = 1; 763 hw->ctl_f.null = 1;
764 hw->ctl_f.int_en = 1; 764 hw->ctl_f.int_en = 1;
765 hw->ctl_f.compl_write = 1; 765 hw->ctl_f.compl_write = 1;
766 /* set size to non-zero value (channel returns error when size is 0) */ 766 /* set size to non-zero value (channel returns error when size is 0) */
767 hw->size = NULL_DESC_BUFFER_SIZE; 767 hw->size = NULL_DESC_BUFFER_SIZE;
768 hw->src_addr = 0; 768 hw->src_addr = 0;
769 hw->dst_addr = 0; 769 hw->dst_addr = 0;
770 async_tx_ack(&desc->txd); 770 async_tx_ack(&desc->txd);
771 hw->next = 0; 771 hw->next = 0;
772 list_add_tail(&desc->node, &ioat->used_desc); 772 list_add_tail(&desc->node, &ioat->used_desc);
773 dump_desc_dbg(ioat, desc); 773 dump_desc_dbg(ioat, desc);
774 774
775 ioat_set_chainaddr(ioat, desc->txd.phys); 775 ioat_set_chainaddr(ioat, desc->txd.phys);
776 ioat_start(chan); 776 ioat_start(chan);
777 spin_unlock_bh(&ioat->desc_lock); 777 spin_unlock_bh(&ioat->desc_lock);
778 } 778 }
779 779
780 /* 780 /*
781 * Perform an IOAT transaction to verify the HW works. 781 * Perform an IOAT transaction to verify the HW works.
782 */ 782 */
783 #define IOAT_TEST_SIZE 2000 783 #define IOAT_TEST_SIZE 2000
784 784
785 static void ioat_dma_test_callback(void *dma_async_param) 785 static void ioat_dma_test_callback(void *dma_async_param)
786 { 786 {
787 struct completion *cmp = dma_async_param; 787 struct completion *cmp = dma_async_param;
788 788
789 complete(cmp); 789 complete(cmp);
790 } 790 }
791 791
792 /** 792 /**
793 * ioat_dma_self_test - Perform an IOAT transaction to verify the HW works. 793 * ioat_dma_self_test - Perform an IOAT transaction to verify the HW works.
794 * @device: device to be tested 794 * @device: device to be tested
795 */ 795 */
796 int ioat_dma_self_test(struct ioatdma_device *device) 796 int ioat_dma_self_test(struct ioatdma_device *device)
797 { 797 {
798 int i; 798 int i;
799 u8 *src; 799 u8 *src;
800 u8 *dest; 800 u8 *dest;
801 struct dma_device *dma = &device->common; 801 struct dma_device *dma = &device->common;
802 struct device *dev = &device->pdev->dev; 802 struct device *dev = &device->pdev->dev;
803 struct dma_chan *dma_chan; 803 struct dma_chan *dma_chan;
804 struct dma_async_tx_descriptor *tx; 804 struct dma_async_tx_descriptor *tx;
805 dma_addr_t dma_dest, dma_src; 805 dma_addr_t dma_dest, dma_src;
806 dma_cookie_t cookie; 806 dma_cookie_t cookie;
807 int err = 0; 807 int err = 0;
808 struct completion cmp; 808 struct completion cmp;
809 unsigned long tmo; 809 unsigned long tmo;
810 unsigned long flags; 810 unsigned long flags;
811 811
812 src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL); 812 src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
813 if (!src) 813 if (!src)
814 return -ENOMEM; 814 return -ENOMEM;
815 dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL); 815 dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
816 if (!dest) { 816 if (!dest) {
817 kfree(src); 817 kfree(src);
818 return -ENOMEM; 818 return -ENOMEM;
819 } 819 }
820 820
821 /* Fill in src buffer */ 821 /* Fill in src buffer */
822 for (i = 0; i < IOAT_TEST_SIZE; i++) 822 for (i = 0; i < IOAT_TEST_SIZE; i++)
823 src[i] = (u8)i; 823 src[i] = (u8)i;
824 824
825 /* Start copy, using first DMA channel */ 825 /* Start copy, using first DMA channel */
826 dma_chan = container_of(dma->channels.next, struct dma_chan, 826 dma_chan = container_of(dma->channels.next, struct dma_chan,
827 device_node); 827 device_node);
828 if (dma->device_alloc_chan_resources(dma_chan) < 1) { 828 if (dma->device_alloc_chan_resources(dma_chan) < 1) {
829 dev_err(dev, "selftest cannot allocate chan resource\n"); 829 dev_err(dev, "selftest cannot allocate chan resource\n");
830 err = -ENODEV; 830 err = -ENODEV;
831 goto out; 831 goto out;
832 } 832 }
833 833
834 dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE); 834 dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
835 dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE); 835 dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
836 flags = DMA_COMPL_SKIP_SRC_UNMAP | DMA_COMPL_SKIP_DEST_UNMAP | 836 flags = DMA_COMPL_SKIP_SRC_UNMAP | DMA_COMPL_SKIP_DEST_UNMAP |
837 DMA_PREP_INTERRUPT; 837 DMA_PREP_INTERRUPT;
838 tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src, 838 tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src,
839 IOAT_TEST_SIZE, flags); 839 IOAT_TEST_SIZE, flags);
840 if (!tx) { 840 if (!tx) {
841 dev_err(dev, "Self-test prep failed, disabling\n"); 841 dev_err(dev, "Self-test prep failed, disabling\n");
842 err = -ENODEV; 842 err = -ENODEV;
843 goto unmap_dma; 843 goto unmap_dma;
844 } 844 }
845 845
846 async_tx_ack(tx); 846 async_tx_ack(tx);
847 init_completion(&cmp); 847 init_completion(&cmp);
848 tx->callback = ioat_dma_test_callback; 848 tx->callback = ioat_dma_test_callback;
849 tx->callback_param = &cmp; 849 tx->callback_param = &cmp;
850 cookie = tx->tx_submit(tx); 850 cookie = tx->tx_submit(tx);
851 if (cookie < 0) { 851 if (cookie < 0) {
852 dev_err(dev, "Self-test setup failed, disabling\n"); 852 dev_err(dev, "Self-test setup failed, disabling\n");
853 err = -ENODEV; 853 err = -ENODEV;
854 goto unmap_dma; 854 goto unmap_dma;
855 } 855 }
856 dma->device_issue_pending(dma_chan); 856 dma->device_issue_pending(dma_chan);
857 857
858 tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); 858 tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
859 859
860 if (tmo == 0 || 860 if (tmo == 0 ||
861 dma->device_tx_status(dma_chan, cookie, NULL) 861 dma->device_tx_status(dma_chan, cookie, NULL)
862 != DMA_SUCCESS) { 862 != DMA_COMPLETE) {
863 dev_err(dev, "Self-test copy timed out, disabling\n"); 863 dev_err(dev, "Self-test copy timed out, disabling\n");
864 err = -ENODEV; 864 err = -ENODEV;
865 goto unmap_dma; 865 goto unmap_dma;
866 } 866 }
867 if (memcmp(src, dest, IOAT_TEST_SIZE)) { 867 if (memcmp(src, dest, IOAT_TEST_SIZE)) {
868 dev_err(dev, "Self-test copy failed compare, disabling\n"); 868 dev_err(dev, "Self-test copy failed compare, disabling\n");
869 err = -ENODEV; 869 err = -ENODEV;
870 goto free_resources; 870 goto free_resources;
871 } 871 }
872 872
873 unmap_dma: 873 unmap_dma:
874 dma_unmap_single(dev, dma_src, IOAT_TEST_SIZE, DMA_TO_DEVICE); 874 dma_unmap_single(dev, dma_src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
875 dma_unmap_single(dev, dma_dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE); 875 dma_unmap_single(dev, dma_dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
876 free_resources: 876 free_resources:
877 dma->device_free_chan_resources(dma_chan); 877 dma->device_free_chan_resources(dma_chan);
878 out: 878 out:
879 kfree(src); 879 kfree(src);
880 kfree(dest); 880 kfree(dest);
881 return err; 881 return err;
882 } 882 }
883 883
884 static char ioat_interrupt_style[32] = "msix"; 884 static char ioat_interrupt_style[32] = "msix";
885 module_param_string(ioat_interrupt_style, ioat_interrupt_style, 885 module_param_string(ioat_interrupt_style, ioat_interrupt_style,
886 sizeof(ioat_interrupt_style), 0644); 886 sizeof(ioat_interrupt_style), 0644);
887 MODULE_PARM_DESC(ioat_interrupt_style, 887 MODULE_PARM_DESC(ioat_interrupt_style,
888 "set ioat interrupt style: msix (default), " 888 "set ioat interrupt style: msix (default), "
889 "msix-single-vector, msi, intx)"); 889 "msix-single-vector, msi, intx)");
890 890
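The parameter above only picks the first interrupt mode that ioat_dma_setup_interrupts() below attempts; on failure the code still falls back through the remaining modes (MSI-X, single-vector MSI-X, MSI, then legacy INTx). For example, loading the driver with ioat_interrupt_style=msi on the module command line skips the MSI-X paths entirely (the parameter name comes from the code above; the module name itself is not spelled out here).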
891 /** 891 /**
892 * ioat_dma_setup_interrupts - setup interrupt handler 892 * ioat_dma_setup_interrupts - setup interrupt handler
893 * @device: ioat device 893 * @device: ioat device
894 */ 894 */
895 int ioat_dma_setup_interrupts(struct ioatdma_device *device) 895 int ioat_dma_setup_interrupts(struct ioatdma_device *device)
896 { 896 {
897 struct ioat_chan_common *chan; 897 struct ioat_chan_common *chan;
898 struct pci_dev *pdev = device->pdev; 898 struct pci_dev *pdev = device->pdev;
899 struct device *dev = &pdev->dev; 899 struct device *dev = &pdev->dev;
900 struct msix_entry *msix; 900 struct msix_entry *msix;
901 int i, j, msixcnt; 901 int i, j, msixcnt;
902 int err = -EINVAL; 902 int err = -EINVAL;
903 u8 intrctrl = 0; 903 u8 intrctrl = 0;
904 904
905 if (!strcmp(ioat_interrupt_style, "msix")) 905 if (!strcmp(ioat_interrupt_style, "msix"))
906 goto msix; 906 goto msix;
907 if (!strcmp(ioat_interrupt_style, "msix-single-vector")) 907 if (!strcmp(ioat_interrupt_style, "msix-single-vector"))
908 goto msix_single_vector; 908 goto msix_single_vector;
909 if (!strcmp(ioat_interrupt_style, "msi")) 909 if (!strcmp(ioat_interrupt_style, "msi"))
910 goto msi; 910 goto msi;
911 if (!strcmp(ioat_interrupt_style, "intx")) 911 if (!strcmp(ioat_interrupt_style, "intx"))
912 goto intx; 912 goto intx;
913 dev_err(dev, "invalid ioat_interrupt_style %s\n", ioat_interrupt_style); 913 dev_err(dev, "invalid ioat_interrupt_style %s\n", ioat_interrupt_style);
914 goto err_no_irq; 914 goto err_no_irq;
915 915
916 msix: 916 msix:
917 /* The number of MSI-X vectors should equal the number of channels */ 917 /* The number of MSI-X vectors should equal the number of channels */
918 msixcnt = device->common.chancnt; 918 msixcnt = device->common.chancnt;
919 for (i = 0; i < msixcnt; i++) 919 for (i = 0; i < msixcnt; i++)
920 device->msix_entries[i].entry = i; 920 device->msix_entries[i].entry = i;
921 921
922 err = pci_enable_msix(pdev, device->msix_entries, msixcnt); 922 err = pci_enable_msix(pdev, device->msix_entries, msixcnt);
923 if (err < 0) 923 if (err < 0)
924 goto msi; 924 goto msi;
925 if (err > 0) 925 if (err > 0)
926 goto msix_single_vector; 926 goto msix_single_vector;
927 927
928 for (i = 0; i < msixcnt; i++) { 928 for (i = 0; i < msixcnt; i++) {
929 msix = &device->msix_entries[i]; 929 msix = &device->msix_entries[i];
930 chan = ioat_chan_by_index(device, i); 930 chan = ioat_chan_by_index(device, i);
931 err = devm_request_irq(dev, msix->vector, 931 err = devm_request_irq(dev, msix->vector,
932 ioat_dma_do_interrupt_msix, 0, 932 ioat_dma_do_interrupt_msix, 0,
933 "ioat-msix", chan); 933 "ioat-msix", chan);
934 if (err) { 934 if (err) {
935 for (j = 0; j < i; j++) { 935 for (j = 0; j < i; j++) {
936 msix = &device->msix_entries[j]; 936 msix = &device->msix_entries[j];
937 chan = ioat_chan_by_index(device, j); 937 chan = ioat_chan_by_index(device, j);
938 devm_free_irq(dev, msix->vector, chan); 938 devm_free_irq(dev, msix->vector, chan);
939 } 939 }
940 goto msix_single_vector; 940 goto msix_single_vector;
941 } 941 }
942 } 942 }
943 intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL; 943 intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL;
944 device->irq_mode = IOAT_MSIX; 944 device->irq_mode = IOAT_MSIX;
945 goto done; 945 goto done;
946 946
947 msix_single_vector: 947 msix_single_vector:
948 msix = &device->msix_entries[0]; 948 msix = &device->msix_entries[0];
949 msix->entry = 0; 949 msix->entry = 0;
950 err = pci_enable_msix(pdev, device->msix_entries, 1); 950 err = pci_enable_msix(pdev, device->msix_entries, 1);
951 if (err) 951 if (err)
952 goto msi; 952 goto msi;
953 953
954 err = devm_request_irq(dev, msix->vector, ioat_dma_do_interrupt, 0, 954 err = devm_request_irq(dev, msix->vector, ioat_dma_do_interrupt, 0,
955 "ioat-msix", device); 955 "ioat-msix", device);
956 if (err) { 956 if (err) {
957 pci_disable_msix(pdev); 957 pci_disable_msix(pdev);
958 goto msi; 958 goto msi;
959 } 959 }
960 device->irq_mode = IOAT_MSIX_SINGLE; 960 device->irq_mode = IOAT_MSIX_SINGLE;
961 goto done; 961 goto done;
962 962
963 msi: 963 msi:
964 err = pci_enable_msi(pdev); 964 err = pci_enable_msi(pdev);
965 if (err) 965 if (err)
966 goto intx; 966 goto intx;
967 967
968 err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt, 0, 968 err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt, 0,
969 "ioat-msi", device); 969 "ioat-msi", device);
970 if (err) { 970 if (err) {
971 pci_disable_msi(pdev); 971 pci_disable_msi(pdev);
972 goto intx; 972 goto intx;
973 } 973 }
974 device->irq_mode = IOAT_MSI; 974 device->irq_mode = IOAT_MSI;
975 goto done; 975 goto done;
976 976
977 intx: 977 intx:
978 err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt, 978 err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt,
979 IRQF_SHARED, "ioat-intx", device); 979 IRQF_SHARED, "ioat-intx", device);
980 if (err) 980 if (err)
981 goto err_no_irq; 981 goto err_no_irq;
982 982
983 device->irq_mode = IOAT_INTX; 983 device->irq_mode = IOAT_INTX;
984 done: 984 done:
985 if (device->intr_quirk) 985 if (device->intr_quirk)
986 device->intr_quirk(device); 986 device->intr_quirk(device);
987 intrctrl |= IOAT_INTRCTRL_MASTER_INT_EN; 987 intrctrl |= IOAT_INTRCTRL_MASTER_INT_EN;
988 writeb(intrctrl, device->reg_base + IOAT_INTRCTRL_OFFSET); 988 writeb(intrctrl, device->reg_base + IOAT_INTRCTRL_OFFSET);
989 return 0; 989 return 0;
990 990
991 err_no_irq: 991 err_no_irq:
992 /* Disable all interrupt generation */ 992 /* Disable all interrupt generation */
993 writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET); 993 writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
994 device->irq_mode = IOAT_NOIRQ; 994 device->irq_mode = IOAT_NOIRQ;
995 dev_err(dev, "no usable interrupts\n"); 995 dev_err(dev, "no usable interrupts\n");
996 return err; 996 return err;
997 } 997 }
998 EXPORT_SYMBOL(ioat_dma_setup_interrupts); 998 EXPORT_SYMBOL(ioat_dma_setup_interrupts);
999 999
1000 static void ioat_disable_interrupts(struct ioatdma_device *device) 1000 static void ioat_disable_interrupts(struct ioatdma_device *device)
1001 { 1001 {
1002 /* Disable all interrupt generation */ 1002 /* Disable all interrupt generation */
1003 writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET); 1003 writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
1004 } 1004 }
1005 1005
1006 int ioat_probe(struct ioatdma_device *device) 1006 int ioat_probe(struct ioatdma_device *device)
1007 { 1007 {
1008 int err = -ENODEV; 1008 int err = -ENODEV;
1009 struct dma_device *dma = &device->common; 1009 struct dma_device *dma = &device->common;
1010 struct pci_dev *pdev = device->pdev; 1010 struct pci_dev *pdev = device->pdev;
1011 struct device *dev = &pdev->dev; 1011 struct device *dev = &pdev->dev;
1012 1012
1013 /* DMA coherent memory pool for DMA descriptor allocations */ 1013 /* DMA coherent memory pool for DMA descriptor allocations */
1014 device->dma_pool = pci_pool_create("dma_desc_pool", pdev, 1014 device->dma_pool = pci_pool_create("dma_desc_pool", pdev,
1015 sizeof(struct ioat_dma_descriptor), 1015 sizeof(struct ioat_dma_descriptor),
1016 64, 0); 1016 64, 0);
1017 if (!device->dma_pool) { 1017 if (!device->dma_pool) {
1018 err = -ENOMEM; 1018 err = -ENOMEM;
1019 goto err_dma_pool; 1019 goto err_dma_pool;
1020 } 1020 }
1021 1021
1022 device->completion_pool = pci_pool_create("completion_pool", pdev, 1022 device->completion_pool = pci_pool_create("completion_pool", pdev,
1023 sizeof(u64), SMP_CACHE_BYTES, 1023 sizeof(u64), SMP_CACHE_BYTES,
1024 SMP_CACHE_BYTES); 1024 SMP_CACHE_BYTES);
1025 1025
1026 if (!device->completion_pool) { 1026 if (!device->completion_pool) {
1027 err = -ENOMEM; 1027 err = -ENOMEM;
1028 goto err_completion_pool; 1028 goto err_completion_pool;
1029 } 1029 }
1030 1030
1031 device->enumerate_channels(device); 1031 device->enumerate_channels(device);
1032 1032
1033 dma_cap_set(DMA_MEMCPY, dma->cap_mask); 1033 dma_cap_set(DMA_MEMCPY, dma->cap_mask);
1034 dma->dev = &pdev->dev; 1034 dma->dev = &pdev->dev;
1035 1035
1036 if (!dma->chancnt) { 1036 if (!dma->chancnt) {
1037 dev_err(dev, "channel enumeration error\n"); 1037 dev_err(dev, "channel enumeration error\n");
1038 goto err_setup_interrupts; 1038 goto err_setup_interrupts;
1039 } 1039 }
1040 1040
1041 err = ioat_dma_setup_interrupts(device); 1041 err = ioat_dma_setup_interrupts(device);
1042 if (err) 1042 if (err)
1043 goto err_setup_interrupts; 1043 goto err_setup_interrupts;
1044 1044
1045 err = device->self_test(device); 1045 err = device->self_test(device);
1046 if (err) 1046 if (err)
1047 goto err_self_test; 1047 goto err_self_test;
1048 1048
1049 return 0; 1049 return 0;
1050 1050
1051 err_self_test: 1051 err_self_test:
1052 ioat_disable_interrupts(device); 1052 ioat_disable_interrupts(device);
1053 err_setup_interrupts: 1053 err_setup_interrupts:
1054 pci_pool_destroy(device->completion_pool); 1054 pci_pool_destroy(device->completion_pool);
1055 err_completion_pool: 1055 err_completion_pool:
1056 pci_pool_destroy(device->dma_pool); 1056 pci_pool_destroy(device->dma_pool);
1057 err_dma_pool: 1057 err_dma_pool:
1058 return err; 1058 return err;
1059 } 1059 }
1060 1060
1061 int ioat_register(struct ioatdma_device *device) 1061 int ioat_register(struct ioatdma_device *device)
1062 { 1062 {
1063 int err = dma_async_device_register(&device->common); 1063 int err = dma_async_device_register(&device->common);
1064 1064
1065 if (err) { 1065 if (err) {
1066 ioat_disable_interrupts(device); 1066 ioat_disable_interrupts(device);
1067 pci_pool_destroy(device->completion_pool); 1067 pci_pool_destroy(device->completion_pool);
1068 pci_pool_destroy(device->dma_pool); 1068 pci_pool_destroy(device->dma_pool);
1069 } 1069 }
1070 1070
1071 return err; 1071 return err;
1072 } 1072 }
1073 1073
1074 /* ioat1_intr_quirk - fix up dma ctrl register to enable / disable msi */ 1074 /* ioat1_intr_quirk - fix up dma ctrl register to enable / disable msi */
1075 static void ioat1_intr_quirk(struct ioatdma_device *device) 1075 static void ioat1_intr_quirk(struct ioatdma_device *device)
1076 { 1076 {
1077 struct pci_dev *pdev = device->pdev; 1077 struct pci_dev *pdev = device->pdev;
1078 u32 dmactrl; 1078 u32 dmactrl;
1079 1079
1080 pci_read_config_dword(pdev, IOAT_PCI_DMACTRL_OFFSET, &dmactrl); 1080 pci_read_config_dword(pdev, IOAT_PCI_DMACTRL_OFFSET, &dmactrl);
1081 if (pdev->msi_enabled) 1081 if (pdev->msi_enabled)
1082 dmactrl |= IOAT_PCI_DMACTRL_MSI_EN; 1082 dmactrl |= IOAT_PCI_DMACTRL_MSI_EN;
1083 else 1083 else
1084 dmactrl &= ~IOAT_PCI_DMACTRL_MSI_EN; 1084 dmactrl &= ~IOAT_PCI_DMACTRL_MSI_EN;
1085 pci_write_config_dword(pdev, IOAT_PCI_DMACTRL_OFFSET, dmactrl); 1085 pci_write_config_dword(pdev, IOAT_PCI_DMACTRL_OFFSET, dmactrl);
1086 } 1086 }
1087 1087
1088 static ssize_t ring_size_show(struct dma_chan *c, char *page) 1088 static ssize_t ring_size_show(struct dma_chan *c, char *page)
1089 { 1089 {
1090 struct ioat_dma_chan *ioat = to_ioat_chan(c); 1090 struct ioat_dma_chan *ioat = to_ioat_chan(c);
1091 1091
1092 return sprintf(page, "%d\n", ioat->desccount); 1092 return sprintf(page, "%d\n", ioat->desccount);
1093 } 1093 }
1094 static struct ioat_sysfs_entry ring_size_attr = __ATTR_RO(ring_size); 1094 static struct ioat_sysfs_entry ring_size_attr = __ATTR_RO(ring_size);
1095 1095
1096 static ssize_t ring_active_show(struct dma_chan *c, char *page) 1096 static ssize_t ring_active_show(struct dma_chan *c, char *page)
1097 { 1097 {
1098 struct ioat_dma_chan *ioat = to_ioat_chan(c); 1098 struct ioat_dma_chan *ioat = to_ioat_chan(c);
1099 1099
1100 return sprintf(page, "%d\n", ioat->active); 1100 return sprintf(page, "%d\n", ioat->active);
1101 } 1101 }
1102 static struct ioat_sysfs_entry ring_active_attr = __ATTR_RO(ring_active); 1102 static struct ioat_sysfs_entry ring_active_attr = __ATTR_RO(ring_active);
1103 1103
1104 static ssize_t cap_show(struct dma_chan *c, char *page) 1104 static ssize_t cap_show(struct dma_chan *c, char *page)
1105 { 1105 {
1106 struct dma_device *dma = c->device; 1106 struct dma_device *dma = c->device;
1107 1107
1108 return sprintf(page, "copy%s%s%s%s%s\n", 1108 return sprintf(page, "copy%s%s%s%s%s\n",
1109 dma_has_cap(DMA_PQ, dma->cap_mask) ? " pq" : "", 1109 dma_has_cap(DMA_PQ, dma->cap_mask) ? " pq" : "",
1110 dma_has_cap(DMA_PQ_VAL, dma->cap_mask) ? " pq_val" : "", 1110 dma_has_cap(DMA_PQ_VAL, dma->cap_mask) ? " pq_val" : "",
1111 dma_has_cap(DMA_XOR, dma->cap_mask) ? " xor" : "", 1111 dma_has_cap(DMA_XOR, dma->cap_mask) ? " xor" : "",
1112 dma_has_cap(DMA_XOR_VAL, dma->cap_mask) ? " xor_val" : "", 1112 dma_has_cap(DMA_XOR_VAL, dma->cap_mask) ? " xor_val" : "",
1113 dma_has_cap(DMA_INTERRUPT, dma->cap_mask) ? " intr" : ""); 1113 dma_has_cap(DMA_INTERRUPT, dma->cap_mask) ? " intr" : "");
1114 1114
1115 } 1115 }
1116 struct ioat_sysfs_entry ioat_cap_attr = __ATTR_RO(cap); 1116 struct ioat_sysfs_entry ioat_cap_attr = __ATTR_RO(cap);
1117 1117
1118 static ssize_t version_show(struct dma_chan *c, char *page) 1118 static ssize_t version_show(struct dma_chan *c, char *page)
1119 { 1119 {
1120 struct dma_device *dma = c->device; 1120 struct dma_device *dma = c->device;
1121 struct ioatdma_device *device = to_ioatdma_device(dma); 1121 struct ioatdma_device *device = to_ioatdma_device(dma);
1122 1122
1123 return sprintf(page, "%d.%d\n", 1123 return sprintf(page, "%d.%d\n",
1124 device->version >> 4, device->version & 0xf); 1124 device->version >> 4, device->version & 0xf);
1125 } 1125 }
1126 struct ioat_sysfs_entry ioat_version_attr = __ATTR_RO(version); 1126 struct ioat_sysfs_entry ioat_version_attr = __ATTR_RO(version);
1127 1127
1128 static struct attribute *ioat1_attrs[] = { 1128 static struct attribute *ioat1_attrs[] = {
1129 &ring_size_attr.attr, 1129 &ring_size_attr.attr,
1130 &ring_active_attr.attr, 1130 &ring_active_attr.attr,
1131 &ioat_cap_attr.attr, 1131 &ioat_cap_attr.attr,
1132 &ioat_version_attr.attr, 1132 &ioat_version_attr.attr,
1133 NULL, 1133 NULL,
1134 }; 1134 };
1135 1135
1136 static ssize_t 1136 static ssize_t
1137 ioat_attr_show(struct kobject *kobj, struct attribute *attr, char *page) 1137 ioat_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
1138 { 1138 {
1139 struct ioat_sysfs_entry *entry; 1139 struct ioat_sysfs_entry *entry;
1140 struct ioat_chan_common *chan; 1140 struct ioat_chan_common *chan;
1141 1141
1142 entry = container_of(attr, struct ioat_sysfs_entry, attr); 1142 entry = container_of(attr, struct ioat_sysfs_entry, attr);
1143 chan = container_of(kobj, struct ioat_chan_common, kobj); 1143 chan = container_of(kobj, struct ioat_chan_common, kobj);
1144 1144
1145 if (!entry->show) 1145 if (!entry->show)
1146 return -EIO; 1146 return -EIO;
1147 return entry->show(&chan->common, page); 1147 return entry->show(&chan->common, page);
1148 } 1148 }
1149 1149
1150 const struct sysfs_ops ioat_sysfs_ops = { 1150 const struct sysfs_ops ioat_sysfs_ops = {
1151 .show = ioat_attr_show, 1151 .show = ioat_attr_show,
1152 }; 1152 };
1153 1153
1154 static struct kobj_type ioat1_ktype = { 1154 static struct kobj_type ioat1_ktype = {
1155 .sysfs_ops = &ioat_sysfs_ops, 1155 .sysfs_ops = &ioat_sysfs_ops,
1156 .default_attrs = ioat1_attrs, 1156 .default_attrs = ioat1_attrs,
1157 }; 1157 };
1158 1158
1159 void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type) 1159 void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type)
1160 { 1160 {
1161 struct dma_device *dma = &device->common; 1161 struct dma_device *dma = &device->common;
1162 struct dma_chan *c; 1162 struct dma_chan *c;
1163 1163
1164 list_for_each_entry(c, &dma->channels, device_node) { 1164 list_for_each_entry(c, &dma->channels, device_node) {
1165 struct ioat_chan_common *chan = to_chan_common(c); 1165 struct ioat_chan_common *chan = to_chan_common(c);
1166 struct kobject *parent = &c->dev->device.kobj; 1166 struct kobject *parent = &c->dev->device.kobj;
1167 int err; 1167 int err;
1168 1168
1169 err = kobject_init_and_add(&chan->kobj, type, parent, "quickdata"); 1169 err = kobject_init_and_add(&chan->kobj, type, parent, "quickdata");
1170 if (err) { 1170 if (err) {
1171 dev_warn(to_dev(chan), 1171 dev_warn(to_dev(chan),
1172 "sysfs init error (%d), continuing...\n", err); 1172 "sysfs init error (%d), continuing...\n", err);
1173 kobject_put(&chan->kobj); 1173 kobject_put(&chan->kobj);
1174 set_bit(IOAT_KOBJ_INIT_FAIL, &chan->state); 1174 set_bit(IOAT_KOBJ_INIT_FAIL, &chan->state);
1175 } 1175 }
1176 } 1176 }
1177 } 1177 }
1178 1178
1179 void ioat_kobject_del(struct ioatdma_device *device) 1179 void ioat_kobject_del(struct ioatdma_device *device)
1180 { 1180 {
1181 struct dma_device *dma = &device->common; 1181 struct dma_device *dma = &device->common;
1182 struct dma_chan *c; 1182 struct dma_chan *c;
1183 1183
1184 list_for_each_entry(c, &dma->channels, device_node) { 1184 list_for_each_entry(c, &dma->channels, device_node) {
1185 struct ioat_chan_common *chan = to_chan_common(c); 1185 struct ioat_chan_common *chan = to_chan_common(c);
1186 1186
1187 if (!test_bit(IOAT_KOBJ_INIT_FAIL, &chan->state)) { 1187 if (!test_bit(IOAT_KOBJ_INIT_FAIL, &chan->state)) {
1188 kobject_del(&chan->kobj); 1188 kobject_del(&chan->kobj);
1189 kobject_put(&chan->kobj); 1189 kobject_put(&chan->kobj);
1190 } 1190 }
1191 } 1191 }
1192 } 1192 }
1193 1193
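A minimal sketch of the sysfs registration pattern used by ioat_kobject_add()/ioat_kobject_del() above, assuming the driver's internal headers are in scope; example_chan_sysfs_add()/example_chan_sysfs_del() are hypothetical names, not part of the driver. The key points are that a failed kobject_init_and_add() still requires kobject_put() to drop the reference it took, and the IOAT_KOBJ_INIT_FAIL bit lets teardown skip kobject_del() for channels that never made it into sysfs.

/* illustration only -- not part of the driver */
static int example_chan_sysfs_add(struct ioat_chan_common *chan,
				  struct kobj_type *type,
				  struct kobject *parent)
{
	int err = kobject_init_and_add(&chan->kobj, type, parent, "quickdata");

	if (err) {
		/* init_and_add took a reference even on failure; drop it */
		kobject_put(&chan->kobj);
		set_bit(IOAT_KOBJ_INIT_FAIL, &chan->state);
	}
	return err;
}

static void example_chan_sysfs_del(struct ioat_chan_common *chan)
{
	/* only unregister channels that registered successfully */
	if (!test_bit(IOAT_KOBJ_INIT_FAIL, &chan->state)) {
		kobject_del(&chan->kobj);
		kobject_put(&chan->kobj);
	}
}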
1194 int ioat1_dma_probe(struct ioatdma_device *device, int dca) 1194 int ioat1_dma_probe(struct ioatdma_device *device, int dca)
1195 { 1195 {
1196 struct pci_dev *pdev = device->pdev; 1196 struct pci_dev *pdev = device->pdev;
1197 struct dma_device *dma; 1197 struct dma_device *dma;
1198 int err; 1198 int err;
1199 1199
1200 device->intr_quirk = ioat1_intr_quirk; 1200 device->intr_quirk = ioat1_intr_quirk;
1201 device->enumerate_channels = ioat1_enumerate_channels; 1201 device->enumerate_channels = ioat1_enumerate_channels;
1202 device->self_test = ioat_dma_self_test; 1202 device->self_test = ioat_dma_self_test;
1203 device->timer_fn = ioat1_timer_event; 1203 device->timer_fn = ioat1_timer_event;
1204 device->cleanup_fn = ioat1_cleanup_event; 1204 device->cleanup_fn = ioat1_cleanup_event;
1205 dma = &device->common; 1205 dma = &device->common;
1206 dma->device_prep_dma_memcpy = ioat1_dma_prep_memcpy; 1206 dma->device_prep_dma_memcpy = ioat1_dma_prep_memcpy;
1207 dma->device_issue_pending = ioat1_dma_memcpy_issue_pending; 1207 dma->device_issue_pending = ioat1_dma_memcpy_issue_pending;
1208 dma->device_alloc_chan_resources = ioat1_dma_alloc_chan_resources; 1208 dma->device_alloc_chan_resources = ioat1_dma_alloc_chan_resources;
1209 dma->device_free_chan_resources = ioat1_dma_free_chan_resources; 1209 dma->device_free_chan_resources = ioat1_dma_free_chan_resources;
1210 dma->device_tx_status = ioat_dma_tx_status; 1210 dma->device_tx_status = ioat_dma_tx_status;
1211 1211
1212 err = ioat_probe(device); 1212 err = ioat_probe(device);
1213 if (err) 1213 if (err)
1214 return err; 1214 return err;
1215 ioat_set_tcp_copy_break(4096); 1215 ioat_set_tcp_copy_break(4096);
1216 err = ioat_register(device); 1216 err = ioat_register(device);
1217 if (err) 1217 if (err)
1218 return err; 1218 return err;
1219 ioat_kobject_add(device, &ioat1_ktype); 1219 ioat_kobject_add(device, &ioat1_ktype);
1220 1220
1221 if (dca) 1221 if (dca)
1222 device->dca = ioat_dca_init(pdev, device->reg_base); 1222 device->dca = ioat_dca_init(pdev, device->reg_base);
1223 1223
1224 return err; 1224 return err;
1225 } 1225 }
1226 1226
1227 void ioat_dma_remove(struct ioatdma_device *device) 1227 void ioat_dma_remove(struct ioatdma_device *device)
1228 { 1228 {
1229 struct dma_device *dma = &device->common; 1229 struct dma_device *dma = &device->common;
1230 1230
1231 ioat_disable_interrupts(device); 1231 ioat_disable_interrupts(device);
1232 1232
1233 ioat_kobject_del(device); 1233 ioat_kobject_del(device);
1234 1234
1235 dma_async_device_unregister(dma); 1235 dma_async_device_unregister(dma);
1236 1236
1237 pci_pool_destroy(device->dma_pool); 1237 pci_pool_destroy(device->dma_pool);
1238 pci_pool_destroy(device->completion_pool); 1238 pci_pool_destroy(device->completion_pool);
1239 1239
1240 INIT_LIST_HEAD(&dma->channels); 1240 INIT_LIST_HEAD(&dma->channels);
1241 } 1241 }
1242 1242
drivers/dma/ioat/dma_v3.c
1 /* 1 /*
2 * This file is provided under a dual BSD/GPLv2 license. When using or 2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license. 3 * redistributing this file, you may do so under either license.
4 * 4 *
5 * GPL LICENSE SUMMARY 5 * GPL LICENSE SUMMARY
6 * 6 *
7 * Copyright(c) 2004 - 2009 Intel Corporation. All rights reserved. 7 * Copyright(c) 2004 - 2009 Intel Corporation. All rights reserved.
8 * 8 *
9 * This program is free software; you can redistribute it and/or modify it 9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms and conditions of the GNU General Public License, 10 * under the terms and conditions of the GNU General Public License,
11 * version 2, as published by the Free Software Foundation. 11 * version 2, as published by the Free Software Foundation.
12 * 12 *
13 * This program is distributed in the hope that it will be useful, but WITHOUT 13 * This program is distributed in the hope that it will be useful, but WITHOUT
14 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 14 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 15 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
16 * more details. 16 * more details.
17 * 17 *
18 * You should have received a copy of the GNU General Public License along with 18 * You should have received a copy of the GNU General Public License along with
19 * this program; if not, write to the Free Software Foundation, Inc., 19 * this program; if not, write to the Free Software Foundation, Inc.,
20 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 20 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21 * 21 *
22 * The full GNU General Public License is included in this distribution in 22 * The full GNU General Public License is included in this distribution in
23 * the file called "COPYING". 23 * the file called "COPYING".
24 * 24 *
25 * BSD LICENSE 25 * BSD LICENSE
26 * 26 *
27 * Copyright(c) 2004-2009 Intel Corporation. All rights reserved. 27 * Copyright(c) 2004-2009 Intel Corporation. All rights reserved.
28 * 28 *
29 * Redistribution and use in source and binary forms, with or without 29 * Redistribution and use in source and binary forms, with or without
30 * modification, are permitted provided that the following conditions are met: 30 * modification, are permitted provided that the following conditions are met:
31 * 31 *
32 * * Redistributions of source code must retain the above copyright 32 * * Redistributions of source code must retain the above copyright
33 * notice, this list of conditions and the following disclaimer. 33 * notice, this list of conditions and the following disclaimer.
34 * * Redistributions in binary form must reproduce the above copyright 34 * * Redistributions in binary form must reproduce the above copyright
35 * notice, this list of conditions and the following disclaimer in 35 * notice, this list of conditions and the following disclaimer in
36 * the documentation and/or other materials provided with the 36 * the documentation and/or other materials provided with the
37 * distribution. 37 * distribution.
38 * * Neither the name of Intel Corporation nor the names of its 38 * * Neither the name of Intel Corporation nor the names of its
39 * contributors may be used to endorse or promote products derived 39 * contributors may be used to endorse or promote products derived
40 * from this software without specific prior written permission. 40 * from this software without specific prior written permission.
41 * 41 *
42 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 42 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
43 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 43 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
44 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 44 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
45 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 45 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
46 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 46 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
47 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 47 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
48 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 48 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
49 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 49 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
50 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 50 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
51 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 51 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
52 * POSSIBILITY OF SUCH DAMAGE. 52 * POSSIBILITY OF SUCH DAMAGE.
53 */ 53 */
54 54
55 /* 55 /*
56 * Support routines for v3+ hardware 56 * Support routines for v3+ hardware
57 */ 57 */
58 #include <linux/module.h> 58 #include <linux/module.h>
59 #include <linux/pci.h> 59 #include <linux/pci.h>
60 #include <linux/gfp.h> 60 #include <linux/gfp.h>
61 #include <linux/dmaengine.h> 61 #include <linux/dmaengine.h>
62 #include <linux/dma-mapping.h> 62 #include <linux/dma-mapping.h>
63 #include <linux/prefetch.h> 63 #include <linux/prefetch.h>
64 #include "../dmaengine.h" 64 #include "../dmaengine.h"
65 #include "registers.h" 65 #include "registers.h"
66 #include "hw.h" 66 #include "hw.h"
67 #include "dma.h" 67 #include "dma.h"
68 #include "dma_v2.h" 68 #include "dma_v2.h"
69 69
70 /* ioat hardware assumes at least two sources for raid operations */ 70 /* ioat hardware assumes at least two sources for raid operations */
71 #define src_cnt_to_sw(x) ((x) + 2) 71 #define src_cnt_to_sw(x) ((x) + 2)
72 #define src_cnt_to_hw(x) ((x) - 2) 72 #define src_cnt_to_hw(x) ((x) - 2)
73 #define ndest_to_sw(x) ((x) + 1) 73 #define ndest_to_sw(x) ((x) + 1)
74 #define ndest_to_hw(x) ((x) - 1) 74 #define ndest_to_hw(x) ((x) - 1)
75 #define src16_cnt_to_sw(x) ((x) + 9) 75 #define src16_cnt_to_sw(x) ((x) + 9)
76 #define src16_cnt_to_hw(x) ((x) - 9) 76 #define src16_cnt_to_hw(x) ((x) - 9)
77 77
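A small sketch of the source-count encodings defined above, assuming these macros are visible; the helper is purely illustrative. The hardware field stores the count minus the implicit minimum: 2 for the base xor/pq formats, 9 for the 16-source pq format.

/* illustration only: 8 sources -> hw field 6, 16-source pq -> hw field 7 */
static void __maybe_unused src_cnt_encoding_example(void)
{
	BUILD_BUG_ON(src_cnt_to_hw(8) != 6);
	BUILD_BUG_ON(src_cnt_to_sw(6) != 8);
	BUILD_BUG_ON(src16_cnt_to_hw(16) != 7);
	BUILD_BUG_ON(src16_cnt_to_sw(7) != 16);
}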
78 /* provide a lookup table for setting the source address in the base or 78 /* provide a lookup table for setting the source address in the base or
79 * extended descriptor of an xor or pq descriptor 79 * extended descriptor of an xor or pq descriptor
80 */ 80 */
81 static const u8 xor_idx_to_desc = 0xe0; 81 static const u8 xor_idx_to_desc = 0xe0;
82 static const u8 xor_idx_to_field[] = { 1, 4, 5, 6, 7, 0, 1, 2 }; 82 static const u8 xor_idx_to_field[] = { 1, 4, 5, 6, 7, 0, 1, 2 };
83 static const u8 pq_idx_to_desc = 0xf8; 83 static const u8 pq_idx_to_desc = 0xf8;
84 static const u8 pq16_idx_to_desc[] = { 0, 0, 1, 1, 1, 1, 1, 1, 1, 84 static const u8 pq16_idx_to_desc[] = { 0, 0, 1, 1, 1, 1, 1, 1, 1,
85 2, 2, 2, 2, 2, 2, 2 }; 85 2, 2, 2, 2, 2, 2, 2 };
86 static const u8 pq_idx_to_field[] = { 1, 4, 5, 0, 1, 2, 4, 5 }; 86 static const u8 pq_idx_to_field[] = { 1, 4, 5, 0, 1, 2, 4, 5 };
87 static const u8 pq16_idx_to_field[] = { 1, 4, 1, 2, 3, 4, 5, 6, 7, 87 static const u8 pq16_idx_to_field[] = { 1, 4, 1, 2, 3, 4, 5, 6, 7,
88 0, 1, 2, 3, 4, 5, 6 }; 88 0, 1, 2, 3, 4, 5, 6 };
89 89
90 /* 90 /*
91 * technically sources 1 and 2 do not require SED, but the op will have 91 * technically sources 1 and 2 do not require SED, but the op will have
92 * at least 9 descriptors so that's irrelevant. 92 * at least 9 descriptors so that's irrelevant.
93 */ 93 */
94 static const u8 pq16_idx_to_sed[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 94 static const u8 pq16_idx_to_sed[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0,
95 1, 1, 1, 1, 1, 1, 1 }; 95 1, 1, 1, 1, 1, 1, 1 };
96 96
97 static void ioat3_eh(struct ioat2_dma_chan *ioat); 97 static void ioat3_eh(struct ioat2_dma_chan *ioat);
98 98
99 static dma_addr_t xor_get_src(struct ioat_raw_descriptor *descs[2], int idx) 99 static dma_addr_t xor_get_src(struct ioat_raw_descriptor *descs[2], int idx)
100 { 100 {
101 struct ioat_raw_descriptor *raw = descs[xor_idx_to_desc >> idx & 1]; 101 struct ioat_raw_descriptor *raw = descs[xor_idx_to_desc >> idx & 1];
102 102
103 return raw->field[xor_idx_to_field[idx]]; 103 return raw->field[xor_idx_to_field[idx]];
104 } 104 }
105 105
106 static void xor_set_src(struct ioat_raw_descriptor *descs[2], 106 static void xor_set_src(struct ioat_raw_descriptor *descs[2],
107 dma_addr_t addr, u32 offset, int idx) 107 dma_addr_t addr, u32 offset, int idx)
108 { 108 {
109 struct ioat_raw_descriptor *raw = descs[xor_idx_to_desc >> idx & 1]; 109 struct ioat_raw_descriptor *raw = descs[xor_idx_to_desc >> idx & 1];
110 110
111 raw->field[xor_idx_to_field[idx]] = addr + offset; 111 raw->field[xor_idx_to_field[idx]] = addr + offset;
112 } 112 }
113 113
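A short illustration, assuming the lookup tables above are in scope, of how xor_set_src()/xor_get_src() resolve a source index: bit idx of xor_idx_to_desc selects the base (0) or extended (1) descriptor, and xor_idx_to_field[idx] selects the field slot inside it. The helper name is hypothetical.

static void __maybe_unused xor_slot_example(void)
{
	int idx = 5;					/* sixth source */
	int which = (xor_idx_to_desc >> idx) & 1;	/* 0xe0 >> 5 & 1 == 1: extended desc */
	int field = xor_idx_to_field[idx];		/* == 0: first field of that desc */

	pr_debug("xor src %d -> descriptor %d, field %d\n", idx, which, field);
}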
114 static dma_addr_t pq_get_src(struct ioat_raw_descriptor *descs[2], int idx) 114 static dma_addr_t pq_get_src(struct ioat_raw_descriptor *descs[2], int idx)
115 { 115 {
116 struct ioat_raw_descriptor *raw = descs[pq_idx_to_desc >> idx & 1]; 116 struct ioat_raw_descriptor *raw = descs[pq_idx_to_desc >> idx & 1];
117 117
118 return raw->field[pq_idx_to_field[idx]]; 118 return raw->field[pq_idx_to_field[idx]];
119 } 119 }
120 120
121 static dma_addr_t pq16_get_src(struct ioat_raw_descriptor *desc[3], int idx) 121 static dma_addr_t pq16_get_src(struct ioat_raw_descriptor *desc[3], int idx)
122 { 122 {
123 struct ioat_raw_descriptor *raw = desc[pq16_idx_to_desc[idx]]; 123 struct ioat_raw_descriptor *raw = desc[pq16_idx_to_desc[idx]];
124 124
125 return raw->field[pq16_idx_to_field[idx]]; 125 return raw->field[pq16_idx_to_field[idx]];
126 } 126 }
127 127
128 static void pq_set_src(struct ioat_raw_descriptor *descs[2], 128 static void pq_set_src(struct ioat_raw_descriptor *descs[2],
129 dma_addr_t addr, u32 offset, u8 coef, int idx) 129 dma_addr_t addr, u32 offset, u8 coef, int idx)
130 { 130 {
131 struct ioat_pq_descriptor *pq = (struct ioat_pq_descriptor *) descs[0]; 131 struct ioat_pq_descriptor *pq = (struct ioat_pq_descriptor *) descs[0];
132 struct ioat_raw_descriptor *raw = descs[pq_idx_to_desc >> idx & 1]; 132 struct ioat_raw_descriptor *raw = descs[pq_idx_to_desc >> idx & 1];
133 133
134 raw->field[pq_idx_to_field[idx]] = addr + offset; 134 raw->field[pq_idx_to_field[idx]] = addr + offset;
135 pq->coef[idx] = coef; 135 pq->coef[idx] = coef;
136 } 136 }
137 137
138 static int sed_get_pq16_pool_idx(int src_cnt) 138 static int sed_get_pq16_pool_idx(int src_cnt)
139 { 139 {
140 140
141 return pq16_idx_to_sed[src_cnt]; 141 return pq16_idx_to_sed[src_cnt];
142 } 142 }
143 143
144 static bool is_jf_ioat(struct pci_dev *pdev) 144 static bool is_jf_ioat(struct pci_dev *pdev)
145 { 145 {
146 switch (pdev->device) { 146 switch (pdev->device) {
147 case PCI_DEVICE_ID_INTEL_IOAT_JSF0: 147 case PCI_DEVICE_ID_INTEL_IOAT_JSF0:
148 case PCI_DEVICE_ID_INTEL_IOAT_JSF1: 148 case PCI_DEVICE_ID_INTEL_IOAT_JSF1:
149 case PCI_DEVICE_ID_INTEL_IOAT_JSF2: 149 case PCI_DEVICE_ID_INTEL_IOAT_JSF2:
150 case PCI_DEVICE_ID_INTEL_IOAT_JSF3: 150 case PCI_DEVICE_ID_INTEL_IOAT_JSF3:
151 case PCI_DEVICE_ID_INTEL_IOAT_JSF4: 151 case PCI_DEVICE_ID_INTEL_IOAT_JSF4:
152 case PCI_DEVICE_ID_INTEL_IOAT_JSF5: 152 case PCI_DEVICE_ID_INTEL_IOAT_JSF5:
153 case PCI_DEVICE_ID_INTEL_IOAT_JSF6: 153 case PCI_DEVICE_ID_INTEL_IOAT_JSF6:
154 case PCI_DEVICE_ID_INTEL_IOAT_JSF7: 154 case PCI_DEVICE_ID_INTEL_IOAT_JSF7:
155 case PCI_DEVICE_ID_INTEL_IOAT_JSF8: 155 case PCI_DEVICE_ID_INTEL_IOAT_JSF8:
156 case PCI_DEVICE_ID_INTEL_IOAT_JSF9: 156 case PCI_DEVICE_ID_INTEL_IOAT_JSF9:
157 return true; 157 return true;
158 default: 158 default:
159 return false; 159 return false;
160 } 160 }
161 } 161 }
162 162
163 static bool is_snb_ioat(struct pci_dev *pdev) 163 static bool is_snb_ioat(struct pci_dev *pdev)
164 { 164 {
165 switch (pdev->device) { 165 switch (pdev->device) {
166 case PCI_DEVICE_ID_INTEL_IOAT_SNB0: 166 case PCI_DEVICE_ID_INTEL_IOAT_SNB0:
167 case PCI_DEVICE_ID_INTEL_IOAT_SNB1: 167 case PCI_DEVICE_ID_INTEL_IOAT_SNB1:
168 case PCI_DEVICE_ID_INTEL_IOAT_SNB2: 168 case PCI_DEVICE_ID_INTEL_IOAT_SNB2:
169 case PCI_DEVICE_ID_INTEL_IOAT_SNB3: 169 case PCI_DEVICE_ID_INTEL_IOAT_SNB3:
170 case PCI_DEVICE_ID_INTEL_IOAT_SNB4: 170 case PCI_DEVICE_ID_INTEL_IOAT_SNB4:
171 case PCI_DEVICE_ID_INTEL_IOAT_SNB5: 171 case PCI_DEVICE_ID_INTEL_IOAT_SNB5:
172 case PCI_DEVICE_ID_INTEL_IOAT_SNB6: 172 case PCI_DEVICE_ID_INTEL_IOAT_SNB6:
173 case PCI_DEVICE_ID_INTEL_IOAT_SNB7: 173 case PCI_DEVICE_ID_INTEL_IOAT_SNB7:
174 case PCI_DEVICE_ID_INTEL_IOAT_SNB8: 174 case PCI_DEVICE_ID_INTEL_IOAT_SNB8:
175 case PCI_DEVICE_ID_INTEL_IOAT_SNB9: 175 case PCI_DEVICE_ID_INTEL_IOAT_SNB9:
176 return true; 176 return true;
177 default: 177 default:
178 return false; 178 return false;
179 } 179 }
180 } 180 }
181 181
182 static bool is_ivb_ioat(struct pci_dev *pdev) 182 static bool is_ivb_ioat(struct pci_dev *pdev)
183 { 183 {
184 switch (pdev->device) { 184 switch (pdev->device) {
185 case PCI_DEVICE_ID_INTEL_IOAT_IVB0: 185 case PCI_DEVICE_ID_INTEL_IOAT_IVB0:
186 case PCI_DEVICE_ID_INTEL_IOAT_IVB1: 186 case PCI_DEVICE_ID_INTEL_IOAT_IVB1:
187 case PCI_DEVICE_ID_INTEL_IOAT_IVB2: 187 case PCI_DEVICE_ID_INTEL_IOAT_IVB2:
188 case PCI_DEVICE_ID_INTEL_IOAT_IVB3: 188 case PCI_DEVICE_ID_INTEL_IOAT_IVB3:
189 case PCI_DEVICE_ID_INTEL_IOAT_IVB4: 189 case PCI_DEVICE_ID_INTEL_IOAT_IVB4:
190 case PCI_DEVICE_ID_INTEL_IOAT_IVB5: 190 case PCI_DEVICE_ID_INTEL_IOAT_IVB5:
191 case PCI_DEVICE_ID_INTEL_IOAT_IVB6: 191 case PCI_DEVICE_ID_INTEL_IOAT_IVB6:
192 case PCI_DEVICE_ID_INTEL_IOAT_IVB7: 192 case PCI_DEVICE_ID_INTEL_IOAT_IVB7:
193 case PCI_DEVICE_ID_INTEL_IOAT_IVB8: 193 case PCI_DEVICE_ID_INTEL_IOAT_IVB8:
194 case PCI_DEVICE_ID_INTEL_IOAT_IVB9: 194 case PCI_DEVICE_ID_INTEL_IOAT_IVB9:
195 return true; 195 return true;
196 default: 196 default:
197 return false; 197 return false;
198 } 198 }
199 199
200 } 200 }
201 201
202 static bool is_hsw_ioat(struct pci_dev *pdev) 202 static bool is_hsw_ioat(struct pci_dev *pdev)
203 { 203 {
204 switch (pdev->device) { 204 switch (pdev->device) {
205 case PCI_DEVICE_ID_INTEL_IOAT_HSW0: 205 case PCI_DEVICE_ID_INTEL_IOAT_HSW0:
206 case PCI_DEVICE_ID_INTEL_IOAT_HSW1: 206 case PCI_DEVICE_ID_INTEL_IOAT_HSW1:
207 case PCI_DEVICE_ID_INTEL_IOAT_HSW2: 207 case PCI_DEVICE_ID_INTEL_IOAT_HSW2:
208 case PCI_DEVICE_ID_INTEL_IOAT_HSW3: 208 case PCI_DEVICE_ID_INTEL_IOAT_HSW3:
209 case PCI_DEVICE_ID_INTEL_IOAT_HSW4: 209 case PCI_DEVICE_ID_INTEL_IOAT_HSW4:
210 case PCI_DEVICE_ID_INTEL_IOAT_HSW5: 210 case PCI_DEVICE_ID_INTEL_IOAT_HSW5:
211 case PCI_DEVICE_ID_INTEL_IOAT_HSW6: 211 case PCI_DEVICE_ID_INTEL_IOAT_HSW6:
212 case PCI_DEVICE_ID_INTEL_IOAT_HSW7: 212 case PCI_DEVICE_ID_INTEL_IOAT_HSW7:
213 case PCI_DEVICE_ID_INTEL_IOAT_HSW8: 213 case PCI_DEVICE_ID_INTEL_IOAT_HSW8:
214 case PCI_DEVICE_ID_INTEL_IOAT_HSW9: 214 case PCI_DEVICE_ID_INTEL_IOAT_HSW9:
215 return true; 215 return true;
216 default: 216 default:
217 return false; 217 return false;
218 } 218 }
219 219
220 } 220 }
221 221
222 static bool is_xeon_cb32(struct pci_dev *pdev) 222 static bool is_xeon_cb32(struct pci_dev *pdev)
223 { 223 {
224 return is_jf_ioat(pdev) || is_snb_ioat(pdev) || is_ivb_ioat(pdev) || 224 return is_jf_ioat(pdev) || is_snb_ioat(pdev) || is_ivb_ioat(pdev) ||
225 is_hsw_ioat(pdev); 225 is_hsw_ioat(pdev);
226 } 226 }
227 227
228 static bool is_bwd_ioat(struct pci_dev *pdev) 228 static bool is_bwd_ioat(struct pci_dev *pdev)
229 { 229 {
230 switch (pdev->device) { 230 switch (pdev->device) {
231 case PCI_DEVICE_ID_INTEL_IOAT_BWD0: 231 case PCI_DEVICE_ID_INTEL_IOAT_BWD0:
232 case PCI_DEVICE_ID_INTEL_IOAT_BWD1: 232 case PCI_DEVICE_ID_INTEL_IOAT_BWD1:
233 case PCI_DEVICE_ID_INTEL_IOAT_BWD2: 233 case PCI_DEVICE_ID_INTEL_IOAT_BWD2:
234 case PCI_DEVICE_ID_INTEL_IOAT_BWD3: 234 case PCI_DEVICE_ID_INTEL_IOAT_BWD3:
235 return true; 235 return true;
236 default: 236 default:
237 return false; 237 return false;
238 } 238 }
239 } 239 }
240 240
241 static bool is_bwd_noraid(struct pci_dev *pdev) 241 static bool is_bwd_noraid(struct pci_dev *pdev)
242 { 242 {
243 switch (pdev->device) { 243 switch (pdev->device) {
244 case PCI_DEVICE_ID_INTEL_IOAT_BWD2: 244 case PCI_DEVICE_ID_INTEL_IOAT_BWD2:
245 case PCI_DEVICE_ID_INTEL_IOAT_BWD3: 245 case PCI_DEVICE_ID_INTEL_IOAT_BWD3:
246 return true; 246 return true;
247 default: 247 default:
248 return false; 248 return false;
249 } 249 }
250 250
251 } 251 }
252 252
253 static void pq16_set_src(struct ioat_raw_descriptor *desc[3], 253 static void pq16_set_src(struct ioat_raw_descriptor *desc[3],
254 dma_addr_t addr, u32 offset, u8 coef, unsigned idx) 254 dma_addr_t addr, u32 offset, u8 coef, unsigned idx)
255 { 255 {
256 struct ioat_pq_descriptor *pq = (struct ioat_pq_descriptor *)desc[0]; 256 struct ioat_pq_descriptor *pq = (struct ioat_pq_descriptor *)desc[0];
257 struct ioat_pq16a_descriptor *pq16 = 257 struct ioat_pq16a_descriptor *pq16 =
258 (struct ioat_pq16a_descriptor *)desc[1]; 258 (struct ioat_pq16a_descriptor *)desc[1];
259 struct ioat_raw_descriptor *raw = desc[pq16_idx_to_desc[idx]]; 259 struct ioat_raw_descriptor *raw = desc[pq16_idx_to_desc[idx]];
260 260
261 raw->field[pq16_idx_to_field[idx]] = addr + offset; 261 raw->field[pq16_idx_to_field[idx]] = addr + offset;
262 262
263 if (idx < 8) 263 if (idx < 8)
264 pq->coef[idx] = coef; 264 pq->coef[idx] = coef;
265 else 265 else
266 pq16->coef[idx - 8] = coef; 266 pq16->coef[idx - 8] = coef;
267 } 267 }
268 268
269 static struct ioat_sed_ent * 269 static struct ioat_sed_ent *
270 ioat3_alloc_sed(struct ioatdma_device *device, unsigned int hw_pool) 270 ioat3_alloc_sed(struct ioatdma_device *device, unsigned int hw_pool)
271 { 271 {
272 struct ioat_sed_ent *sed; 272 struct ioat_sed_ent *sed;
273 gfp_t flags = __GFP_ZERO | GFP_ATOMIC; 273 gfp_t flags = __GFP_ZERO | GFP_ATOMIC;
274 274
275 sed = kmem_cache_alloc(device->sed_pool, flags); 275 sed = kmem_cache_alloc(device->sed_pool, flags);
276 if (!sed) 276 if (!sed)
277 return NULL; 277 return NULL;
278 278
279 sed->hw_pool = hw_pool; 279 sed->hw_pool = hw_pool;
280 sed->hw = dma_pool_alloc(device->sed_hw_pool[hw_pool], 280 sed->hw = dma_pool_alloc(device->sed_hw_pool[hw_pool],
281 flags, &sed->dma); 281 flags, &sed->dma);
282 if (!sed->hw) { 282 if (!sed->hw) {
283 kmem_cache_free(device->sed_pool, sed); 283 kmem_cache_free(device->sed_pool, sed);
284 return NULL; 284 return NULL;
285 } 285 }
286 286
287 return sed; 287 return sed;
288 } 288 }
289 289
290 static void ioat3_free_sed(struct ioatdma_device *device, struct ioat_sed_ent *sed) 290 static void ioat3_free_sed(struct ioatdma_device *device, struct ioat_sed_ent *sed)
291 { 291 {
292 if (!sed) 292 if (!sed)
293 return; 293 return;
294 294
295 dma_pool_free(device->sed_hw_pool[sed->hw_pool], sed->hw, sed->dma); 295 dma_pool_free(device->sed_hw_pool[sed->hw_pool], sed->hw, sed->dma);
296 kmem_cache_free(device->sed_pool, sed); 296 kmem_cache_free(device->sed_pool, sed);
297 } 297 }
298 298
299 static void ioat3_dma_unmap(struct ioat2_dma_chan *ioat, 299 static void ioat3_dma_unmap(struct ioat2_dma_chan *ioat,
300 struct ioat_ring_ent *desc, int idx) 300 struct ioat_ring_ent *desc, int idx)
301 { 301 {
302 struct ioat_chan_common *chan = &ioat->base; 302 struct ioat_chan_common *chan = &ioat->base;
303 struct pci_dev *pdev = chan->device->pdev; 303 struct pci_dev *pdev = chan->device->pdev;
304 size_t len = desc->len; 304 size_t len = desc->len;
305 size_t offset = len - desc->hw->size; 305 size_t offset = len - desc->hw->size;
306 struct dma_async_tx_descriptor *tx = &desc->txd; 306 struct dma_async_tx_descriptor *tx = &desc->txd;
307 enum dma_ctrl_flags flags = tx->flags; 307 enum dma_ctrl_flags flags = tx->flags;
308 308
309 switch (desc->hw->ctl_f.op) { 309 switch (desc->hw->ctl_f.op) {
310 case IOAT_OP_COPY: 310 case IOAT_OP_COPY:
311 if (!desc->hw->ctl_f.null) /* skip 'interrupt' ops */ 311 if (!desc->hw->ctl_f.null) /* skip 'interrupt' ops */
312 ioat_dma_unmap(chan, flags, len, desc->hw); 312 ioat_dma_unmap(chan, flags, len, desc->hw);
313 break; 313 break;
314 case IOAT_OP_XOR_VAL: 314 case IOAT_OP_XOR_VAL:
315 case IOAT_OP_XOR: { 315 case IOAT_OP_XOR: {
316 struct ioat_xor_descriptor *xor = desc->xor; 316 struct ioat_xor_descriptor *xor = desc->xor;
317 struct ioat_ring_ent *ext; 317 struct ioat_ring_ent *ext;
318 struct ioat_xor_ext_descriptor *xor_ex = NULL; 318 struct ioat_xor_ext_descriptor *xor_ex = NULL;
319 int src_cnt = src_cnt_to_sw(xor->ctl_f.src_cnt); 319 int src_cnt = src_cnt_to_sw(xor->ctl_f.src_cnt);
320 struct ioat_raw_descriptor *descs[2]; 320 struct ioat_raw_descriptor *descs[2];
321 int i; 321 int i;
322 322
323 if (src_cnt > 5) { 323 if (src_cnt > 5) {
324 ext = ioat2_get_ring_ent(ioat, idx + 1); 324 ext = ioat2_get_ring_ent(ioat, idx + 1);
325 xor_ex = ext->xor_ex; 325 xor_ex = ext->xor_ex;
326 } 326 }
327 327
328 if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) { 328 if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
329 descs[0] = (struct ioat_raw_descriptor *) xor; 329 descs[0] = (struct ioat_raw_descriptor *) xor;
330 descs[1] = (struct ioat_raw_descriptor *) xor_ex; 330 descs[1] = (struct ioat_raw_descriptor *) xor_ex;
331 for (i = 0; i < src_cnt; i++) { 331 for (i = 0; i < src_cnt; i++) {
332 dma_addr_t src = xor_get_src(descs, i); 332 dma_addr_t src = xor_get_src(descs, i);
333 333
334 ioat_unmap(pdev, src - offset, len, 334 ioat_unmap(pdev, src - offset, len,
335 PCI_DMA_TODEVICE, flags, 0); 335 PCI_DMA_TODEVICE, flags, 0);
336 } 336 }
337 337
338 /* dest is a source in xor validate operations */ 338 /* dest is a source in xor validate operations */
339 if (xor->ctl_f.op == IOAT_OP_XOR_VAL) { 339 if (xor->ctl_f.op == IOAT_OP_XOR_VAL) {
340 ioat_unmap(pdev, xor->dst_addr - offset, len, 340 ioat_unmap(pdev, xor->dst_addr - offset, len,
341 PCI_DMA_TODEVICE, flags, 1); 341 PCI_DMA_TODEVICE, flags, 1);
342 break; 342 break;
343 } 343 }
344 } 344 }
345 345
346 if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) 346 if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP))
347 ioat_unmap(pdev, xor->dst_addr - offset, len, 347 ioat_unmap(pdev, xor->dst_addr - offset, len,
348 PCI_DMA_FROMDEVICE, flags, 1); 348 PCI_DMA_FROMDEVICE, flags, 1);
349 break; 349 break;
350 } 350 }
351 case IOAT_OP_PQ_VAL: 351 case IOAT_OP_PQ_VAL:
352 case IOAT_OP_PQ: { 352 case IOAT_OP_PQ: {
353 struct ioat_pq_descriptor *pq = desc->pq; 353 struct ioat_pq_descriptor *pq = desc->pq;
354 struct ioat_ring_ent *ext; 354 struct ioat_ring_ent *ext;
355 struct ioat_pq_ext_descriptor *pq_ex = NULL; 355 struct ioat_pq_ext_descriptor *pq_ex = NULL;
356 int src_cnt = src_cnt_to_sw(pq->ctl_f.src_cnt); 356 int src_cnt = src_cnt_to_sw(pq->ctl_f.src_cnt);
357 struct ioat_raw_descriptor *descs[2]; 357 struct ioat_raw_descriptor *descs[2];
358 int i; 358 int i;
359 359
360 if (src_cnt > 3) { 360 if (src_cnt > 3) {
361 ext = ioat2_get_ring_ent(ioat, idx + 1); 361 ext = ioat2_get_ring_ent(ioat, idx + 1);
362 pq_ex = ext->pq_ex; 362 pq_ex = ext->pq_ex;
363 } 363 }
364 364
365 /* in the 'continue' case don't unmap the dests as sources */ 365 /* in the 'continue' case don't unmap the dests as sources */
366 if (dmaf_p_disabled_continue(flags)) 366 if (dmaf_p_disabled_continue(flags))
367 src_cnt--; 367 src_cnt--;
368 else if (dmaf_continue(flags)) 368 else if (dmaf_continue(flags))
369 src_cnt -= 3; 369 src_cnt -= 3;
370 370
371 if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) { 371 if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
372 descs[0] = (struct ioat_raw_descriptor *) pq; 372 descs[0] = (struct ioat_raw_descriptor *) pq;
373 descs[1] = (struct ioat_raw_descriptor *) pq_ex; 373 descs[1] = (struct ioat_raw_descriptor *) pq_ex;
374 for (i = 0; i < src_cnt; i++) { 374 for (i = 0; i < src_cnt; i++) {
375 dma_addr_t src = pq_get_src(descs, i); 375 dma_addr_t src = pq_get_src(descs, i);
376 376
377 ioat_unmap(pdev, src - offset, len, 377 ioat_unmap(pdev, src - offset, len,
378 PCI_DMA_TODEVICE, flags, 0); 378 PCI_DMA_TODEVICE, flags, 0);
379 } 379 }
380 380
381 /* the dests are sources in pq validate operations */ 381 /* the dests are sources in pq validate operations */
382 if (pq->ctl_f.op == IOAT_OP_XOR_VAL) { 382 if (pq->ctl_f.op == IOAT_OP_XOR_VAL) {
383 if (!(flags & DMA_PREP_PQ_DISABLE_P)) 383 if (!(flags & DMA_PREP_PQ_DISABLE_P))
384 ioat_unmap(pdev, pq->p_addr - offset, 384 ioat_unmap(pdev, pq->p_addr - offset,
385 len, PCI_DMA_TODEVICE, flags, 0); 385 len, PCI_DMA_TODEVICE, flags, 0);
386 if (!(flags & DMA_PREP_PQ_DISABLE_Q)) 386 if (!(flags & DMA_PREP_PQ_DISABLE_Q))
387 ioat_unmap(pdev, pq->q_addr - offset, 387 ioat_unmap(pdev, pq->q_addr - offset,
388 len, PCI_DMA_TODEVICE, flags, 0); 388 len, PCI_DMA_TODEVICE, flags, 0);
389 break; 389 break;
390 } 390 }
391 } 391 }
392 392
393 if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) { 393 if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
394 if (!(flags & DMA_PREP_PQ_DISABLE_P)) 394 if (!(flags & DMA_PREP_PQ_DISABLE_P))
395 ioat_unmap(pdev, pq->p_addr - offset, len, 395 ioat_unmap(pdev, pq->p_addr - offset, len,
396 PCI_DMA_BIDIRECTIONAL, flags, 1); 396 PCI_DMA_BIDIRECTIONAL, flags, 1);
397 if (!(flags & DMA_PREP_PQ_DISABLE_Q)) 397 if (!(flags & DMA_PREP_PQ_DISABLE_Q))
398 ioat_unmap(pdev, pq->q_addr - offset, len, 398 ioat_unmap(pdev, pq->q_addr - offset, len,
399 PCI_DMA_BIDIRECTIONAL, flags, 1); 399 PCI_DMA_BIDIRECTIONAL, flags, 1);
400 } 400 }
401 break; 401 break;
402 } 402 }
403 case IOAT_OP_PQ_16S: 403 case IOAT_OP_PQ_16S:
404 case IOAT_OP_PQ_VAL_16S: { 404 case IOAT_OP_PQ_VAL_16S: {
405 struct ioat_pq_descriptor *pq = desc->pq; 405 struct ioat_pq_descriptor *pq = desc->pq;
406 int src_cnt = src16_cnt_to_sw(pq->ctl_f.src_cnt); 406 int src_cnt = src16_cnt_to_sw(pq->ctl_f.src_cnt);
407 struct ioat_raw_descriptor *descs[4]; 407 struct ioat_raw_descriptor *descs[4];
408 int i; 408 int i;
409 409
410 /* in the 'continue' case don't unmap the dests as sources */ 410 /* in the 'continue' case don't unmap the dests as sources */
411 if (dmaf_p_disabled_continue(flags)) 411 if (dmaf_p_disabled_continue(flags))
412 src_cnt--; 412 src_cnt--;
413 else if (dmaf_continue(flags)) 413 else if (dmaf_continue(flags))
414 src_cnt -= 3; 414 src_cnt -= 3;
415 415
416 if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) { 416 if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
417 descs[0] = (struct ioat_raw_descriptor *)pq; 417 descs[0] = (struct ioat_raw_descriptor *)pq;
418 descs[1] = (struct ioat_raw_descriptor *)(desc->sed->hw); 418 descs[1] = (struct ioat_raw_descriptor *)(desc->sed->hw);
419 descs[2] = (struct ioat_raw_descriptor *)(&desc->sed->hw->b[0]); 419 descs[2] = (struct ioat_raw_descriptor *)(&desc->sed->hw->b[0]);
420 for (i = 0; i < src_cnt; i++) { 420 for (i = 0; i < src_cnt; i++) {
421 dma_addr_t src = pq16_get_src(descs, i); 421 dma_addr_t src = pq16_get_src(descs, i);
422 422
423 ioat_unmap(pdev, src - offset, len, 423 ioat_unmap(pdev, src - offset, len,
424 PCI_DMA_TODEVICE, flags, 0); 424 PCI_DMA_TODEVICE, flags, 0);
425 } 425 }
426 426
427 /* the dests are sources in pq validate operations */ 427 /* the dests are sources in pq validate operations */
428 if (pq->ctl_f.op == IOAT_OP_XOR_VAL) { 428 if (pq->ctl_f.op == IOAT_OP_XOR_VAL) {
429 if (!(flags & DMA_PREP_PQ_DISABLE_P)) 429 if (!(flags & DMA_PREP_PQ_DISABLE_P))
430 ioat_unmap(pdev, pq->p_addr - offset, 430 ioat_unmap(pdev, pq->p_addr - offset,
431 len, PCI_DMA_TODEVICE, 431 len, PCI_DMA_TODEVICE,
432 flags, 0); 432 flags, 0);
433 if (!(flags & DMA_PREP_PQ_DISABLE_Q)) 433 if (!(flags & DMA_PREP_PQ_DISABLE_Q))
434 ioat_unmap(pdev, pq->q_addr - offset, 434 ioat_unmap(pdev, pq->q_addr - offset,
435 len, PCI_DMA_TODEVICE, 435 len, PCI_DMA_TODEVICE,
436 flags, 0); 436 flags, 0);
437 break; 437 break;
438 } 438 }
439 } 439 }
440 440
441 if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) { 441 if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
442 if (!(flags & DMA_PREP_PQ_DISABLE_P)) 442 if (!(flags & DMA_PREP_PQ_DISABLE_P))
443 ioat_unmap(pdev, pq->p_addr - offset, len, 443 ioat_unmap(pdev, pq->p_addr - offset, len,
444 PCI_DMA_BIDIRECTIONAL, flags, 1); 444 PCI_DMA_BIDIRECTIONAL, flags, 1);
445 if (!(flags & DMA_PREP_PQ_DISABLE_Q)) 445 if (!(flags & DMA_PREP_PQ_DISABLE_Q))
446 ioat_unmap(pdev, pq->q_addr - offset, len, 446 ioat_unmap(pdev, pq->q_addr - offset, len,
447 PCI_DMA_BIDIRECTIONAL, flags, 1); 447 PCI_DMA_BIDIRECTIONAL, flags, 1);
448 } 448 }
449 break; 449 break;
450 } 450 }
451 default: 451 default:
452 dev_err(&pdev->dev, "%s: unknown op type: %#x\n", 452 dev_err(&pdev->dev, "%s: unknown op type: %#x\n",
453 __func__, desc->hw->ctl_f.op); 453 __func__, desc->hw->ctl_f.op);
454 } 454 }
455 } 455 }
456 456
457 static bool desc_has_ext(struct ioat_ring_ent *desc) 457 static bool desc_has_ext(struct ioat_ring_ent *desc)
458 { 458 {
459 struct ioat_dma_descriptor *hw = desc->hw; 459 struct ioat_dma_descriptor *hw = desc->hw;
460 460
461 if (hw->ctl_f.op == IOAT_OP_XOR || 461 if (hw->ctl_f.op == IOAT_OP_XOR ||
462 hw->ctl_f.op == IOAT_OP_XOR_VAL) { 462 hw->ctl_f.op == IOAT_OP_XOR_VAL) {
463 struct ioat_xor_descriptor *xor = desc->xor; 463 struct ioat_xor_descriptor *xor = desc->xor;
464 464
465 if (src_cnt_to_sw(xor->ctl_f.src_cnt) > 5) 465 if (src_cnt_to_sw(xor->ctl_f.src_cnt) > 5)
466 return true; 466 return true;
467 } else if (hw->ctl_f.op == IOAT_OP_PQ || 467 } else if (hw->ctl_f.op == IOAT_OP_PQ ||
468 hw->ctl_f.op == IOAT_OP_PQ_VAL) { 468 hw->ctl_f.op == IOAT_OP_PQ_VAL) {
469 struct ioat_pq_descriptor *pq = desc->pq; 469 struct ioat_pq_descriptor *pq = desc->pq;
470 470
471 if (src_cnt_to_sw(pq->ctl_f.src_cnt) > 3) 471 if (src_cnt_to_sw(pq->ctl_f.src_cnt) > 3)
472 return true; 472 return true;
473 } 473 }
474 474
475 return false; 475 return false;
476 } 476 }
477 477
478 static u64 ioat3_get_current_completion(struct ioat_chan_common *chan) 478 static u64 ioat3_get_current_completion(struct ioat_chan_common *chan)
479 { 479 {
480 u64 phys_complete; 480 u64 phys_complete;
481 u64 completion; 481 u64 completion;
482 482
483 completion = *chan->completion; 483 completion = *chan->completion;
484 phys_complete = ioat_chansts_to_addr(completion); 484 phys_complete = ioat_chansts_to_addr(completion);
485 485
486 dev_dbg(to_dev(chan), "%s: phys_complete: %#llx\n", __func__, 486 dev_dbg(to_dev(chan), "%s: phys_complete: %#llx\n", __func__,
487 (unsigned long long) phys_complete); 487 (unsigned long long) phys_complete);
488 488
489 return phys_complete; 489 return phys_complete;
490 } 490 }
491 491
492 static bool ioat3_cleanup_preamble(struct ioat_chan_common *chan, 492 static bool ioat3_cleanup_preamble(struct ioat_chan_common *chan,
493 u64 *phys_complete) 493 u64 *phys_complete)
494 { 494 {
495 *phys_complete = ioat3_get_current_completion(chan); 495 *phys_complete = ioat3_get_current_completion(chan);
496 if (*phys_complete == chan->last_completion) 496 if (*phys_complete == chan->last_completion)
497 return false; 497 return false;
498 498
499 clear_bit(IOAT_COMPLETION_ACK, &chan->state); 499 clear_bit(IOAT_COMPLETION_ACK, &chan->state);
500 mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); 500 mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
501 501
502 return true; 502 return true;
503 } 503 }
504 504
505 static void 505 static void
506 desc_get_errstat(struct ioat2_dma_chan *ioat, struct ioat_ring_ent *desc) 506 desc_get_errstat(struct ioat2_dma_chan *ioat, struct ioat_ring_ent *desc)
507 { 507 {
508 struct ioat_dma_descriptor *hw = desc->hw; 508 struct ioat_dma_descriptor *hw = desc->hw;
509 509
510 switch (hw->ctl_f.op) { 510 switch (hw->ctl_f.op) {
511 case IOAT_OP_PQ_VAL: 511 case IOAT_OP_PQ_VAL:
512 case IOAT_OP_PQ_VAL_16S: 512 case IOAT_OP_PQ_VAL_16S:
513 { 513 {
514 struct ioat_pq_descriptor *pq = desc->pq; 514 struct ioat_pq_descriptor *pq = desc->pq;
515 515
516 /* check if there's an error written */ 516 /* check if there's an error written */
517 if (!pq->dwbes_f.wbes) 517 if (!pq->dwbes_f.wbes)
518 return; 518 return;
519 519
520 /* need to set a chanerr variable so it can be checked and cleared later */ 520 /* need to set a chanerr variable so it can be checked and cleared later */
521 521
522 if (pq->dwbes_f.p_val_err) 522 if (pq->dwbes_f.p_val_err)
523 *desc->result |= SUM_CHECK_P_RESULT; 523 *desc->result |= SUM_CHECK_P_RESULT;
524 524
525 if (pq->dwbes_f.q_val_err) 525 if (pq->dwbes_f.q_val_err)
526 *desc->result |= SUM_CHECK_Q_RESULT; 526 *desc->result |= SUM_CHECK_Q_RESULT;
527 527
528 return; 528 return;
529 } 529 }
530 default: 530 default:
531 return; 531 return;
532 } 532 }
533 } 533 }
534 534
535 /** 535 /**
536 * __cleanup - reclaim used descriptors 536 * __cleanup - reclaim used descriptors
537 * @ioat: channel (ring) to clean 537 * @ioat: channel (ring) to clean
538 * 538 *
539 * The difference from the dma_v2.c __cleanup() is that this routine 539 * The difference from the dma_v2.c __cleanup() is that this routine
540 * handles extended descriptors and dma-unmapping raid operations. 540 * handles extended descriptors and dma-unmapping raid operations.
541 */ 541 */
542 static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete) 542 static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete)
543 { 543 {
544 struct ioat_chan_common *chan = &ioat->base; 544 struct ioat_chan_common *chan = &ioat->base;
545 struct ioatdma_device *device = chan->device; 545 struct ioatdma_device *device = chan->device;
546 struct ioat_ring_ent *desc; 546 struct ioat_ring_ent *desc;
547 bool seen_current = false; 547 bool seen_current = false;
548 int idx = ioat->tail, i; 548 int idx = ioat->tail, i;
549 u16 active; 549 u16 active;
550 550
551 dev_dbg(to_dev(chan), "%s: head: %#x tail: %#x issued: %#x\n", 551 dev_dbg(to_dev(chan), "%s: head: %#x tail: %#x issued: %#x\n",
552 __func__, ioat->head, ioat->tail, ioat->issued); 552 __func__, ioat->head, ioat->tail, ioat->issued);
553 553
554 /* 554 /*
555 * At restart of the channel, the completion address and the 555 * At restart of the channel, the completion address and the
556 * channel status will be 0 due to starting a new chain. Since 556 * channel status will be 0 due to starting a new chain. Since
557 * it's a new chain and the first descriptor "fails", there is 557 * it's a new chain and the first descriptor "fails", there is
558 * nothing to clean up. We do not want to reap the entire submitted 558 * nothing to clean up. We do not want to reap the entire submitted
559 * chain due to this 0 address value and then BUG. 559 * chain due to this 0 address value and then BUG.
560 */ 560 */
561 if (!phys_complete) 561 if (!phys_complete)
562 return; 562 return;
563 563
564 active = ioat2_ring_active(ioat); 564 active = ioat2_ring_active(ioat);
565 for (i = 0; i < active && !seen_current; i++) { 565 for (i = 0; i < active && !seen_current; i++) {
566 struct dma_async_tx_descriptor *tx; 566 struct dma_async_tx_descriptor *tx;
567 567
568 smp_read_barrier_depends(); 568 smp_read_barrier_depends();
569 prefetch(ioat2_get_ring_ent(ioat, idx + i + 1)); 569 prefetch(ioat2_get_ring_ent(ioat, idx + i + 1));
570 desc = ioat2_get_ring_ent(ioat, idx + i); 570 desc = ioat2_get_ring_ent(ioat, idx + i);
571 dump_desc_dbg(ioat, desc); 571 dump_desc_dbg(ioat, desc);
572 572
573 /* set err stat if we are using dwbes */ 573 /* set err stat if we are using dwbes */
574 if (device->cap & IOAT_CAP_DWBES) 574 if (device->cap & IOAT_CAP_DWBES)
575 desc_get_errstat(ioat, desc); 575 desc_get_errstat(ioat, desc);
576 576
577 tx = &desc->txd; 577 tx = &desc->txd;
578 if (tx->cookie) { 578 if (tx->cookie) {
579 dma_cookie_complete(tx); 579 dma_cookie_complete(tx);
580 ioat3_dma_unmap(ioat, desc, idx + i); 580 ioat3_dma_unmap(ioat, desc, idx + i);
581 if (tx->callback) { 581 if (tx->callback) {
582 tx->callback(tx->callback_param); 582 tx->callback(tx->callback_param);
583 tx->callback = NULL; 583 tx->callback = NULL;
584 } 584 }
585 } 585 }
586 586
587 if (tx->phys == phys_complete) 587 if (tx->phys == phys_complete)
588 seen_current = true; 588 seen_current = true;
589 589
590 /* skip extended descriptors */ 590 /* skip extended descriptors */
591 if (desc_has_ext(desc)) { 591 if (desc_has_ext(desc)) {
592 BUG_ON(i + 1 >= active); 592 BUG_ON(i + 1 >= active);
593 i++; 593 i++;
594 } 594 }
595 595
596 /* cleanup super extended descriptors */ 596 /* cleanup super extended descriptors */
597 if (desc->sed) { 597 if (desc->sed) {
598 ioat3_free_sed(device, desc->sed); 598 ioat3_free_sed(device, desc->sed);
599 desc->sed = NULL; 599 desc->sed = NULL;
600 } 600 }
601 } 601 }
602 smp_mb(); /* finish all descriptor reads before incrementing tail */ 602 smp_mb(); /* finish all descriptor reads before incrementing tail */
603 ioat->tail = idx + i; 603 ioat->tail = idx + i;
604 BUG_ON(active && !seen_current); /* no active descs have written a completion? */ 604 BUG_ON(active && !seen_current); /* no active descs have written a completion? */
605 chan->last_completion = phys_complete; 605 chan->last_completion = phys_complete;
606 606
607 if (active - i == 0) { 607 if (active - i == 0) {
608 dev_dbg(to_dev(chan), "%s: cancel completion timeout\n", 608 dev_dbg(to_dev(chan), "%s: cancel completion timeout\n",
609 __func__); 609 __func__);
610 clear_bit(IOAT_COMPLETION_PENDING, &chan->state); 610 clear_bit(IOAT_COMPLETION_PENDING, &chan->state);
611 mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT); 611 mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
612 } 612 }
613 /* 5 microsecond delay per pending descriptor */ 613 /* 5 microsecond delay per pending descriptor */
614 writew(min((5 * (active - i)), IOAT_INTRDELAY_MASK), 614 writew(min((5 * (active - i)), IOAT_INTRDELAY_MASK),
615 chan->device->reg_base + IOAT_INTRDELAY_OFFSET); 615 chan->device->reg_base + IOAT_INTRDELAY_OFFSET);
616 } 616 }
617 617
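A sketch of the interrupt-coalescing calculation at the end of __cleanup() above: roughly 5 microseconds of interrupt delay per descriptor still pending, clamped to the register maximum. The helper name is hypothetical and assumes the driver's IOAT_INTRDELAY_MASK definition.

static unsigned int __maybe_unused intrdelay_for(unsigned int pending)
{
	/* ~5 us per pending descriptor, capped at the hardware limit */
	return min(5 * pending, (unsigned int)IOAT_INTRDELAY_MASK);
}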
618 static void ioat3_cleanup(struct ioat2_dma_chan *ioat) 618 static void ioat3_cleanup(struct ioat2_dma_chan *ioat)
619 { 619 {
620 struct ioat_chan_common *chan = &ioat->base; 620 struct ioat_chan_common *chan = &ioat->base;
621 u64 phys_complete; 621 u64 phys_complete;
622 622
623 spin_lock_bh(&chan->cleanup_lock); 623 spin_lock_bh(&chan->cleanup_lock);
624 624
625 if (ioat3_cleanup_preamble(chan, &phys_complete)) 625 if (ioat3_cleanup_preamble(chan, &phys_complete))
626 __cleanup(ioat, phys_complete); 626 __cleanup(ioat, phys_complete);
627 627
628 if (is_ioat_halted(*chan->completion)) { 628 if (is_ioat_halted(*chan->completion)) {
629 u32 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET); 629 u32 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
630 630
631 if (chanerr & IOAT_CHANERR_HANDLE_MASK) { 631 if (chanerr & IOAT_CHANERR_HANDLE_MASK) {
632 mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT); 632 mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
633 ioat3_eh(ioat); 633 ioat3_eh(ioat);
634 } 634 }
635 } 635 }
636 636
637 spin_unlock_bh(&chan->cleanup_lock); 637 spin_unlock_bh(&chan->cleanup_lock);
638 } 638 }
639 639
640 static void ioat3_cleanup_event(unsigned long data) 640 static void ioat3_cleanup_event(unsigned long data)
641 { 641 {
642 struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data); 642 struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
643 643
644 ioat3_cleanup(ioat); 644 ioat3_cleanup(ioat);
645 writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET); 645 writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
646 } 646 }
647 647
648 static void ioat3_restart_channel(struct ioat2_dma_chan *ioat) 648 static void ioat3_restart_channel(struct ioat2_dma_chan *ioat)
649 { 649 {
650 struct ioat_chan_common *chan = &ioat->base; 650 struct ioat_chan_common *chan = &ioat->base;
651 u64 phys_complete; 651 u64 phys_complete;
652 652
653 ioat2_quiesce(chan, 0); 653 ioat2_quiesce(chan, 0);
654 if (ioat3_cleanup_preamble(chan, &phys_complete)) 654 if (ioat3_cleanup_preamble(chan, &phys_complete))
655 __cleanup(ioat, phys_complete); 655 __cleanup(ioat, phys_complete);
656 656
657 __ioat2_restart_chan(ioat); 657 __ioat2_restart_chan(ioat);
658 } 658 }
659 659
660 static void ioat3_eh(struct ioat2_dma_chan *ioat) 660 static void ioat3_eh(struct ioat2_dma_chan *ioat)
661 { 661 {
662 struct ioat_chan_common *chan = &ioat->base; 662 struct ioat_chan_common *chan = &ioat->base;
663 struct pci_dev *pdev = to_pdev(chan); 663 struct pci_dev *pdev = to_pdev(chan);
664 struct ioat_dma_descriptor *hw; 664 struct ioat_dma_descriptor *hw;
665 u64 phys_complete; 665 u64 phys_complete;
666 struct ioat_ring_ent *desc; 666 struct ioat_ring_ent *desc;
667 u32 err_handled = 0; 667 u32 err_handled = 0;
668 u32 chanerr_int; 668 u32 chanerr_int;
669 u32 chanerr; 669 u32 chanerr;
670 670
671 /* cleanup so tail points to descriptor that caused the error */ 671 /* cleanup so tail points to descriptor that caused the error */
672 if (ioat3_cleanup_preamble(chan, &phys_complete)) 672 if (ioat3_cleanup_preamble(chan, &phys_complete))
673 __cleanup(ioat, phys_complete); 673 __cleanup(ioat, phys_complete);
674 674
675 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET); 675 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
676 pci_read_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, &chanerr_int); 676 pci_read_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, &chanerr_int);
677 677
678 dev_dbg(to_dev(chan), "%s: error = %x:%x\n", 678 dev_dbg(to_dev(chan), "%s: error = %x:%x\n",
679 __func__, chanerr, chanerr_int); 679 __func__, chanerr, chanerr_int);
680 680
681 desc = ioat2_get_ring_ent(ioat, ioat->tail); 681 desc = ioat2_get_ring_ent(ioat, ioat->tail);
682 hw = desc->hw; 682 hw = desc->hw;
683 dump_desc_dbg(ioat, desc); 683 dump_desc_dbg(ioat, desc);
684 684
685 switch (hw->ctl_f.op) { 685 switch (hw->ctl_f.op) {
686 case IOAT_OP_XOR_VAL: 686 case IOAT_OP_XOR_VAL:
687 if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) { 687 if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) {
688 *desc->result |= SUM_CHECK_P_RESULT; 688 *desc->result |= SUM_CHECK_P_RESULT;
689 err_handled |= IOAT_CHANERR_XOR_P_OR_CRC_ERR; 689 err_handled |= IOAT_CHANERR_XOR_P_OR_CRC_ERR;
690 } 690 }
691 break; 691 break;
692 case IOAT_OP_PQ_VAL: 692 case IOAT_OP_PQ_VAL:
693 case IOAT_OP_PQ_VAL_16S: 693 case IOAT_OP_PQ_VAL_16S:
694 if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) { 694 if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) {
695 *desc->result |= SUM_CHECK_P_RESULT; 695 *desc->result |= SUM_CHECK_P_RESULT;
696 err_handled |= IOAT_CHANERR_XOR_P_OR_CRC_ERR; 696 err_handled |= IOAT_CHANERR_XOR_P_OR_CRC_ERR;
697 } 697 }
698 if (chanerr & IOAT_CHANERR_XOR_Q_ERR) { 698 if (chanerr & IOAT_CHANERR_XOR_Q_ERR) {
699 *desc->result |= SUM_CHECK_Q_RESULT; 699 *desc->result |= SUM_CHECK_Q_RESULT;
700 err_handled |= IOAT_CHANERR_XOR_Q_ERR; 700 err_handled |= IOAT_CHANERR_XOR_Q_ERR;
701 } 701 }
702 break; 702 break;
703 } 703 }
704 704
705 /* fault on unhandled error or spurious halt */ 705 /* fault on unhandled error or spurious halt */
706 if (chanerr ^ err_handled || chanerr == 0) { 706 if (chanerr ^ err_handled || chanerr == 0) {
707 dev_err(to_dev(chan), "%s: fatal error (%x:%x)\n", 707 dev_err(to_dev(chan), "%s: fatal error (%x:%x)\n",
708 __func__, chanerr, err_handled); 708 __func__, chanerr, err_handled);
709 BUG(); 709 BUG();
710 } 710 }
711 711
712 writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET); 712 writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
713 pci_write_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, chanerr_int); 713 pci_write_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, chanerr_int);
714 714
715 /* mark faulting descriptor as complete */ 715 /* mark faulting descriptor as complete */
716 *chan->completion = desc->txd.phys; 716 *chan->completion = desc->txd.phys;
717 717
718 spin_lock_bh(&ioat->prep_lock); 718 spin_lock_bh(&ioat->prep_lock);
719 ioat3_restart_channel(ioat); 719 ioat3_restart_channel(ioat);
720 spin_unlock_bh(&ioat->prep_lock); 720 spin_unlock_bh(&ioat->prep_lock);
721 } 721 }
722 722
723 static void check_active(struct ioat2_dma_chan *ioat) 723 static void check_active(struct ioat2_dma_chan *ioat)
724 { 724 {
725 struct ioat_chan_common *chan = &ioat->base; 725 struct ioat_chan_common *chan = &ioat->base;
726 726
727 if (ioat2_ring_active(ioat)) { 727 if (ioat2_ring_active(ioat)) {
728 mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); 728 mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
729 return; 729 return;
730 } 730 }
731 731
732 if (test_and_clear_bit(IOAT_CHAN_ACTIVE, &chan->state)) 732 if (test_and_clear_bit(IOAT_CHAN_ACTIVE, &chan->state))
733 mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT); 733 mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
734 else if (ioat->alloc_order > ioat_get_alloc_order()) { 734 else if (ioat->alloc_order > ioat_get_alloc_order()) {
735 /* if the ring is idle, empty, and oversized, try to step 735 /* if the ring is idle, empty, and oversized, try to step
736 * down the size 736 * down the size
737 */ 737 */
738 reshape_ring(ioat, ioat->alloc_order - 1); 738 reshape_ring(ioat, ioat->alloc_order - 1);
739 739
740 /* keep shrinking until we get back to our minimum 740 /* keep shrinking until we get back to our minimum
741 * default size 741 * default size
742 */ 742 */
743 if (ioat->alloc_order > ioat_get_alloc_order()) 743 if (ioat->alloc_order > ioat_get_alloc_order())
744 mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT); 744 mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
745 } 745 }
746 746
747 } 747 }
748 748
749 static void ioat3_timer_event(unsigned long data) 749 static void ioat3_timer_event(unsigned long data)
750 { 750 {
751 struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data); 751 struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
752 struct ioat_chan_common *chan = &ioat->base; 752 struct ioat_chan_common *chan = &ioat->base;
753 dma_addr_t phys_complete; 753 dma_addr_t phys_complete;
754 u64 status; 754 u64 status;
755 755
756 status = ioat_chansts(chan); 756 status = ioat_chansts(chan);
757 757
758 /* when halted due to errors check for channel 758 /* when halted due to errors check for channel
759 * programming errors before advancing the completion state 759 * programming errors before advancing the completion state
760 */ 760 */
761 if (is_ioat_halted(status)) { 761 if (is_ioat_halted(status)) {
762 u32 chanerr; 762 u32 chanerr;
763 763
764 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET); 764 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
765 dev_err(to_dev(chan), "%s: Channel halted (%x)\n", 765 dev_err(to_dev(chan), "%s: Channel halted (%x)\n",
766 __func__, chanerr); 766 __func__, chanerr);
767 if (test_bit(IOAT_RUN, &chan->state)) 767 if (test_bit(IOAT_RUN, &chan->state))
768 BUG_ON(is_ioat_bug(chanerr)); 768 BUG_ON(is_ioat_bug(chanerr));
769 else /* we never got off the ground */ 769 else /* we never got off the ground */
770 return; 770 return;
771 } 771 }
772 772
773 /* if we haven't made progress and we have already 773 /* if we haven't made progress and we have already
774 * acknowledged a pending completion once, then be more 774 * acknowledged a pending completion once, then be more
775 * forceful with a restart 775 * forceful with a restart
776 */ 776 */
777 spin_lock_bh(&chan->cleanup_lock); 777 spin_lock_bh(&chan->cleanup_lock);
778 if (ioat_cleanup_preamble(chan, &phys_complete)) 778 if (ioat_cleanup_preamble(chan, &phys_complete))
779 __cleanup(ioat, phys_complete); 779 __cleanup(ioat, phys_complete);
780 else if (test_bit(IOAT_COMPLETION_ACK, &chan->state)) { 780 else if (test_bit(IOAT_COMPLETION_ACK, &chan->state)) {
781 spin_lock_bh(&ioat->prep_lock); 781 spin_lock_bh(&ioat->prep_lock);
782 ioat3_restart_channel(ioat); 782 ioat3_restart_channel(ioat);
783 spin_unlock_bh(&ioat->prep_lock); 783 spin_unlock_bh(&ioat->prep_lock);
784 spin_unlock_bh(&chan->cleanup_lock); 784 spin_unlock_bh(&chan->cleanup_lock);
785 return; 785 return;
786 } else { 786 } else {
787 set_bit(IOAT_COMPLETION_ACK, &chan->state); 787 set_bit(IOAT_COMPLETION_ACK, &chan->state);
788 mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); 788 mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
789 } 789 }
790 790
791 791
792 if (ioat2_ring_active(ioat)) 792 if (ioat2_ring_active(ioat))
793 mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); 793 mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
794 else { 794 else {
795 spin_lock_bh(&ioat->prep_lock); 795 spin_lock_bh(&ioat->prep_lock);
796 check_active(ioat); 796 check_active(ioat);
797 spin_unlock_bh(&ioat->prep_lock); 797 spin_unlock_bh(&ioat->prep_lock);
798 } 798 }
799 spin_unlock_bh(&chan->cleanup_lock); 799 spin_unlock_bh(&chan->cleanup_lock);
800 } 800 }
801 801
802 static enum dma_status 802 static enum dma_status
803 ioat3_tx_status(struct dma_chan *c, dma_cookie_t cookie, 803 ioat3_tx_status(struct dma_chan *c, dma_cookie_t cookie,
804 struct dma_tx_state *txstate) 804 struct dma_tx_state *txstate)
805 { 805 {
806 struct ioat2_dma_chan *ioat = to_ioat2_chan(c); 806 struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
807 enum dma_status ret; 807 enum dma_status ret;
808 808
809 ret = dma_cookie_status(c, cookie, txstate); 809 ret = dma_cookie_status(c, cookie, txstate);
810 if (ret == DMA_SUCCESS) 810 if (ret == DMA_COMPLETE)
811 return ret; 811 return ret;
812 812
813 ioat3_cleanup(ioat); 813 ioat3_cleanup(ioat);
814 814
815 return dma_cookie_status(c, cookie, txstate); 815 return dma_cookie_status(c, cookie, txstate);
816 } 816 }
817 817
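The DMA_SUCCESS -> DMA_COMPLETE change in ioat3_tx_status() above is the point of this merge; below is a minimal sketch of how a dmaengine client would poll a cookie against the renamed state (helper name hypothetical; DMA_IN_PROGRESS and DMA_ERROR are unchanged).

static bool __maybe_unused copy_is_done(struct dma_chan *chan, dma_cookie_t cookie)
{
	enum dma_status status = dmaengine_tx_status(chan, cookie, NULL);

	return status == DMA_COMPLETE;
}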
818 static struct dma_async_tx_descriptor * 818 static struct dma_async_tx_descriptor *
819 __ioat3_prep_xor_lock(struct dma_chan *c, enum sum_check_flags *result, 819 __ioat3_prep_xor_lock(struct dma_chan *c, enum sum_check_flags *result,
820 dma_addr_t dest, dma_addr_t *src, unsigned int src_cnt, 820 dma_addr_t dest, dma_addr_t *src, unsigned int src_cnt,
821 size_t len, unsigned long flags) 821 size_t len, unsigned long flags)
822 { 822 {
823 struct ioat2_dma_chan *ioat = to_ioat2_chan(c); 823 struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
824 struct ioat_ring_ent *compl_desc; 824 struct ioat_ring_ent *compl_desc;
825 struct ioat_ring_ent *desc; 825 struct ioat_ring_ent *desc;
826 struct ioat_ring_ent *ext; 826 struct ioat_ring_ent *ext;
827 size_t total_len = len; 827 size_t total_len = len;
828 struct ioat_xor_descriptor *xor; 828 struct ioat_xor_descriptor *xor;
829 struct ioat_xor_ext_descriptor *xor_ex = NULL; 829 struct ioat_xor_ext_descriptor *xor_ex = NULL;
830 struct ioat_dma_descriptor *hw; 830 struct ioat_dma_descriptor *hw;
831 int num_descs, with_ext, idx, i; 831 int num_descs, with_ext, idx, i;
832 u32 offset = 0; 832 u32 offset = 0;
833 u8 op = result ? IOAT_OP_XOR_VAL : IOAT_OP_XOR; 833 u8 op = result ? IOAT_OP_XOR_VAL : IOAT_OP_XOR;
834 834
835 BUG_ON(src_cnt < 2); 835 BUG_ON(src_cnt < 2);
836 836
837 num_descs = ioat2_xferlen_to_descs(ioat, len); 837 num_descs = ioat2_xferlen_to_descs(ioat, len);
838 /* we need 2x the number of descriptors to cover more than 5 838 /* we need 2x the number of descriptors to cover more than 5
839 * sources 839 * sources
840 */ 840 */
841 if (src_cnt > 5) { 841 if (src_cnt > 5) {
842 with_ext = 1; 842 with_ext = 1;
843 num_descs *= 2; 843 num_descs *= 2;
844 } else 844 } else
845 with_ext = 0; 845 with_ext = 0;
846 846
847 /* completion writes from the raid engine may pass completion 847 /* completion writes from the raid engine may pass completion
848 * writes from the legacy engine, so we need one extra null 848 * writes from the legacy engine, so we need one extra null
849 * (legacy) descriptor to ensure all completion writes arrive in 849 * (legacy) descriptor to ensure all completion writes arrive in
850 * order. 850 * order.
851 */ 851 */
852 if (likely(num_descs) && ioat2_check_space_lock(ioat, num_descs+1) == 0) 852 if (likely(num_descs) && ioat2_check_space_lock(ioat, num_descs+1) == 0)
853 idx = ioat->head; 853 idx = ioat->head;
854 else 854 else
855 return NULL; 855 return NULL;
856 i = 0; 856 i = 0;
857 do { 857 do {
858 struct ioat_raw_descriptor *descs[2]; 858 struct ioat_raw_descriptor *descs[2];
859 size_t xfer_size = min_t(size_t, len, 1 << ioat->xfercap_log); 859 size_t xfer_size = min_t(size_t, len, 1 << ioat->xfercap_log);
860 int s; 860 int s;
861 861
862 desc = ioat2_get_ring_ent(ioat, idx + i); 862 desc = ioat2_get_ring_ent(ioat, idx + i);
863 xor = desc->xor; 863 xor = desc->xor;
864 864
865 /* save a branch by unconditionally retrieving the 865 /* save a branch by unconditionally retrieving the
866 * extended descriptor; xor_set_src() knows not to write 866 * extended descriptor; xor_set_src() knows not to write
867 * to it in the single descriptor case 867 * to it in the single descriptor case
868 */ 868 */
869 ext = ioat2_get_ring_ent(ioat, idx + i + 1); 869 ext = ioat2_get_ring_ent(ioat, idx + i + 1);
870 xor_ex = ext->xor_ex; 870 xor_ex = ext->xor_ex;
871 871
872 descs[0] = (struct ioat_raw_descriptor *) xor; 872 descs[0] = (struct ioat_raw_descriptor *) xor;
873 descs[1] = (struct ioat_raw_descriptor *) xor_ex; 873 descs[1] = (struct ioat_raw_descriptor *) xor_ex;
874 for (s = 0; s < src_cnt; s++) 874 for (s = 0; s < src_cnt; s++)
875 xor_set_src(descs, src[s], offset, s); 875 xor_set_src(descs, src[s], offset, s);
876 xor->size = xfer_size; 876 xor->size = xfer_size;
877 xor->dst_addr = dest + offset; 877 xor->dst_addr = dest + offset;
878 xor->ctl = 0; 878 xor->ctl = 0;
879 xor->ctl_f.op = op; 879 xor->ctl_f.op = op;
880 xor->ctl_f.src_cnt = src_cnt_to_hw(src_cnt); 880 xor->ctl_f.src_cnt = src_cnt_to_hw(src_cnt);
881 881
882 len -= xfer_size; 882 len -= xfer_size;
883 offset += xfer_size; 883 offset += xfer_size;
884 dump_desc_dbg(ioat, desc); 884 dump_desc_dbg(ioat, desc);
885 } while ((i += 1 + with_ext) < num_descs); 885 } while ((i += 1 + with_ext) < num_descs);
886 886
887 /* last xor descriptor carries the unmap parameters and fence bit */ 887 /* last xor descriptor carries the unmap parameters and fence bit */
888 desc->txd.flags = flags; 888 desc->txd.flags = flags;
889 desc->len = total_len; 889 desc->len = total_len;
890 if (result) 890 if (result)
891 desc->result = result; 891 desc->result = result;
892 xor->ctl_f.fence = !!(flags & DMA_PREP_FENCE); 892 xor->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
893 893
894 /* completion descriptor carries interrupt bit */ 894 /* completion descriptor carries interrupt bit */
895 compl_desc = ioat2_get_ring_ent(ioat, idx + i); 895 compl_desc = ioat2_get_ring_ent(ioat, idx + i);
896 compl_desc->txd.flags = flags & DMA_PREP_INTERRUPT; 896 compl_desc->txd.flags = flags & DMA_PREP_INTERRUPT;
897 hw = compl_desc->hw; 897 hw = compl_desc->hw;
898 hw->ctl = 0; 898 hw->ctl = 0;
899 hw->ctl_f.null = 1; 899 hw->ctl_f.null = 1;
900 hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT); 900 hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
901 hw->ctl_f.compl_write = 1; 901 hw->ctl_f.compl_write = 1;
902 hw->size = NULL_DESC_BUFFER_SIZE; 902 hw->size = NULL_DESC_BUFFER_SIZE;
903 dump_desc_dbg(ioat, compl_desc); 903 dump_desc_dbg(ioat, compl_desc);
904 904
905 /* we leave the channel locked to ensure in order submission */ 905 /* we leave the channel locked to ensure in order submission */
906 return &compl_desc->txd; 906 return &compl_desc->txd;
907 } 907 }
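
The slot accounting in __ioat3_prep_xor_lock() above is compact enough to restate on its own: one ring entry per xfercap-sized segment, doubled when more than five sources force an extended descriptor, plus one trailing null descriptor so RAID completion writes cannot overtake legacy ones. The helper below is an illustrative restatement under the assumption that ioat2_xferlen_to_descs() rounds the length up by the channel's transfer cap; it is not driver code.

	#include <linux/kernel.h>

	/*
	 * Illustrative ring-slot budget for the XOR prep above (assumes
	 * ioat2_xferlen_to_descs() behaves like DIV_ROUND_UP(len, 1 << xfercap_log)).
	 */
	static int example_xor_ring_slots(size_t len, unsigned int src_cnt,
					  u8 xfercap_log)
	{
		int num_descs = DIV_ROUND_UP(len, 1ul << xfercap_log);

		if (src_cnt > 5)	/* sources spill into an extended descriptor */
			num_descs *= 2;

		return num_descs + 1;	/* +1 null descriptor for ordered completions */
	}
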
908 908
909 static struct dma_async_tx_descriptor * 909 static struct dma_async_tx_descriptor *
910 ioat3_prep_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src, 910 ioat3_prep_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
911 unsigned int src_cnt, size_t len, unsigned long flags) 911 unsigned int src_cnt, size_t len, unsigned long flags)
912 { 912 {
913 return __ioat3_prep_xor_lock(chan, NULL, dest, src, src_cnt, len, flags); 913 return __ioat3_prep_xor_lock(chan, NULL, dest, src, src_cnt, len, flags);
914 } 914 }
915 915
916 struct dma_async_tx_descriptor * 916 struct dma_async_tx_descriptor *
917 ioat3_prep_xor_val(struct dma_chan *chan, dma_addr_t *src, 917 ioat3_prep_xor_val(struct dma_chan *chan, dma_addr_t *src,
918 unsigned int src_cnt, size_t len, 918 unsigned int src_cnt, size_t len,
919 enum sum_check_flags *result, unsigned long flags) 919 enum sum_check_flags *result, unsigned long flags)
920 { 920 {
921 /* the cleanup routine only sets bits on validate failure, it 921 /* the cleanup routine only sets bits on validate failure, it
922 * does not clear bits on validate success... so clear it here 922 * does not clear bits on validate success... so clear it here
923 */ 923 */
924 *result = 0; 924 *result = 0;
925 925
926 return __ioat3_prep_xor_lock(chan, result, src[0], &src[1], 926 return __ioat3_prep_xor_lock(chan, result, src[0], &src[1],
927 src_cnt - 1, len, flags); 927 src_cnt - 1, len, flags);
928 } 928 }
929 929
930 static void 930 static void
931 dump_pq_desc_dbg(struct ioat2_dma_chan *ioat, struct ioat_ring_ent *desc, struct ioat_ring_ent *ext) 931 dump_pq_desc_dbg(struct ioat2_dma_chan *ioat, struct ioat_ring_ent *desc, struct ioat_ring_ent *ext)
932 { 932 {
933 struct device *dev = to_dev(&ioat->base); 933 struct device *dev = to_dev(&ioat->base);
934 struct ioat_pq_descriptor *pq = desc->pq; 934 struct ioat_pq_descriptor *pq = desc->pq;
935 struct ioat_pq_ext_descriptor *pq_ex = ext ? ext->pq_ex : NULL; 935 struct ioat_pq_ext_descriptor *pq_ex = ext ? ext->pq_ex : NULL;
936 struct ioat_raw_descriptor *descs[] = { (void *) pq, (void *) pq_ex }; 936 struct ioat_raw_descriptor *descs[] = { (void *) pq, (void *) pq_ex };
937 int src_cnt = src_cnt_to_sw(pq->ctl_f.src_cnt); 937 int src_cnt = src_cnt_to_sw(pq->ctl_f.src_cnt);
938 int i; 938 int i;
939 939
940 dev_dbg(dev, "desc[%d]: (%#llx->%#llx) flags: %#x" 940 dev_dbg(dev, "desc[%d]: (%#llx->%#llx) flags: %#x"
941 " sz: %#10.8x ctl: %#x (op: %#x int: %d compl: %d pq: '%s%s'" 941 " sz: %#10.8x ctl: %#x (op: %#x int: %d compl: %d pq: '%s%s'"
942 " src_cnt: %d)\n", 942 " src_cnt: %d)\n",
943 desc_id(desc), (unsigned long long) desc->txd.phys, 943 desc_id(desc), (unsigned long long) desc->txd.phys,
944 (unsigned long long) (pq_ex ? pq_ex->next : pq->next), 944 (unsigned long long) (pq_ex ? pq_ex->next : pq->next),
945 desc->txd.flags, pq->size, pq->ctl, pq->ctl_f.op, pq->ctl_f.int_en, 945 desc->txd.flags, pq->size, pq->ctl, pq->ctl_f.op, pq->ctl_f.int_en,
946 pq->ctl_f.compl_write, 946 pq->ctl_f.compl_write,
947 pq->ctl_f.p_disable ? "" : "p", pq->ctl_f.q_disable ? "" : "q", 947 pq->ctl_f.p_disable ? "" : "p", pq->ctl_f.q_disable ? "" : "q",
948 pq->ctl_f.src_cnt); 948 pq->ctl_f.src_cnt);
949 for (i = 0; i < src_cnt; i++) 949 for (i = 0; i < src_cnt; i++)
950 dev_dbg(dev, "\tsrc[%d]: %#llx coef: %#x\n", i, 950 dev_dbg(dev, "\tsrc[%d]: %#llx coef: %#x\n", i,
951 (unsigned long long) pq_get_src(descs, i), pq->coef[i]); 951 (unsigned long long) pq_get_src(descs, i), pq->coef[i]);
952 dev_dbg(dev, "\tP: %#llx\n", pq->p_addr); 952 dev_dbg(dev, "\tP: %#llx\n", pq->p_addr);
953 dev_dbg(dev, "\tQ: %#llx\n", pq->q_addr); 953 dev_dbg(dev, "\tQ: %#llx\n", pq->q_addr);
954 dev_dbg(dev, "\tNEXT: %#llx\n", pq->next); 954 dev_dbg(dev, "\tNEXT: %#llx\n", pq->next);
955 } 955 }
956 956
957 static void dump_pq16_desc_dbg(struct ioat2_dma_chan *ioat, 957 static void dump_pq16_desc_dbg(struct ioat2_dma_chan *ioat,
958 struct ioat_ring_ent *desc) 958 struct ioat_ring_ent *desc)
959 { 959 {
960 struct device *dev = to_dev(&ioat->base); 960 struct device *dev = to_dev(&ioat->base);
961 struct ioat_pq_descriptor *pq = desc->pq; 961 struct ioat_pq_descriptor *pq = desc->pq;
962 struct ioat_raw_descriptor *descs[] = { (void *)pq, 962 struct ioat_raw_descriptor *descs[] = { (void *)pq,
963 (void *)pq, 963 (void *)pq,
964 (void *)pq }; 964 (void *)pq };
965 int src_cnt = src16_cnt_to_sw(pq->ctl_f.src_cnt); 965 int src_cnt = src16_cnt_to_sw(pq->ctl_f.src_cnt);
966 int i; 966 int i;
967 967
968 if (desc->sed) { 968 if (desc->sed) {
969 descs[1] = (void *)desc->sed->hw; 969 descs[1] = (void *)desc->sed->hw;
970 descs[2] = (void *)desc->sed->hw + 64; 970 descs[2] = (void *)desc->sed->hw + 64;
971 } 971 }
972 972
973 dev_dbg(dev, "desc[%d]: (%#llx->%#llx) flags: %#x" 973 dev_dbg(dev, "desc[%d]: (%#llx->%#llx) flags: %#x"
974 " sz: %#x ctl: %#x (op: %#x int: %d compl: %d pq: '%s%s'" 974 " sz: %#x ctl: %#x (op: %#x int: %d compl: %d pq: '%s%s'"
975 " src_cnt: %d)\n", 975 " src_cnt: %d)\n",
976 desc_id(desc), (unsigned long long) desc->txd.phys, 976 desc_id(desc), (unsigned long long) desc->txd.phys,
977 (unsigned long long) pq->next, 977 (unsigned long long) pq->next,
978 desc->txd.flags, pq->size, pq->ctl, 978 desc->txd.flags, pq->size, pq->ctl,
979 pq->ctl_f.op, pq->ctl_f.int_en, 979 pq->ctl_f.op, pq->ctl_f.int_en,
980 pq->ctl_f.compl_write, 980 pq->ctl_f.compl_write,
981 pq->ctl_f.p_disable ? "" : "p", pq->ctl_f.q_disable ? "" : "q", 981 pq->ctl_f.p_disable ? "" : "p", pq->ctl_f.q_disable ? "" : "q",
982 pq->ctl_f.src_cnt); 982 pq->ctl_f.src_cnt);
983 for (i = 0; i < src_cnt; i++) { 983 for (i = 0; i < src_cnt; i++) {
984 dev_dbg(dev, "\tsrc[%d]: %#llx coef: %#x\n", i, 984 dev_dbg(dev, "\tsrc[%d]: %#llx coef: %#x\n", i,
985 (unsigned long long) pq16_get_src(descs, i), 985 (unsigned long long) pq16_get_src(descs, i),
986 pq->coef[i]); 986 pq->coef[i]);
987 } 987 }
988 dev_dbg(dev, "\tP: %#llx\n", pq->p_addr); 988 dev_dbg(dev, "\tP: %#llx\n", pq->p_addr);
989 dev_dbg(dev, "\tQ: %#llx\n", pq->q_addr); 989 dev_dbg(dev, "\tQ: %#llx\n", pq->q_addr);
990 } 990 }
991 991
992 static struct dma_async_tx_descriptor * 992 static struct dma_async_tx_descriptor *
993 __ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result, 993 __ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result,
994 const dma_addr_t *dst, const dma_addr_t *src, 994 const dma_addr_t *dst, const dma_addr_t *src,
995 unsigned int src_cnt, const unsigned char *scf, 995 unsigned int src_cnt, const unsigned char *scf,
996 size_t len, unsigned long flags) 996 size_t len, unsigned long flags)
997 { 997 {
998 struct ioat2_dma_chan *ioat = to_ioat2_chan(c); 998 struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
999 struct ioat_chan_common *chan = &ioat->base; 999 struct ioat_chan_common *chan = &ioat->base;
1000 struct ioatdma_device *device = chan->device; 1000 struct ioatdma_device *device = chan->device;
1001 struct ioat_ring_ent *compl_desc; 1001 struct ioat_ring_ent *compl_desc;
1002 struct ioat_ring_ent *desc; 1002 struct ioat_ring_ent *desc;
1003 struct ioat_ring_ent *ext; 1003 struct ioat_ring_ent *ext;
1004 size_t total_len = len; 1004 size_t total_len = len;
1005 struct ioat_pq_descriptor *pq; 1005 struct ioat_pq_descriptor *pq;
1006 struct ioat_pq_ext_descriptor *pq_ex = NULL; 1006 struct ioat_pq_ext_descriptor *pq_ex = NULL;
1007 struct ioat_dma_descriptor *hw; 1007 struct ioat_dma_descriptor *hw;
1008 u32 offset = 0; 1008 u32 offset = 0;
1009 u8 op = result ? IOAT_OP_PQ_VAL : IOAT_OP_PQ; 1009 u8 op = result ? IOAT_OP_PQ_VAL : IOAT_OP_PQ;
1010 int i, s, idx, with_ext, num_descs; 1010 int i, s, idx, with_ext, num_descs;
1011 int cb32 = (device->version < IOAT_VER_3_3) ? 1 : 0; 1011 int cb32 = (device->version < IOAT_VER_3_3) ? 1 : 0;
1012 1012
1013 dev_dbg(to_dev(chan), "%s\n", __func__); 1013 dev_dbg(to_dev(chan), "%s\n", __func__);
1014 /* the engine requires at least two sources (we provide 1014 /* the engine requires at least two sources (we provide
1015 * at least 1 implied source in the DMA_PREP_CONTINUE case) 1015 * at least 1 implied source in the DMA_PREP_CONTINUE case)
1016 */ 1016 */
1017 BUG_ON(src_cnt + dmaf_continue(flags) < 2); 1017 BUG_ON(src_cnt + dmaf_continue(flags) < 2);
1018 1018
1019 num_descs = ioat2_xferlen_to_descs(ioat, len); 1019 num_descs = ioat2_xferlen_to_descs(ioat, len);
1020 /* we need 2x the number of descriptors to cover more than 3 1020 /* we need 2x the number of descriptors to cover more than 3
1021 * sources (we need 1 extra source in the q-only continuation 1021 * sources (we need 1 extra source in the q-only continuation
1022 * case and 3 extra sources in the p+q continuation case). 1022 * case and 3 extra sources in the p+q continuation case).
1023 */ 1023 */
1024 if (src_cnt + dmaf_p_disabled_continue(flags) > 3 || 1024 if (src_cnt + dmaf_p_disabled_continue(flags) > 3 ||
1025 (dmaf_continue(flags) && !dmaf_p_disabled_continue(flags))) { 1025 (dmaf_continue(flags) && !dmaf_p_disabled_continue(flags))) {
1026 with_ext = 1; 1026 with_ext = 1;
1027 num_descs *= 2; 1027 num_descs *= 2;
1028 } else 1028 } else
1029 with_ext = 0; 1029 with_ext = 0;
1030 1030
1031 /* completion writes from the raid engine may pass completion 1031 /* completion writes from the raid engine may pass completion
1032 * writes from the legacy engine, so we need one extra null 1032 * writes from the legacy engine, so we need one extra null
1033 * (legacy) descriptor to ensure all completion writes arrive in 1033 * (legacy) descriptor to ensure all completion writes arrive in
1034 * order. 1034 * order.
1035 */ 1035 */
1036 if (likely(num_descs) && 1036 if (likely(num_descs) &&
1037 ioat2_check_space_lock(ioat, num_descs + cb32) == 0) 1037 ioat2_check_space_lock(ioat, num_descs + cb32) == 0)
1038 idx = ioat->head; 1038 idx = ioat->head;
1039 else 1039 else
1040 return NULL; 1040 return NULL;
1041 i = 0; 1041 i = 0;
1042 do { 1042 do {
1043 struct ioat_raw_descriptor *descs[2]; 1043 struct ioat_raw_descriptor *descs[2];
1044 size_t xfer_size = min_t(size_t, len, 1 << ioat->xfercap_log); 1044 size_t xfer_size = min_t(size_t, len, 1 << ioat->xfercap_log);
1045 1045
1046 desc = ioat2_get_ring_ent(ioat, idx + i); 1046 desc = ioat2_get_ring_ent(ioat, idx + i);
1047 pq = desc->pq; 1047 pq = desc->pq;
1048 1048
1049 /* save a branch by unconditionally retrieving the 1049 /* save a branch by unconditionally retrieving the
1050 * extended descriptor; pq_set_src() knows not to write 1050 * extended descriptor; pq_set_src() knows not to write
1051 * to it in the single descriptor case 1051 * to it in the single descriptor case
1052 */ 1052 */
1053 ext = ioat2_get_ring_ent(ioat, idx + i + with_ext); 1053 ext = ioat2_get_ring_ent(ioat, idx + i + with_ext);
1054 pq_ex = ext->pq_ex; 1054 pq_ex = ext->pq_ex;
1055 1055
1056 descs[0] = (struct ioat_raw_descriptor *) pq; 1056 descs[0] = (struct ioat_raw_descriptor *) pq;
1057 descs[1] = (struct ioat_raw_descriptor *) pq_ex; 1057 descs[1] = (struct ioat_raw_descriptor *) pq_ex;
1058 1058
1059 for (s = 0; s < src_cnt; s++) 1059 for (s = 0; s < src_cnt; s++)
1060 pq_set_src(descs, src[s], offset, scf[s], s); 1060 pq_set_src(descs, src[s], offset, scf[s], s);
1061 1061
1062 /* see the comment for dma_maxpq in include/linux/dmaengine.h */ 1062 /* see the comment for dma_maxpq in include/linux/dmaengine.h */
1063 if (dmaf_p_disabled_continue(flags)) 1063 if (dmaf_p_disabled_continue(flags))
1064 pq_set_src(descs, dst[1], offset, 1, s++); 1064 pq_set_src(descs, dst[1], offset, 1, s++);
1065 else if (dmaf_continue(flags)) { 1065 else if (dmaf_continue(flags)) {
1066 pq_set_src(descs, dst[0], offset, 0, s++); 1066 pq_set_src(descs, dst[0], offset, 0, s++);
1067 pq_set_src(descs, dst[1], offset, 1, s++); 1067 pq_set_src(descs, dst[1], offset, 1, s++);
1068 pq_set_src(descs, dst[1], offset, 0, s++); 1068 pq_set_src(descs, dst[1], offset, 0, s++);
1069 } 1069 }
1070 pq->size = xfer_size; 1070 pq->size = xfer_size;
1071 pq->p_addr = dst[0] + offset; 1071 pq->p_addr = dst[0] + offset;
1072 pq->q_addr = dst[1] + offset; 1072 pq->q_addr = dst[1] + offset;
1073 pq->ctl = 0; 1073 pq->ctl = 0;
1074 pq->ctl_f.op = op; 1074 pq->ctl_f.op = op;
1075 /* we turn on descriptor write back error status */ 1075 /* we turn on descriptor write back error status */
1076 if (device->cap & IOAT_CAP_DWBES) 1076 if (device->cap & IOAT_CAP_DWBES)
1077 pq->ctl_f.wb_en = result ? 1 : 0; 1077 pq->ctl_f.wb_en = result ? 1 : 0;
1078 pq->ctl_f.src_cnt = src_cnt_to_hw(s); 1078 pq->ctl_f.src_cnt = src_cnt_to_hw(s);
1079 pq->ctl_f.p_disable = !!(flags & DMA_PREP_PQ_DISABLE_P); 1079 pq->ctl_f.p_disable = !!(flags & DMA_PREP_PQ_DISABLE_P);
1080 pq->ctl_f.q_disable = !!(flags & DMA_PREP_PQ_DISABLE_Q); 1080 pq->ctl_f.q_disable = !!(flags & DMA_PREP_PQ_DISABLE_Q);
1081 1081
1082 len -= xfer_size; 1082 len -= xfer_size;
1083 offset += xfer_size; 1083 offset += xfer_size;
1084 } while ((i += 1 + with_ext) < num_descs); 1084 } while ((i += 1 + with_ext) < num_descs);
1085 1085
1086 /* last pq descriptor carries the unmap parameters and fence bit */ 1086 /* last pq descriptor carries the unmap parameters and fence bit */
1087 desc->txd.flags = flags; 1087 desc->txd.flags = flags;
1088 desc->len = total_len; 1088 desc->len = total_len;
1089 if (result) 1089 if (result)
1090 desc->result = result; 1090 desc->result = result;
1091 pq->ctl_f.fence = !!(flags & DMA_PREP_FENCE); 1091 pq->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
1092 dump_pq_desc_dbg(ioat, desc, ext); 1092 dump_pq_desc_dbg(ioat, desc, ext);
1093 1093
1094 if (!cb32) { 1094 if (!cb32) {
1095 pq->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT); 1095 pq->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
1096 pq->ctl_f.compl_write = 1; 1096 pq->ctl_f.compl_write = 1;
1097 compl_desc = desc; 1097 compl_desc = desc;
1098 } else { 1098 } else {
1099 /* completion descriptor carries interrupt bit */ 1099 /* completion descriptor carries interrupt bit */
1100 compl_desc = ioat2_get_ring_ent(ioat, idx + i); 1100 compl_desc = ioat2_get_ring_ent(ioat, idx + i);
1101 compl_desc->txd.flags = flags & DMA_PREP_INTERRUPT; 1101 compl_desc->txd.flags = flags & DMA_PREP_INTERRUPT;
1102 hw = compl_desc->hw; 1102 hw = compl_desc->hw;
1103 hw->ctl = 0; 1103 hw->ctl = 0;
1104 hw->ctl_f.null = 1; 1104 hw->ctl_f.null = 1;
1105 hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT); 1105 hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
1106 hw->ctl_f.compl_write = 1; 1106 hw->ctl_f.compl_write = 1;
1107 hw->size = NULL_DESC_BUFFER_SIZE; 1107 hw->size = NULL_DESC_BUFFER_SIZE;
1108 dump_desc_dbg(ioat, compl_desc); 1108 dump_desc_dbg(ioat, compl_desc);
1109 } 1109 }
1110 1110
1111 1111
1112 /* we leave the channel locked to ensure in order submission */ 1112 /* we leave the channel locked to ensure in order submission */
1113 return &compl_desc->txd; 1113 return &compl_desc->txd;
1114 } 1114 }
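
The continuation branches above (dmaf_p_disabled_continue() / dmaf_continue()) are the driver-side half of the dma_maxpq() accounting referenced in the comment: a q-only continuation feeds the previous Q back in as one implied source, while a full p+q continuation consumes three implied source slots. A hedged restatement, purely for reference; example_pq_hw_sources() is illustrative and not kernel code.

	#include <linux/dmaengine.h>

	/* How many hardware source slots a PQ request really occupies. */
	static unsigned int example_pq_hw_sources(unsigned int src_cnt,
						  unsigned long flags)
	{
		if (dmaf_p_disabled_continue(flags))
			return src_cnt + 1;	/* prior Q re-read as a source */
		if (dmaf_continue(flags))
			return src_cnt + 3;	/* prior P, prior Q, and a zeroed Q */
		return src_cnt;
	}
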
1115 1115
1116 static struct dma_async_tx_descriptor * 1116 static struct dma_async_tx_descriptor *
1117 __ioat3_prep_pq16_lock(struct dma_chan *c, enum sum_check_flags *result, 1117 __ioat3_prep_pq16_lock(struct dma_chan *c, enum sum_check_flags *result,
1118 const dma_addr_t *dst, const dma_addr_t *src, 1118 const dma_addr_t *dst, const dma_addr_t *src,
1119 unsigned int src_cnt, const unsigned char *scf, 1119 unsigned int src_cnt, const unsigned char *scf,
1120 size_t len, unsigned long flags) 1120 size_t len, unsigned long flags)
1121 { 1121 {
1122 struct ioat2_dma_chan *ioat = to_ioat2_chan(c); 1122 struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
1123 struct ioat_chan_common *chan = &ioat->base; 1123 struct ioat_chan_common *chan = &ioat->base;
1124 struct ioatdma_device *device = chan->device; 1124 struct ioatdma_device *device = chan->device;
1125 struct ioat_ring_ent *desc; 1125 struct ioat_ring_ent *desc;
1126 size_t total_len = len; 1126 size_t total_len = len;
1127 struct ioat_pq_descriptor *pq; 1127 struct ioat_pq_descriptor *pq;
1128 u32 offset = 0; 1128 u32 offset = 0;
1129 u8 op; 1129 u8 op;
1130 int i, s, idx, num_descs; 1130 int i, s, idx, num_descs;
1131 1131
1132 /* this function only handles src_cnt 9 - 16 */ 1132 /* this function only handles src_cnt 9 - 16 */
1133 BUG_ON(src_cnt < 9); 1133 BUG_ON(src_cnt < 9);
1134 1134
1135 /* this function is only called with 9-16 sources */ 1135 /* this function is only called with 9-16 sources */
1136 op = result ? IOAT_OP_PQ_VAL_16S : IOAT_OP_PQ_16S; 1136 op = result ? IOAT_OP_PQ_VAL_16S : IOAT_OP_PQ_16S;
1137 1137
1138 dev_dbg(to_dev(chan), "%s\n", __func__); 1138 dev_dbg(to_dev(chan), "%s\n", __func__);
1139 1139
1140 num_descs = ioat2_xferlen_to_descs(ioat, len); 1140 num_descs = ioat2_xferlen_to_descs(ioat, len);
1141 1141
1142 /* 1142 /*
1143 * 16 source pq is only available on cb3.3 and has no completion 1143 * 16 source pq is only available on cb3.3 and has no completion
1144 * write hw bug. 1144 * write hw bug.
1145 */ 1145 */
1146 if (num_descs && ioat2_check_space_lock(ioat, num_descs) == 0) 1146 if (num_descs && ioat2_check_space_lock(ioat, num_descs) == 0)
1147 idx = ioat->head; 1147 idx = ioat->head;
1148 else 1148 else
1149 return NULL; 1149 return NULL;
1150 1150
1151 i = 0; 1151 i = 0;
1152 1152
1153 do { 1153 do {
1154 struct ioat_raw_descriptor *descs[4]; 1154 struct ioat_raw_descriptor *descs[4];
1155 size_t xfer_size = min_t(size_t, len, 1 << ioat->xfercap_log); 1155 size_t xfer_size = min_t(size_t, len, 1 << ioat->xfercap_log);
1156 1156
1157 desc = ioat2_get_ring_ent(ioat, idx + i); 1157 desc = ioat2_get_ring_ent(ioat, idx + i);
1158 pq = desc->pq; 1158 pq = desc->pq;
1159 1159
1160 descs[0] = (struct ioat_raw_descriptor *) pq; 1160 descs[0] = (struct ioat_raw_descriptor *) pq;
1161 1161
1162 desc->sed = ioat3_alloc_sed(device, 1162 desc->sed = ioat3_alloc_sed(device,
1163 sed_get_pq16_pool_idx(src_cnt)); 1163 sed_get_pq16_pool_idx(src_cnt));
1164 if (!desc->sed) { 1164 if (!desc->sed) {
1165 dev_err(to_dev(chan), 1165 dev_err(to_dev(chan),
1166 "%s: no free sed entries\n", __func__); 1166 "%s: no free sed entries\n", __func__);
1167 return NULL; 1167 return NULL;
1168 } 1168 }
1169 1169
1170 pq->sed_addr = desc->sed->dma; 1170 pq->sed_addr = desc->sed->dma;
1171 desc->sed->parent = desc; 1171 desc->sed->parent = desc;
1172 1172
1173 descs[1] = (struct ioat_raw_descriptor *)desc->sed->hw; 1173 descs[1] = (struct ioat_raw_descriptor *)desc->sed->hw;
1174 descs[2] = (void *)descs[1] + 64; 1174 descs[2] = (void *)descs[1] + 64;
1175 1175
1176 for (s = 0; s < src_cnt; s++) 1176 for (s = 0; s < src_cnt; s++)
1177 pq16_set_src(descs, src[s], offset, scf[s], s); 1177 pq16_set_src(descs, src[s], offset, scf[s], s);
1178 1178
1179 /* see the comment for dma_maxpq in include/linux/dmaengine.h */ 1179 /* see the comment for dma_maxpq in include/linux/dmaengine.h */
1180 if (dmaf_p_disabled_continue(flags)) 1180 if (dmaf_p_disabled_continue(flags))
1181 pq16_set_src(descs, dst[1], offset, 1, s++); 1181 pq16_set_src(descs, dst[1], offset, 1, s++);
1182 else if (dmaf_continue(flags)) { 1182 else if (dmaf_continue(flags)) {
1183 pq16_set_src(descs, dst[0], offset, 0, s++); 1183 pq16_set_src(descs, dst[0], offset, 0, s++);
1184 pq16_set_src(descs, dst[1], offset, 1, s++); 1184 pq16_set_src(descs, dst[1], offset, 1, s++);
1185 pq16_set_src(descs, dst[1], offset, 0, s++); 1185 pq16_set_src(descs, dst[1], offset, 0, s++);
1186 } 1186 }
1187 1187
1188 pq->size = xfer_size; 1188 pq->size = xfer_size;
1189 pq->p_addr = dst[0] + offset; 1189 pq->p_addr = dst[0] + offset;
1190 pq->q_addr = dst[1] + offset; 1190 pq->q_addr = dst[1] + offset;
1191 pq->ctl = 0; 1191 pq->ctl = 0;
1192 pq->ctl_f.op = op; 1192 pq->ctl_f.op = op;
1193 pq->ctl_f.src_cnt = src16_cnt_to_hw(s); 1193 pq->ctl_f.src_cnt = src16_cnt_to_hw(s);
1194 /* we turn on descriptor write back error status */ 1194 /* we turn on descriptor write back error status */
1195 if (device->cap & IOAT_CAP_DWBES) 1195 if (device->cap & IOAT_CAP_DWBES)
1196 pq->ctl_f.wb_en = result ? 1 : 0; 1196 pq->ctl_f.wb_en = result ? 1 : 0;
1197 pq->ctl_f.p_disable = !!(flags & DMA_PREP_PQ_DISABLE_P); 1197 pq->ctl_f.p_disable = !!(flags & DMA_PREP_PQ_DISABLE_P);
1198 pq->ctl_f.q_disable = !!(flags & DMA_PREP_PQ_DISABLE_Q); 1198 pq->ctl_f.q_disable = !!(flags & DMA_PREP_PQ_DISABLE_Q);
1199 1199
1200 len -= xfer_size; 1200 len -= xfer_size;
1201 offset += xfer_size; 1201 offset += xfer_size;
1202 } while (++i < num_descs); 1202 } while (++i < num_descs);
1203 1203
1204 /* last pq descriptor carries the unmap parameters and fence bit */ 1204 /* last pq descriptor carries the unmap parameters and fence bit */
1205 desc->txd.flags = flags; 1205 desc->txd.flags = flags;
1206 desc->len = total_len; 1206 desc->len = total_len;
1207 if (result) 1207 if (result)
1208 desc->result = result; 1208 desc->result = result;
1209 pq->ctl_f.fence = !!(flags & DMA_PREP_FENCE); 1209 pq->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
1210 1210
1211 /* with cb3.3 we should be able to do completion w/o a null desc */ 1211 /* with cb3.3 we should be able to do completion w/o a null desc */
1212 pq->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT); 1212 pq->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
1213 pq->ctl_f.compl_write = 1; 1213 pq->ctl_f.compl_write = 1;
1214 1214
1215 dump_pq16_desc_dbg(ioat, desc); 1215 dump_pq16_desc_dbg(ioat, desc);
1216 1216
1217 /* we leave the channel locked to ensure in order submission */ 1217 /* we leave the channel locked to ensure in order submission */
1218 return &desc->txd; 1218 return &desc->txd;
1219 } 1219 }
1220 1220
1221 static struct dma_async_tx_descriptor * 1221 static struct dma_async_tx_descriptor *
1222 ioat3_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src, 1222 ioat3_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
1223 unsigned int src_cnt, const unsigned char *scf, size_t len, 1223 unsigned int src_cnt, const unsigned char *scf, size_t len,
1224 unsigned long flags) 1224 unsigned long flags)
1225 { 1225 {
1226 struct dma_device *dma = chan->device; 1226 struct dma_device *dma = chan->device;
1227 1227
1228 /* specify valid address for disabled result */ 1228 /* specify valid address for disabled result */
1229 if (flags & DMA_PREP_PQ_DISABLE_P) 1229 if (flags & DMA_PREP_PQ_DISABLE_P)
1230 dst[0] = dst[1]; 1230 dst[0] = dst[1];
1231 if (flags & DMA_PREP_PQ_DISABLE_Q) 1231 if (flags & DMA_PREP_PQ_DISABLE_Q)
1232 dst[1] = dst[0]; 1232 dst[1] = dst[0];
1233 1233
1234 /* handle the single source multiply case from the raid6 1234 /* handle the single source multiply case from the raid6
1235 * recovery path 1235 * recovery path
1236 */ 1236 */
1237 if ((flags & DMA_PREP_PQ_DISABLE_P) && src_cnt == 1) { 1237 if ((flags & DMA_PREP_PQ_DISABLE_P) && src_cnt == 1) {
1238 dma_addr_t single_source[2]; 1238 dma_addr_t single_source[2];
1239 unsigned char single_source_coef[2]; 1239 unsigned char single_source_coef[2];
1240 1240
1241 BUG_ON(flags & DMA_PREP_PQ_DISABLE_Q); 1241 BUG_ON(flags & DMA_PREP_PQ_DISABLE_Q);
1242 single_source[0] = src[0]; 1242 single_source[0] = src[0];
1243 single_source[1] = src[0]; 1243 single_source[1] = src[0];
1244 single_source_coef[0] = scf[0]; 1244 single_source_coef[0] = scf[0];
1245 single_source_coef[1] = 0; 1245 single_source_coef[1] = 0;
1246 1246
1247 return (src_cnt > 8) && (dma->max_pq > 8) ? 1247 return (src_cnt > 8) && (dma->max_pq > 8) ?
1248 __ioat3_prep_pq16_lock(chan, NULL, dst, single_source, 1248 __ioat3_prep_pq16_lock(chan, NULL, dst, single_source,
1249 2, single_source_coef, len, 1249 2, single_source_coef, len,
1250 flags) : 1250 flags) :
1251 __ioat3_prep_pq_lock(chan, NULL, dst, single_source, 2, 1251 __ioat3_prep_pq_lock(chan, NULL, dst, single_source, 2,
1252 single_source_coef, len, flags); 1252 single_source_coef, len, flags);
1253 1253
1254 } else { 1254 } else {
1255 return (src_cnt > 8) && (dma->max_pq > 8) ? 1255 return (src_cnt > 8) && (dma->max_pq > 8) ?
1256 __ioat3_prep_pq16_lock(chan, NULL, dst, src, src_cnt, 1256 __ioat3_prep_pq16_lock(chan, NULL, dst, src, src_cnt,
1257 scf, len, flags) : 1257 scf, len, flags) :
1258 __ioat3_prep_pq_lock(chan, NULL, dst, src, src_cnt, 1258 __ioat3_prep_pq_lock(chan, NULL, dst, src, src_cnt,
1259 scf, len, flags); 1259 scf, len, flags);
1260 } 1260 }
1261 } 1261 }
1262 1262
1263 struct dma_async_tx_descriptor * 1263 struct dma_async_tx_descriptor *
1264 ioat3_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src, 1264 ioat3_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
1265 unsigned int src_cnt, const unsigned char *scf, size_t len, 1265 unsigned int src_cnt, const unsigned char *scf, size_t len,
1266 enum sum_check_flags *pqres, unsigned long flags) 1266 enum sum_check_flags *pqres, unsigned long flags)
1267 { 1267 {
1268 struct dma_device *dma = chan->device; 1268 struct dma_device *dma = chan->device;
1269 1269
1270 /* specify valid address for disabled result */ 1270 /* specify valid address for disabled result */
1271 if (flags & DMA_PREP_PQ_DISABLE_P) 1271 if (flags & DMA_PREP_PQ_DISABLE_P)
1272 pq[0] = pq[1]; 1272 pq[0] = pq[1];
1273 if (flags & DMA_PREP_PQ_DISABLE_Q) 1273 if (flags & DMA_PREP_PQ_DISABLE_Q)
1274 pq[1] = pq[0]; 1274 pq[1] = pq[0];
1275 1275
1276 /* the cleanup routine only sets bits on validate failure, it 1276 /* the cleanup routine only sets bits on validate failure, it
1277 * does not clear bits on validate success... so clear it here 1277 * does not clear bits on validate success... so clear it here
1278 */ 1278 */
1279 *pqres = 0; 1279 *pqres = 0;
1280 1280
1281 return (src_cnt > 8) && (dma->max_pq > 8) ? 1281 return (src_cnt > 8) && (dma->max_pq > 8) ?
1282 __ioat3_prep_pq16_lock(chan, pqres, pq, src, src_cnt, scf, len, 1282 __ioat3_prep_pq16_lock(chan, pqres, pq, src, src_cnt, scf, len,
1283 flags) : 1283 flags) :
1284 __ioat3_prep_pq_lock(chan, pqres, pq, src, src_cnt, scf, len, 1284 __ioat3_prep_pq_lock(chan, pqres, pq, src, src_cnt, scf, len,
1285 flags); 1285 flags);
1286 } 1286 }
1287 1287
1288 static struct dma_async_tx_descriptor * 1288 static struct dma_async_tx_descriptor *
1289 ioat3_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src, 1289 ioat3_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
1290 unsigned int src_cnt, size_t len, unsigned long flags) 1290 unsigned int src_cnt, size_t len, unsigned long flags)
1291 { 1291 {
1292 struct dma_device *dma = chan->device; 1292 struct dma_device *dma = chan->device;
1293 unsigned char scf[src_cnt]; 1293 unsigned char scf[src_cnt];
1294 dma_addr_t pq[2]; 1294 dma_addr_t pq[2];
1295 1295
1296 memset(scf, 0, src_cnt); 1296 memset(scf, 0, src_cnt);
1297 pq[0] = dst; 1297 pq[0] = dst;
1298 flags |= DMA_PREP_PQ_DISABLE_Q; 1298 flags |= DMA_PREP_PQ_DISABLE_Q;
1299 pq[1] = dst; /* specify valid address for disabled result */ 1299 pq[1] = dst; /* specify valid address for disabled result */
1300 1300
1301 return (src_cnt > 8) && (dma->max_pq > 8) ? 1301 return (src_cnt > 8) && (dma->max_pq > 8) ?
1302 __ioat3_prep_pq16_lock(chan, NULL, pq, src, src_cnt, scf, len, 1302 __ioat3_prep_pq16_lock(chan, NULL, pq, src, src_cnt, scf, len,
1303 flags) : 1303 flags) :
1304 __ioat3_prep_pq_lock(chan, NULL, pq, src, src_cnt, scf, len, 1304 __ioat3_prep_pq_lock(chan, NULL, pq, src, src_cnt, scf, len,
1305 flags); 1305 flags);
1306 } 1306 }
1307 1307
1308 struct dma_async_tx_descriptor * 1308 struct dma_async_tx_descriptor *
1309 ioat3_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src, 1309 ioat3_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src,
1310 unsigned int src_cnt, size_t len, 1310 unsigned int src_cnt, size_t len,
1311 enum sum_check_flags *result, unsigned long flags) 1311 enum sum_check_flags *result, unsigned long flags)
1312 { 1312 {
1313 struct dma_device *dma = chan->device; 1313 struct dma_device *dma = chan->device;
1314 unsigned char scf[src_cnt]; 1314 unsigned char scf[src_cnt];
1315 dma_addr_t pq[2]; 1315 dma_addr_t pq[2];
1316 1316
1317 /* the cleanup routine only sets bits on validate failure, it 1317 /* the cleanup routine only sets bits on validate failure, it
1318 * does not clear bits on validate success... so clear it here 1318 * does not clear bits on validate success... so clear it here
1319 */ 1319 */
1320 *result = 0; 1320 *result = 0;
1321 1321
1322 memset(scf, 0, src_cnt); 1322 memset(scf, 0, src_cnt);
1323 pq[0] = src[0]; 1323 pq[0] = src[0];
1324 flags |= DMA_PREP_PQ_DISABLE_Q; 1324 flags |= DMA_PREP_PQ_DISABLE_Q;
1325 pq[1] = pq[0]; /* specify valid address for disabled result */ 1325 pq[1] = pq[0]; /* specify valid address for disabled result */
1326 1326
1327 1327
1328 return (src_cnt > 8) && (dma->max_pq > 8) ? 1328 return (src_cnt > 8) && (dma->max_pq > 8) ?
1329 __ioat3_prep_pq16_lock(chan, result, pq, &src[1], src_cnt - 1, 1329 __ioat3_prep_pq16_lock(chan, result, pq, &src[1], src_cnt - 1,
1330 scf, len, flags) : 1330 scf, len, flags) :
1331 __ioat3_prep_pq_lock(chan, result, pq, &src[1], src_cnt - 1, 1331 __ioat3_prep_pq_lock(chan, result, pq, &src[1], src_cnt - 1,
1332 scf, len, flags); 1332 scf, len, flags);
1333 } 1333 }
1334 1334
1335 static struct dma_async_tx_descriptor * 1335 static struct dma_async_tx_descriptor *
1336 ioat3_prep_interrupt_lock(struct dma_chan *c, unsigned long flags) 1336 ioat3_prep_interrupt_lock(struct dma_chan *c, unsigned long flags)
1337 { 1337 {
1338 struct ioat2_dma_chan *ioat = to_ioat2_chan(c); 1338 struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
1339 struct ioat_ring_ent *desc; 1339 struct ioat_ring_ent *desc;
1340 struct ioat_dma_descriptor *hw; 1340 struct ioat_dma_descriptor *hw;
1341 1341
1342 if (ioat2_check_space_lock(ioat, 1) == 0) 1342 if (ioat2_check_space_lock(ioat, 1) == 0)
1343 desc = ioat2_get_ring_ent(ioat, ioat->head); 1343 desc = ioat2_get_ring_ent(ioat, ioat->head);
1344 else 1344 else
1345 return NULL; 1345 return NULL;
1346 1346
1347 hw = desc->hw; 1347 hw = desc->hw;
1348 hw->ctl = 0; 1348 hw->ctl = 0;
1349 hw->ctl_f.null = 1; 1349 hw->ctl_f.null = 1;
1350 hw->ctl_f.int_en = 1; 1350 hw->ctl_f.int_en = 1;
1351 hw->ctl_f.fence = !!(flags & DMA_PREP_FENCE); 1351 hw->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
1352 hw->ctl_f.compl_write = 1; 1352 hw->ctl_f.compl_write = 1;
1353 hw->size = NULL_DESC_BUFFER_SIZE; 1353 hw->size = NULL_DESC_BUFFER_SIZE;
1354 hw->src_addr = 0; 1354 hw->src_addr = 0;
1355 hw->dst_addr = 0; 1355 hw->dst_addr = 0;
1356 1356
1357 desc->txd.flags = flags; 1357 desc->txd.flags = flags;
1358 desc->len = 1; 1358 desc->len = 1;
1359 1359
1360 dump_desc_dbg(ioat, desc); 1360 dump_desc_dbg(ioat, desc);
1361 1361
1362 /* we leave the channel locked to ensure in order submission */ 1362 /* we leave the channel locked to ensure in order submission */
1363 return &desc->txd; 1363 return &desc->txd;
1364 } 1364 }
1365 1365
1366 static void ioat3_dma_test_callback(void *dma_async_param) 1366 static void ioat3_dma_test_callback(void *dma_async_param)
1367 { 1367 {
1368 struct completion *cmp = dma_async_param; 1368 struct completion *cmp = dma_async_param;
1369 1369
1370 complete(cmp); 1370 complete(cmp);
1371 } 1371 }
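
The self-test below runs the same submission sequence three times (xor, xor-validate, second validate). For readability, here is that sequence condensed into one hedged sketch; example_submit_and_wait() and its 3 s timeout mirror the code below but are illustrative only, reusing ioat3_dma_test_callback() defined just above.

	#include <linux/completion.h>
	#include <linux/dmaengine.h>
	#include <linux/jiffies.h>

	/* Condensed form of each self-test pass below (illustrative only). */
	static int example_submit_and_wait(struct dma_device *dma,
					   struct dma_chan *chan,
					   struct dma_async_tx_descriptor *tx)
	{
		struct completion cmp;
		dma_cookie_t cookie;

		async_tx_ack(tx);
		init_completion(&cmp);
		tx->callback = ioat3_dma_test_callback;	/* completes &cmp */
		tx->callback_param = &cmp;

		cookie = tx->tx_submit(tx);
		if (cookie < 0)
			return -ENODEV;

		dma->device_issue_pending(chan);
		wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

		return dma->device_tx_status(chan, cookie, NULL) == DMA_COMPLETE ?
			0 : -ENODEV;
	}
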
1372 1372
1373 #define IOAT_NUM_SRC_TEST 6 /* must be <= 8 */ 1373 #define IOAT_NUM_SRC_TEST 6 /* must be <= 8 */
1374 static int ioat_xor_val_self_test(struct ioatdma_device *device) 1374 static int ioat_xor_val_self_test(struct ioatdma_device *device)
1375 { 1375 {
1376 int i, src_idx; 1376 int i, src_idx;
1377 struct page *dest; 1377 struct page *dest;
1378 struct page *xor_srcs[IOAT_NUM_SRC_TEST]; 1378 struct page *xor_srcs[IOAT_NUM_SRC_TEST];
1379 struct page *xor_val_srcs[IOAT_NUM_SRC_TEST + 1]; 1379 struct page *xor_val_srcs[IOAT_NUM_SRC_TEST + 1];
1380 dma_addr_t dma_srcs[IOAT_NUM_SRC_TEST + 1]; 1380 dma_addr_t dma_srcs[IOAT_NUM_SRC_TEST + 1];
1381 dma_addr_t dest_dma; 1381 dma_addr_t dest_dma;
1382 struct dma_async_tx_descriptor *tx; 1382 struct dma_async_tx_descriptor *tx;
1383 struct dma_chan *dma_chan; 1383 struct dma_chan *dma_chan;
1384 dma_cookie_t cookie; 1384 dma_cookie_t cookie;
1385 u8 cmp_byte = 0; 1385 u8 cmp_byte = 0;
1386 u32 cmp_word; 1386 u32 cmp_word;
1387 u32 xor_val_result; 1387 u32 xor_val_result;
1388 int err = 0; 1388 int err = 0;
1389 struct completion cmp; 1389 struct completion cmp;
1390 unsigned long tmo; 1390 unsigned long tmo;
1391 struct device *dev = &device->pdev->dev; 1391 struct device *dev = &device->pdev->dev;
1392 struct dma_device *dma = &device->common; 1392 struct dma_device *dma = &device->common;
1393 u8 op = 0; 1393 u8 op = 0;
1394 1394
1395 dev_dbg(dev, "%s\n", __func__); 1395 dev_dbg(dev, "%s\n", __func__);
1396 1396
1397 if (!dma_has_cap(DMA_XOR, dma->cap_mask)) 1397 if (!dma_has_cap(DMA_XOR, dma->cap_mask))
1398 return 0; 1398 return 0;
1399 1399
1400 for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) { 1400 for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) {
1401 xor_srcs[src_idx] = alloc_page(GFP_KERNEL); 1401 xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
1402 if (!xor_srcs[src_idx]) { 1402 if (!xor_srcs[src_idx]) {
1403 while (src_idx--) 1403 while (src_idx--)
1404 __free_page(xor_srcs[src_idx]); 1404 __free_page(xor_srcs[src_idx]);
1405 return -ENOMEM; 1405 return -ENOMEM;
1406 } 1406 }
1407 } 1407 }
1408 1408
1409 dest = alloc_page(GFP_KERNEL); 1409 dest = alloc_page(GFP_KERNEL);
1410 if (!dest) { 1410 if (!dest) {
1411 while (src_idx--) 1411 while (src_idx--)
1412 __free_page(xor_srcs[src_idx]); 1412 __free_page(xor_srcs[src_idx]);
1413 return -ENOMEM; 1413 return -ENOMEM;
1414 } 1414 }
1415 1415
1416 /* Fill in src buffers */ 1416 /* Fill in src buffers */
1417 for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) { 1417 for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) {
1418 u8 *ptr = page_address(xor_srcs[src_idx]); 1418 u8 *ptr = page_address(xor_srcs[src_idx]);
1419 for (i = 0; i < PAGE_SIZE; i++) 1419 for (i = 0; i < PAGE_SIZE; i++)
1420 ptr[i] = (1 << src_idx); 1420 ptr[i] = (1 << src_idx);
1421 } 1421 }
1422 1422
1423 for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) 1423 for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++)
1424 cmp_byte ^= (u8) (1 << src_idx); 1424 cmp_byte ^= (u8) (1 << src_idx);
1425 1425
1426 cmp_word = (cmp_byte << 24) | (cmp_byte << 16) | 1426 cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
1427 (cmp_byte << 8) | cmp_byte; 1427 (cmp_byte << 8) | cmp_byte;
1428 1428
1429 memset(page_address(dest), 0, PAGE_SIZE); 1429 memset(page_address(dest), 0, PAGE_SIZE);
1430 1430
1431 dma_chan = container_of(dma->channels.next, struct dma_chan, 1431 dma_chan = container_of(dma->channels.next, struct dma_chan,
1432 device_node); 1432 device_node);
1433 if (dma->device_alloc_chan_resources(dma_chan) < 1) { 1433 if (dma->device_alloc_chan_resources(dma_chan) < 1) {
1434 err = -ENODEV; 1434 err = -ENODEV;
1435 goto out; 1435 goto out;
1436 } 1436 }
1437 1437
1438 /* test xor */ 1438 /* test xor */
1439 op = IOAT_OP_XOR; 1439 op = IOAT_OP_XOR;
1440 1440
1441 dest_dma = dma_map_page(dev, dest, 0, PAGE_SIZE, DMA_FROM_DEVICE); 1441 dest_dma = dma_map_page(dev, dest, 0, PAGE_SIZE, DMA_FROM_DEVICE);
1442 for (i = 0; i < IOAT_NUM_SRC_TEST; i++) 1442 for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
1443 dma_srcs[i] = dma_map_page(dev, xor_srcs[i], 0, PAGE_SIZE, 1443 dma_srcs[i] = dma_map_page(dev, xor_srcs[i], 0, PAGE_SIZE,
1444 DMA_TO_DEVICE); 1444 DMA_TO_DEVICE);
1445 tx = dma->device_prep_dma_xor(dma_chan, dest_dma, dma_srcs, 1445 tx = dma->device_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
1446 IOAT_NUM_SRC_TEST, PAGE_SIZE, 1446 IOAT_NUM_SRC_TEST, PAGE_SIZE,
1447 DMA_PREP_INTERRUPT | 1447 DMA_PREP_INTERRUPT |
1448 DMA_COMPL_SKIP_SRC_UNMAP | 1448 DMA_COMPL_SKIP_SRC_UNMAP |
1449 DMA_COMPL_SKIP_DEST_UNMAP); 1449 DMA_COMPL_SKIP_DEST_UNMAP);
1450 1450
1451 if (!tx) { 1451 if (!tx) {
1452 dev_err(dev, "Self-test xor prep failed\n"); 1452 dev_err(dev, "Self-test xor prep failed\n");
1453 err = -ENODEV; 1453 err = -ENODEV;
1454 goto dma_unmap; 1454 goto dma_unmap;
1455 } 1455 }
1456 1456
1457 async_tx_ack(tx); 1457 async_tx_ack(tx);
1458 init_completion(&cmp); 1458 init_completion(&cmp);
1459 tx->callback = ioat3_dma_test_callback; 1459 tx->callback = ioat3_dma_test_callback;
1460 tx->callback_param = &cmp; 1460 tx->callback_param = &cmp;
1461 cookie = tx->tx_submit(tx); 1461 cookie = tx->tx_submit(tx);
1462 if (cookie < 0) { 1462 if (cookie < 0) {
1463 dev_err(dev, "Self-test xor setup failed\n"); 1463 dev_err(dev, "Self-test xor setup failed\n");
1464 err = -ENODEV; 1464 err = -ENODEV;
1465 goto dma_unmap; 1465 goto dma_unmap;
1466 } 1466 }
1467 dma->device_issue_pending(dma_chan); 1467 dma->device_issue_pending(dma_chan);
1468 1468
1469 tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); 1469 tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
1470 1470
1471 if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) { 1471 if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
1472 dev_err(dev, "Self-test xor timed out\n"); 1472 dev_err(dev, "Self-test xor timed out\n");
1473 err = -ENODEV; 1473 err = -ENODEV;
1474 goto dma_unmap; 1474 goto dma_unmap;
1475 } 1475 }
1476 1476
1477 dma_unmap_page(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE); 1477 dma_unmap_page(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
1478 for (i = 0; i < IOAT_NUM_SRC_TEST; i++) 1478 for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
1479 dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE); 1479 dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);
1480 1480
1481 dma_sync_single_for_cpu(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE); 1481 dma_sync_single_for_cpu(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
1482 for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) { 1482 for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
1483 u32 *ptr = page_address(dest); 1483 u32 *ptr = page_address(dest);
1484 if (ptr[i] != cmp_word) { 1484 if (ptr[i] != cmp_word) {
1485 dev_err(dev, "Self-test xor failed compare\n"); 1485 dev_err(dev, "Self-test xor failed compare\n");
1486 err = -ENODEV; 1486 err = -ENODEV;
1487 goto free_resources; 1487 goto free_resources;
1488 } 1488 }
1489 } 1489 }
1490 dma_sync_single_for_device(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE); 1490 dma_sync_single_for_device(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
1491 1491
1492 /* skip validate if the capability is not present */ 1492 /* skip validate if the capability is not present */
1493 if (!dma_has_cap(DMA_XOR_VAL, dma_chan->device->cap_mask)) 1493 if (!dma_has_cap(DMA_XOR_VAL, dma_chan->device->cap_mask))
1494 goto free_resources; 1494 goto free_resources;
1495 1495
1496 op = IOAT_OP_XOR_VAL; 1496 op = IOAT_OP_XOR_VAL;
1497 1497
1498 /* validate the sources with the destination page */ 1498 /* validate the sources with the destination page */
1499 for (i = 0; i < IOAT_NUM_SRC_TEST; i++) 1499 for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
1500 xor_val_srcs[i] = xor_srcs[i]; 1500 xor_val_srcs[i] = xor_srcs[i];
1501 xor_val_srcs[i] = dest; 1501 xor_val_srcs[i] = dest;
1502 1502
1503 xor_val_result = 1; 1503 xor_val_result = 1;
1504 1504
1505 for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) 1505 for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
1506 dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE, 1506 dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE,
1507 DMA_TO_DEVICE); 1507 DMA_TO_DEVICE);
1508 tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs, 1508 tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
1509 IOAT_NUM_SRC_TEST + 1, PAGE_SIZE, 1509 IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
1510 &xor_val_result, DMA_PREP_INTERRUPT | 1510 &xor_val_result, DMA_PREP_INTERRUPT |
1511 DMA_COMPL_SKIP_SRC_UNMAP | 1511 DMA_COMPL_SKIP_SRC_UNMAP |
1512 DMA_COMPL_SKIP_DEST_UNMAP); 1512 DMA_COMPL_SKIP_DEST_UNMAP);
1513 if (!tx) { 1513 if (!tx) {
1514 dev_err(dev, "Self-test zero prep failed\n"); 1514 dev_err(dev, "Self-test zero prep failed\n");
1515 err = -ENODEV; 1515 err = -ENODEV;
1516 goto dma_unmap; 1516 goto dma_unmap;
1517 } 1517 }
1518 1518
1519 async_tx_ack(tx); 1519 async_tx_ack(tx);
1520 init_completion(&cmp); 1520 init_completion(&cmp);
1521 tx->callback = ioat3_dma_test_callback; 1521 tx->callback = ioat3_dma_test_callback;
1522 tx->callback_param = &cmp; 1522 tx->callback_param = &cmp;
1523 cookie = tx->tx_submit(tx); 1523 cookie = tx->tx_submit(tx);
1524 if (cookie < 0) { 1524 if (cookie < 0) {
1525 dev_err(dev, "Self-test zero setup failed\n"); 1525 dev_err(dev, "Self-test zero setup failed\n");
1526 err = -ENODEV; 1526 err = -ENODEV;
1527 goto dma_unmap; 1527 goto dma_unmap;
1528 } 1528 }
1529 dma->device_issue_pending(dma_chan); 1529 dma->device_issue_pending(dma_chan);
1530 1530
1531 tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); 1531 tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
1532 1532
1533 if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) { 1533 if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
1534 dev_err(dev, "Self-test validate timed out\n"); 1534 dev_err(dev, "Self-test validate timed out\n");
1535 err = -ENODEV; 1535 err = -ENODEV;
1536 goto dma_unmap; 1536 goto dma_unmap;
1537 } 1537 }
1538 1538
1539 for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) 1539 for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
1540 dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE); 1540 dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);
1541 1541
1542 if (xor_val_result != 0) { 1542 if (xor_val_result != 0) {
1543 dev_err(dev, "Self-test validate failed compare\n"); 1543 dev_err(dev, "Self-test validate failed compare\n");
1544 err = -ENODEV; 1544 err = -ENODEV;
1545 goto free_resources; 1545 goto free_resources;
1546 } 1546 }
1547 1547
1548 /* test for non-zero parity sum */ 1548 /* test for non-zero parity sum */
1549 op = IOAT_OP_XOR_VAL; 1549 op = IOAT_OP_XOR_VAL;
1550 1550
1551 xor_val_result = 0; 1551 xor_val_result = 0;
1552 for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) 1552 for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
1553 dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE, 1553 dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE,
1554 DMA_TO_DEVICE); 1554 DMA_TO_DEVICE);
1555 tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs, 1555 tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
1556 IOAT_NUM_SRC_TEST + 1, PAGE_SIZE, 1556 IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
1557 &xor_val_result, DMA_PREP_INTERRUPT | 1557 &xor_val_result, DMA_PREP_INTERRUPT |
1558 DMA_COMPL_SKIP_SRC_UNMAP | 1558 DMA_COMPL_SKIP_SRC_UNMAP |
1559 DMA_COMPL_SKIP_DEST_UNMAP); 1559 DMA_COMPL_SKIP_DEST_UNMAP);
1560 if (!tx) { 1560 if (!tx) {
1561 dev_err(dev, "Self-test 2nd zero prep failed\n"); 1561 dev_err(dev, "Self-test 2nd zero prep failed\n");
1562 err = -ENODEV; 1562 err = -ENODEV;
1563 goto dma_unmap; 1563 goto dma_unmap;
1564 } 1564 }
1565 1565
1566 async_tx_ack(tx); 1566 async_tx_ack(tx);
1567 init_completion(&cmp); 1567 init_completion(&cmp);
1568 tx->callback = ioat3_dma_test_callback; 1568 tx->callback = ioat3_dma_test_callback;
1569 tx->callback_param = &cmp; 1569 tx->callback_param = &cmp;
1570 cookie = tx->tx_submit(tx); 1570 cookie = tx->tx_submit(tx);
1571 if (cookie < 0) { 1571 if (cookie < 0) {
1572 dev_err(dev, "Self-test 2nd zero setup failed\n"); 1572 dev_err(dev, "Self-test 2nd zero setup failed\n");
1573 err = -ENODEV; 1573 err = -ENODEV;
1574 goto dma_unmap; 1574 goto dma_unmap;
1575 } 1575 }
1576 dma->device_issue_pending(dma_chan); 1576 dma->device_issue_pending(dma_chan);
1577 1577
1578 tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); 1578 tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
1579 1579
1580 if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) { 1580 if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
1581 dev_err(dev, "Self-test 2nd validate timed out\n"); 1581 dev_err(dev, "Self-test 2nd validate timed out\n");
1582 err = -ENODEV; 1582 err = -ENODEV;
1583 goto dma_unmap; 1583 goto dma_unmap;
1584 } 1584 }
1585 1585
1586 if (xor_val_result != SUM_CHECK_P_RESULT) { 1586 if (xor_val_result != SUM_CHECK_P_RESULT) {
1587 dev_err(dev, "Self-test validate failed compare\n"); 1587 dev_err(dev, "Self-test validate failed compare\n");
1588 err = -ENODEV; 1588 err = -ENODEV;
1589 goto dma_unmap; 1589 goto dma_unmap;
1590 } 1590 }
1591 1591
1592 for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) 1592 for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
1593 dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE); 1593 dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);
1594 1594
1595 goto free_resources; 1595 goto free_resources;
1596 dma_unmap: 1596 dma_unmap:
1597 if (op == IOAT_OP_XOR) { 1597 if (op == IOAT_OP_XOR) {
1598 dma_unmap_page(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE); 1598 dma_unmap_page(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
1599 for (i = 0; i < IOAT_NUM_SRC_TEST; i++) 1599 for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
1600 dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, 1600 dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE,
1601 DMA_TO_DEVICE); 1601 DMA_TO_DEVICE);
1602 } else if (op == IOAT_OP_XOR_VAL) { 1602 } else if (op == IOAT_OP_XOR_VAL) {
1603 for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) 1603 for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
1604 dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, 1604 dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE,
1605 DMA_TO_DEVICE); 1605 DMA_TO_DEVICE);
1606 } 1606 }
1607 free_resources: 1607 free_resources:
1608 dma->device_free_chan_resources(dma_chan); 1608 dma->device_free_chan_resources(dma_chan);
1609 out: 1609 out:
1610 src_idx = IOAT_NUM_SRC_TEST; 1610 src_idx = IOAT_NUM_SRC_TEST;
1611 while (src_idx--) 1611 while (src_idx--)
1612 __free_page(xor_srcs[src_idx]); 1612 __free_page(xor_srcs[src_idx]);
1613 __free_page(dest); 1613 __free_page(dest);
1614 return err; 1614 return err;
1615 } 1615 }
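
The expected pattern in the test above can be verified by hand: each of the IOAT_NUM_SRC_TEST = 6 source pages is filled with a distinct single-bit value, so XORing them sets the low six bits of every byte. A standalone hedged check of that arithmetic (not driver code):

	#include <linux/types.h>

	/* Standalone check of the self-test's expected XOR result (0x3f3f3f3f). */
	static u32 example_expected_xor_word(int num_srcs)	/* 6 in the test above */
	{
		u8 cmp_byte = 0;
		int i;

		for (i = 0; i < num_srcs; i++)
			cmp_byte ^= 1 << i;	/* 0x01 ^ 0x02 ^ ... ^ 0x20 = 0x3f */

		return (cmp_byte << 24) | (cmp_byte << 16) |
		       (cmp_byte << 8) | cmp_byte;
	}
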
1616 1616
1617 static int ioat3_dma_self_test(struct ioatdma_device *device) 1617 static int ioat3_dma_self_test(struct ioatdma_device *device)
1618 { 1618 {
1619 int rc = ioat_dma_self_test(device); 1619 int rc = ioat_dma_self_test(device);
1620 1620
1621 if (rc) 1621 if (rc)
1622 return rc; 1622 return rc;
1623 1623
1624 rc = ioat_xor_val_self_test(device); 1624 rc = ioat_xor_val_self_test(device);
1625 if (rc) 1625 if (rc)
1626 return rc; 1626 return rc;
1627 1627
1628 return 0; 1628 return 0;
1629 } 1629 }
1630 1630
1631 static int ioat3_irq_reinit(struct ioatdma_device *device) 1631 static int ioat3_irq_reinit(struct ioatdma_device *device)
1632 { 1632 {
1633 int msixcnt = device->common.chancnt; 1633 int msixcnt = device->common.chancnt;
1634 struct pci_dev *pdev = device->pdev; 1634 struct pci_dev *pdev = device->pdev;
1635 int i; 1635 int i;
1636 struct msix_entry *msix; 1636 struct msix_entry *msix;
1637 struct ioat_chan_common *chan; 1637 struct ioat_chan_common *chan;
1638 int err = 0; 1638 int err = 0;
1639 1639
1640 switch (device->irq_mode) { 1640 switch (device->irq_mode) {
1641 case IOAT_MSIX: 1641 case IOAT_MSIX:
1642 1642
1643 for (i = 0; i < msixcnt; i++) { 1643 for (i = 0; i < msixcnt; i++) {
1644 msix = &device->msix_entries[i]; 1644 msix = &device->msix_entries[i];
1645 chan = ioat_chan_by_index(device, i); 1645 chan = ioat_chan_by_index(device, i);
1646 devm_free_irq(&pdev->dev, msix->vector, chan); 1646 devm_free_irq(&pdev->dev, msix->vector, chan);
1647 } 1647 }
1648 1648
1649 pci_disable_msix(pdev); 1649 pci_disable_msix(pdev);
1650 break; 1650 break;
1651 1651
1652 case IOAT_MSIX_SINGLE: 1652 case IOAT_MSIX_SINGLE:
1653 msix = &device->msix_entries[0]; 1653 msix = &device->msix_entries[0];
1654 chan = ioat_chan_by_index(device, 0); 1654 chan = ioat_chan_by_index(device, 0);
1655 devm_free_irq(&pdev->dev, msix->vector, chan); 1655 devm_free_irq(&pdev->dev, msix->vector, chan);
1656 pci_disable_msix(pdev); 1656 pci_disable_msix(pdev);
1657 break; 1657 break;
1658 1658
1659 case IOAT_MSI: 1659 case IOAT_MSI:
1660 chan = ioat_chan_by_index(device, 0); 1660 chan = ioat_chan_by_index(device, 0);
1661 devm_free_irq(&pdev->dev, pdev->irq, chan); 1661 devm_free_irq(&pdev->dev, pdev->irq, chan);
1662 pci_disable_msi(pdev); 1662 pci_disable_msi(pdev);
1663 break; 1663 break;
1664 1664
1665 case IOAT_INTX: 1665 case IOAT_INTX:
1666 chan = ioat_chan_by_index(device, 0); 1666 chan = ioat_chan_by_index(device, 0);
1667 devm_free_irq(&pdev->dev, pdev->irq, chan); 1667 devm_free_irq(&pdev->dev, pdev->irq, chan);
1668 break; 1668 break;
1669 1669
1670 default: 1670 default:
1671 return 0; 1671 return 0;
1672 } 1672 }
1673 1673
1674 device->irq_mode = IOAT_NOIRQ; 1674 device->irq_mode = IOAT_NOIRQ;
1675 1675
1676 err = ioat_dma_setup_interrupts(device); 1676 err = ioat_dma_setup_interrupts(device);
1677 1677
1678 return err; 1678 return err;
1679 } 1679 }
1680 1680
1681 static int ioat3_reset_hw(struct ioat_chan_common *chan) 1681 static int ioat3_reset_hw(struct ioat_chan_common *chan)
1682 { 1682 {
1683 /* throw away whatever the channel was doing and get it 1683 /* throw away whatever the channel was doing and get it
1684 * initialized, with ioat3 specific workarounds 1684 * initialized, with ioat3 specific workarounds
1685 */ 1685 */
1686 struct ioatdma_device *device = chan->device; 1686 struct ioatdma_device *device = chan->device;
1687 struct pci_dev *pdev = device->pdev; 1687 struct pci_dev *pdev = device->pdev;
1688 u32 chanerr; 1688 u32 chanerr;
1689 u16 dev_id; 1689 u16 dev_id;
1690 int err; 1690 int err;
1691 1691
1692 ioat2_quiesce(chan, msecs_to_jiffies(100)); 1692 ioat2_quiesce(chan, msecs_to_jiffies(100));
1693 1693
1694 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET); 1694 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
1695 writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET); 1695 writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
1696 1696
1697 if (device->version < IOAT_VER_3_3) { 1697 if (device->version < IOAT_VER_3_3) {
1698 /* clear any pending errors */ 1698 /* clear any pending errors */
1699 err = pci_read_config_dword(pdev, 1699 err = pci_read_config_dword(pdev,
1700 IOAT_PCI_CHANERR_INT_OFFSET, &chanerr); 1700 IOAT_PCI_CHANERR_INT_OFFSET, &chanerr);
1701 if (err) { 1701 if (err) {
1702 dev_err(&pdev->dev, 1702 dev_err(&pdev->dev,
1703 "channel error register unreachable\n"); 1703 "channel error register unreachable\n");
1704 return err; 1704 return err;
1705 } 1705 }
1706 pci_write_config_dword(pdev, 1706 pci_write_config_dword(pdev,
1707 IOAT_PCI_CHANERR_INT_OFFSET, chanerr); 1707 IOAT_PCI_CHANERR_INT_OFFSET, chanerr);
1708 1708
1709 /* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit 1709 /* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
1710 * (workaround for spurious config parity error after restart) 1710 * (workaround for spurious config parity error after restart)
1711 */ 1711 */
1712 pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id); 1712 pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id);
1713 if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0) { 1713 if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0) {
1714 pci_write_config_dword(pdev, 1714 pci_write_config_dword(pdev,
1715 IOAT_PCI_DMAUNCERRSTS_OFFSET, 1715 IOAT_PCI_DMAUNCERRSTS_OFFSET,
1716 0x10); 1716 0x10);
1717 } 1717 }
1718 } 1718 }
1719 1719
1720 err = ioat2_reset_sync(chan, msecs_to_jiffies(200)); 1720 err = ioat2_reset_sync(chan, msecs_to_jiffies(200));
1721 if (err) { 1721 if (err) {
1722 dev_err(&pdev->dev, "Failed to reset!\n"); 1722 dev_err(&pdev->dev, "Failed to reset!\n");
1723 return err; 1723 return err;
1724 } 1724 }
1725 1725
1726 if (device->irq_mode != IOAT_NOIRQ && is_bwd_ioat(pdev)) 1726 if (device->irq_mode != IOAT_NOIRQ && is_bwd_ioat(pdev))
1727 err = ioat3_irq_reinit(device); 1727 err = ioat3_irq_reinit(device);
1728 1728
1729 return err; 1729 return err;
1730 } 1730 }
1731 1731
1732 static void ioat3_intr_quirk(struct ioatdma_device *device) 1732 static void ioat3_intr_quirk(struct ioatdma_device *device)
1733 { 1733 {
1734 struct dma_device *dma; 1734 struct dma_device *dma;
1735 struct dma_chan *c; 1735 struct dma_chan *c;
1736 struct ioat_chan_common *chan; 1736 struct ioat_chan_common *chan;
1737 u32 errmask; 1737 u32 errmask;
1738 1738
1739 dma = &device->common; 1739 dma = &device->common;
1740 1740
1741 /* 1741 /*
1742 * if we have descriptor write back error status, we mask the 1742 * if we have descriptor write back error status, we mask the
1743 * error interrupts 1743 * error interrupts
1744 */ 1744 */
1745 if (device->cap & IOAT_CAP_DWBES) { 1745 if (device->cap & IOAT_CAP_DWBES) {
1746 list_for_each_entry(c, &dma->channels, device_node) { 1746 list_for_each_entry(c, &dma->channels, device_node) {
1747 chan = to_chan_common(c); 1747 chan = to_chan_common(c);
1748 errmask = readl(chan->reg_base + 1748 errmask = readl(chan->reg_base +
1749 IOAT_CHANERR_MASK_OFFSET); 1749 IOAT_CHANERR_MASK_OFFSET);
1750 errmask |= IOAT_CHANERR_XOR_P_OR_CRC_ERR | 1750 errmask |= IOAT_CHANERR_XOR_P_OR_CRC_ERR |
1751 IOAT_CHANERR_XOR_Q_ERR; 1751 IOAT_CHANERR_XOR_Q_ERR;
1752 writel(errmask, chan->reg_base + 1752 writel(errmask, chan->reg_base +
1753 IOAT_CHANERR_MASK_OFFSET); 1753 IOAT_CHANERR_MASK_OFFSET);
1754 } 1754 }
1755 } 1755 }
1756 } 1756 }
1757 1757
1758 int ioat3_dma_probe(struct ioatdma_device *device, int dca) 1758 int ioat3_dma_probe(struct ioatdma_device *device, int dca)
1759 { 1759 {
1760 struct pci_dev *pdev = device->pdev; 1760 struct pci_dev *pdev = device->pdev;
1761 int dca_en = system_has_dca_enabled(pdev); 1761 int dca_en = system_has_dca_enabled(pdev);
1762 struct dma_device *dma; 1762 struct dma_device *dma;
1763 struct dma_chan *c; 1763 struct dma_chan *c;
1764 struct ioat_chan_common *chan; 1764 struct ioat_chan_common *chan;
1765 bool is_raid_device = false; 1765 bool is_raid_device = false;
1766 int err; 1766 int err;
1767 1767
1768 device->enumerate_channels = ioat2_enumerate_channels; 1768 device->enumerate_channels = ioat2_enumerate_channels;
1769 device->reset_hw = ioat3_reset_hw; 1769 device->reset_hw = ioat3_reset_hw;
1770 device->self_test = ioat3_dma_self_test; 1770 device->self_test = ioat3_dma_self_test;
1771 device->intr_quirk = ioat3_intr_quirk; 1771 device->intr_quirk = ioat3_intr_quirk;
1772 dma = &device->common; 1772 dma = &device->common;
1773 dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock; 1773 dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock;
1774 dma->device_issue_pending = ioat2_issue_pending; 1774 dma->device_issue_pending = ioat2_issue_pending;
1775 dma->device_alloc_chan_resources = ioat2_alloc_chan_resources; 1775 dma->device_alloc_chan_resources = ioat2_alloc_chan_resources;
1776 dma->device_free_chan_resources = ioat2_free_chan_resources; 1776 dma->device_free_chan_resources = ioat2_free_chan_resources;
1777 1777
1778 dma_cap_set(DMA_INTERRUPT, dma->cap_mask); 1778 dma_cap_set(DMA_INTERRUPT, dma->cap_mask);
1779 dma->device_prep_dma_interrupt = ioat3_prep_interrupt_lock; 1779 dma->device_prep_dma_interrupt = ioat3_prep_interrupt_lock;
1780 1780
1781 device->cap = readl(device->reg_base + IOAT_DMA_CAP_OFFSET); 1781 device->cap = readl(device->reg_base + IOAT_DMA_CAP_OFFSET);
1782 1782
1783 if (is_xeon_cb32(pdev) || is_bwd_noraid(pdev)) 1783 if (is_xeon_cb32(pdev) || is_bwd_noraid(pdev))
1784 device->cap &= ~(IOAT_CAP_XOR | IOAT_CAP_PQ | IOAT_CAP_RAID16SS); 1784 device->cap &= ~(IOAT_CAP_XOR | IOAT_CAP_PQ | IOAT_CAP_RAID16SS);
1785 1785
1786 /* dca is incompatible with raid operations */ 1786 /* dca is incompatible with raid operations */
1787 if (dca_en && (device->cap & (IOAT_CAP_XOR|IOAT_CAP_PQ))) 1787 if (dca_en && (device->cap & (IOAT_CAP_XOR|IOAT_CAP_PQ)))
1788 device->cap &= ~(IOAT_CAP_XOR|IOAT_CAP_PQ); 1788 device->cap &= ~(IOAT_CAP_XOR|IOAT_CAP_PQ);
1789 1789
1790 if (device->cap & IOAT_CAP_XOR) { 1790 if (device->cap & IOAT_CAP_XOR) {
1791 is_raid_device = true; 1791 is_raid_device = true;
1792 dma->max_xor = 8; 1792 dma->max_xor = 8;
1793 1793
1794 dma_cap_set(DMA_XOR, dma->cap_mask); 1794 dma_cap_set(DMA_XOR, dma->cap_mask);
1795 dma->device_prep_dma_xor = ioat3_prep_xor; 1795 dma->device_prep_dma_xor = ioat3_prep_xor;
1796 1796
1797 dma_cap_set(DMA_XOR_VAL, dma->cap_mask); 1797 dma_cap_set(DMA_XOR_VAL, dma->cap_mask);
1798 dma->device_prep_dma_xor_val = ioat3_prep_xor_val; 1798 dma->device_prep_dma_xor_val = ioat3_prep_xor_val;
1799 } 1799 }
1800 1800
1801 if (device->cap & IOAT_CAP_PQ) { 1801 if (device->cap & IOAT_CAP_PQ) {
1802 is_raid_device = true; 1802 is_raid_device = true;
1803 1803
1804 dma->device_prep_dma_pq = ioat3_prep_pq; 1804 dma->device_prep_dma_pq = ioat3_prep_pq;
1805 dma->device_prep_dma_pq_val = ioat3_prep_pq_val; 1805 dma->device_prep_dma_pq_val = ioat3_prep_pq_val;
1806 dma_cap_set(DMA_PQ, dma->cap_mask); 1806 dma_cap_set(DMA_PQ, dma->cap_mask);
1807 dma_cap_set(DMA_PQ_VAL, dma->cap_mask); 1807 dma_cap_set(DMA_PQ_VAL, dma->cap_mask);
1808 1808
1809 if (device->cap & IOAT_CAP_RAID16SS) { 1809 if (device->cap & IOAT_CAP_RAID16SS) {
1810 dma_set_maxpq(dma, 16, 0); 1810 dma_set_maxpq(dma, 16, 0);
1811 } else { 1811 } else {
1812 dma_set_maxpq(dma, 8, 0); 1812 dma_set_maxpq(dma, 8, 0);
1813 } 1813 }
1814 1814
1815 if (!(device->cap & IOAT_CAP_XOR)) { 1815 if (!(device->cap & IOAT_CAP_XOR)) {
1816 dma->device_prep_dma_xor = ioat3_prep_pqxor; 1816 dma->device_prep_dma_xor = ioat3_prep_pqxor;
1817 dma->device_prep_dma_xor_val = ioat3_prep_pqxor_val; 1817 dma->device_prep_dma_xor_val = ioat3_prep_pqxor_val;
1818 dma_cap_set(DMA_XOR, dma->cap_mask); 1818 dma_cap_set(DMA_XOR, dma->cap_mask);
1819 dma_cap_set(DMA_XOR_VAL, dma->cap_mask); 1819 dma_cap_set(DMA_XOR_VAL, dma->cap_mask);
1820 1820
1821 if (device->cap & IOAT_CAP_RAID16SS) { 1821 if (device->cap & IOAT_CAP_RAID16SS) {
1822 dma->max_xor = 16; 1822 dma->max_xor = 16;
1823 } else { 1823 } else {
1824 dma->max_xor = 8; 1824 dma->max_xor = 8;
1825 } 1825 }
1826 } 1826 }
1827 } 1827 }
1828 1828
1829 dma->device_tx_status = ioat3_tx_status; 1829 dma->device_tx_status = ioat3_tx_status;
1830 device->cleanup_fn = ioat3_cleanup_event; 1830 device->cleanup_fn = ioat3_cleanup_event;
1831 device->timer_fn = ioat3_timer_event; 1831 device->timer_fn = ioat3_timer_event;
1832 1832
1833 /* starting with CB3.3, super extended descriptors are supported */ 1833 /* starting with CB3.3, super extended descriptors are supported */

1834 if (device->cap & IOAT_CAP_RAID16SS) { 1834 if (device->cap & IOAT_CAP_RAID16SS) {
1835 char pool_name[14]; 1835 char pool_name[14];
1836 int i; 1836 int i;
1837 1837
1838 /* allocate sw descriptor pool for SED */ 1838 /* allocate sw descriptor pool for SED */
1839 device->sed_pool = kmem_cache_create("ioat_sed", 1839 device->sed_pool = kmem_cache_create("ioat_sed",
1840 sizeof(struct ioat_sed_ent), 0, 0, NULL); 1840 sizeof(struct ioat_sed_ent), 0, 0, NULL);
1841 if (!device->sed_pool) 1841 if (!device->sed_pool)
1842 return -ENOMEM; 1842 return -ENOMEM;
1843 1843
1844 for (i = 0; i < MAX_SED_POOLS; i++) { 1844 for (i = 0; i < MAX_SED_POOLS; i++) {
1845 snprintf(pool_name, 14, "ioat_hw%d_sed", i); 1845 snprintf(pool_name, 14, "ioat_hw%d_sed", i);
1846 1846
1847 /* allocate SED DMA pool */ 1847 /* allocate SED DMA pool */
1848 device->sed_hw_pool[i] = dma_pool_create(pool_name, 1848 device->sed_hw_pool[i] = dma_pool_create(pool_name,
1849 &pdev->dev, 1849 &pdev->dev,
1850 SED_SIZE * (i + 1), 64, 0); 1850 SED_SIZE * (i + 1), 64, 0);
1851 if (!device->sed_hw_pool[i]) 1851 if (!device->sed_hw_pool[i])
1852 goto sed_pool_cleanup; 1852 goto sed_pool_cleanup;
1853 1853
1854 } 1854 }
1855 } 1855 }
1856 1856
1857 err = ioat_probe(device); 1857 err = ioat_probe(device);
1858 if (err) 1858 if (err)
1859 return err; 1859 return err;
1860 ioat_set_tcp_copy_break(262144); 1860 ioat_set_tcp_copy_break(262144);
1861 1861
1862 list_for_each_entry(c, &dma->channels, device_node) { 1862 list_for_each_entry(c, &dma->channels, device_node) {
1863 chan = to_chan_common(c); 1863 chan = to_chan_common(c);
1864 writel(IOAT_DMA_DCA_ANY_CPU, 1864 writel(IOAT_DMA_DCA_ANY_CPU,
1865 chan->reg_base + IOAT_DCACTRL_OFFSET); 1865 chan->reg_base + IOAT_DCACTRL_OFFSET);
1866 } 1866 }
1867 1867
1868 err = ioat_register(device); 1868 err = ioat_register(device);
1869 if (err) 1869 if (err)
1870 return err; 1870 return err;
1871 1871
1872 ioat_kobject_add(device, &ioat2_ktype); 1872 ioat_kobject_add(device, &ioat2_ktype);
1873 1873
1874 if (dca) 1874 if (dca)
1875 device->dca = ioat3_dca_init(pdev, device->reg_base); 1875 device->dca = ioat3_dca_init(pdev, device->reg_base);
1876 1876
1877 return 0; 1877 return 0;
1878 1878
1879 sed_pool_cleanup: 1879 sed_pool_cleanup:
1880 if (device->sed_pool) { 1880 if (device->sed_pool) {
1881 int i; 1881 int i;
1882 kmem_cache_destroy(device->sed_pool); 1882 kmem_cache_destroy(device->sed_pool);
1883 1883
1884 for (i = 0; i < MAX_SED_POOLS; i++) 1884 for (i = 0; i < MAX_SED_POOLS; i++)
1885 if (device->sed_hw_pool[i]) 1885 if (device->sed_hw_pool[i])
1886 dma_pool_destroy(device->sed_hw_pool[i]); 1886 dma_pool_destroy(device->sed_hw_pool[i]);
1887 } 1887 }
1888 1888
1889 return -ENOMEM; 1889 return -ENOMEM;
1890 } 1890 }
1891 1891
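As a hedged illustration of how the SED pools created in the probe path above might later be consumed: a software entry comes from the ioat_sed kmem_cache and its hardware descriptor block from the per-size DMA pool of the requested index. The helper below is a sketch under assumptions, not the driver's actual allocation routine, and the ioat_sed_ent field names (hw, dma, hw_pool) are assumed for illustration.

#include <linux/slab.h>
#include <linux/dmapool.h>

/* Sketch: pair one SW entry with one HW buffer from pool 'hw_pool'.
 * Field names on struct ioat_sed_ent are assumptions for illustration.
 */
static struct ioat_sed_ent *
example_alloc_sed(struct ioatdma_device *device, unsigned int hw_pool)
{
	struct ioat_sed_ent *sed;

	sed = kmem_cache_alloc(device->sed_pool, GFP_ATOMIC);
	if (!sed)
		return NULL;

	sed->hw_pool = hw_pool;
	sed->hw = dma_pool_alloc(device->sed_hw_pool[hw_pool],
				 GFP_ATOMIC, &sed->dma);
	if (!sed->hw) {
		kmem_cache_free(device->sed_pool, sed);
		return NULL;
	}

	return sed;
}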
1892 void ioat3_dma_remove(struct ioatdma_device *device) 1892 void ioat3_dma_remove(struct ioatdma_device *device)
1893 { 1893 {
1894 if (device->sed_pool) { 1894 if (device->sed_pool) {
1895 int i; 1895 int i;
1896 kmem_cache_destroy(device->sed_pool); 1896 kmem_cache_destroy(device->sed_pool);
1897 1897
1898 for (i = 0; i < MAX_SED_POOLS; i++) 1898 for (i = 0; i < MAX_SED_POOLS; i++)
1899 if (device->sed_hw_pool[i]) 1899 if (device->sed_hw_pool[i])
1900 dma_pool_destroy(device->sed_hw_pool[i]); 1900 dma_pool_destroy(device->sed_hw_pool[i]);
1901 } 1901 }
1902 } 1902 }
1903 1903
drivers/dma/iop-adma.c
1 /* 1 /*
2 * offload engine driver for the Intel Xscale series of i/o processors 2 * offload engine driver for the Intel Xscale series of i/o processors
3 * Copyright © 2006, Intel Corporation. 3 * Copyright © 2006, Intel Corporation.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License, 6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation. 7 * version 2, as published by the Free Software Foundation.
8 * 8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT 9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details. 12 * more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License along with 14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc., 15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17 * 17 *
18 */ 18 */
19 19
20 /* 20 /*
21 * This driver supports the asynchronous DMA copy and RAID engines available 21 * This driver supports the asynchronous DMA copy and RAID engines available
22 * on the Intel Xscale(R) family of I/O Processors (IOP 32x, 33x, 134x) 22 * on the Intel Xscale(R) family of I/O Processors (IOP 32x, 33x, 134x)
23 */ 23 */
24 24
25 #include <linux/init.h> 25 #include <linux/init.h>
26 #include <linux/module.h> 26 #include <linux/module.h>
27 #include <linux/delay.h> 27 #include <linux/delay.h>
28 #include <linux/dma-mapping.h> 28 #include <linux/dma-mapping.h>
29 #include <linux/spinlock.h> 29 #include <linux/spinlock.h>
30 #include <linux/interrupt.h> 30 #include <linux/interrupt.h>
31 #include <linux/platform_device.h> 31 #include <linux/platform_device.h>
32 #include <linux/memory.h> 32 #include <linux/memory.h>
33 #include <linux/ioport.h> 33 #include <linux/ioport.h>
34 #include <linux/raid/pq.h> 34 #include <linux/raid/pq.h>
35 #include <linux/slab.h> 35 #include <linux/slab.h>
36 36
37 #include <mach/adma.h> 37 #include <mach/adma.h>
38 38
39 #include "dmaengine.h" 39 #include "dmaengine.h"
40 40
41 #define to_iop_adma_chan(chan) container_of(chan, struct iop_adma_chan, common) 41 #define to_iop_adma_chan(chan) container_of(chan, struct iop_adma_chan, common)
42 #define to_iop_adma_device(dev) \ 42 #define to_iop_adma_device(dev) \
43 container_of(dev, struct iop_adma_device, common) 43 container_of(dev, struct iop_adma_device, common)
44 #define tx_to_iop_adma_slot(tx) \ 44 #define tx_to_iop_adma_slot(tx) \
45 container_of(tx, struct iop_adma_desc_slot, async_tx) 45 container_of(tx, struct iop_adma_desc_slot, async_tx)
46 46
47 /** 47 /**
48 * iop_adma_free_slots - flags descriptor slots for reuse 48 * iop_adma_free_slots - flags descriptor slots for reuse
49 * @slot: Slot to free 49 * @slot: Slot to free
50 * Caller must hold &iop_chan->lock while calling this function 50 * Caller must hold &iop_chan->lock while calling this function
51 */ 51 */
52 static void iop_adma_free_slots(struct iop_adma_desc_slot *slot) 52 static void iop_adma_free_slots(struct iop_adma_desc_slot *slot)
53 { 53 {
54 int stride = slot->slots_per_op; 54 int stride = slot->slots_per_op;
55 55
56 while (stride--) { 56 while (stride--) {
57 slot->slots_per_op = 0; 57 slot->slots_per_op = 0;
58 slot = list_entry(slot->slot_node.next, 58 slot = list_entry(slot->slot_node.next,
59 struct iop_adma_desc_slot, 59 struct iop_adma_desc_slot,
60 slot_node); 60 slot_node);
61 } 61 }
62 } 62 }
63 63
64 static void 64 static void
65 iop_desc_unmap(struct iop_adma_chan *iop_chan, struct iop_adma_desc_slot *desc) 65 iop_desc_unmap(struct iop_adma_chan *iop_chan, struct iop_adma_desc_slot *desc)
66 { 66 {
67 struct dma_async_tx_descriptor *tx = &desc->async_tx; 67 struct dma_async_tx_descriptor *tx = &desc->async_tx;
68 struct iop_adma_desc_slot *unmap = desc->group_head; 68 struct iop_adma_desc_slot *unmap = desc->group_head;
69 struct device *dev = &iop_chan->device->pdev->dev; 69 struct device *dev = &iop_chan->device->pdev->dev;
70 u32 len = unmap->unmap_len; 70 u32 len = unmap->unmap_len;
71 enum dma_ctrl_flags flags = tx->flags; 71 enum dma_ctrl_flags flags = tx->flags;
72 u32 src_cnt; 72 u32 src_cnt;
73 dma_addr_t addr; 73 dma_addr_t addr;
74 dma_addr_t dest; 74 dma_addr_t dest;
75 75
76 src_cnt = unmap->unmap_src_cnt; 76 src_cnt = unmap->unmap_src_cnt;
77 dest = iop_desc_get_dest_addr(unmap, iop_chan); 77 dest = iop_desc_get_dest_addr(unmap, iop_chan);
78 if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) { 78 if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
79 enum dma_data_direction dir; 79 enum dma_data_direction dir;
80 80
81 if (src_cnt > 1) /* is xor? */ 81 if (src_cnt > 1) /* is xor? */
82 dir = DMA_BIDIRECTIONAL; 82 dir = DMA_BIDIRECTIONAL;
83 else 83 else
84 dir = DMA_FROM_DEVICE; 84 dir = DMA_FROM_DEVICE;
85 85
86 dma_unmap_page(dev, dest, len, dir); 86 dma_unmap_page(dev, dest, len, dir);
87 } 87 }
88 88
89 if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) { 89 if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
90 while (src_cnt--) { 90 while (src_cnt--) {
91 addr = iop_desc_get_src_addr(unmap, iop_chan, src_cnt); 91 addr = iop_desc_get_src_addr(unmap, iop_chan, src_cnt);
92 if (addr == dest) 92 if (addr == dest)
93 continue; 93 continue;
94 dma_unmap_page(dev, addr, len, DMA_TO_DEVICE); 94 dma_unmap_page(dev, addr, len, DMA_TO_DEVICE);
95 } 95 }
96 } 96 }
97 desc->group_head = NULL; 97 desc->group_head = NULL;
98 } 98 }
99 99
100 static void 100 static void
101 iop_desc_unmap_pq(struct iop_adma_chan *iop_chan, struct iop_adma_desc_slot *desc) 101 iop_desc_unmap_pq(struct iop_adma_chan *iop_chan, struct iop_adma_desc_slot *desc)
102 { 102 {
103 struct dma_async_tx_descriptor *tx = &desc->async_tx; 103 struct dma_async_tx_descriptor *tx = &desc->async_tx;
104 struct iop_adma_desc_slot *unmap = desc->group_head; 104 struct iop_adma_desc_slot *unmap = desc->group_head;
105 struct device *dev = &iop_chan->device->pdev->dev; 105 struct device *dev = &iop_chan->device->pdev->dev;
106 u32 len = unmap->unmap_len; 106 u32 len = unmap->unmap_len;
107 enum dma_ctrl_flags flags = tx->flags; 107 enum dma_ctrl_flags flags = tx->flags;
108 u32 src_cnt = unmap->unmap_src_cnt; 108 u32 src_cnt = unmap->unmap_src_cnt;
109 dma_addr_t pdest = iop_desc_get_dest_addr(unmap, iop_chan); 109 dma_addr_t pdest = iop_desc_get_dest_addr(unmap, iop_chan);
110 dma_addr_t qdest = iop_desc_get_qdest_addr(unmap, iop_chan); 110 dma_addr_t qdest = iop_desc_get_qdest_addr(unmap, iop_chan);
111 int i; 111 int i;
112 112
113 if (tx->flags & DMA_PREP_CONTINUE) 113 if (tx->flags & DMA_PREP_CONTINUE)
114 src_cnt -= 3; 114 src_cnt -= 3;
115 115
116 if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP) && !desc->pq_check_result) { 116 if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP) && !desc->pq_check_result) {
117 dma_unmap_page(dev, pdest, len, DMA_BIDIRECTIONAL); 117 dma_unmap_page(dev, pdest, len, DMA_BIDIRECTIONAL);
118 dma_unmap_page(dev, qdest, len, DMA_BIDIRECTIONAL); 118 dma_unmap_page(dev, qdest, len, DMA_BIDIRECTIONAL);
119 } 119 }
120 120
121 if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) { 121 if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
122 dma_addr_t addr; 122 dma_addr_t addr;
123 123
124 for (i = 0; i < src_cnt; i++) { 124 for (i = 0; i < src_cnt; i++) {
125 addr = iop_desc_get_src_addr(unmap, iop_chan, i); 125 addr = iop_desc_get_src_addr(unmap, iop_chan, i);
126 dma_unmap_page(dev, addr, len, DMA_TO_DEVICE); 126 dma_unmap_page(dev, addr, len, DMA_TO_DEVICE);
127 } 127 }
128 if (desc->pq_check_result) { 128 if (desc->pq_check_result) {
129 dma_unmap_page(dev, pdest, len, DMA_TO_DEVICE); 129 dma_unmap_page(dev, pdest, len, DMA_TO_DEVICE);
130 dma_unmap_page(dev, qdest, len, DMA_TO_DEVICE); 130 dma_unmap_page(dev, qdest, len, DMA_TO_DEVICE);
131 } 131 }
132 } 132 }
133 133
134 desc->group_head = NULL; 134 desc->group_head = NULL;
135 } 135 }
136 136
137 137
138 static dma_cookie_t 138 static dma_cookie_t
139 iop_adma_run_tx_complete_actions(struct iop_adma_desc_slot *desc, 139 iop_adma_run_tx_complete_actions(struct iop_adma_desc_slot *desc,
140 struct iop_adma_chan *iop_chan, dma_cookie_t cookie) 140 struct iop_adma_chan *iop_chan, dma_cookie_t cookie)
141 { 141 {
142 struct dma_async_tx_descriptor *tx = &desc->async_tx; 142 struct dma_async_tx_descriptor *tx = &desc->async_tx;
143 143
144 BUG_ON(tx->cookie < 0); 144 BUG_ON(tx->cookie < 0);
145 if (tx->cookie > 0) { 145 if (tx->cookie > 0) {
146 cookie = tx->cookie; 146 cookie = tx->cookie;
147 tx->cookie = 0; 147 tx->cookie = 0;
148 148
149 /* call the callback (must not sleep or submit new 149 /* call the callback (must not sleep or submit new
150 * operations to this channel) 150 * operations to this channel)
151 */ 151 */
152 if (tx->callback) 152 if (tx->callback)
153 tx->callback(tx->callback_param); 153 tx->callback(tx->callback_param);
154 154
155 /* unmap dma addresses 155 /* unmap dma addresses
156 * (unmap_single vs unmap_page?) 156 * (unmap_single vs unmap_page?)
157 */ 157 */
158 if (desc->group_head && desc->unmap_len) { 158 if (desc->group_head && desc->unmap_len) {
159 if (iop_desc_is_pq(desc)) 159 if (iop_desc_is_pq(desc))
160 iop_desc_unmap_pq(iop_chan, desc); 160 iop_desc_unmap_pq(iop_chan, desc);
161 else 161 else
162 iop_desc_unmap(iop_chan, desc); 162 iop_desc_unmap(iop_chan, desc);
163 } 163 }
164 } 164 }
165 165
166 /* run dependent operations */ 166 /* run dependent operations */
167 dma_run_dependencies(tx); 167 dma_run_dependencies(tx);
168 168
169 return cookie; 169 return cookie;
170 } 170 }
171 171
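The callback invoked above runs from the channel's cleanup path, so a client has to treat it as atomic context: it must not sleep and must not submit new operations to this channel. A minimal client-side sketch of attaching such a callback before submission (the function names here are hypothetical client code, not part of this driver):

#include <linux/dmaengine.h>
#include <linux/completion.h>

/* Hypothetical client: signal a completion from the (atomic) callback. */
static void example_copy_done(void *arg)
{
	complete(arg);			/* safe in atomic context */
}

static void example_submit(struct dma_chan *chan,
			   struct dma_async_tx_descriptor *tx,
			   struct completion *done)
{
	tx->callback = example_copy_done;
	tx->callback_param = done;
	dmaengine_submit(tx);		/* assigns the cookie */
	dma_async_issue_pending(chan);	/* kick the channel */
}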
172 static int 172 static int
173 iop_adma_clean_slot(struct iop_adma_desc_slot *desc, 173 iop_adma_clean_slot(struct iop_adma_desc_slot *desc,
174 struct iop_adma_chan *iop_chan) 174 struct iop_adma_chan *iop_chan)
175 { 175 {
176 /* the client is allowed to attach dependent operations 176 /* the client is allowed to attach dependent operations
177 * until 'ack' is set 177 * until 'ack' is set
178 */ 178 */
179 if (!async_tx_test_ack(&desc->async_tx)) 179 if (!async_tx_test_ack(&desc->async_tx))
180 return 0; 180 return 0;
181 181
182 /* leave the last descriptor in the chain 182 /* leave the last descriptor in the chain
183 * so we can append to it 183 * so we can append to it
184 */ 184 */
185 if (desc->chain_node.next == &iop_chan->chain) 185 if (desc->chain_node.next == &iop_chan->chain)
186 return 1; 186 return 1;
187 187
188 dev_dbg(iop_chan->device->common.dev, 188 dev_dbg(iop_chan->device->common.dev,
189 "\tfree slot: %d slots_per_op: %d\n", 189 "\tfree slot: %d slots_per_op: %d\n",
190 desc->idx, desc->slots_per_op); 190 desc->idx, desc->slots_per_op);
191 191
192 list_del(&desc->chain_node); 192 list_del(&desc->chain_node);
193 iop_adma_free_slots(desc); 193 iop_adma_free_slots(desc);
194 194
195 return 0; 195 return 0;
196 } 196 }
197 197
198 static void __iop_adma_slot_cleanup(struct iop_adma_chan *iop_chan) 198 static void __iop_adma_slot_cleanup(struct iop_adma_chan *iop_chan)
199 { 199 {
200 struct iop_adma_desc_slot *iter, *_iter, *grp_start = NULL; 200 struct iop_adma_desc_slot *iter, *_iter, *grp_start = NULL;
201 dma_cookie_t cookie = 0; 201 dma_cookie_t cookie = 0;
202 u32 current_desc = iop_chan_get_current_descriptor(iop_chan); 202 u32 current_desc = iop_chan_get_current_descriptor(iop_chan);
203 int busy = iop_chan_is_busy(iop_chan); 203 int busy = iop_chan_is_busy(iop_chan);
204 int seen_current = 0, slot_cnt = 0, slots_per_op = 0; 204 int seen_current = 0, slot_cnt = 0, slots_per_op = 0;
205 205
206 dev_dbg(iop_chan->device->common.dev, "%s\n", __func__); 206 dev_dbg(iop_chan->device->common.dev, "%s\n", __func__);
207 /* free completed slots from the chain starting with 207 /* free completed slots from the chain starting with
208 * the oldest descriptor 208 * the oldest descriptor
209 */ 209 */
210 list_for_each_entry_safe(iter, _iter, &iop_chan->chain, 210 list_for_each_entry_safe(iter, _iter, &iop_chan->chain,
211 chain_node) { 211 chain_node) {
212 pr_debug("\tcookie: %d slot: %d busy: %d " 212 pr_debug("\tcookie: %d slot: %d busy: %d "
213 "this_desc: %#x next_desc: %#x ack: %d\n", 213 "this_desc: %#x next_desc: %#x ack: %d\n",
214 iter->async_tx.cookie, iter->idx, busy, 214 iter->async_tx.cookie, iter->idx, busy,
215 iter->async_tx.phys, iop_desc_get_next_desc(iter), 215 iter->async_tx.phys, iop_desc_get_next_desc(iter),
216 async_tx_test_ack(&iter->async_tx)); 216 async_tx_test_ack(&iter->async_tx));
217 prefetch(_iter); 217 prefetch(_iter);
218 prefetch(&_iter->async_tx); 218 prefetch(&_iter->async_tx);
219 219
220 /* do not advance past the current descriptor loaded into the 220 /* do not advance past the current descriptor loaded into the
221 * hardware channel, subsequent descriptors are either in 221 * hardware channel, subsequent descriptors are either in
222 * process or have not been submitted 222 * process or have not been submitted
223 */ 223 */
224 if (seen_current) 224 if (seen_current)
225 break; 225 break;
226 226
227 /* stop the search if we reach the current descriptor and the 227 /* stop the search if we reach the current descriptor and the
228 * channel is busy, or if it appears that the current descriptor 228 * channel is busy, or if it appears that the current descriptor
229 * needs to be re-read (i.e. has been appended to) 229 * needs to be re-read (i.e. has been appended to)
230 */ 230 */
231 if (iter->async_tx.phys == current_desc) { 231 if (iter->async_tx.phys == current_desc) {
232 BUG_ON(seen_current++); 232 BUG_ON(seen_current++);
233 if (busy || iop_desc_get_next_desc(iter)) 233 if (busy || iop_desc_get_next_desc(iter))
234 break; 234 break;
235 } 235 }
236 236
237 /* detect the start of a group transaction */ 237 /* detect the start of a group transaction */
238 if (!slot_cnt && !slots_per_op) { 238 if (!slot_cnt && !slots_per_op) {
239 slot_cnt = iter->slot_cnt; 239 slot_cnt = iter->slot_cnt;
240 slots_per_op = iter->slots_per_op; 240 slots_per_op = iter->slots_per_op;
241 if (slot_cnt <= slots_per_op) { 241 if (slot_cnt <= slots_per_op) {
242 slot_cnt = 0; 242 slot_cnt = 0;
243 slots_per_op = 0; 243 slots_per_op = 0;
244 } 244 }
245 } 245 }
246 246
247 if (slot_cnt) { 247 if (slot_cnt) {
248 pr_debug("\tgroup++\n"); 248 pr_debug("\tgroup++\n");
249 if (!grp_start) 249 if (!grp_start)
250 grp_start = iter; 250 grp_start = iter;
251 slot_cnt -= slots_per_op; 251 slot_cnt -= slots_per_op;
252 } 252 }
253 253
254 /* all the members of a group are complete */ 254 /* all the members of a group are complete */
255 if (slots_per_op != 0 && slot_cnt == 0) { 255 if (slots_per_op != 0 && slot_cnt == 0) {
256 struct iop_adma_desc_slot *grp_iter, *_grp_iter; 256 struct iop_adma_desc_slot *grp_iter, *_grp_iter;
257 int end_of_chain = 0; 257 int end_of_chain = 0;
258 pr_debug("\tgroup end\n"); 258 pr_debug("\tgroup end\n");
259 259
260 /* collect the total results */ 260 /* collect the total results */
261 if (grp_start->xor_check_result) { 261 if (grp_start->xor_check_result) {
262 u32 zero_sum_result = 0; 262 u32 zero_sum_result = 0;
263 slot_cnt = grp_start->slot_cnt; 263 slot_cnt = grp_start->slot_cnt;
264 grp_iter = grp_start; 264 grp_iter = grp_start;
265 265
266 list_for_each_entry_from(grp_iter, 266 list_for_each_entry_from(grp_iter,
267 &iop_chan->chain, chain_node) { 267 &iop_chan->chain, chain_node) {
268 zero_sum_result |= 268 zero_sum_result |=
269 iop_desc_get_zero_result(grp_iter); 269 iop_desc_get_zero_result(grp_iter);
270 pr_debug("\titer%d result: %d\n", 270 pr_debug("\titer%d result: %d\n",
271 grp_iter->idx, zero_sum_result); 271 grp_iter->idx, zero_sum_result);
272 slot_cnt -= slots_per_op; 272 slot_cnt -= slots_per_op;
273 if (slot_cnt == 0) 273 if (slot_cnt == 0)
274 break; 274 break;
275 } 275 }
276 pr_debug("\tgrp_start->xor_check_result: %p\n", 276 pr_debug("\tgrp_start->xor_check_result: %p\n",
277 grp_start->xor_check_result); 277 grp_start->xor_check_result);
278 *grp_start->xor_check_result = zero_sum_result; 278 *grp_start->xor_check_result = zero_sum_result;
279 } 279 }
280 280
281 /* clean up the group */ 281 /* clean up the group */
282 slot_cnt = grp_start->slot_cnt; 282 slot_cnt = grp_start->slot_cnt;
283 grp_iter = grp_start; 283 grp_iter = grp_start;
284 list_for_each_entry_safe_from(grp_iter, _grp_iter, 284 list_for_each_entry_safe_from(grp_iter, _grp_iter,
285 &iop_chan->chain, chain_node) { 285 &iop_chan->chain, chain_node) {
286 cookie = iop_adma_run_tx_complete_actions( 286 cookie = iop_adma_run_tx_complete_actions(
287 grp_iter, iop_chan, cookie); 287 grp_iter, iop_chan, cookie);
288 288
289 slot_cnt -= slots_per_op; 289 slot_cnt -= slots_per_op;
290 end_of_chain = iop_adma_clean_slot(grp_iter, 290 end_of_chain = iop_adma_clean_slot(grp_iter,
291 iop_chan); 291 iop_chan);
292 292
293 if (slot_cnt == 0 || end_of_chain) 293 if (slot_cnt == 0 || end_of_chain)
294 break; 294 break;
295 } 295 }
296 296
297 /* the group should be complete at this point */ 297 /* the group should be complete at this point */
298 BUG_ON(slot_cnt); 298 BUG_ON(slot_cnt);
299 299
300 slots_per_op = 0; 300 slots_per_op = 0;
301 grp_start = NULL; 301 grp_start = NULL;
302 if (end_of_chain) 302 if (end_of_chain)
303 break; 303 break;
304 else 304 else
305 continue; 305 continue;
306 } else if (slots_per_op) /* wait for group completion */ 306 } else if (slots_per_op) /* wait for group completion */
307 continue; 307 continue;
308 308
309 /* write back zero sum results (single descriptor case) */ 309 /* write back zero sum results (single descriptor case) */
310 if (iter->xor_check_result && iter->async_tx.cookie) 310 if (iter->xor_check_result && iter->async_tx.cookie)
311 *iter->xor_check_result = 311 *iter->xor_check_result =
312 iop_desc_get_zero_result(iter); 312 iop_desc_get_zero_result(iter);
313 313
314 cookie = iop_adma_run_tx_complete_actions( 314 cookie = iop_adma_run_tx_complete_actions(
315 iter, iop_chan, cookie); 315 iter, iop_chan, cookie);
316 316
317 if (iop_adma_clean_slot(iter, iop_chan)) 317 if (iop_adma_clean_slot(iter, iop_chan))
318 break; 318 break;
319 } 319 }
320 320
321 if (cookie > 0) { 321 if (cookie > 0) {
322 iop_chan->common.completed_cookie = cookie; 322 iop_chan->common.completed_cookie = cookie;
323 pr_debug("\tcompleted cookie %d\n", cookie); 323 pr_debug("\tcompleted cookie %d\n", cookie);
324 } 324 }
325 } 325 }
326 326
327 static void 327 static void
328 iop_adma_slot_cleanup(struct iop_adma_chan *iop_chan) 328 iop_adma_slot_cleanup(struct iop_adma_chan *iop_chan)
329 { 329 {
330 spin_lock_bh(&iop_chan->lock); 330 spin_lock_bh(&iop_chan->lock);
331 __iop_adma_slot_cleanup(iop_chan); 331 __iop_adma_slot_cleanup(iop_chan);
332 spin_unlock_bh(&iop_chan->lock); 332 spin_unlock_bh(&iop_chan->lock);
333 } 333 }
334 334
335 static void iop_adma_tasklet(unsigned long data) 335 static void iop_adma_tasklet(unsigned long data)
336 { 336 {
337 struct iop_adma_chan *iop_chan = (struct iop_adma_chan *) data; 337 struct iop_adma_chan *iop_chan = (struct iop_adma_chan *) data;
338 338
339 /* lockdep will flag dependency submissions as potentially 339 /* lockdep will flag dependency submissions as potentially
340 * recursive locking; this is not the case, as a dependency 340 * recursive locking; this is not the case, as a dependency
341 * submission will never recurse into a channel's submit routine. 341 * submission will never recurse into a channel's submit routine.
342 * There are checks in async_tx.c to prevent this. 342 * There are checks in async_tx.c to prevent this.
343 */ 343 */
344 spin_lock_nested(&iop_chan->lock, SINGLE_DEPTH_NESTING); 344 spin_lock_nested(&iop_chan->lock, SINGLE_DEPTH_NESTING);
345 __iop_adma_slot_cleanup(iop_chan); 345 __iop_adma_slot_cleanup(iop_chan);
346 spin_unlock(&iop_chan->lock); 346 spin_unlock(&iop_chan->lock);
347 } 347 }
348 348
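The SINGLE_DEPTH_NESTING annotation above changes only what lockdep is told, not the locking itself: it marks the acquisition as an intentional one-level nesting within the same lock class, so a dependency submission that takes another channel's lock of the same class is not reported as recursive locking. A generic, hedged illustration of the annotation (the locks and the helper are placeholders):

#include <linux/spinlock.h>

/* Illustration only: 'outer' and 'inner' are two locks of the same class,
 * e.g. two channels of one driver. Annotating the inner acquisition keeps
 * lockdep from flagging a false recursive-lock report.
 */
static void example_nested_lock(spinlock_t *outer, spinlock_t *inner)
{
	spin_lock(outer);
	spin_lock_nested(inner, SINGLE_DEPTH_NESTING);
	/* ... work touching both channels ... */
	spin_unlock(inner);
	spin_unlock(outer);
}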
349 static struct iop_adma_desc_slot * 349 static struct iop_adma_desc_slot *
350 iop_adma_alloc_slots(struct iop_adma_chan *iop_chan, int num_slots, 350 iop_adma_alloc_slots(struct iop_adma_chan *iop_chan, int num_slots,
351 int slots_per_op) 351 int slots_per_op)
352 { 352 {
353 struct iop_adma_desc_slot *iter, *_iter, *alloc_start = NULL; 353 struct iop_adma_desc_slot *iter, *_iter, *alloc_start = NULL;
354 LIST_HEAD(chain); 354 LIST_HEAD(chain);
355 int slots_found, retry = 0; 355 int slots_found, retry = 0;
356 356
357 /* start the search from the last allocated descriptor; 357 /* start the search from the last allocated descriptor;
358 * if a contiguous allocation cannot be found, start searching 358 * if a contiguous allocation cannot be found, start searching
359 * from the beginning of the list 359 * from the beginning of the list
360 */ 360 */
361 retry: 361 retry:
362 slots_found = 0; 362 slots_found = 0;
363 if (retry == 0) 363 if (retry == 0)
364 iter = iop_chan->last_used; 364 iter = iop_chan->last_used;
365 else 365 else
366 iter = list_entry(&iop_chan->all_slots, 366 iter = list_entry(&iop_chan->all_slots,
367 struct iop_adma_desc_slot, 367 struct iop_adma_desc_slot,
368 slot_node); 368 slot_node);
369 369
370 list_for_each_entry_safe_continue( 370 list_for_each_entry_safe_continue(
371 iter, _iter, &iop_chan->all_slots, slot_node) { 371 iter, _iter, &iop_chan->all_slots, slot_node) {
372 prefetch(_iter); 372 prefetch(_iter);
373 prefetch(&_iter->async_tx); 373 prefetch(&_iter->async_tx);
374 if (iter->slots_per_op) { 374 if (iter->slots_per_op) {
375 /* give up after finding the first busy slot 375 /* give up after finding the first busy slot
376 * on the second pass through the list 376 * on the second pass through the list
377 */ 377 */
378 if (retry) 378 if (retry)
379 break; 379 break;
380 380
381 slots_found = 0; 381 slots_found = 0;
382 continue; 382 continue;
383 } 383 }
384 384
385 /* start the allocation if the slot is correctly aligned */ 385 /* start the allocation if the slot is correctly aligned */
386 if (!slots_found++) { 386 if (!slots_found++) {
387 if (iop_desc_is_aligned(iter, slots_per_op)) 387 if (iop_desc_is_aligned(iter, slots_per_op))
388 alloc_start = iter; 388 alloc_start = iter;
389 else { 389 else {
390 slots_found = 0; 390 slots_found = 0;
391 continue; 391 continue;
392 } 392 }
393 } 393 }
394 394
395 if (slots_found == num_slots) { 395 if (slots_found == num_slots) {
396 struct iop_adma_desc_slot *alloc_tail = NULL; 396 struct iop_adma_desc_slot *alloc_tail = NULL;
397 struct iop_adma_desc_slot *last_used = NULL; 397 struct iop_adma_desc_slot *last_used = NULL;
398 iter = alloc_start; 398 iter = alloc_start;
399 while (num_slots) { 399 while (num_slots) {
400 int i; 400 int i;
401 dev_dbg(iop_chan->device->common.dev, 401 dev_dbg(iop_chan->device->common.dev,
402 "allocated slot: %d " 402 "allocated slot: %d "
403 "(desc %p phys: %#x) slots_per_op %d\n", 403 "(desc %p phys: %#x) slots_per_op %d\n",
404 iter->idx, iter->hw_desc, 404 iter->idx, iter->hw_desc,
405 iter->async_tx.phys, slots_per_op); 405 iter->async_tx.phys, slots_per_op);
406 406
407 /* pre-ack all but the last descriptor */ 407 /* pre-ack all but the last descriptor */
408 if (num_slots != slots_per_op) 408 if (num_slots != slots_per_op)
409 async_tx_ack(&iter->async_tx); 409 async_tx_ack(&iter->async_tx);
410 410
411 list_add_tail(&iter->chain_node, &chain); 411 list_add_tail(&iter->chain_node, &chain);
412 alloc_tail = iter; 412 alloc_tail = iter;
413 iter->async_tx.cookie = 0; 413 iter->async_tx.cookie = 0;
414 iter->slot_cnt = num_slots; 414 iter->slot_cnt = num_slots;
415 iter->xor_check_result = NULL; 415 iter->xor_check_result = NULL;
416 for (i = 0; i < slots_per_op; i++) { 416 for (i = 0; i < slots_per_op; i++) {
417 iter->slots_per_op = slots_per_op - i; 417 iter->slots_per_op = slots_per_op - i;
418 last_used = iter; 418 last_used = iter;
419 iter = list_entry(iter->slot_node.next, 419 iter = list_entry(iter->slot_node.next,
420 struct iop_adma_desc_slot, 420 struct iop_adma_desc_slot,
421 slot_node); 421 slot_node);
422 } 422 }
423 num_slots -= slots_per_op; 423 num_slots -= slots_per_op;
424 } 424 }
425 alloc_tail->group_head = alloc_start; 425 alloc_tail->group_head = alloc_start;
426 alloc_tail->async_tx.cookie = -EBUSY; 426 alloc_tail->async_tx.cookie = -EBUSY;
427 list_splice(&chain, &alloc_tail->tx_list); 427 list_splice(&chain, &alloc_tail->tx_list);
428 iop_chan->last_used = last_used; 428 iop_chan->last_used = last_used;
429 iop_desc_clear_next_desc(alloc_start); 429 iop_desc_clear_next_desc(alloc_start);
430 iop_desc_clear_next_desc(alloc_tail); 430 iop_desc_clear_next_desc(alloc_tail);
431 return alloc_tail; 431 return alloc_tail;
432 } 432 }
433 } 433 }
434 if (!retry++) 434 if (!retry++)
435 goto retry; 435 goto retry;
436 436
437 /* perform direct reclaim if the allocation fails */ 437 /* perform direct reclaim if the allocation fails */
438 __iop_adma_slot_cleanup(iop_chan); 438 __iop_adma_slot_cleanup(iop_chan);
439 439
440 return NULL; 440 return NULL;
441 } 441 }
442 442
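The pre-acking above uses the DMA_CTRL_ACK convention seen throughout this file: iop_adma_clean_slot() will only recycle a descriptor once it has been acked, which is how a client keeps a descriptor alive while it still wants to attach dependent operations. A small sketch of that handshake from the client side (hypothetical helper, real dmaengine accessors):

#include <linux/dmaengine.h>

/* Hypothetical client: hold 'tx' un-acked while chaining dependents,
 * then ack it so the driver's cleanup may reuse the slot.
 */
static void example_chain_then_release(struct dma_async_tx_descriptor *tx)
{
	/* ... submit operations that depend on 'tx' ... */

	async_tx_ack(tx);			/* sets DMA_CTRL_ACK */
	WARN_ON(!async_tx_test_ack(tx));	/* now reclaimable by cleanup */
}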
443 static void iop_adma_check_threshold(struct iop_adma_chan *iop_chan) 443 static void iop_adma_check_threshold(struct iop_adma_chan *iop_chan)
444 { 444 {
445 dev_dbg(iop_chan->device->common.dev, "pending: %d\n", 445 dev_dbg(iop_chan->device->common.dev, "pending: %d\n",
446 iop_chan->pending); 446 iop_chan->pending);
447 447
448 if (iop_chan->pending >= IOP_ADMA_THRESHOLD) { 448 if (iop_chan->pending >= IOP_ADMA_THRESHOLD) {
449 iop_chan->pending = 0; 449 iop_chan->pending = 0;
450 iop_chan_append(iop_chan); 450 iop_chan_append(iop_chan);
451 } 451 }
452 } 452 }
453 453
454 static dma_cookie_t 454 static dma_cookie_t
455 iop_adma_tx_submit(struct dma_async_tx_descriptor *tx) 455 iop_adma_tx_submit(struct dma_async_tx_descriptor *tx)
456 { 456 {
457 struct iop_adma_desc_slot *sw_desc = tx_to_iop_adma_slot(tx); 457 struct iop_adma_desc_slot *sw_desc = tx_to_iop_adma_slot(tx);
458 struct iop_adma_chan *iop_chan = to_iop_adma_chan(tx->chan); 458 struct iop_adma_chan *iop_chan = to_iop_adma_chan(tx->chan);
459 struct iop_adma_desc_slot *grp_start, *old_chain_tail; 459 struct iop_adma_desc_slot *grp_start, *old_chain_tail;
460 int slot_cnt; 460 int slot_cnt;
461 int slots_per_op; 461 int slots_per_op;
462 dma_cookie_t cookie; 462 dma_cookie_t cookie;
463 dma_addr_t next_dma; 463 dma_addr_t next_dma;
464 464
465 grp_start = sw_desc->group_head; 465 grp_start = sw_desc->group_head;
466 slot_cnt = grp_start->slot_cnt; 466 slot_cnt = grp_start->slot_cnt;
467 slots_per_op = grp_start->slots_per_op; 467 slots_per_op = grp_start->slots_per_op;
468 468
469 spin_lock_bh(&iop_chan->lock); 469 spin_lock_bh(&iop_chan->lock);
470 cookie = dma_cookie_assign(tx); 470 cookie = dma_cookie_assign(tx);
471 471
472 old_chain_tail = list_entry(iop_chan->chain.prev, 472 old_chain_tail = list_entry(iop_chan->chain.prev,
473 struct iop_adma_desc_slot, chain_node); 473 struct iop_adma_desc_slot, chain_node);
474 list_splice_init(&sw_desc->tx_list, 474 list_splice_init(&sw_desc->tx_list,
475 &old_chain_tail->chain_node); 475 &old_chain_tail->chain_node);
476 476
477 /* fix up the hardware chain */ 477 /* fix up the hardware chain */
478 next_dma = grp_start->async_tx.phys; 478 next_dma = grp_start->async_tx.phys;
479 iop_desc_set_next_desc(old_chain_tail, next_dma); 479 iop_desc_set_next_desc(old_chain_tail, next_dma);
480 BUG_ON(iop_desc_get_next_desc(old_chain_tail) != next_dma); /* flush */ 480 BUG_ON(iop_desc_get_next_desc(old_chain_tail) != next_dma); /* flush */
481 481
482 /* check for pre-chained descriptors */ 482 /* check for pre-chained descriptors */
483 iop_paranoia(iop_desc_get_next_desc(sw_desc)); 483 iop_paranoia(iop_desc_get_next_desc(sw_desc));
484 484
485 /* increment the pending count by the number of slots; 485 /* increment the pending count by the number of slots;
486 * memcpy operations have a 1:1 (slot:operation) relation, 486 * memcpy operations have a 1:1 (slot:operation) relation,
487 * while other operations are heavier and will pop the threshold 487 * while other operations are heavier and will pop the threshold
488 * more often. 488 * more often.
489 */ 489 */
490 iop_chan->pending += slot_cnt; 490 iop_chan->pending += slot_cnt;
491 iop_adma_check_threshold(iop_chan); 491 iop_adma_check_threshold(iop_chan);
492 spin_unlock_bh(&iop_chan->lock); 492 spin_unlock_bh(&iop_chan->lock);
493 493
494 dev_dbg(iop_chan->device->common.dev, "%s cookie: %d slot: %d\n", 494 dev_dbg(iop_chan->device->common.dev, "%s cookie: %d slot: %d\n",
495 __func__, sw_desc->async_tx.cookie, sw_desc->idx); 495 __func__, sw_desc->async_tx.cookie, sw_desc->idx);
496 496
497 return cookie; 497 return cookie;
498 } 498 }
499 499
500 static void iop_chan_start_null_memcpy(struct iop_adma_chan *iop_chan); 500 static void iop_chan_start_null_memcpy(struct iop_adma_chan *iop_chan);
501 static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan); 501 static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan);
502 502
503 /** 503 /**
504 * iop_adma_alloc_chan_resources - returns the number of allocated descriptors 504 * iop_adma_alloc_chan_resources - returns the number of allocated descriptors
505 * @chan - allocate descriptor resources for this channel 505 * @chan - allocate descriptor resources for this channel
506 * @client - current client requesting the channel be ready for requests 506 * @client - current client requesting the channel be ready for requests
507 * 507 *
508 * Note: We keep the slots for 1 operation on iop_chan->chain at all times. To 508 * Note: We keep the slots for 1 operation on iop_chan->chain at all times. To
509 * avoid deadlock via async_xor, num_descs_in_pool must, at a minimum, be 509 * avoid deadlock via async_xor, num_descs_in_pool must, at a minimum, be
510 * greater than 2x the number of slots needed to satisfy a device->max_xor 510 * greater than 2x the number of slots needed to satisfy a device->max_xor
511 * request. 511 * request.
512 * */ 512 * */
513 static int iop_adma_alloc_chan_resources(struct dma_chan *chan) 513 static int iop_adma_alloc_chan_resources(struct dma_chan *chan)
514 { 514 {
515 char *hw_desc; 515 char *hw_desc;
516 int idx; 516 int idx;
517 struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan); 517 struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
518 struct iop_adma_desc_slot *slot = NULL; 518 struct iop_adma_desc_slot *slot = NULL;
519 int init = iop_chan->slots_allocated ? 0 : 1; 519 int init = iop_chan->slots_allocated ? 0 : 1;
520 struct iop_adma_platform_data *plat_data = 520 struct iop_adma_platform_data *plat_data =
521 dev_get_platdata(&iop_chan->device->pdev->dev); 521 dev_get_platdata(&iop_chan->device->pdev->dev);
522 int num_descs_in_pool = plat_data->pool_size/IOP_ADMA_SLOT_SIZE; 522 int num_descs_in_pool = plat_data->pool_size/IOP_ADMA_SLOT_SIZE;
523 523
524 /* Allocate descriptor slots */ 524 /* Allocate descriptor slots */
525 do { 525 do {
526 idx = iop_chan->slots_allocated; 526 idx = iop_chan->slots_allocated;
527 if (idx == num_descs_in_pool) 527 if (idx == num_descs_in_pool)
528 break; 528 break;
529 529
530 slot = kzalloc(sizeof(*slot), GFP_KERNEL); 530 slot = kzalloc(sizeof(*slot), GFP_KERNEL);
531 if (!slot) { 531 if (!slot) {
532 printk(KERN_INFO "IOP ADMA Channel only initialized" 532 printk(KERN_INFO "IOP ADMA Channel only initialized"
533 " %d descriptor slots", idx); 533 " %d descriptor slots", idx);
534 break; 534 break;
535 } 535 }
536 hw_desc = (char *) iop_chan->device->dma_desc_pool_virt; 536 hw_desc = (char *) iop_chan->device->dma_desc_pool_virt;
537 slot->hw_desc = (void *) &hw_desc[idx * IOP_ADMA_SLOT_SIZE]; 537 slot->hw_desc = (void *) &hw_desc[idx * IOP_ADMA_SLOT_SIZE];
538 538
539 dma_async_tx_descriptor_init(&slot->async_tx, chan); 539 dma_async_tx_descriptor_init(&slot->async_tx, chan);
540 slot->async_tx.tx_submit = iop_adma_tx_submit; 540 slot->async_tx.tx_submit = iop_adma_tx_submit;
541 INIT_LIST_HEAD(&slot->tx_list); 541 INIT_LIST_HEAD(&slot->tx_list);
542 INIT_LIST_HEAD(&slot->chain_node); 542 INIT_LIST_HEAD(&slot->chain_node);
543 INIT_LIST_HEAD(&slot->slot_node); 543 INIT_LIST_HEAD(&slot->slot_node);
544 hw_desc = (char *) iop_chan->device->dma_desc_pool; 544 hw_desc = (char *) iop_chan->device->dma_desc_pool;
545 slot->async_tx.phys = 545 slot->async_tx.phys =
546 (dma_addr_t) &hw_desc[idx * IOP_ADMA_SLOT_SIZE]; 546 (dma_addr_t) &hw_desc[idx * IOP_ADMA_SLOT_SIZE];
547 slot->idx = idx; 547 slot->idx = idx;
548 548
549 spin_lock_bh(&iop_chan->lock); 549 spin_lock_bh(&iop_chan->lock);
550 iop_chan->slots_allocated++; 550 iop_chan->slots_allocated++;
551 list_add_tail(&slot->slot_node, &iop_chan->all_slots); 551 list_add_tail(&slot->slot_node, &iop_chan->all_slots);
552 spin_unlock_bh(&iop_chan->lock); 552 spin_unlock_bh(&iop_chan->lock);
553 } while (iop_chan->slots_allocated < num_descs_in_pool); 553 } while (iop_chan->slots_allocated < num_descs_in_pool);
554 554
555 if (idx && !iop_chan->last_used) 555 if (idx && !iop_chan->last_used)
556 iop_chan->last_used = list_entry(iop_chan->all_slots.next, 556 iop_chan->last_used = list_entry(iop_chan->all_slots.next,
557 struct iop_adma_desc_slot, 557 struct iop_adma_desc_slot,
558 slot_node); 558 slot_node);
559 559
560 dev_dbg(iop_chan->device->common.dev, 560 dev_dbg(iop_chan->device->common.dev,
561 "allocated %d descriptor slots last_used: %p\n", 561 "allocated %d descriptor slots last_used: %p\n",
562 iop_chan->slots_allocated, iop_chan->last_used); 562 iop_chan->slots_allocated, iop_chan->last_used);
563 563
564 /* initialize the channel and the chain with a null operation */ 564 /* initialize the channel and the chain with a null operation */
565 if (init) { 565 if (init) {
566 if (dma_has_cap(DMA_MEMCPY, 566 if (dma_has_cap(DMA_MEMCPY,
567 iop_chan->device->common.cap_mask)) 567 iop_chan->device->common.cap_mask))
568 iop_chan_start_null_memcpy(iop_chan); 568 iop_chan_start_null_memcpy(iop_chan);
569 else if (dma_has_cap(DMA_XOR, 569 else if (dma_has_cap(DMA_XOR,
570 iop_chan->device->common.cap_mask)) 570 iop_chan->device->common.cap_mask))
571 iop_chan_start_null_xor(iop_chan); 571 iop_chan_start_null_xor(iop_chan);
572 else 572 else
573 BUG(); 573 BUG();
574 } 574 }
575 575
576 return (idx > 0) ? idx : -ENOMEM; 576 return (idx > 0) ? idx : -ENOMEM;
577 } 577 }
578 578
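To make the sizing note in the function's kernel-doc concrete: num_descs_in_pool is plat_data->pool_size / IOP_ADMA_SLOT_SIZE, and it must exceed twice the slot count of a maximal XOR request or async_xor can deadlock waiting for slots. A hedged sketch of that check, where EXAMPLE_MAX_XOR_SLOTS is an assumed figure for illustration rather than a value taken from the hardware headers:

#include <linux/types.h>

/* Illustrative sizing check under an assumed maximal-XOR slot count. */
#define EXAMPLE_MAX_XOR_SLOTS	4	/* assumption for illustration */

static bool example_pool_size_is_safe(size_t pool_size)
{
	size_t num_descs = pool_size / IOP_ADMA_SLOT_SIZE;

	return num_descs > 2 * EXAMPLE_MAX_XOR_SLOTS;
}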
579 static struct dma_async_tx_descriptor * 579 static struct dma_async_tx_descriptor *
580 iop_adma_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags) 580 iop_adma_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags)
581 { 581 {
582 struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan); 582 struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
583 struct iop_adma_desc_slot *sw_desc, *grp_start; 583 struct iop_adma_desc_slot *sw_desc, *grp_start;
584 int slot_cnt, slots_per_op; 584 int slot_cnt, slots_per_op;
585 585
586 dev_dbg(iop_chan->device->common.dev, "%s\n", __func__); 586 dev_dbg(iop_chan->device->common.dev, "%s\n", __func__);
587 587
588 spin_lock_bh(&iop_chan->lock); 588 spin_lock_bh(&iop_chan->lock);
589 slot_cnt = iop_chan_interrupt_slot_count(&slots_per_op, iop_chan); 589 slot_cnt = iop_chan_interrupt_slot_count(&slots_per_op, iop_chan);
590 sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op); 590 sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
591 if (sw_desc) { 591 if (sw_desc) {
592 grp_start = sw_desc->group_head; 592 grp_start = sw_desc->group_head;
593 iop_desc_init_interrupt(grp_start, iop_chan); 593 iop_desc_init_interrupt(grp_start, iop_chan);
594 grp_start->unmap_len = 0; 594 grp_start->unmap_len = 0;
595 sw_desc->async_tx.flags = flags; 595 sw_desc->async_tx.flags = flags;
596 } 596 }
597 spin_unlock_bh(&iop_chan->lock); 597 spin_unlock_bh(&iop_chan->lock);
598 598
599 return sw_desc ? &sw_desc->async_tx : NULL; 599 return sw_desc ? &sw_desc->async_tx : NULL;
600 } 600 }
601 601
602 static struct dma_async_tx_descriptor * 602 static struct dma_async_tx_descriptor *
603 iop_adma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest, 603 iop_adma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
604 dma_addr_t dma_src, size_t len, unsigned long flags) 604 dma_addr_t dma_src, size_t len, unsigned long flags)
605 { 605 {
606 struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan); 606 struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
607 struct iop_adma_desc_slot *sw_desc, *grp_start; 607 struct iop_adma_desc_slot *sw_desc, *grp_start;
608 int slot_cnt, slots_per_op; 608 int slot_cnt, slots_per_op;
609 609
610 if (unlikely(!len)) 610 if (unlikely(!len))
611 return NULL; 611 return NULL;
612 BUG_ON(len > IOP_ADMA_MAX_BYTE_COUNT); 612 BUG_ON(len > IOP_ADMA_MAX_BYTE_COUNT);
613 613
614 dev_dbg(iop_chan->device->common.dev, "%s len: %u\n", 614 dev_dbg(iop_chan->device->common.dev, "%s len: %u\n",
615 __func__, len); 615 __func__, len);
616 616
617 spin_lock_bh(&iop_chan->lock); 617 spin_lock_bh(&iop_chan->lock);
618 slot_cnt = iop_chan_memcpy_slot_count(len, &slots_per_op); 618 slot_cnt = iop_chan_memcpy_slot_count(len, &slots_per_op);
619 sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op); 619 sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
620 if (sw_desc) { 620 if (sw_desc) {
621 grp_start = sw_desc->group_head; 621 grp_start = sw_desc->group_head;
622 iop_desc_init_memcpy(grp_start, flags); 622 iop_desc_init_memcpy(grp_start, flags);
623 iop_desc_set_byte_count(grp_start, iop_chan, len); 623 iop_desc_set_byte_count(grp_start, iop_chan, len);
624 iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest); 624 iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest);
625 iop_desc_set_memcpy_src_addr(grp_start, dma_src); 625 iop_desc_set_memcpy_src_addr(grp_start, dma_src);
626 sw_desc->unmap_src_cnt = 1; 626 sw_desc->unmap_src_cnt = 1;
627 sw_desc->unmap_len = len; 627 sw_desc->unmap_len = len;
628 sw_desc->async_tx.flags = flags; 628 sw_desc->async_tx.flags = flags;
629 } 629 }
630 spin_unlock_bh(&iop_chan->lock); 630 spin_unlock_bh(&iop_chan->lock);
631 631
632 return sw_desc ? &sw_desc->async_tx : NULL; 632 return sw_desc ? &sw_desc->async_tx : NULL;
633 } 633 }
634 634
635 static struct dma_async_tx_descriptor * 635 static struct dma_async_tx_descriptor *
636 iop_adma_prep_dma_xor(struct dma_chan *chan, dma_addr_t dma_dest, 636 iop_adma_prep_dma_xor(struct dma_chan *chan, dma_addr_t dma_dest,
637 dma_addr_t *dma_src, unsigned int src_cnt, size_t len, 637 dma_addr_t *dma_src, unsigned int src_cnt, size_t len,
638 unsigned long flags) 638 unsigned long flags)
639 { 639 {
640 struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan); 640 struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
641 struct iop_adma_desc_slot *sw_desc, *grp_start; 641 struct iop_adma_desc_slot *sw_desc, *grp_start;
642 int slot_cnt, slots_per_op; 642 int slot_cnt, slots_per_op;
643 643
644 if (unlikely(!len)) 644 if (unlikely(!len))
645 return NULL; 645 return NULL;
646 BUG_ON(len > IOP_ADMA_XOR_MAX_BYTE_COUNT); 646 BUG_ON(len > IOP_ADMA_XOR_MAX_BYTE_COUNT);
647 647
648 dev_dbg(iop_chan->device->common.dev, 648 dev_dbg(iop_chan->device->common.dev,
649 "%s src_cnt: %d len: %u flags: %lx\n", 649 "%s src_cnt: %d len: %u flags: %lx\n",
650 __func__, src_cnt, len, flags); 650 __func__, src_cnt, len, flags);
651 651
652 spin_lock_bh(&iop_chan->lock); 652 spin_lock_bh(&iop_chan->lock);
653 slot_cnt = iop_chan_xor_slot_count(len, src_cnt, &slots_per_op); 653 slot_cnt = iop_chan_xor_slot_count(len, src_cnt, &slots_per_op);
654 sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op); 654 sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
655 if (sw_desc) { 655 if (sw_desc) {
656 grp_start = sw_desc->group_head; 656 grp_start = sw_desc->group_head;
657 iop_desc_init_xor(grp_start, src_cnt, flags); 657 iop_desc_init_xor(grp_start, src_cnt, flags);
658 iop_desc_set_byte_count(grp_start, iop_chan, len); 658 iop_desc_set_byte_count(grp_start, iop_chan, len);
659 iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest); 659 iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest);
660 sw_desc->unmap_src_cnt = src_cnt; 660 sw_desc->unmap_src_cnt = src_cnt;
661 sw_desc->unmap_len = len; 661 sw_desc->unmap_len = len;
662 sw_desc->async_tx.flags = flags; 662 sw_desc->async_tx.flags = flags;
663 while (src_cnt--) 663 while (src_cnt--)
664 iop_desc_set_xor_src_addr(grp_start, src_cnt, 664 iop_desc_set_xor_src_addr(grp_start, src_cnt,
665 dma_src[src_cnt]); 665 dma_src[src_cnt]);
666 } 666 }
667 spin_unlock_bh(&iop_chan->lock); 667 spin_unlock_bh(&iop_chan->lock);
668 668
669 return sw_desc ? &sw_desc->async_tx : NULL; 669 return sw_desc ? &sw_desc->async_tx : NULL;
670 } 670 }
671 671
672 static struct dma_async_tx_descriptor * 672 static struct dma_async_tx_descriptor *
673 iop_adma_prep_dma_xor_val(struct dma_chan *chan, dma_addr_t *dma_src, 673 iop_adma_prep_dma_xor_val(struct dma_chan *chan, dma_addr_t *dma_src,
674 unsigned int src_cnt, size_t len, u32 *result, 674 unsigned int src_cnt, size_t len, u32 *result,
675 unsigned long flags) 675 unsigned long flags)
676 { 676 {
677 struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan); 677 struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
678 struct iop_adma_desc_slot *sw_desc, *grp_start; 678 struct iop_adma_desc_slot *sw_desc, *grp_start;
679 int slot_cnt, slots_per_op; 679 int slot_cnt, slots_per_op;
680 680
681 if (unlikely(!len)) 681 if (unlikely(!len))
682 return NULL; 682 return NULL;
683 683
684 dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %u\n", 684 dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %u\n",
685 __func__, src_cnt, len); 685 __func__, src_cnt, len);
686 686
687 spin_lock_bh(&iop_chan->lock); 687 spin_lock_bh(&iop_chan->lock);
688 slot_cnt = iop_chan_zero_sum_slot_count(len, src_cnt, &slots_per_op); 688 slot_cnt = iop_chan_zero_sum_slot_count(len, src_cnt, &slots_per_op);
689 sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op); 689 sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
690 if (sw_desc) { 690 if (sw_desc) {
691 grp_start = sw_desc->group_head; 691 grp_start = sw_desc->group_head;
692 iop_desc_init_zero_sum(grp_start, src_cnt, flags); 692 iop_desc_init_zero_sum(grp_start, src_cnt, flags);
693 iop_desc_set_zero_sum_byte_count(grp_start, len); 693 iop_desc_set_zero_sum_byte_count(grp_start, len);
694 grp_start->xor_check_result = result; 694 grp_start->xor_check_result = result;
695 pr_debug("\t%s: grp_start->xor_check_result: %p\n", 695 pr_debug("\t%s: grp_start->xor_check_result: %p\n",
696 __func__, grp_start->xor_check_result); 696 __func__, grp_start->xor_check_result);
697 sw_desc->unmap_src_cnt = src_cnt; 697 sw_desc->unmap_src_cnt = src_cnt;
698 sw_desc->unmap_len = len; 698 sw_desc->unmap_len = len;
699 sw_desc->async_tx.flags = flags; 699 sw_desc->async_tx.flags = flags;
700 while (src_cnt--) 700 while (src_cnt--)
701 iop_desc_set_zero_sum_src_addr(grp_start, src_cnt, 701 iop_desc_set_zero_sum_src_addr(grp_start, src_cnt,
702 dma_src[src_cnt]); 702 dma_src[src_cnt]);
703 } 703 }
704 spin_unlock_bh(&iop_chan->lock); 704 spin_unlock_bh(&iop_chan->lock);
705 705
706 return sw_desc ? &sw_desc->async_tx : NULL; 706 return sw_desc ? &sw_desc->async_tx : NULL;
707 } 707 }
708 708
709 static struct dma_async_tx_descriptor * 709 static struct dma_async_tx_descriptor *
710 iop_adma_prep_dma_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src, 710 iop_adma_prep_dma_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
711 unsigned int src_cnt, const unsigned char *scf, size_t len, 711 unsigned int src_cnt, const unsigned char *scf, size_t len,
712 unsigned long flags) 712 unsigned long flags)
713 { 713 {
714 struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan); 714 struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
715 struct iop_adma_desc_slot *sw_desc, *g; 715 struct iop_adma_desc_slot *sw_desc, *g;
716 int slot_cnt, slots_per_op; 716 int slot_cnt, slots_per_op;
717 int continue_srcs; 717 int continue_srcs;
718 718
719 if (unlikely(!len)) 719 if (unlikely(!len))
720 return NULL; 720 return NULL;
721 BUG_ON(len > IOP_ADMA_XOR_MAX_BYTE_COUNT); 721 BUG_ON(len > IOP_ADMA_XOR_MAX_BYTE_COUNT);
722 722
723 dev_dbg(iop_chan->device->common.dev, 723 dev_dbg(iop_chan->device->common.dev,
724 "%s src_cnt: %d len: %u flags: %lx\n", 724 "%s src_cnt: %d len: %u flags: %lx\n",
725 __func__, src_cnt, len, flags); 725 __func__, src_cnt, len, flags);
726 726
727 if (dmaf_p_disabled_continue(flags)) 727 if (dmaf_p_disabled_continue(flags))
728 continue_srcs = 1+src_cnt; 728 continue_srcs = 1+src_cnt;
729 else if (dmaf_continue(flags)) 729 else if (dmaf_continue(flags))
730 continue_srcs = 3+src_cnt; 730 continue_srcs = 3+src_cnt;
731 else 731 else
732 continue_srcs = 0+src_cnt; 732 continue_srcs = 0+src_cnt;
733 733
734 spin_lock_bh(&iop_chan->lock); 734 spin_lock_bh(&iop_chan->lock);
735 slot_cnt = iop_chan_pq_slot_count(len, continue_srcs, &slots_per_op); 735 slot_cnt = iop_chan_pq_slot_count(len, continue_srcs, &slots_per_op);
736 sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op); 736 sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
737 if (sw_desc) { 737 if (sw_desc) {
738 int i; 738 int i;
739 739
740 g = sw_desc->group_head; 740 g = sw_desc->group_head;
741 iop_desc_set_byte_count(g, iop_chan, len); 741 iop_desc_set_byte_count(g, iop_chan, len);
742 742
743 /* even if P is disabled, its destination address (bits 743 /* even if P is disabled, its destination address (bits
744 * [3:0]) must match Q. It is OK if P points to an 744 * [3:0]) must match Q. It is OK if P points to an
745 * invalid address; it won't be written. 745 * invalid address; it won't be written.
746 */ 746 */
747 if (flags & DMA_PREP_PQ_DISABLE_P) 747 if (flags & DMA_PREP_PQ_DISABLE_P)
748 dst[0] = dst[1] & 0x7; 748 dst[0] = dst[1] & 0x7;
749 749
750 iop_desc_set_pq_addr(g, dst); 750 iop_desc_set_pq_addr(g, dst);
751 sw_desc->unmap_src_cnt = src_cnt; 751 sw_desc->unmap_src_cnt = src_cnt;
752 sw_desc->unmap_len = len; 752 sw_desc->unmap_len = len;
753 sw_desc->async_tx.flags = flags; 753 sw_desc->async_tx.flags = flags;
754 for (i = 0; i < src_cnt; i++) 754 for (i = 0; i < src_cnt; i++)
755 iop_desc_set_pq_src_addr(g, i, src[i], scf[i]); 755 iop_desc_set_pq_src_addr(g, i, src[i], scf[i]);
756 756
757 /* if we are continuing a previous operation factor in 757 /* if we are continuing a previous operation factor in
758 * the old p and q values, see the comment for dma_maxpq 758 * the old p and q values, see the comment for dma_maxpq
759 * in include/linux/dmaengine.h 759 * in include/linux/dmaengine.h
760 */ 760 */
761 if (dmaf_p_disabled_continue(flags)) 761 if (dmaf_p_disabled_continue(flags))
762 iop_desc_set_pq_src_addr(g, i++, dst[1], 1); 762 iop_desc_set_pq_src_addr(g, i++, dst[1], 1);
763 else if (dmaf_continue(flags)) { 763 else if (dmaf_continue(flags)) {
764 iop_desc_set_pq_src_addr(g, i++, dst[0], 0); 764 iop_desc_set_pq_src_addr(g, i++, dst[0], 0);
765 iop_desc_set_pq_src_addr(g, i++, dst[1], 1); 765 iop_desc_set_pq_src_addr(g, i++, dst[1], 1);
766 iop_desc_set_pq_src_addr(g, i++, dst[1], 0); 766 iop_desc_set_pq_src_addr(g, i++, dst[1], 0);
767 } 767 }
768 iop_desc_init_pq(g, i, flags); 768 iop_desc_init_pq(g, i, flags);
769 } 769 }
770 spin_unlock_bh(&iop_chan->lock); 770 spin_unlock_bh(&iop_chan->lock);
771 771
772 return sw_desc ? &sw_desc->async_tx : NULL; 772 return sw_desc ? &sw_desc->async_tx : NULL;
773 } 773 }
774 774
775 static struct dma_async_tx_descriptor * 775 static struct dma_async_tx_descriptor *
776 iop_adma_prep_dma_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src, 776 iop_adma_prep_dma_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
777 unsigned int src_cnt, const unsigned char *scf, 777 unsigned int src_cnt, const unsigned char *scf,
778 size_t len, enum sum_check_flags *pqres, 778 size_t len, enum sum_check_flags *pqres,
779 unsigned long flags) 779 unsigned long flags)
780 { 780 {
781 struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan); 781 struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
782 struct iop_adma_desc_slot *sw_desc, *g; 782 struct iop_adma_desc_slot *sw_desc, *g;
783 int slot_cnt, slots_per_op; 783 int slot_cnt, slots_per_op;
784 784
785 if (unlikely(!len)) 785 if (unlikely(!len))
786 return NULL; 786 return NULL;
787 BUG_ON(len > IOP_ADMA_XOR_MAX_BYTE_COUNT); 787 BUG_ON(len > IOP_ADMA_XOR_MAX_BYTE_COUNT);
788 788
789 dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %u\n", 789 dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %u\n",
790 __func__, src_cnt, len); 790 __func__, src_cnt, len);
791 791
792 spin_lock_bh(&iop_chan->lock); 792 spin_lock_bh(&iop_chan->lock);
793 slot_cnt = iop_chan_pq_zero_sum_slot_count(len, src_cnt + 2, &slots_per_op); 793 slot_cnt = iop_chan_pq_zero_sum_slot_count(len, src_cnt + 2, &slots_per_op);
794 sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op); 794 sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
795 if (sw_desc) { 795 if (sw_desc) {
796 /* for validate operations, p and q are tagged onto the 796 /* for validate operations, p and q are tagged onto the
797 * end of the source list 797 * end of the source list
798 */ 798 */
799 int pq_idx = src_cnt; 799 int pq_idx = src_cnt;
800 800
801 g = sw_desc->group_head; 801 g = sw_desc->group_head;
802 iop_desc_init_pq_zero_sum(g, src_cnt+2, flags); 802 iop_desc_init_pq_zero_sum(g, src_cnt+2, flags);
803 iop_desc_set_pq_zero_sum_byte_count(g, len); 803 iop_desc_set_pq_zero_sum_byte_count(g, len);
804 g->pq_check_result = pqres; 804 g->pq_check_result = pqres;
805 pr_debug("\t%s: g->pq_check_result: %p\n", 805 pr_debug("\t%s: g->pq_check_result: %p\n",
806 __func__, g->pq_check_result); 806 __func__, g->pq_check_result);
807 sw_desc->unmap_src_cnt = src_cnt+2; 807 sw_desc->unmap_src_cnt = src_cnt+2;
808 sw_desc->unmap_len = len; 808 sw_desc->unmap_len = len;
809 sw_desc->async_tx.flags = flags; 809 sw_desc->async_tx.flags = flags;
810 while (src_cnt--) 810 while (src_cnt--)
811 iop_desc_set_pq_zero_sum_src_addr(g, src_cnt, 811 iop_desc_set_pq_zero_sum_src_addr(g, src_cnt,
812 src[src_cnt], 812 src[src_cnt],
813 scf[src_cnt]); 813 scf[src_cnt]);
814 iop_desc_set_pq_zero_sum_addr(g, pq_idx, src); 814 iop_desc_set_pq_zero_sum_addr(g, pq_idx, src);
815 } 815 }
816 spin_unlock_bh(&iop_chan->lock); 816 spin_unlock_bh(&iop_chan->lock);
817 817
818 return sw_desc ? &sw_desc->async_tx : NULL; 818 return sw_desc ? &sw_desc->async_tx : NULL;
819 } 819 }
820 820
821 static void iop_adma_free_chan_resources(struct dma_chan *chan) 821 static void iop_adma_free_chan_resources(struct dma_chan *chan)
822 { 822 {
823 struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan); 823 struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
824 struct iop_adma_desc_slot *iter, *_iter; 824 struct iop_adma_desc_slot *iter, *_iter;
825 int in_use_descs = 0; 825 int in_use_descs = 0;
826 826
827 iop_adma_slot_cleanup(iop_chan); 827 iop_adma_slot_cleanup(iop_chan);
828 828
829 spin_lock_bh(&iop_chan->lock); 829 spin_lock_bh(&iop_chan->lock);
830 list_for_each_entry_safe(iter, _iter, &iop_chan->chain, 830 list_for_each_entry_safe(iter, _iter, &iop_chan->chain,
831 chain_node) { 831 chain_node) {
832 in_use_descs++; 832 in_use_descs++;
833 list_del(&iter->chain_node); 833 list_del(&iter->chain_node);
834 } 834 }
835 list_for_each_entry_safe_reverse( 835 list_for_each_entry_safe_reverse(
836 iter, _iter, &iop_chan->all_slots, slot_node) { 836 iter, _iter, &iop_chan->all_slots, slot_node) {
837 list_del(&iter->slot_node); 837 list_del(&iter->slot_node);
838 kfree(iter); 838 kfree(iter);
839 iop_chan->slots_allocated--; 839 iop_chan->slots_allocated--;
840 } 840 }
841 iop_chan->last_used = NULL; 841 iop_chan->last_used = NULL;
842 842
843 dev_dbg(iop_chan->device->common.dev, "%s slots_allocated %d\n", 843 dev_dbg(iop_chan->device->common.dev, "%s slots_allocated %d\n",
844 __func__, iop_chan->slots_allocated); 844 __func__, iop_chan->slots_allocated);
845 spin_unlock_bh(&iop_chan->lock); 845 spin_unlock_bh(&iop_chan->lock);
846 846
847 /* one is ok since we left it there on purpose */ 847 /* one is ok since we left it there on purpose */
848 if (in_use_descs > 1) 848 if (in_use_descs > 1)
849 printk(KERN_ERR "IOP: Freeing %d in use descriptors!\n", 849 printk(KERN_ERR "IOP: Freeing %d in use descriptors!\n",
850 in_use_descs - 1); 850 in_use_descs - 1);
851 } 851 }
852 852
853 /** 853 /**
854 * iop_adma_status - poll the status of an ADMA transaction 854 * iop_adma_status - poll the status of an ADMA transaction
855 * @chan: ADMA channel handle 855 * @chan: ADMA channel handle
856 * @cookie: ADMA transaction identifier 856 * @cookie: ADMA transaction identifier
857 * @txstate: a holder for the current state of the channel or NULL 857 * @txstate: a holder for the current state of the channel or NULL
858 */ 858 */
859 static enum dma_status iop_adma_status(struct dma_chan *chan, 859 static enum dma_status iop_adma_status(struct dma_chan *chan,
860 dma_cookie_t cookie, 860 dma_cookie_t cookie,
861 struct dma_tx_state *txstate) 861 struct dma_tx_state *txstate)
862 { 862 {
863 struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan); 863 struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
864 int ret; 864 int ret;
865 865
866 ret = dma_cookie_status(chan, cookie, txstate); 866 ret = dma_cookie_status(chan, cookie, txstate);
867 if (ret == DMA_SUCCESS) 867 if (ret == DMA_COMPLETE)
868 return ret; 868 return ret;
869 869
870 iop_adma_slot_cleanup(iop_chan); 870 iop_adma_slot_cleanup(iop_chan);
871 871
872 return dma_cookie_status(chan, cookie, txstate); 872 return dma_cookie_status(chan, cookie, txstate);
873 } 873 }
874 874
875 static irqreturn_t iop_adma_eot_handler(int irq, void *data) 875 static irqreturn_t iop_adma_eot_handler(int irq, void *data)
876 { 876 {
877 struct iop_adma_chan *chan = data; 877 struct iop_adma_chan *chan = data;
878 878
879 dev_dbg(chan->device->common.dev, "%s\n", __func__); 879 dev_dbg(chan->device->common.dev, "%s\n", __func__);
880 880
881 tasklet_schedule(&chan->irq_tasklet); 881 tasklet_schedule(&chan->irq_tasklet);
882 882
883 iop_adma_device_clear_eot_status(chan); 883 iop_adma_device_clear_eot_status(chan);
884 884
885 return IRQ_HANDLED; 885 return IRQ_HANDLED;
886 } 886 }
887 887
888 static irqreturn_t iop_adma_eoc_handler(int irq, void *data) 888 static irqreturn_t iop_adma_eoc_handler(int irq, void *data)
889 { 889 {
890 struct iop_adma_chan *chan = data; 890 struct iop_adma_chan *chan = data;
891 891
892 dev_dbg(chan->device->common.dev, "%s\n", __func__); 892 dev_dbg(chan->device->common.dev, "%s\n", __func__);
893 893
894 tasklet_schedule(&chan->irq_tasklet); 894 tasklet_schedule(&chan->irq_tasklet);
895 895
896 iop_adma_device_clear_eoc_status(chan); 896 iop_adma_device_clear_eoc_status(chan);
897 897
898 return IRQ_HANDLED; 898 return IRQ_HANDLED;
899 } 899 }
900 900
901 static irqreturn_t iop_adma_err_handler(int irq, void *data) 901 static irqreturn_t iop_adma_err_handler(int irq, void *data)
902 { 902 {
903 struct iop_adma_chan *chan = data; 903 struct iop_adma_chan *chan = data;
904 unsigned long status = iop_chan_get_status(chan); 904 unsigned long status = iop_chan_get_status(chan);
905 905
906 dev_err(chan->device->common.dev, 906 dev_err(chan->device->common.dev,
907 "error ( %s%s%s%s%s%s%s)\n", 907 "error ( %s%s%s%s%s%s%s)\n",
908 iop_is_err_int_parity(status, chan) ? "int_parity " : "", 908 iop_is_err_int_parity(status, chan) ? "int_parity " : "",
909 iop_is_err_mcu_abort(status, chan) ? "mcu_abort " : "", 909 iop_is_err_mcu_abort(status, chan) ? "mcu_abort " : "",
910 iop_is_err_int_tabort(status, chan) ? "int_tabort " : "", 910 iop_is_err_int_tabort(status, chan) ? "int_tabort " : "",
911 iop_is_err_int_mabort(status, chan) ? "int_mabort " : "", 911 iop_is_err_int_mabort(status, chan) ? "int_mabort " : "",
912 iop_is_err_pci_tabort(status, chan) ? "pci_tabort " : "", 912 iop_is_err_pci_tabort(status, chan) ? "pci_tabort " : "",
913 iop_is_err_pci_mabort(status, chan) ? "pci_mabort " : "", 913 iop_is_err_pci_mabort(status, chan) ? "pci_mabort " : "",
914 iop_is_err_split_tx(status, chan) ? "split_tx " : ""); 914 iop_is_err_split_tx(status, chan) ? "split_tx " : "");
915 915
916 iop_adma_device_clear_err_status(chan); 916 iop_adma_device_clear_err_status(chan);
917 917
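	/* an ADMA error is treated as unrecoverable here: the decoded
	 * status bits above name the cause, and BUG() is raised rather
	 * than attempting to resume a failed descriptor chain
	 */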
918 BUG(); 918 BUG();
919 919
920 return IRQ_HANDLED; 920 return IRQ_HANDLED;
921 } 921 }
922 922
923 static void iop_adma_issue_pending(struct dma_chan *chan) 923 static void iop_adma_issue_pending(struct dma_chan *chan)
924 { 924 {
925 struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan); 925 struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
926 926
927 if (iop_chan->pending) { 927 if (iop_chan->pending) {
928 iop_chan->pending = 0; 928 iop_chan->pending = 0;
929 iop_chan_append(iop_chan); 929 iop_chan_append(iop_chan);
930 } 930 }
931 } 931 }
932 932
933 /* 933 /*
934 * Perform a transaction to verify the HW works. 934 * Perform a transaction to verify the HW works.
935 */ 935 */
936 #define IOP_ADMA_TEST_SIZE 2000 936 #define IOP_ADMA_TEST_SIZE 2000
937 937
938 static int iop_adma_memcpy_self_test(struct iop_adma_device *device) 938 static int iop_adma_memcpy_self_test(struct iop_adma_device *device)
939 { 939 {
940 int i; 940 int i;
941 void *src, *dest; 941 void *src, *dest;
942 dma_addr_t src_dma, dest_dma; 942 dma_addr_t src_dma, dest_dma;
943 struct dma_chan *dma_chan; 943 struct dma_chan *dma_chan;
944 dma_cookie_t cookie; 944 dma_cookie_t cookie;
945 struct dma_async_tx_descriptor *tx; 945 struct dma_async_tx_descriptor *tx;
946 int err = 0; 946 int err = 0;
947 struct iop_adma_chan *iop_chan; 947 struct iop_adma_chan *iop_chan;
948 948
949 dev_dbg(device->common.dev, "%s\n", __func__); 949 dev_dbg(device->common.dev, "%s\n", __func__);
950 950
951 src = kmalloc(IOP_ADMA_TEST_SIZE, GFP_KERNEL); 951 src = kmalloc(IOP_ADMA_TEST_SIZE, GFP_KERNEL);
952 if (!src) 952 if (!src)
953 return -ENOMEM; 953 return -ENOMEM;
954 dest = kzalloc(IOP_ADMA_TEST_SIZE, GFP_KERNEL); 954 dest = kzalloc(IOP_ADMA_TEST_SIZE, GFP_KERNEL);
955 if (!dest) { 955 if (!dest) {
956 kfree(src); 956 kfree(src);
957 return -ENOMEM; 957 return -ENOMEM;
958 } 958 }
959 959
960 /* Fill in src buffer */ 960 /* Fill in src buffer */
961 for (i = 0; i < IOP_ADMA_TEST_SIZE; i++) 961 for (i = 0; i < IOP_ADMA_TEST_SIZE; i++)
962 ((u8 *) src)[i] = (u8)i; 962 ((u8 *) src)[i] = (u8)i;
963 963
964 /* Start copy, using first DMA channel */ 964 /* Start copy, using first DMA channel */
965 dma_chan = container_of(device->common.channels.next, 965 dma_chan = container_of(device->common.channels.next,
966 struct dma_chan, 966 struct dma_chan,
967 device_node); 967 device_node);
968 if (iop_adma_alloc_chan_resources(dma_chan) < 1) { 968 if (iop_adma_alloc_chan_resources(dma_chan) < 1) {
969 err = -ENODEV; 969 err = -ENODEV;
970 goto out; 970 goto out;
971 } 971 }
972 972
973 dest_dma = dma_map_single(dma_chan->device->dev, dest, 973 dest_dma = dma_map_single(dma_chan->device->dev, dest,
974 IOP_ADMA_TEST_SIZE, DMA_FROM_DEVICE); 974 IOP_ADMA_TEST_SIZE, DMA_FROM_DEVICE);
975 src_dma = dma_map_single(dma_chan->device->dev, src, 975 src_dma = dma_map_single(dma_chan->device->dev, src,
976 IOP_ADMA_TEST_SIZE, DMA_TO_DEVICE); 976 IOP_ADMA_TEST_SIZE, DMA_TO_DEVICE);
977 tx = iop_adma_prep_dma_memcpy(dma_chan, dest_dma, src_dma, 977 tx = iop_adma_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
978 IOP_ADMA_TEST_SIZE, 978 IOP_ADMA_TEST_SIZE,
979 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 979 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
980 980
981 cookie = iop_adma_tx_submit(tx); 981 cookie = iop_adma_tx_submit(tx);
982 iop_adma_issue_pending(dma_chan); 982 iop_adma_issue_pending(dma_chan);
983 msleep(1); 983 msleep(1);
984 984
985 if (iop_adma_status(dma_chan, cookie, NULL) != 985 if (iop_adma_status(dma_chan, cookie, NULL) !=
986 DMA_SUCCESS) { 986 DMA_COMPLETE) {
987 dev_err(dma_chan->device->dev, 987 dev_err(dma_chan->device->dev,
988 "Self-test copy timed out, disabling\n"); 988 "Self-test copy timed out, disabling\n");
989 err = -ENODEV; 989 err = -ENODEV;
990 goto free_resources; 990 goto free_resources;
991 } 991 }
992 992
993 iop_chan = to_iop_adma_chan(dma_chan); 993 iop_chan = to_iop_adma_chan(dma_chan);
994 dma_sync_single_for_cpu(&iop_chan->device->pdev->dev, dest_dma, 994 dma_sync_single_for_cpu(&iop_chan->device->pdev->dev, dest_dma,
995 IOP_ADMA_TEST_SIZE, DMA_FROM_DEVICE); 995 IOP_ADMA_TEST_SIZE, DMA_FROM_DEVICE);
996 if (memcmp(src, dest, IOP_ADMA_TEST_SIZE)) { 996 if (memcmp(src, dest, IOP_ADMA_TEST_SIZE)) {
997 dev_err(dma_chan->device->dev, 997 dev_err(dma_chan->device->dev,
998 "Self-test copy failed compare, disabling\n"); 998 "Self-test copy failed compare, disabling\n");
999 err = -ENODEV; 999 err = -ENODEV;
1000 goto free_resources; 1000 goto free_resources;
1001 } 1001 }
1002 1002
1003 free_resources: 1003 free_resources:
1004 iop_adma_free_chan_resources(dma_chan); 1004 iop_adma_free_chan_resources(dma_chan);
1005 out: 1005 out:
1006 kfree(src); 1006 kfree(src);
1007 kfree(dest); 1007 kfree(dest);
1008 return err; 1008 return err;
1009 } 1009 }
1010 1010
1011 #define IOP_ADMA_NUM_SRC_TEST 4 /* must be <= 15 */ 1011 #define IOP_ADMA_NUM_SRC_TEST 4 /* must be <= 15 */
1012 static int 1012 static int
1013 iop_adma_xor_val_self_test(struct iop_adma_device *device) 1013 iop_adma_xor_val_self_test(struct iop_adma_device *device)
1014 { 1014 {
1015 int i, src_idx; 1015 int i, src_idx;
1016 struct page *dest; 1016 struct page *dest;
1017 struct page *xor_srcs[IOP_ADMA_NUM_SRC_TEST]; 1017 struct page *xor_srcs[IOP_ADMA_NUM_SRC_TEST];
1018 struct page *zero_sum_srcs[IOP_ADMA_NUM_SRC_TEST + 1]; 1018 struct page *zero_sum_srcs[IOP_ADMA_NUM_SRC_TEST + 1];
1019 dma_addr_t dma_srcs[IOP_ADMA_NUM_SRC_TEST + 1]; 1019 dma_addr_t dma_srcs[IOP_ADMA_NUM_SRC_TEST + 1];
1020 dma_addr_t dest_dma; 1020 dma_addr_t dest_dma;
1021 struct dma_async_tx_descriptor *tx; 1021 struct dma_async_tx_descriptor *tx;
1022 struct dma_chan *dma_chan; 1022 struct dma_chan *dma_chan;
1023 dma_cookie_t cookie; 1023 dma_cookie_t cookie;
1024 u8 cmp_byte = 0; 1024 u8 cmp_byte = 0;
1025 u32 cmp_word; 1025 u32 cmp_word;
1026 u32 zero_sum_result; 1026 u32 zero_sum_result;
1027 int err = 0; 1027 int err = 0;
1028 struct iop_adma_chan *iop_chan; 1028 struct iop_adma_chan *iop_chan;
1029 1029
1030 dev_dbg(device->common.dev, "%s\n", __func__); 1030 dev_dbg(device->common.dev, "%s\n", __func__);
1031 1031
1032 for (src_idx = 0; src_idx < IOP_ADMA_NUM_SRC_TEST; src_idx++) { 1032 for (src_idx = 0; src_idx < IOP_ADMA_NUM_SRC_TEST; src_idx++) {
1033 xor_srcs[src_idx] = alloc_page(GFP_KERNEL); 1033 xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
1034 if (!xor_srcs[src_idx]) { 1034 if (!xor_srcs[src_idx]) {
1035 while (src_idx--) 1035 while (src_idx--)
1036 __free_page(xor_srcs[src_idx]); 1036 __free_page(xor_srcs[src_idx]);
1037 return -ENOMEM; 1037 return -ENOMEM;
1038 } 1038 }
1039 } 1039 }
1040 1040
1041 dest = alloc_page(GFP_KERNEL); 1041 dest = alloc_page(GFP_KERNEL);
1042 if (!dest) { 1042 if (!dest) {
1043 while (src_idx--) 1043 while (src_idx--)
1044 __free_page(xor_srcs[src_idx]); 1044 __free_page(xor_srcs[src_idx]);
1045 return -ENOMEM; 1045 return -ENOMEM;
1046 } 1046 }
1047 1047
1048 /* Fill in src buffers */ 1048 /* Fill in src buffers */
1049 for (src_idx = 0; src_idx < IOP_ADMA_NUM_SRC_TEST; src_idx++) { 1049 for (src_idx = 0; src_idx < IOP_ADMA_NUM_SRC_TEST; src_idx++) {
1050 u8 *ptr = page_address(xor_srcs[src_idx]); 1050 u8 *ptr = page_address(xor_srcs[src_idx]);
1051 for (i = 0; i < PAGE_SIZE; i++) 1051 for (i = 0; i < PAGE_SIZE; i++)
1052 ptr[i] = (1 << src_idx); 1052 ptr[i] = (1 << src_idx);
1053 } 1053 }
1054 1054
1055 for (src_idx = 0; src_idx < IOP_ADMA_NUM_SRC_TEST; src_idx++) 1055 for (src_idx = 0; src_idx < IOP_ADMA_NUM_SRC_TEST; src_idx++)
1056 cmp_byte ^= (u8) (1 << src_idx); 1056 cmp_byte ^= (u8) (1 << src_idx);
1057 1057
1058 cmp_word = (cmp_byte << 24) | (cmp_byte << 16) | 1058 cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
1059 (cmp_byte << 8) | cmp_byte; 1059 (cmp_byte << 8) | cmp_byte;
1060 1060
1061 memset(page_address(dest), 0, PAGE_SIZE); 1061 memset(page_address(dest), 0, PAGE_SIZE);
1062 1062
1063 dma_chan = container_of(device->common.channels.next, 1063 dma_chan = container_of(device->common.channels.next,
1064 struct dma_chan, 1064 struct dma_chan,
1065 device_node); 1065 device_node);
1066 if (iop_adma_alloc_chan_resources(dma_chan) < 1) { 1066 if (iop_adma_alloc_chan_resources(dma_chan) < 1) {
1067 err = -ENODEV; 1067 err = -ENODEV;
1068 goto out; 1068 goto out;
1069 } 1069 }
1070 1070
1071 /* test xor */ 1071 /* test xor */
1072 dest_dma = dma_map_page(dma_chan->device->dev, dest, 0, 1072 dest_dma = dma_map_page(dma_chan->device->dev, dest, 0,
1073 PAGE_SIZE, DMA_FROM_DEVICE); 1073 PAGE_SIZE, DMA_FROM_DEVICE);
1074 for (i = 0; i < IOP_ADMA_NUM_SRC_TEST; i++) 1074 for (i = 0; i < IOP_ADMA_NUM_SRC_TEST; i++)
1075 dma_srcs[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i], 1075 dma_srcs[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
1076 0, PAGE_SIZE, DMA_TO_DEVICE); 1076 0, PAGE_SIZE, DMA_TO_DEVICE);
1077 tx = iop_adma_prep_dma_xor(dma_chan, dest_dma, dma_srcs, 1077 tx = iop_adma_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
1078 IOP_ADMA_NUM_SRC_TEST, PAGE_SIZE, 1078 IOP_ADMA_NUM_SRC_TEST, PAGE_SIZE,
1079 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 1079 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
1080 1080
1081 cookie = iop_adma_tx_submit(tx); 1081 cookie = iop_adma_tx_submit(tx);
1082 iop_adma_issue_pending(dma_chan); 1082 iop_adma_issue_pending(dma_chan);
1083 msleep(8); 1083 msleep(8);
1084 1084
1085 if (iop_adma_status(dma_chan, cookie, NULL) != 1085 if (iop_adma_status(dma_chan, cookie, NULL) !=
1086 DMA_SUCCESS) { 1086 DMA_COMPLETE) {
1087 dev_err(dma_chan->device->dev, 1087 dev_err(dma_chan->device->dev,
1088 "Self-test xor timed out, disabling\n"); 1088 "Self-test xor timed out, disabling\n");
1089 err = -ENODEV; 1089 err = -ENODEV;
1090 goto free_resources; 1090 goto free_resources;
1091 } 1091 }
1092 1092
1093 iop_chan = to_iop_adma_chan(dma_chan); 1093 iop_chan = to_iop_adma_chan(dma_chan);
1094 dma_sync_single_for_cpu(&iop_chan->device->pdev->dev, dest_dma, 1094 dma_sync_single_for_cpu(&iop_chan->device->pdev->dev, dest_dma,
1095 PAGE_SIZE, DMA_FROM_DEVICE); 1095 PAGE_SIZE, DMA_FROM_DEVICE);
1096 for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) { 1096 for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
1097 u32 *ptr = page_address(dest); 1097 u32 *ptr = page_address(dest);
1098 if (ptr[i] != cmp_word) { 1098 if (ptr[i] != cmp_word) {
1099 dev_err(dma_chan->device->dev, 1099 dev_err(dma_chan->device->dev,
1100 "Self-test xor failed compare, disabling\n"); 1100 "Self-test xor failed compare, disabling\n");
1101 err = -ENODEV; 1101 err = -ENODEV;
1102 goto free_resources; 1102 goto free_resources;
1103 } 1103 }
1104 } 1104 }
1105 dma_sync_single_for_device(&iop_chan->device->pdev->dev, dest_dma, 1105 dma_sync_single_for_device(&iop_chan->device->pdev->dev, dest_dma,
1106 PAGE_SIZE, DMA_TO_DEVICE); 1106 PAGE_SIZE, DMA_TO_DEVICE);
1107 1107
1108 /* skip zero sum if the capability is not present */ 1108 /* skip zero sum if the capability is not present */
1109 if (!dma_has_cap(DMA_XOR_VAL, dma_chan->device->cap_mask)) 1109 if (!dma_has_cap(DMA_XOR_VAL, dma_chan->device->cap_mask))
1110 goto free_resources; 1110 goto free_resources;
1111 1111
1112 /* zero sum the sources with the destination page */ 1112 /* zero sum the sources with the destination page */
1113 for (i = 0; i < IOP_ADMA_NUM_SRC_TEST; i++) 1113 for (i = 0; i < IOP_ADMA_NUM_SRC_TEST; i++)
1114 zero_sum_srcs[i] = xor_srcs[i]; 1114 zero_sum_srcs[i] = xor_srcs[i];
1115 zero_sum_srcs[i] = dest; 1115 zero_sum_srcs[i] = dest;
1116 1116
1117 zero_sum_result = 1; 1117 zero_sum_result = 1;
1118 1118
1119 for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 1; i++) 1119 for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 1; i++)
1120 dma_srcs[i] = dma_map_page(dma_chan->device->dev, 1120 dma_srcs[i] = dma_map_page(dma_chan->device->dev,
1121 zero_sum_srcs[i], 0, PAGE_SIZE, 1121 zero_sum_srcs[i], 0, PAGE_SIZE,
1122 DMA_TO_DEVICE); 1122 DMA_TO_DEVICE);
1123 tx = iop_adma_prep_dma_xor_val(dma_chan, dma_srcs, 1123 tx = iop_adma_prep_dma_xor_val(dma_chan, dma_srcs,
1124 IOP_ADMA_NUM_SRC_TEST + 1, PAGE_SIZE, 1124 IOP_ADMA_NUM_SRC_TEST + 1, PAGE_SIZE,
1125 &zero_sum_result, 1125 &zero_sum_result,
1126 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 1126 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
1127 1127
1128 cookie = iop_adma_tx_submit(tx); 1128 cookie = iop_adma_tx_submit(tx);
1129 iop_adma_issue_pending(dma_chan); 1129 iop_adma_issue_pending(dma_chan);
1130 msleep(8); 1130 msleep(8);
1131 1131
1132 if (iop_adma_status(dma_chan, cookie, NULL) != DMA_SUCCESS) { 1132 if (iop_adma_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
1133 dev_err(dma_chan->device->dev, 1133 dev_err(dma_chan->device->dev,
1134 "Self-test zero sum timed out, disabling\n"); 1134 "Self-test zero sum timed out, disabling\n");
1135 err = -ENODEV; 1135 err = -ENODEV;
1136 goto free_resources; 1136 goto free_resources;
1137 } 1137 }
1138 1138
1139 if (zero_sum_result != 0) { 1139 if (zero_sum_result != 0) {
1140 dev_err(dma_chan->device->dev, 1140 dev_err(dma_chan->device->dev,
1141 "Self-test zero sum failed compare, disabling\n"); 1141 "Self-test zero sum failed compare, disabling\n");
1142 err = -ENODEV; 1142 err = -ENODEV;
1143 goto free_resources; 1143 goto free_resources;
1144 } 1144 }
1145 1145
1146 /* test for non-zero parity sum */ 1146 /* test for non-zero parity sum */
1147 zero_sum_result = 0; 1147 zero_sum_result = 0;
1148 for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 1; i++) 1148 for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 1; i++)
1149 dma_srcs[i] = dma_map_page(dma_chan->device->dev, 1149 dma_srcs[i] = dma_map_page(dma_chan->device->dev,
1150 zero_sum_srcs[i], 0, PAGE_SIZE, 1150 zero_sum_srcs[i], 0, PAGE_SIZE,
1151 DMA_TO_DEVICE); 1151 DMA_TO_DEVICE);
1152 tx = iop_adma_prep_dma_xor_val(dma_chan, dma_srcs, 1152 tx = iop_adma_prep_dma_xor_val(dma_chan, dma_srcs,
1153 IOP_ADMA_NUM_SRC_TEST + 1, PAGE_SIZE, 1153 IOP_ADMA_NUM_SRC_TEST + 1, PAGE_SIZE,
1154 &zero_sum_result, 1154 &zero_sum_result,
1155 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 1155 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
1156 1156
1157 cookie = iop_adma_tx_submit(tx); 1157 cookie = iop_adma_tx_submit(tx);
1158 iop_adma_issue_pending(dma_chan); 1158 iop_adma_issue_pending(dma_chan);
1159 msleep(8); 1159 msleep(8);
1160 1160
1161 if (iop_adma_status(dma_chan, cookie, NULL) != DMA_SUCCESS) { 1161 if (iop_adma_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
1162 dev_err(dma_chan->device->dev, 1162 dev_err(dma_chan->device->dev,
1163 "Self-test non-zero sum timed out, disabling\n"); 1163 "Self-test non-zero sum timed out, disabling\n");
1164 err = -ENODEV; 1164 err = -ENODEV;
1165 goto free_resources; 1165 goto free_resources;
1166 } 1166 }
1167 1167
1168 if (zero_sum_result != 1) { 1168 if (zero_sum_result != 1) {
1169 dev_err(dma_chan->device->dev, 1169 dev_err(dma_chan->device->dev,
1170 "Self-test non-zero sum failed compare, disabling\n"); 1170 "Self-test non-zero sum failed compare, disabling\n");
1171 err = -ENODEV; 1171 err = -ENODEV;
1172 goto free_resources; 1172 goto free_resources;
1173 } 1173 }
1174 1174
1175 free_resources: 1175 free_resources:
1176 iop_adma_free_chan_resources(dma_chan); 1176 iop_adma_free_chan_resources(dma_chan);
1177 out: 1177 out:
1178 src_idx = IOP_ADMA_NUM_SRC_TEST; 1178 src_idx = IOP_ADMA_NUM_SRC_TEST;
1179 while (src_idx--) 1179 while (src_idx--)
1180 __free_page(xor_srcs[src_idx]); 1180 __free_page(xor_srcs[src_idx]);
1181 __free_page(dest); 1181 __free_page(dest);
1182 return err; 1182 return err;
1183 } 1183 }
1184 1184
1185 #ifdef CONFIG_RAID6_PQ 1185 #ifdef CONFIG_RAID6_PQ
1186 static int 1186 static int
1187 iop_adma_pq_zero_sum_self_test(struct iop_adma_device *device) 1187 iop_adma_pq_zero_sum_self_test(struct iop_adma_device *device)
1188 { 1188 {
1189 /* combined sources, software pq results, and extra hw pq results */ 1189 /* combined sources, software pq results, and extra hw pq results */
1190 struct page *pq[IOP_ADMA_NUM_SRC_TEST+2+2]; 1190 struct page *pq[IOP_ADMA_NUM_SRC_TEST+2+2];
1191 /* ptr to the extra hw pq buffers defined above */ 1191 /* ptr to the extra hw pq buffers defined above */
1192 struct page **pq_hw = &pq[IOP_ADMA_NUM_SRC_TEST+2]; 1192 struct page **pq_hw = &pq[IOP_ADMA_NUM_SRC_TEST+2];
1193 /* address conversion buffers (dma_map / page_address) */ 1193 /* address conversion buffers (dma_map / page_address) */
1194 void *pq_sw[IOP_ADMA_NUM_SRC_TEST+2]; 1194 void *pq_sw[IOP_ADMA_NUM_SRC_TEST+2];
1195 dma_addr_t pq_src[IOP_ADMA_NUM_SRC_TEST+2]; 1195 dma_addr_t pq_src[IOP_ADMA_NUM_SRC_TEST+2];
1196 dma_addr_t *pq_dest = &pq_src[IOP_ADMA_NUM_SRC_TEST]; 1196 dma_addr_t *pq_dest = &pq_src[IOP_ADMA_NUM_SRC_TEST];
1197 1197
1198 int i; 1198 int i;
1199 struct dma_async_tx_descriptor *tx; 1199 struct dma_async_tx_descriptor *tx;
1200 struct dma_chan *dma_chan; 1200 struct dma_chan *dma_chan;
1201 dma_cookie_t cookie; 1201 dma_cookie_t cookie;
1202 u32 zero_sum_result; 1202 u32 zero_sum_result;
1203 int err = 0; 1203 int err = 0;
1204 struct device *dev; 1204 struct device *dev;
1205 1205
1206 dev_dbg(device->common.dev, "%s\n", __func__); 1206 dev_dbg(device->common.dev, "%s\n", __func__);
1207 1207
1208 for (i = 0; i < ARRAY_SIZE(pq); i++) { 1208 for (i = 0; i < ARRAY_SIZE(pq); i++) {
1209 pq[i] = alloc_page(GFP_KERNEL); 1209 pq[i] = alloc_page(GFP_KERNEL);
1210 if (!pq[i]) { 1210 if (!pq[i]) {
1211 while (i--) 1211 while (i--)
1212 __free_page(pq[i]); 1212 __free_page(pq[i]);
1213 return -ENOMEM; 1213 return -ENOMEM;
1214 } 1214 }
1215 } 1215 }
1216 1216
1217 /* Fill in src buffers */ 1217 /* Fill in src buffers */
1218 for (i = 0; i < IOP_ADMA_NUM_SRC_TEST; i++) { 1218 for (i = 0; i < IOP_ADMA_NUM_SRC_TEST; i++) {
1219 pq_sw[i] = page_address(pq[i]); 1219 pq_sw[i] = page_address(pq[i]);
1220 memset(pq_sw[i], 0x11111111 * (1<<i), PAGE_SIZE); 1220 memset(pq_sw[i], 0x11111111 * (1<<i), PAGE_SIZE);
1221 } 1221 }
1222 pq_sw[i] = page_address(pq[i]); 1222 pq_sw[i] = page_address(pq[i]);
1223 pq_sw[i+1] = page_address(pq[i+1]); 1223 pq_sw[i+1] = page_address(pq[i+1]);
1224 1224
1225 dma_chan = container_of(device->common.channels.next, 1225 dma_chan = container_of(device->common.channels.next,
1226 struct dma_chan, 1226 struct dma_chan,
1227 device_node); 1227 device_node);
1228 if (iop_adma_alloc_chan_resources(dma_chan) < 1) { 1228 if (iop_adma_alloc_chan_resources(dma_chan) < 1) {
1229 err = -ENODEV; 1229 err = -ENODEV;
1230 goto out; 1230 goto out;
1231 } 1231 }
1232 1232
1233 dev = dma_chan->device->dev; 1233 dev = dma_chan->device->dev;
1234 1234
1235 /* initialize the dests */ 1235 /* initialize the dests */
1236 memset(page_address(pq_hw[0]), 0 , PAGE_SIZE); 1236 memset(page_address(pq_hw[0]), 0 , PAGE_SIZE);
1237 memset(page_address(pq_hw[1]), 0 , PAGE_SIZE); 1237 memset(page_address(pq_hw[1]), 0 , PAGE_SIZE);
1238 1238
1239 /* test pq */ 1239 /* test pq */
1240 pq_dest[0] = dma_map_page(dev, pq_hw[0], 0, PAGE_SIZE, DMA_FROM_DEVICE); 1240 pq_dest[0] = dma_map_page(dev, pq_hw[0], 0, PAGE_SIZE, DMA_FROM_DEVICE);
1241 pq_dest[1] = dma_map_page(dev, pq_hw[1], 0, PAGE_SIZE, DMA_FROM_DEVICE); 1241 pq_dest[1] = dma_map_page(dev, pq_hw[1], 0, PAGE_SIZE, DMA_FROM_DEVICE);
1242 for (i = 0; i < IOP_ADMA_NUM_SRC_TEST; i++) 1242 for (i = 0; i < IOP_ADMA_NUM_SRC_TEST; i++)
1243 pq_src[i] = dma_map_page(dev, pq[i], 0, PAGE_SIZE, 1243 pq_src[i] = dma_map_page(dev, pq[i], 0, PAGE_SIZE,
1244 DMA_TO_DEVICE); 1244 DMA_TO_DEVICE);
1245 1245
1246 tx = iop_adma_prep_dma_pq(dma_chan, pq_dest, pq_src, 1246 tx = iop_adma_prep_dma_pq(dma_chan, pq_dest, pq_src,
1247 IOP_ADMA_NUM_SRC_TEST, (u8 *)raid6_gfexp, 1247 IOP_ADMA_NUM_SRC_TEST, (u8 *)raid6_gfexp,
1248 PAGE_SIZE, 1248 PAGE_SIZE,
1249 DMA_PREP_INTERRUPT | 1249 DMA_PREP_INTERRUPT |
1250 DMA_CTRL_ACK); 1250 DMA_CTRL_ACK);
1251 1251
1252 cookie = iop_adma_tx_submit(tx); 1252 cookie = iop_adma_tx_submit(tx);
1253 iop_adma_issue_pending(dma_chan); 1253 iop_adma_issue_pending(dma_chan);
1254 msleep(8); 1254 msleep(8);
1255 1255
1256 if (iop_adma_status(dma_chan, cookie, NULL) != 1256 if (iop_adma_status(dma_chan, cookie, NULL) !=
1257 DMA_SUCCESS) { 1257 DMA_COMPLETE) {
1258 dev_err(dev, "Self-test pq timed out, disabling\n"); 1258 dev_err(dev, "Self-test pq timed out, disabling\n");
1259 err = -ENODEV; 1259 err = -ENODEV;
1260 goto free_resources; 1260 goto free_resources;
1261 } 1261 }
1262 1262
1263 raid6_call.gen_syndrome(IOP_ADMA_NUM_SRC_TEST+2, PAGE_SIZE, pq_sw); 1263 raid6_call.gen_syndrome(IOP_ADMA_NUM_SRC_TEST+2, PAGE_SIZE, pq_sw);
1264 1264
1265 if (memcmp(pq_sw[IOP_ADMA_NUM_SRC_TEST], 1265 if (memcmp(pq_sw[IOP_ADMA_NUM_SRC_TEST],
1266 page_address(pq_hw[0]), PAGE_SIZE) != 0) { 1266 page_address(pq_hw[0]), PAGE_SIZE) != 0) {
1267 dev_err(dev, "Self-test p failed compare, disabling\n"); 1267 dev_err(dev, "Self-test p failed compare, disabling\n");
1268 err = -ENODEV; 1268 err = -ENODEV;
1269 goto free_resources; 1269 goto free_resources;
1270 } 1270 }
1271 if (memcmp(pq_sw[IOP_ADMA_NUM_SRC_TEST+1], 1271 if (memcmp(pq_sw[IOP_ADMA_NUM_SRC_TEST+1],
1272 page_address(pq_hw[1]), PAGE_SIZE) != 0) { 1272 page_address(pq_hw[1]), PAGE_SIZE) != 0) {
1273 dev_err(dev, "Self-test q failed compare, disabling\n"); 1273 dev_err(dev, "Self-test q failed compare, disabling\n");
1274 err = -ENODEV; 1274 err = -ENODEV;
1275 goto free_resources; 1275 goto free_resources;
1276 } 1276 }
1277 1277
1278 /* test correct zero sum using the software generated pq values */ 1278 /* test correct zero sum using the software generated pq values */
1279 for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 2; i++) 1279 for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 2; i++)
1280 pq_src[i] = dma_map_page(dev, pq[i], 0, PAGE_SIZE, 1280 pq_src[i] = dma_map_page(dev, pq[i], 0, PAGE_SIZE,
1281 DMA_TO_DEVICE); 1281 DMA_TO_DEVICE);
1282 1282
1283 zero_sum_result = ~0; 1283 zero_sum_result = ~0;
1284 tx = iop_adma_prep_dma_pq_val(dma_chan, &pq_src[IOP_ADMA_NUM_SRC_TEST], 1284 tx = iop_adma_prep_dma_pq_val(dma_chan, &pq_src[IOP_ADMA_NUM_SRC_TEST],
1285 pq_src, IOP_ADMA_NUM_SRC_TEST, 1285 pq_src, IOP_ADMA_NUM_SRC_TEST,
1286 raid6_gfexp, PAGE_SIZE, &zero_sum_result, 1286 raid6_gfexp, PAGE_SIZE, &zero_sum_result,
1287 DMA_PREP_INTERRUPT|DMA_CTRL_ACK); 1287 DMA_PREP_INTERRUPT|DMA_CTRL_ACK);
1288 1288
1289 cookie = iop_adma_tx_submit(tx); 1289 cookie = iop_adma_tx_submit(tx);
1290 iop_adma_issue_pending(dma_chan); 1290 iop_adma_issue_pending(dma_chan);
1291 msleep(8); 1291 msleep(8);
1292 1292
1293 if (iop_adma_status(dma_chan, cookie, NULL) != 1293 if (iop_adma_status(dma_chan, cookie, NULL) !=
1294 DMA_SUCCESS) { 1294 DMA_COMPLETE) {
1295 dev_err(dev, "Self-test pq-zero-sum timed out, disabling\n"); 1295 dev_err(dev, "Self-test pq-zero-sum timed out, disabling\n");
1296 err = -ENODEV; 1296 err = -ENODEV;
1297 goto free_resources; 1297 goto free_resources;
1298 } 1298 }
1299 1299
1300 if (zero_sum_result != 0) { 1300 if (zero_sum_result != 0) {
1301 dev_err(dev, "Self-test pq-zero-sum failed to validate: %x\n", 1301 dev_err(dev, "Self-test pq-zero-sum failed to validate: %x\n",
1302 zero_sum_result); 1302 zero_sum_result);
1303 err = -ENODEV; 1303 err = -ENODEV;
1304 goto free_resources; 1304 goto free_resources;
1305 } 1305 }
1306 1306
1307 /* test incorrect zero sum */ 1307 /* test incorrect zero sum */
1308 i = IOP_ADMA_NUM_SRC_TEST; 1308 i = IOP_ADMA_NUM_SRC_TEST;
1309 memset(pq_sw[i] + 100, 0, 100); 1309 memset(pq_sw[i] + 100, 0, 100);
1310 memset(pq_sw[i+1] + 200, 0, 200); 1310 memset(pq_sw[i+1] + 200, 0, 200);
1311 for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 2; i++) 1311 for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 2; i++)
1312 pq_src[i] = dma_map_page(dev, pq[i], 0, PAGE_SIZE, 1312 pq_src[i] = dma_map_page(dev, pq[i], 0, PAGE_SIZE,
1313 DMA_TO_DEVICE); 1313 DMA_TO_DEVICE);
1314 1314
1315 zero_sum_result = 0; 1315 zero_sum_result = 0;
1316 tx = iop_adma_prep_dma_pq_val(dma_chan, &pq_src[IOP_ADMA_NUM_SRC_TEST], 1316 tx = iop_adma_prep_dma_pq_val(dma_chan, &pq_src[IOP_ADMA_NUM_SRC_TEST],
1317 pq_src, IOP_ADMA_NUM_SRC_TEST, 1317 pq_src, IOP_ADMA_NUM_SRC_TEST,
1318 raid6_gfexp, PAGE_SIZE, &zero_sum_result, 1318 raid6_gfexp, PAGE_SIZE, &zero_sum_result,
1319 DMA_PREP_INTERRUPT|DMA_CTRL_ACK); 1319 DMA_PREP_INTERRUPT|DMA_CTRL_ACK);
1320 1320
1321 cookie = iop_adma_tx_submit(tx); 1321 cookie = iop_adma_tx_submit(tx);
1322 iop_adma_issue_pending(dma_chan); 1322 iop_adma_issue_pending(dma_chan);
1323 msleep(8); 1323 msleep(8);
1324 1324
1325 if (iop_adma_status(dma_chan, cookie, NULL) != 1325 if (iop_adma_status(dma_chan, cookie, NULL) !=
1326 DMA_SUCCESS) { 1326 DMA_COMPLETE) {
1327 dev_err(dev, "Self-test !pq-zero-sum timed out, disabling\n"); 1327 dev_err(dev, "Self-test !pq-zero-sum timed out, disabling\n");
1328 err = -ENODEV; 1328 err = -ENODEV;
1329 goto free_resources; 1329 goto free_resources;
1330 } 1330 }
1331 1331
1332 if (zero_sum_result != (SUM_CHECK_P_RESULT | SUM_CHECK_Q_RESULT)) { 1332 if (zero_sum_result != (SUM_CHECK_P_RESULT | SUM_CHECK_Q_RESULT)) {
1333 dev_err(dev, "Self-test !pq-zero-sum failed to validate: %x\n", 1333 dev_err(dev, "Self-test !pq-zero-sum failed to validate: %x\n",
1334 zero_sum_result); 1334 zero_sum_result);
1335 err = -ENODEV; 1335 err = -ENODEV;
1336 goto free_resources; 1336 goto free_resources;
1337 } 1337 }
1338 1338
1339 free_resources: 1339 free_resources:
1340 iop_adma_free_chan_resources(dma_chan); 1340 iop_adma_free_chan_resources(dma_chan);
1341 out: 1341 out:
1342 i = ARRAY_SIZE(pq); 1342 i = ARRAY_SIZE(pq);
1343 while (i--) 1343 while (i--)
1344 __free_page(pq[i]); 1344 __free_page(pq[i]);
1345 return err; 1345 return err;
1346 } 1346 }
1347 #endif 1347 #endif
1348 1348
1349 static int iop_adma_remove(struct platform_device *dev) 1349 static int iop_adma_remove(struct platform_device *dev)
1350 { 1350 {
1351 struct iop_adma_device *device = platform_get_drvdata(dev); 1351 struct iop_adma_device *device = platform_get_drvdata(dev);
1352 struct dma_chan *chan, *_chan; 1352 struct dma_chan *chan, *_chan;
1353 struct iop_adma_chan *iop_chan; 1353 struct iop_adma_chan *iop_chan;
1354 struct iop_adma_platform_data *plat_data = dev_get_platdata(&dev->dev); 1354 struct iop_adma_platform_data *plat_data = dev_get_platdata(&dev->dev);
1355 1355
1356 dma_async_device_unregister(&device->common); 1356 dma_async_device_unregister(&device->common);
1357 1357
1358 dma_free_coherent(&dev->dev, plat_data->pool_size, 1358 dma_free_coherent(&dev->dev, plat_data->pool_size,
1359 device->dma_desc_pool_virt, device->dma_desc_pool); 1359 device->dma_desc_pool_virt, device->dma_desc_pool);
1360 1360
1361 list_for_each_entry_safe(chan, _chan, &device->common.channels, 1361 list_for_each_entry_safe(chan, _chan, &device->common.channels,
1362 device_node) { 1362 device_node) {
1363 iop_chan = to_iop_adma_chan(chan); 1363 iop_chan = to_iop_adma_chan(chan);
1364 list_del(&chan->device_node); 1364 list_del(&chan->device_node);
1365 kfree(iop_chan); 1365 kfree(iop_chan);
1366 } 1366 }
1367 kfree(device); 1367 kfree(device);
1368 1368
1369 return 0; 1369 return 0;
1370 } 1370 }
1371 1371
1372 static int iop_adma_probe(struct platform_device *pdev) 1372 static int iop_adma_probe(struct platform_device *pdev)
1373 { 1373 {
1374 struct resource *res; 1374 struct resource *res;
1375 int ret = 0, i; 1375 int ret = 0, i;
1376 struct iop_adma_device *adev; 1376 struct iop_adma_device *adev;
1377 struct iop_adma_chan *iop_chan; 1377 struct iop_adma_chan *iop_chan;
1378 struct dma_device *dma_dev; 1378 struct dma_device *dma_dev;
1379 struct iop_adma_platform_data *plat_data = dev_get_platdata(&pdev->dev); 1379 struct iop_adma_platform_data *plat_data = dev_get_platdata(&pdev->dev);
1380 1380
1381 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1381 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1382 if (!res) 1382 if (!res)
1383 return -ENODEV; 1383 return -ENODEV;
1384 1384
1385 if (!devm_request_mem_region(&pdev->dev, res->start, 1385 if (!devm_request_mem_region(&pdev->dev, res->start,
1386 resource_size(res), pdev->name)) 1386 resource_size(res), pdev->name))
1387 return -EBUSY; 1387 return -EBUSY;
1388 1388
1389 adev = kzalloc(sizeof(*adev), GFP_KERNEL); 1389 adev = kzalloc(sizeof(*adev), GFP_KERNEL);
1390 if (!adev) 1390 if (!adev)
1391 return -ENOMEM; 1391 return -ENOMEM;
1392 dma_dev = &adev->common; 1392 dma_dev = &adev->common;
1393 1393
1394 /* allocate coherent memory for hardware descriptors 1394 /* allocate coherent memory for hardware descriptors
1395 * note: writecombine gives slightly better performance, but 1395 * note: writecombine gives slightly better performance, but
1396 * requires that we explicitly flush the writes 1396 * requires that we explicitly flush the writes
1397 */ 1397 */
1398 if ((adev->dma_desc_pool_virt = dma_alloc_writecombine(&pdev->dev, 1398 if ((adev->dma_desc_pool_virt = dma_alloc_writecombine(&pdev->dev,
1399 plat_data->pool_size, 1399 plat_data->pool_size,
1400 &adev->dma_desc_pool, 1400 &adev->dma_desc_pool,
1401 GFP_KERNEL)) == NULL) { 1401 GFP_KERNEL)) == NULL) {
1402 ret = -ENOMEM; 1402 ret = -ENOMEM;
1403 goto err_free_adev; 1403 goto err_free_adev;
1404 } 1404 }
1405 1405
1406 dev_dbg(&pdev->dev, "%s: allocated descriptor pool virt %p phys %p\n", 1406 dev_dbg(&pdev->dev, "%s: allocated descriptor pool virt %p phys %p\n",
1407 __func__, adev->dma_desc_pool_virt, 1407 __func__, adev->dma_desc_pool_virt,
1408 (void *) adev->dma_desc_pool); 1408 (void *) adev->dma_desc_pool);
1409 1409
1410 adev->id = plat_data->hw_id; 1410 adev->id = plat_data->hw_id;
1411 1411
1412 /* discover transaction capabilities from the platform data */ 1412 /* discover transaction capabilities from the platform data */
1413 dma_dev->cap_mask = plat_data->cap_mask; 1413 dma_dev->cap_mask = plat_data->cap_mask;
1414 1414
1415 adev->pdev = pdev; 1415 adev->pdev = pdev;
1416 platform_set_drvdata(pdev, adev); 1416 platform_set_drvdata(pdev, adev);
1417 1417
1418 INIT_LIST_HEAD(&dma_dev->channels); 1418 INIT_LIST_HEAD(&dma_dev->channels);
1419 1419
1420 /* set base routines */ 1420 /* set base routines */
1421 dma_dev->device_alloc_chan_resources = iop_adma_alloc_chan_resources; 1421 dma_dev->device_alloc_chan_resources = iop_adma_alloc_chan_resources;
1422 dma_dev->device_free_chan_resources = iop_adma_free_chan_resources; 1422 dma_dev->device_free_chan_resources = iop_adma_free_chan_resources;
1423 dma_dev->device_tx_status = iop_adma_status; 1423 dma_dev->device_tx_status = iop_adma_status;
1424 dma_dev->device_issue_pending = iop_adma_issue_pending; 1424 dma_dev->device_issue_pending = iop_adma_issue_pending;
1425 dma_dev->dev = &pdev->dev; 1425 dma_dev->dev = &pdev->dev;
1426 1426
1427 /* set prep routines based on capability */ 1427 /* set prep routines based on capability */
1428 if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) 1428 if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
1429 dma_dev->device_prep_dma_memcpy = iop_adma_prep_dma_memcpy; 1429 dma_dev->device_prep_dma_memcpy = iop_adma_prep_dma_memcpy;
1430 if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) { 1430 if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
1431 dma_dev->max_xor = iop_adma_get_max_xor(); 1431 dma_dev->max_xor = iop_adma_get_max_xor();
1432 dma_dev->device_prep_dma_xor = iop_adma_prep_dma_xor; 1432 dma_dev->device_prep_dma_xor = iop_adma_prep_dma_xor;
1433 } 1433 }
1434 if (dma_has_cap(DMA_XOR_VAL, dma_dev->cap_mask)) 1434 if (dma_has_cap(DMA_XOR_VAL, dma_dev->cap_mask))
1435 dma_dev->device_prep_dma_xor_val = 1435 dma_dev->device_prep_dma_xor_val =
1436 iop_adma_prep_dma_xor_val; 1436 iop_adma_prep_dma_xor_val;
1437 if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) { 1437 if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
1438 dma_set_maxpq(dma_dev, iop_adma_get_max_pq(), 0); 1438 dma_set_maxpq(dma_dev, iop_adma_get_max_pq(), 0);
1439 dma_dev->device_prep_dma_pq = iop_adma_prep_dma_pq; 1439 dma_dev->device_prep_dma_pq = iop_adma_prep_dma_pq;
1440 } 1440 }
1441 if (dma_has_cap(DMA_PQ_VAL, dma_dev->cap_mask)) 1441 if (dma_has_cap(DMA_PQ_VAL, dma_dev->cap_mask))
1442 dma_dev->device_prep_dma_pq_val = 1442 dma_dev->device_prep_dma_pq_val =
1443 iop_adma_prep_dma_pq_val; 1443 iop_adma_prep_dma_pq_val;
1444 if (dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask)) 1444 if (dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask))
1445 dma_dev->device_prep_dma_interrupt = 1445 dma_dev->device_prep_dma_interrupt =
1446 iop_adma_prep_dma_interrupt; 1446 iop_adma_prep_dma_interrupt;
1447 1447
1448 iop_chan = kzalloc(sizeof(*iop_chan), GFP_KERNEL); 1448 iop_chan = kzalloc(sizeof(*iop_chan), GFP_KERNEL);
1449 if (!iop_chan) { 1449 if (!iop_chan) {
1450 ret = -ENOMEM; 1450 ret = -ENOMEM;
1451 goto err_free_dma; 1451 goto err_free_dma;
1452 } 1452 }
1453 iop_chan->device = adev; 1453 iop_chan->device = adev;
1454 1454
1455 iop_chan->mmr_base = devm_ioremap(&pdev->dev, res->start, 1455 iop_chan->mmr_base = devm_ioremap(&pdev->dev, res->start,
1456 resource_size(res)); 1456 resource_size(res));
1457 if (!iop_chan->mmr_base) { 1457 if (!iop_chan->mmr_base) {
1458 ret = -ENOMEM; 1458 ret = -ENOMEM;
1459 goto err_free_iop_chan; 1459 goto err_free_iop_chan;
1460 } 1460 }
1461 tasklet_init(&iop_chan->irq_tasklet, iop_adma_tasklet, (unsigned long) 1461 tasklet_init(&iop_chan->irq_tasklet, iop_adma_tasklet, (unsigned long)
1462 iop_chan); 1462 iop_chan);
1463 1463
1464 /* clear errors before enabling interrupts */ 1464 /* clear errors before enabling interrupts */
1465 iop_adma_device_clear_err_status(iop_chan); 1465 iop_adma_device_clear_err_status(iop_chan);
1466 1466
1467 for (i = 0; i < 3; i++) { 1467 for (i = 0; i < 3; i++) {
1468 irq_handler_t handler[] = { iop_adma_eot_handler, 1468 irq_handler_t handler[] = { iop_adma_eot_handler,
1469 iop_adma_eoc_handler, 1469 iop_adma_eoc_handler,
1470 iop_adma_err_handler }; 1470 iop_adma_err_handler };
1471 int irq = platform_get_irq(pdev, i); 1471 int irq = platform_get_irq(pdev, i);
1472 if (irq < 0) { 1472 if (irq < 0) {
1473 ret = -ENXIO; 1473 ret = -ENXIO;
1474 goto err_free_iop_chan; 1474 goto err_free_iop_chan;
1475 } else { 1475 } else {
1476 ret = devm_request_irq(&pdev->dev, irq, 1476 ret = devm_request_irq(&pdev->dev, irq,
1477 handler[i], 0, pdev->name, iop_chan); 1477 handler[i], 0, pdev->name, iop_chan);
1478 if (ret) 1478 if (ret)
1479 goto err_free_iop_chan; 1479 goto err_free_iop_chan;
1480 } 1480 }
1481 } 1481 }
1482 1482
1483 spin_lock_init(&iop_chan->lock); 1483 spin_lock_init(&iop_chan->lock);
1484 INIT_LIST_HEAD(&iop_chan->chain); 1484 INIT_LIST_HEAD(&iop_chan->chain);
1485 INIT_LIST_HEAD(&iop_chan->all_slots); 1485 INIT_LIST_HEAD(&iop_chan->all_slots);
1486 iop_chan->common.device = dma_dev; 1486 iop_chan->common.device = dma_dev;
1487 dma_cookie_init(&iop_chan->common); 1487 dma_cookie_init(&iop_chan->common);
1488 list_add_tail(&iop_chan->common.device_node, &dma_dev->channels); 1488 list_add_tail(&iop_chan->common.device_node, &dma_dev->channels);
1489 1489
1490 if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) { 1490 if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
1491 ret = iop_adma_memcpy_self_test(adev); 1491 ret = iop_adma_memcpy_self_test(adev);
1492 dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret); 1492 dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret);
1493 if (ret) 1493 if (ret)
1494 goto err_free_iop_chan; 1494 goto err_free_iop_chan;
1495 } 1495 }
1496 1496
1497 if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) { 1497 if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
1498 ret = iop_adma_xor_val_self_test(adev); 1498 ret = iop_adma_xor_val_self_test(adev);
1499 dev_dbg(&pdev->dev, "xor self test returned %d\n", ret); 1499 dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);
1500 if (ret) 1500 if (ret)
1501 goto err_free_iop_chan; 1501 goto err_free_iop_chan;
1502 } 1502 }
1503 1503
1504 if (dma_has_cap(DMA_PQ, dma_dev->cap_mask) && 1504 if (dma_has_cap(DMA_PQ, dma_dev->cap_mask) &&
1505 dma_has_cap(DMA_PQ_VAL, dma_dev->cap_mask)) { 1505 dma_has_cap(DMA_PQ_VAL, dma_dev->cap_mask)) {
1506 #ifdef CONFIG_RAID6_PQ 1506 #ifdef CONFIG_RAID6_PQ
1507 ret = iop_adma_pq_zero_sum_self_test(adev); 1507 ret = iop_adma_pq_zero_sum_self_test(adev);
1508 dev_dbg(&pdev->dev, "pq self test returned %d\n", ret); 1508 dev_dbg(&pdev->dev, "pq self test returned %d\n", ret);
1509 #else 1509 #else
1510 /* cannot test raid6, so do not publish capability */ 1510 /* cannot test raid6, so do not publish capability */
1511 dma_cap_clear(DMA_PQ, dma_dev->cap_mask); 1511 dma_cap_clear(DMA_PQ, dma_dev->cap_mask);
1512 dma_cap_clear(DMA_PQ_VAL, dma_dev->cap_mask); 1512 dma_cap_clear(DMA_PQ_VAL, dma_dev->cap_mask);
1513 ret = 0; 1513 ret = 0;
1514 #endif 1514 #endif
1515 if (ret) 1515 if (ret)
1516 goto err_free_iop_chan; 1516 goto err_free_iop_chan;
1517 } 1517 }
1518 1518
1519 dev_info(&pdev->dev, "Intel(R) IOP: ( %s%s%s%s%s%s)\n", 1519 dev_info(&pdev->dev, "Intel(R) IOP: ( %s%s%s%s%s%s)\n",
1520 dma_has_cap(DMA_PQ, dma_dev->cap_mask) ? "pq " : "", 1520 dma_has_cap(DMA_PQ, dma_dev->cap_mask) ? "pq " : "",
1521 dma_has_cap(DMA_PQ_VAL, dma_dev->cap_mask) ? "pq_val " : "", 1521 dma_has_cap(DMA_PQ_VAL, dma_dev->cap_mask) ? "pq_val " : "",
1522 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "", 1522 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
1523 dma_has_cap(DMA_XOR_VAL, dma_dev->cap_mask) ? "xor_val " : "", 1523 dma_has_cap(DMA_XOR_VAL, dma_dev->cap_mask) ? "xor_val " : "",
1524 dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "", 1524 dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
1525 dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : ""); 1525 dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");
1526 1526
1527 dma_async_device_register(dma_dev); 1527 dma_async_device_register(dma_dev);
1528 goto out; 1528 goto out;
1529 1529
1530 err_free_iop_chan: 1530 err_free_iop_chan:
1531 kfree(iop_chan); 1531 kfree(iop_chan);
1532 err_free_dma: 1532 err_free_dma:
1533 dma_free_coherent(&adev->pdev->dev, plat_data->pool_size, 1533 dma_free_coherent(&adev->pdev->dev, plat_data->pool_size,
1534 adev->dma_desc_pool_virt, adev->dma_desc_pool); 1534 adev->dma_desc_pool_virt, adev->dma_desc_pool);
1535 err_free_adev: 1535 err_free_adev:
1536 kfree(adev); 1536 kfree(adev);
1537 out: 1537 out:
1538 return ret; 1538 return ret;
1539 } 1539 }
1540 1540
1541 static void iop_chan_start_null_memcpy(struct iop_adma_chan *iop_chan) 1541 static void iop_chan_start_null_memcpy(struct iop_adma_chan *iop_chan)
1542 { 1542 {
1543 struct iop_adma_desc_slot *sw_desc, *grp_start; 1543 struct iop_adma_desc_slot *sw_desc, *grp_start;
1544 dma_cookie_t cookie; 1544 dma_cookie_t cookie;
1545 int slot_cnt, slots_per_op; 1545 int slot_cnt, slots_per_op;
1546 1546
1547 dev_dbg(iop_chan->device->common.dev, "%s\n", __func__); 1547 dev_dbg(iop_chan->device->common.dev, "%s\n", __func__);
1548 1548
1549 spin_lock_bh(&iop_chan->lock); 1549 spin_lock_bh(&iop_chan->lock);
1550 slot_cnt = iop_chan_memcpy_slot_count(0, &slots_per_op); 1550 slot_cnt = iop_chan_memcpy_slot_count(0, &slots_per_op);
1551 sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op); 1551 sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
1552 if (sw_desc) { 1552 if (sw_desc) {
1553 grp_start = sw_desc->group_head; 1553 grp_start = sw_desc->group_head;
1554 1554
1555 list_splice_init(&sw_desc->tx_list, &iop_chan->chain); 1555 list_splice_init(&sw_desc->tx_list, &iop_chan->chain);
1556 async_tx_ack(&sw_desc->async_tx); 1556 async_tx_ack(&sw_desc->async_tx);
1557 iop_desc_init_memcpy(grp_start, 0); 1557 iop_desc_init_memcpy(grp_start, 0);
1558 iop_desc_set_byte_count(grp_start, iop_chan, 0); 1558 iop_desc_set_byte_count(grp_start, iop_chan, 0);
1559 iop_desc_set_dest_addr(grp_start, iop_chan, 0); 1559 iop_desc_set_dest_addr(grp_start, iop_chan, 0);
1560 iop_desc_set_memcpy_src_addr(grp_start, 0); 1560 iop_desc_set_memcpy_src_addr(grp_start, 0);
1561 1561
1562 cookie = dma_cookie_assign(&sw_desc->async_tx); 1562 cookie = dma_cookie_assign(&sw_desc->async_tx);
1563 1563
1564 /* initialize the completed cookie to be less than 1564 /* initialize the completed cookie to be less than
1565 * the most recently used cookie 1565 * the most recently used cookie
1566 */ 1566 */
1567 iop_chan->common.completed_cookie = cookie - 1; 1567 iop_chan->common.completed_cookie = cookie - 1;
1568 1568
1569 /* channel should not be busy */ 1569 /* channel should not be busy */
1570 BUG_ON(iop_chan_is_busy(iop_chan)); 1570 BUG_ON(iop_chan_is_busy(iop_chan));
1571 1571
1572 /* clear any prior error-status bits */ 1572 /* clear any prior error-status bits */
1573 iop_adma_device_clear_err_status(iop_chan); 1573 iop_adma_device_clear_err_status(iop_chan);
1574 1574
1575 /* disable operation */ 1575 /* disable operation */
1576 iop_chan_disable(iop_chan); 1576 iop_chan_disable(iop_chan);
1577 1577
1578 /* set the descriptor address */ 1578 /* set the descriptor address */
1579 iop_chan_set_next_descriptor(iop_chan, sw_desc->async_tx.phys); 1579 iop_chan_set_next_descriptor(iop_chan, sw_desc->async_tx.phys);
1580 1580
1581 /* 1/ don't add pre-chained descriptors 1581 /* 1/ don't add pre-chained descriptors
1582 * 2/ dummy read to flush next_desc write 1582 * 2/ dummy read to flush next_desc write
1583 */ 1583 */
1584 BUG_ON(iop_desc_get_next_desc(sw_desc)); 1584 BUG_ON(iop_desc_get_next_desc(sw_desc));
1585 1585
1586 /* run the descriptor */ 1586 /* run the descriptor */
1587 iop_chan_enable(iop_chan); 1587 iop_chan_enable(iop_chan);
1588 } else 1588 } else
1589 dev_err(iop_chan->device->common.dev, 1589 dev_err(iop_chan->device->common.dev,
1590 "failed to allocate null descriptor\n"); 1590 "failed to allocate null descriptor\n");
1591 spin_unlock_bh(&iop_chan->lock); 1591 spin_unlock_bh(&iop_chan->lock);
1592 } 1592 }
1593 1593
1594 static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan) 1594 static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan)
1595 { 1595 {
1596 struct iop_adma_desc_slot *sw_desc, *grp_start; 1596 struct iop_adma_desc_slot *sw_desc, *grp_start;
1597 dma_cookie_t cookie; 1597 dma_cookie_t cookie;
1598 int slot_cnt, slots_per_op; 1598 int slot_cnt, slots_per_op;
1599 1599
1600 dev_dbg(iop_chan->device->common.dev, "%s\n", __func__); 1600 dev_dbg(iop_chan->device->common.dev, "%s\n", __func__);
1601 1601
1602 spin_lock_bh(&iop_chan->lock); 1602 spin_lock_bh(&iop_chan->lock);
1603 slot_cnt = iop_chan_xor_slot_count(0, 2, &slots_per_op); 1603 slot_cnt = iop_chan_xor_slot_count(0, 2, &slots_per_op);
1604 sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op); 1604 sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
1605 if (sw_desc) { 1605 if (sw_desc) {
1606 grp_start = sw_desc->group_head; 1606 grp_start = sw_desc->group_head;
1607 list_splice_init(&sw_desc->tx_list, &iop_chan->chain); 1607 list_splice_init(&sw_desc->tx_list, &iop_chan->chain);
1608 async_tx_ack(&sw_desc->async_tx); 1608 async_tx_ack(&sw_desc->async_tx);
1609 iop_desc_init_null_xor(grp_start, 2, 0); 1609 iop_desc_init_null_xor(grp_start, 2, 0);
1610 iop_desc_set_byte_count(grp_start, iop_chan, 0); 1610 iop_desc_set_byte_count(grp_start, iop_chan, 0);
1611 iop_desc_set_dest_addr(grp_start, iop_chan, 0); 1611 iop_desc_set_dest_addr(grp_start, iop_chan, 0);
1612 iop_desc_set_xor_src_addr(grp_start, 0, 0); 1612 iop_desc_set_xor_src_addr(grp_start, 0, 0);
1613 iop_desc_set_xor_src_addr(grp_start, 1, 0); 1613 iop_desc_set_xor_src_addr(grp_start, 1, 0);
1614 1614
1615 cookie = dma_cookie_assign(&sw_desc->async_tx); 1615 cookie = dma_cookie_assign(&sw_desc->async_tx);
1616 1616
1617 /* initialize the completed cookie to be less than 1617 /* initialize the completed cookie to be less than
1618 * the most recently used cookie 1618 * the most recently used cookie
1619 */ 1619 */
1620 iop_chan->common.completed_cookie = cookie - 1; 1620 iop_chan->common.completed_cookie = cookie - 1;
1621 1621
1622 /* channel should not be busy */ 1622 /* channel should not be busy */
1623 BUG_ON(iop_chan_is_busy(iop_chan)); 1623 BUG_ON(iop_chan_is_busy(iop_chan));
1624 1624
1625 /* clear any prior error-status bits */ 1625 /* clear any prior error-status bits */
1626 iop_adma_device_clear_err_status(iop_chan); 1626 iop_adma_device_clear_err_status(iop_chan);
1627 1627
1628 /* disable operation */ 1628 /* disable operation */
1629 iop_chan_disable(iop_chan); 1629 iop_chan_disable(iop_chan);
1630 1630
1631 /* set the descriptor address */ 1631 /* set the descriptor address */
1632 iop_chan_set_next_descriptor(iop_chan, sw_desc->async_tx.phys); 1632 iop_chan_set_next_descriptor(iop_chan, sw_desc->async_tx.phys);
1633 1633
1634 /* 1/ don't add pre-chained descriptors 1634 /* 1/ don't add pre-chained descriptors
1635 * 2/ dummy read to flush next_desc write 1635 * 2/ dummy read to flush next_desc write
1636 */ 1636 */
1637 BUG_ON(iop_desc_get_next_desc(sw_desc)); 1637 BUG_ON(iop_desc_get_next_desc(sw_desc));
1638 1638
1639 /* run the descriptor */ 1639 /* run the descriptor */
1640 iop_chan_enable(iop_chan); 1640 iop_chan_enable(iop_chan);
1641 } else 1641 } else
1642 dev_err(iop_chan->device->common.dev, 1642 dev_err(iop_chan->device->common.dev,
1643 "failed to allocate null descriptor\n"); 1643 "failed to allocate null descriptor\n");
1644 spin_unlock_bh(&iop_chan->lock); 1644 spin_unlock_bh(&iop_chan->lock);
1645 } 1645 }
1646 1646
1647 static struct platform_driver iop_adma_driver = { 1647 static struct platform_driver iop_adma_driver = {
1648 .probe = iop_adma_probe, 1648 .probe = iop_adma_probe,
1649 .remove = iop_adma_remove, 1649 .remove = iop_adma_remove,
1650 .driver = { 1650 .driver = {
1651 .owner = THIS_MODULE, 1651 .owner = THIS_MODULE,
1652 .name = "iop-adma", 1652 .name = "iop-adma",
1653 }, 1653 },
1654 }; 1654 };
1655 1655
1656 module_platform_driver(iop_adma_driver); 1656 module_platform_driver(iop_adma_driver);
1657 1657
1658 MODULE_AUTHOR("Intel Corporation"); 1658 MODULE_AUTHOR("Intel Corporation");
1659 MODULE_DESCRIPTION("IOP ADMA Engine Driver"); 1659 MODULE_DESCRIPTION("IOP ADMA Engine Driver");
1660 MODULE_LICENSE("GPL"); 1660 MODULE_LICENSE("GPL");
1661 MODULE_ALIAS("platform:iop-adma"); 1661 MODULE_ALIAS("platform:iop-adma");
1662 1662
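For reference, the submit-and-poll pattern used by the self-tests above is the same one a generic dmaengine client follows, now keyed on DMA_COMPLETE rather than the old DMA_SUCCESS. A minimal sketch, assuming a channel has already been requested and dst/src are DMA-mapped bus addresses for that channel's device; example_memcpy_poll is a hypothetical name and the open-coded busy-wait is for illustration only (real clients bound the wait or use a completion callback):

#include <linux/dmaengine.h>

static int example_memcpy_poll(struct dma_chan *chan,
			       dma_addr_t dst, dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	/* build a hardware descriptor for the copy */
	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx)
		return -ENOMEM;

	cookie = dmaengine_submit(tx);	/* queue it on the channel */
	dma_async_issue_pending(chan);	/* tell the driver to start */

	/* poll the channel; drivers report DMA_COMPLETE once the cookie
	 * has retired
	 */
	while (dma_async_is_tx_complete(chan, cookie, NULL, NULL) !=
	       DMA_COMPLETE)
		cpu_relax();

	return 0;
}

This mirrors what iop_adma_memcpy_self_test does above through the driver's own prep, submit and status entry points.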
1 /* 1 /*
2 * Copyright (c) 2013 Linaro Ltd. 2 * Copyright (c) 2013 Linaro Ltd.
3 * Copyright (c) 2013 Hisilicon Limited. 3 * Copyright (c) 2013 Hisilicon Limited.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as 6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation. 7 * published by the Free Software Foundation.
8 */ 8 */
9 #include <linux/sched.h> 9 #include <linux/sched.h>
10 #include <linux/device.h> 10 #include <linux/device.h>
11 #include <linux/dmaengine.h> 11 #include <linux/dmaengine.h>
12 #include <linux/init.h> 12 #include <linux/init.h>
13 #include <linux/interrupt.h> 13 #include <linux/interrupt.h>
14 #include <linux/kernel.h> 14 #include <linux/kernel.h>
15 #include <linux/module.h> 15 #include <linux/module.h>
16 #include <linux/platform_device.h> 16 #include <linux/platform_device.h>
17 #include <linux/slab.h> 17 #include <linux/slab.h>
18 #include <linux/spinlock.h> 18 #include <linux/spinlock.h>
19 #include <linux/of_device.h> 19 #include <linux/of_device.h>
20 #include <linux/of.h> 20 #include <linux/of.h>
21 #include <linux/clk.h> 21 #include <linux/clk.h>
22 #include <linux/of_dma.h> 22 #include <linux/of_dma.h>
23 23
24 #include "virt-dma.h" 24 #include "virt-dma.h"
25 25
26 #define DRIVER_NAME "k3-dma" 26 #define DRIVER_NAME "k3-dma"
27 #define DMA_ALIGN 3 27 #define DMA_ALIGN 3
28 #define DMA_MAX_SIZE 0x1ffc 28 #define DMA_MAX_SIZE 0x1ffc
29 29
30 #define INT_STAT 0x00 30 #define INT_STAT 0x00
31 #define INT_TC1 0x04 31 #define INT_TC1 0x04
32 #define INT_ERR1 0x0c 32 #define INT_ERR1 0x0c
33 #define INT_ERR2 0x10 33 #define INT_ERR2 0x10
34 #define INT_TC1_MASK 0x18 34 #define INT_TC1_MASK 0x18
35 #define INT_ERR1_MASK 0x20 35 #define INT_ERR1_MASK 0x20
36 #define INT_ERR2_MASK 0x24 36 #define INT_ERR2_MASK 0x24
37 #define INT_TC1_RAW 0x600 37 #define INT_TC1_RAW 0x600
38 #define INT_ERR1_RAW 0x608 38 #define INT_ERR1_RAW 0x608
39 #define INT_ERR2_RAW 0x610 39 #define INT_ERR2_RAW 0x610
40 #define CH_PRI 0x688 40 #define CH_PRI 0x688
41 #define CH_STAT 0x690 41 #define CH_STAT 0x690
42 #define CX_CUR_CNT 0x704 42 #define CX_CUR_CNT 0x704
43 #define CX_LLI 0x800 43 #define CX_LLI 0x800
44 #define CX_CNT 0x810 44 #define CX_CNT 0x810
45 #define CX_SRC 0x814 45 #define CX_SRC 0x814
46 #define CX_DST 0x818 46 #define CX_DST 0x818
47 #define CX_CFG 0x81c 47 #define CX_CFG 0x81c
48 #define AXI_CFG 0x820 48 #define AXI_CFG 0x820
49 #define AXI_CFG_DEFAULT 0x201201 49 #define AXI_CFG_DEFAULT 0x201201
50 50
51 #define CX_LLI_CHAIN_EN 0x2 51 #define CX_LLI_CHAIN_EN 0x2
52 #define CX_CFG_EN 0x1 52 #define CX_CFG_EN 0x1
53 #define CX_CFG_MEM2PER (0x1 << 2) 53 #define CX_CFG_MEM2PER (0x1 << 2)
54 #define CX_CFG_PER2MEM (0x2 << 2) 54 #define CX_CFG_PER2MEM (0x2 << 2)
55 #define CX_CFG_SRCINCR (0x1 << 31) 55 #define CX_CFG_SRCINCR (0x1 << 31)
56 #define CX_CFG_DSTINCR (0x1 << 30) 56 #define CX_CFG_DSTINCR (0x1 << 30)
57 57
58 struct k3_desc_hw { 58 struct k3_desc_hw {
59 u32 lli; 59 u32 lli;
60 u32 reserved[3]; 60 u32 reserved[3];
61 u32 count; 61 u32 count;
62 u32 saddr; 62 u32 saddr;
63 u32 daddr; 63 u32 daddr;
64 u32 config; 64 u32 config;
65 } __aligned(32); 65 } __aligned(32);
66 66
67 struct k3_dma_desc_sw { 67 struct k3_dma_desc_sw {
68 struct virt_dma_desc vd; 68 struct virt_dma_desc vd;
69 dma_addr_t desc_hw_lli; 69 dma_addr_t desc_hw_lli;
70 size_t desc_num; 70 size_t desc_num;
71 size_t size; 71 size_t size;
72 struct k3_desc_hw desc_hw[0]; 72 struct k3_desc_hw desc_hw[0];
73 }; 73 };
74 74
75 struct k3_dma_phy; 75 struct k3_dma_phy;
76 76
77 struct k3_dma_chan { 77 struct k3_dma_chan {
78 u32 ccfg; 78 u32 ccfg;
79 struct virt_dma_chan vc; 79 struct virt_dma_chan vc;
80 struct k3_dma_phy *phy; 80 struct k3_dma_phy *phy;
81 struct list_head node; 81 struct list_head node;
82 enum dma_transfer_direction dir; 82 enum dma_transfer_direction dir;
83 dma_addr_t dev_addr; 83 dma_addr_t dev_addr;
84 enum dma_status status; 84 enum dma_status status;
85 }; 85 };
86 86
87 struct k3_dma_phy { 87 struct k3_dma_phy {
88 u32 idx; 88 u32 idx;
89 void __iomem *base; 89 void __iomem *base;
90 struct k3_dma_chan *vchan; 90 struct k3_dma_chan *vchan;
91 struct k3_dma_desc_sw *ds_run; 91 struct k3_dma_desc_sw *ds_run;
92 struct k3_dma_desc_sw *ds_done; 92 struct k3_dma_desc_sw *ds_done;
93 }; 93 };
94 94
95 struct k3_dma_dev { 95 struct k3_dma_dev {
96 struct dma_device slave; 96 struct dma_device slave;
97 void __iomem *base; 97 void __iomem *base;
98 struct tasklet_struct task; 98 struct tasklet_struct task;
99 spinlock_t lock; 99 spinlock_t lock;
100 struct list_head chan_pending; 100 struct list_head chan_pending;
101 struct k3_dma_phy *phy; 101 struct k3_dma_phy *phy;
102 struct k3_dma_chan *chans; 102 struct k3_dma_chan *chans;
103 struct clk *clk; 103 struct clk *clk;
104 u32 dma_channels; 104 u32 dma_channels;
105 u32 dma_requests; 105 u32 dma_requests;
106 }; 106 };
107 107
108 #define to_k3_dma(dmadev) container_of(dmadev, struct k3_dma_dev, slave) 108 #define to_k3_dma(dmadev) container_of(dmadev, struct k3_dma_dev, slave)
109 109
110 static struct k3_dma_chan *to_k3_chan(struct dma_chan *chan) 110 static struct k3_dma_chan *to_k3_chan(struct dma_chan *chan)
111 { 111 {
112 return container_of(chan, struct k3_dma_chan, vc.chan); 112 return container_of(chan, struct k3_dma_chan, vc.chan);
113 } 113 }
114 114
115 static void k3_dma_pause_dma(struct k3_dma_phy *phy, bool on) 115 static void k3_dma_pause_dma(struct k3_dma_phy *phy, bool on)
116 { 116 {
117 u32 val = 0; 117 u32 val = 0;
118 118
119 if (on) { 119 if (on) {
120 val = readl_relaxed(phy->base + CX_CFG); 120 val = readl_relaxed(phy->base + CX_CFG);
121 val |= CX_CFG_EN; 121 val |= CX_CFG_EN;
122 writel_relaxed(val, phy->base + CX_CFG); 122 writel_relaxed(val, phy->base + CX_CFG);
123 } else { 123 } else {
124 val = readl_relaxed(phy->base + CX_CFG); 124 val = readl_relaxed(phy->base + CX_CFG);
125 val &= ~CX_CFG_EN; 125 val &= ~CX_CFG_EN;
126 writel_relaxed(val, phy->base + CX_CFG); 126 writel_relaxed(val, phy->base + CX_CFG);
127 } 127 }
128 } 128 }
129 129
130 static void k3_dma_terminate_chan(struct k3_dma_phy *phy, struct k3_dma_dev *d) 130 static void k3_dma_terminate_chan(struct k3_dma_phy *phy, struct k3_dma_dev *d)
131 { 131 {
132 u32 val = 0; 132 u32 val = 0;
133 133
134 k3_dma_pause_dma(phy, false); 134 k3_dma_pause_dma(phy, false);
135 135
136 val = 0x1 << phy->idx; 136 val = 0x1 << phy->idx;
137 writel_relaxed(val, d->base + INT_TC1_RAW); 137 writel_relaxed(val, d->base + INT_TC1_RAW);
138 writel_relaxed(val, d->base + INT_ERR1_RAW); 138 writel_relaxed(val, d->base + INT_ERR1_RAW);
139 writel_relaxed(val, d->base + INT_ERR2_RAW); 139 writel_relaxed(val, d->base + INT_ERR2_RAW);
140 } 140 }
141 141
142 static void k3_dma_set_desc(struct k3_dma_phy *phy, struct k3_desc_hw *hw) 142 static void k3_dma_set_desc(struct k3_dma_phy *phy, struct k3_desc_hw *hw)
143 { 143 {
144 writel_relaxed(hw->lli, phy->base + CX_LLI); 144 writel_relaxed(hw->lli, phy->base + CX_LLI);
145 writel_relaxed(hw->count, phy->base + CX_CNT); 145 writel_relaxed(hw->count, phy->base + CX_CNT);
146 writel_relaxed(hw->saddr, phy->base + CX_SRC); 146 writel_relaxed(hw->saddr, phy->base + CX_SRC);
147 writel_relaxed(hw->daddr, phy->base + CX_DST); 147 writel_relaxed(hw->daddr, phy->base + CX_DST);
148 writel_relaxed(AXI_CFG_DEFAULT, phy->base + AXI_CFG); 148 writel_relaxed(AXI_CFG_DEFAULT, phy->base + AXI_CFG);
149 writel_relaxed(hw->config, phy->base + CX_CFG); 149 writel_relaxed(hw->config, phy->base + CX_CFG);
150 } 150 }
151 151
152 static u32 k3_dma_get_curr_cnt(struct k3_dma_dev *d, struct k3_dma_phy *phy) 152 static u32 k3_dma_get_curr_cnt(struct k3_dma_dev *d, struct k3_dma_phy *phy)
153 { 153 {
154 u32 cnt = 0; 154 u32 cnt = 0;
155 155
156 cnt = readl_relaxed(d->base + CX_CUR_CNT + phy->idx * 0x10); 156 cnt = readl_relaxed(d->base + CX_CUR_CNT + phy->idx * 0x10);
157 cnt &= 0xffff; 157 cnt &= 0xffff;
158 return cnt; 158 return cnt;
159 } 159 }
160 160
161 static u32 k3_dma_get_curr_lli(struct k3_dma_phy *phy) 161 static u32 k3_dma_get_curr_lli(struct k3_dma_phy *phy)
162 { 162 {
163 return readl_relaxed(phy->base + CX_LLI); 163 return readl_relaxed(phy->base + CX_LLI);
164 } 164 }
165 165
166 static u32 k3_dma_get_chan_stat(struct k3_dma_dev *d) 166 static u32 k3_dma_get_chan_stat(struct k3_dma_dev *d)
167 { 167 {
168 return readl_relaxed(d->base + CH_STAT); 168 return readl_relaxed(d->base + CH_STAT);
169 } 169 }
170 170
171 static void k3_dma_enable_dma(struct k3_dma_dev *d, bool on) 171 static void k3_dma_enable_dma(struct k3_dma_dev *d, bool on)
172 { 172 {
173 if (on) { 173 if (on) {
174 /* set same priority */ 174 /* set same priority */
175 writel_relaxed(0x0, d->base + CH_PRI); 175 writel_relaxed(0x0, d->base + CH_PRI);
176 176
177 /* unmask irq */ 177 /* unmask irq */
178 writel_relaxed(0xffff, d->base + INT_TC1_MASK); 178 writel_relaxed(0xffff, d->base + INT_TC1_MASK);
179 writel_relaxed(0xffff, d->base + INT_ERR1_MASK); 179 writel_relaxed(0xffff, d->base + INT_ERR1_MASK);
180 writel_relaxed(0xffff, d->base + INT_ERR2_MASK); 180 writel_relaxed(0xffff, d->base + INT_ERR2_MASK);
181 } else { 181 } else {
182 /* mask irq */ 182 /* mask irq */
183 writel_relaxed(0x0, d->base + INT_TC1_MASK); 183 writel_relaxed(0x0, d->base + INT_TC1_MASK);
184 writel_relaxed(0x0, d->base + INT_ERR1_MASK); 184 writel_relaxed(0x0, d->base + INT_ERR1_MASK);
185 writel_relaxed(0x0, d->base + INT_ERR2_MASK); 185 writel_relaxed(0x0, d->base + INT_ERR2_MASK);
186 } 186 }
187 } 187 }
188 188
189 static irqreturn_t k3_dma_int_handler(int irq, void *dev_id) 189 static irqreturn_t k3_dma_int_handler(int irq, void *dev_id)
190 { 190 {
191 struct k3_dma_dev *d = (struct k3_dma_dev *)dev_id; 191 struct k3_dma_dev *d = (struct k3_dma_dev *)dev_id;
192 struct k3_dma_phy *p; 192 struct k3_dma_phy *p;
193 struct k3_dma_chan *c; 193 struct k3_dma_chan *c;
194 u32 stat = readl_relaxed(d->base + INT_STAT); 194 u32 stat = readl_relaxed(d->base + INT_STAT);
195 u32 tc1 = readl_relaxed(d->base + INT_TC1); 195 u32 tc1 = readl_relaxed(d->base + INT_TC1);
196 u32 err1 = readl_relaxed(d->base + INT_ERR1); 196 u32 err1 = readl_relaxed(d->base + INT_ERR1);
197 u32 err2 = readl_relaxed(d->base + INT_ERR2); 197 u32 err2 = readl_relaxed(d->base + INT_ERR2);
198 u32 i, irq_chan = 0; 198 u32 i, irq_chan = 0;
199 199
200 while (stat) { 200 while (stat) {
201 i = __ffs(stat); 201 i = __ffs(stat);
202 stat &= (stat - 1); 202 stat &= (stat - 1);
203 if (likely(tc1 & BIT(i))) { 203 if (likely(tc1 & BIT(i))) {
204 p = &d->phy[i]; 204 p = &d->phy[i];
205 c = p->vchan; 205 c = p->vchan;
206 if (c) { 206 if (c) {
207 unsigned long flags; 207 unsigned long flags;
208 208
209 spin_lock_irqsave(&c->vc.lock, flags); 209 spin_lock_irqsave(&c->vc.lock, flags);
210 vchan_cookie_complete(&p->ds_run->vd); 210 vchan_cookie_complete(&p->ds_run->vd);
211 p->ds_done = p->ds_run; 211 p->ds_done = p->ds_run;
212 spin_unlock_irqrestore(&c->vc.lock, flags); 212 spin_unlock_irqrestore(&c->vc.lock, flags);
213 } 213 }
214 irq_chan |= BIT(i); 214 irq_chan |= BIT(i);
215 } 215 }
216 if (unlikely((err1 & BIT(i)) || (err2 & BIT(i)))) 216 if (unlikely((err1 & BIT(i)) || (err2 & BIT(i))))
217 dev_warn(d->slave.dev, "DMA ERR\n"); 217 dev_warn(d->slave.dev, "DMA ERR\n");
218 } 218 }
219 219
220 writel_relaxed(irq_chan, d->base + INT_TC1_RAW); 220 writel_relaxed(irq_chan, d->base + INT_TC1_RAW);
221 writel_relaxed(err1, d->base + INT_ERR1_RAW); 221 writel_relaxed(err1, d->base + INT_ERR1_RAW);
222 writel_relaxed(err2, d->base + INT_ERR2_RAW); 222 writel_relaxed(err2, d->base + INT_ERR2_RAW);
223 223
224 if (irq_chan) { 224 if (irq_chan) {
225 tasklet_schedule(&d->task); 225 tasklet_schedule(&d->task);
226 return IRQ_HANDLED; 226 return IRQ_HANDLED;
227 } else 227 } else
228 return IRQ_NONE; 228 return IRQ_NONE;
229 } 229 }
230 230
231 static int k3_dma_start_txd(struct k3_dma_chan *c) 231 static int k3_dma_start_txd(struct k3_dma_chan *c)
232 { 232 {
233 struct k3_dma_dev *d = to_k3_dma(c->vc.chan.device); 233 struct k3_dma_dev *d = to_k3_dma(c->vc.chan.device);
234 struct virt_dma_desc *vd = vchan_next_desc(&c->vc); 234 struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
235 235
236 if (!c->phy) 236 if (!c->phy)
237 return -EAGAIN; 237 return -EAGAIN;
238 238
239 if (BIT(c->phy->idx) & k3_dma_get_chan_stat(d)) 239 if (BIT(c->phy->idx) & k3_dma_get_chan_stat(d))
240 return -EAGAIN; 240 return -EAGAIN;
241 241
242 if (vd) { 242 if (vd) {
243 struct k3_dma_desc_sw *ds = 243 struct k3_dma_desc_sw *ds =
244 container_of(vd, struct k3_dma_desc_sw, vd); 244 container_of(vd, struct k3_dma_desc_sw, vd);
245 /* 245 /*
246 * fetch and remove request from vc->desc_issued 246 * fetch and remove request from vc->desc_issued
247 * so vc->desc_issued only contains desc pending 247 * so vc->desc_issued only contains desc pending
248 */ 248 */
249 list_del(&ds->vd.node); 249 list_del(&ds->vd.node);
250 c->phy->ds_run = ds; 250 c->phy->ds_run = ds;
251 c->phy->ds_done = NULL; 251 c->phy->ds_done = NULL;
252 /* start dma */ 252 /* start dma */
253 k3_dma_set_desc(c->phy, &ds->desc_hw[0]); 253 k3_dma_set_desc(c->phy, &ds->desc_hw[0]);
254 return 0; 254 return 0;
255 } 255 }
256 c->phy->ds_done = NULL; 256 c->phy->ds_done = NULL;
257 c->phy->ds_run = NULL; 257 c->phy->ds_run = NULL;
258 return -EAGAIN; 258 return -EAGAIN;
259 } 259 }
260 260
261 static void k3_dma_tasklet(unsigned long arg) 261 static void k3_dma_tasklet(unsigned long arg)
262 { 262 {
263 struct k3_dma_dev *d = (struct k3_dma_dev *)arg; 263 struct k3_dma_dev *d = (struct k3_dma_dev *)arg;
264 struct k3_dma_phy *p; 264 struct k3_dma_phy *p;
265 struct k3_dma_chan *c, *cn; 265 struct k3_dma_chan *c, *cn;
266 unsigned pch, pch_alloc = 0; 266 unsigned pch, pch_alloc = 0;
267 267
268 /* check new dma request of running channel in vc->desc_issued */ 268 /* check new dma request of running channel in vc->desc_issued */
269 list_for_each_entry_safe(c, cn, &d->slave.channels, vc.chan.device_node) { 269 list_for_each_entry_safe(c, cn, &d->slave.channels, vc.chan.device_node) {
270 spin_lock_irq(&c->vc.lock); 270 spin_lock_irq(&c->vc.lock);
271 p = c->phy; 271 p = c->phy;
272 if (p && p->ds_done) { 272 if (p && p->ds_done) {
273 if (k3_dma_start_txd(c)) { 273 if (k3_dma_start_txd(c)) {
274 /* No current txd associated with this channel */ 274 /* No current txd associated with this channel */
275 dev_dbg(d->slave.dev, "pchan %u: free\n", p->idx); 275 dev_dbg(d->slave.dev, "pchan %u: free\n", p->idx);
276 /* Mark this channel free */ 276 /* Mark this channel free */
277 c->phy = NULL; 277 c->phy = NULL;
278 p->vchan = NULL; 278 p->vchan = NULL;
279 } 279 }
280 } 280 }
281 spin_unlock_irq(&c->vc.lock); 281 spin_unlock_irq(&c->vc.lock);
282 } 282 }
283 283
284 /* check new channel request in d->chan_pending */ 284 /* check new channel request in d->chan_pending */
285 spin_lock_irq(&d->lock); 285 spin_lock_irq(&d->lock);
286 for (pch = 0; pch < d->dma_channels; pch++) { 286 for (pch = 0; pch < d->dma_channels; pch++) {
287 p = &d->phy[pch]; 287 p = &d->phy[pch];
288 288
289 if (p->vchan == NULL && !list_empty(&d->chan_pending)) { 289 if (p->vchan == NULL && !list_empty(&d->chan_pending)) {
290 c = list_first_entry(&d->chan_pending, 290 c = list_first_entry(&d->chan_pending,
291 struct k3_dma_chan, node); 291 struct k3_dma_chan, node);
292 /* remove from d->chan_pending */ 292 /* remove from d->chan_pending */
293 list_del_init(&c->node); 293 list_del_init(&c->node);
294 pch_alloc |= 1 << pch; 294 pch_alloc |= 1 << pch;
295 /* Mark this channel allocated */ 295 /* Mark this channel allocated */
296 p->vchan = c; 296 p->vchan = c;
297 c->phy = p; 297 c->phy = p;
298 dev_dbg(d->slave.dev, "pchan %u: alloc vchan %p\n", pch, &c->vc); 298 dev_dbg(d->slave.dev, "pchan %u: alloc vchan %p\n", pch, &c->vc);
299 } 299 }
300 } 300 }
301 spin_unlock_irq(&d->lock); 301 spin_unlock_irq(&d->lock);
302 302
303 for (pch = 0; pch < d->dma_channels; pch++) { 303 for (pch = 0; pch < d->dma_channels; pch++) {
304 if (pch_alloc & (1 << pch)) { 304 if (pch_alloc & (1 << pch)) {
305 p = &d->phy[pch]; 305 p = &d->phy[pch];
306 c = p->vchan; 306 c = p->vchan;
307 if (c) { 307 if (c) {
308 spin_lock_irq(&c->vc.lock); 308 spin_lock_irq(&c->vc.lock);
309 k3_dma_start_txd(c); 309 k3_dma_start_txd(c);
310 spin_unlock_irq(&c->vc.lock); 310 spin_unlock_irq(&c->vc.lock);
311 } 311 }
312 } 312 }
313 } 313 }
314 } 314 }
315 315
316 static int k3_dma_alloc_chan_resources(struct dma_chan *chan) 316 static int k3_dma_alloc_chan_resources(struct dma_chan *chan)
317 { 317 {
318 return 0; 318 return 0;
319 } 319 }
320 320
321 static void k3_dma_free_chan_resources(struct dma_chan *chan) 321 static void k3_dma_free_chan_resources(struct dma_chan *chan)
322 { 322 {
323 struct k3_dma_chan *c = to_k3_chan(chan); 323 struct k3_dma_chan *c = to_k3_chan(chan);
324 struct k3_dma_dev *d = to_k3_dma(chan->device); 324 struct k3_dma_dev *d = to_k3_dma(chan->device);
325 unsigned long flags; 325 unsigned long flags;
326 326
327 spin_lock_irqsave(&d->lock, flags); 327 spin_lock_irqsave(&d->lock, flags);
328 list_del_init(&c->node); 328 list_del_init(&c->node);
329 spin_unlock_irqrestore(&d->lock, flags); 329 spin_unlock_irqrestore(&d->lock, flags);
330 330
331 vchan_free_chan_resources(&c->vc); 331 vchan_free_chan_resources(&c->vc);
332 c->ccfg = 0; 332 c->ccfg = 0;
333 } 333 }
334 334
335 static enum dma_status k3_dma_tx_status(struct dma_chan *chan, 335 static enum dma_status k3_dma_tx_status(struct dma_chan *chan,
336 dma_cookie_t cookie, struct dma_tx_state *state) 336 dma_cookie_t cookie, struct dma_tx_state *state)
337 { 337 {
338 struct k3_dma_chan *c = to_k3_chan(chan); 338 struct k3_dma_chan *c = to_k3_chan(chan);
339 struct k3_dma_dev *d = to_k3_dma(chan->device); 339 struct k3_dma_dev *d = to_k3_dma(chan->device);
340 struct k3_dma_phy *p; 340 struct k3_dma_phy *p;
341 struct virt_dma_desc *vd; 341 struct virt_dma_desc *vd;
342 unsigned long flags; 342 unsigned long flags;
343 enum dma_status ret; 343 enum dma_status ret;
344 size_t bytes = 0; 344 size_t bytes = 0;
345 345
346 ret = dma_cookie_status(&c->vc.chan, cookie, state); 346 ret = dma_cookie_status(&c->vc.chan, cookie, state);
347 if (ret == DMA_SUCCESS) 347 if (ret == DMA_COMPLETE)
348 return ret; 348 return ret;
349 349
350 spin_lock_irqsave(&c->vc.lock, flags); 350 spin_lock_irqsave(&c->vc.lock, flags);
351 p = c->phy; 351 p = c->phy;
352 ret = c->status; 352 ret = c->status;
353 353
354 /* 354 /*
355 * If the cookie is on our issue queue, then the residue is 355 * If the cookie is on our issue queue, then the residue is
356 * its total size. 356 * its total size.
357 */ 357 */
358 vd = vchan_find_desc(&c->vc, cookie); 358 vd = vchan_find_desc(&c->vc, cookie);
359 if (vd) { 359 if (vd) {
360 bytes = container_of(vd, struct k3_dma_desc_sw, vd)->size; 360 bytes = container_of(vd, struct k3_dma_desc_sw, vd)->size;
361 } else if ((!p) || (!p->ds_run)) { 361 } else if ((!p) || (!p->ds_run)) {
362 bytes = 0; 362 bytes = 0;
363 } else { 363 } else {
364 struct k3_dma_desc_sw *ds = p->ds_run; 364 struct k3_dma_desc_sw *ds = p->ds_run;
365 u32 clli = 0, index = 0; 365 u32 clli = 0, index = 0;
366 366
367 bytes = k3_dma_get_curr_cnt(d, p); 367 bytes = k3_dma_get_curr_cnt(d, p);
368 clli = k3_dma_get_curr_lli(p); 368 clli = k3_dma_get_curr_lli(p);
369 index = (clli - ds->desc_hw_lli) / sizeof(struct k3_desc_hw); 369 index = (clli - ds->desc_hw_lli) / sizeof(struct k3_desc_hw);
370 for (; index < ds->desc_num; index++) { 370 for (; index < ds->desc_num; index++) {
371 bytes += ds->desc_hw[index].count; 371 bytes += ds->desc_hw[index].count;
372 /* end of lli */ 372 /* end of lli */
373 if (!ds->desc_hw[index].lli) 373 if (!ds->desc_hw[index].lli)
374 break; 374 break;
375 } 375 }
376 } 376 }
377 spin_unlock_irqrestore(&c->vc.lock, flags); 377 spin_unlock_irqrestore(&c->vc.lock, flags);
378 dma_set_residue(state, bytes); 378 dma_set_residue(state, bytes);
379 return ret; 379 return ret;
380 } 380 }
381 381
382 static void k3_dma_issue_pending(struct dma_chan *chan) 382 static void k3_dma_issue_pending(struct dma_chan *chan)
383 { 383 {
384 struct k3_dma_chan *c = to_k3_chan(chan); 384 struct k3_dma_chan *c = to_k3_chan(chan);
385 struct k3_dma_dev *d = to_k3_dma(chan->device); 385 struct k3_dma_dev *d = to_k3_dma(chan->device);
386 unsigned long flags; 386 unsigned long flags;
387 387
388 spin_lock_irqsave(&c->vc.lock, flags); 388 spin_lock_irqsave(&c->vc.lock, flags);
389 /* add request to vc->desc_issued */ 389 /* add request to vc->desc_issued */
390 if (vchan_issue_pending(&c->vc)) { 390 if (vchan_issue_pending(&c->vc)) {
391 spin_lock(&d->lock); 391 spin_lock(&d->lock);
392 if (!c->phy) { 392 if (!c->phy) {
393 if (list_empty(&c->node)) { 393 if (list_empty(&c->node)) {
394 /* if new channel, add chan_pending */ 394 /* if new channel, add chan_pending */
395 list_add_tail(&c->node, &d->chan_pending); 395 list_add_tail(&c->node, &d->chan_pending);
396 /* check in tasklet */ 396 /* check in tasklet */
397 tasklet_schedule(&d->task); 397 tasklet_schedule(&d->task);
398 dev_dbg(d->slave.dev, "vchan %p: issued\n", &c->vc); 398 dev_dbg(d->slave.dev, "vchan %p: issued\n", &c->vc);
399 } 399 }
400 } 400 }
401 spin_unlock(&d->lock); 401 spin_unlock(&d->lock);
402 } else 402 } else
403 dev_dbg(d->slave.dev, "vchan %p: nothing to issue\n", &c->vc); 403 dev_dbg(d->slave.dev, "vchan %p: nothing to issue\n", &c->vc);
404 spin_unlock_irqrestore(&c->vc.lock, flags); 404 spin_unlock_irqrestore(&c->vc.lock, flags);
405 } 405 }
406 406
407 static void k3_dma_fill_desc(struct k3_dma_desc_sw *ds, dma_addr_t dst, 407 static void k3_dma_fill_desc(struct k3_dma_desc_sw *ds, dma_addr_t dst,
408 dma_addr_t src, size_t len, u32 num, u32 ccfg) 408 dma_addr_t src, size_t len, u32 num, u32 ccfg)
409 { 409 {
410 if ((num + 1) < ds->desc_num) 410 if ((num + 1) < ds->desc_num)
411 ds->desc_hw[num].lli = ds->desc_hw_lli + (num + 1) * 411 ds->desc_hw[num].lli = ds->desc_hw_lli + (num + 1) *
412 sizeof(struct k3_desc_hw); 412 sizeof(struct k3_desc_hw);
413 ds->desc_hw[num].lli |= CX_LLI_CHAIN_EN; 413 ds->desc_hw[num].lli |= CX_LLI_CHAIN_EN;
414 ds->desc_hw[num].count = len; 414 ds->desc_hw[num].count = len;
415 ds->desc_hw[num].saddr = src; 415 ds->desc_hw[num].saddr = src;
416 ds->desc_hw[num].daddr = dst; 416 ds->desc_hw[num].daddr = dst;
417 ds->desc_hw[num].config = ccfg; 417 ds->desc_hw[num].config = ccfg;
418 } 418 }
419 419
420 static struct dma_async_tx_descriptor *k3_dma_prep_memcpy( 420 static struct dma_async_tx_descriptor *k3_dma_prep_memcpy(
421 struct dma_chan *chan, dma_addr_t dst, dma_addr_t src, 421 struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
422 size_t len, unsigned long flags) 422 size_t len, unsigned long flags)
423 { 423 {
424 struct k3_dma_chan *c = to_k3_chan(chan); 424 struct k3_dma_chan *c = to_k3_chan(chan);
425 struct k3_dma_desc_sw *ds; 425 struct k3_dma_desc_sw *ds;
426 size_t copy = 0; 426 size_t copy = 0;
427 int num = 0; 427 int num = 0;
428 428
429 if (!len) 429 if (!len)
430 return NULL; 430 return NULL;
431 431
432 num = DIV_ROUND_UP(len, DMA_MAX_SIZE); 432 num = DIV_ROUND_UP(len, DMA_MAX_SIZE);
433 ds = kzalloc(sizeof(*ds) + num * sizeof(ds->desc_hw[0]), GFP_ATOMIC); 433 ds = kzalloc(sizeof(*ds) + num * sizeof(ds->desc_hw[0]), GFP_ATOMIC);
434 if (!ds) { 434 if (!ds) {
435 dev_dbg(chan->device->dev, "vchan %p: kzalloc fail\n", &c->vc); 435 dev_dbg(chan->device->dev, "vchan %p: kzalloc fail\n", &c->vc);
436 return NULL; 436 return NULL;
437 } 437 }
438 ds->desc_hw_lli = __virt_to_phys((unsigned long)&ds->desc_hw[0]); 438 ds->desc_hw_lli = __virt_to_phys((unsigned long)&ds->desc_hw[0]);
439 ds->size = len; 439 ds->size = len;
440 ds->desc_num = num; 440 ds->desc_num = num;
441 num = 0; 441 num = 0;
442 442
443 if (!c->ccfg) { 443 if (!c->ccfg) {
444 /* default is memtomem, without calling device_control */ 444 /* default is memtomem, without calling device_control */
445 c->ccfg = CX_CFG_SRCINCR | CX_CFG_DSTINCR | CX_CFG_EN; 445 c->ccfg = CX_CFG_SRCINCR | CX_CFG_DSTINCR | CX_CFG_EN;
446 c->ccfg |= (0xf << 20) | (0xf << 24); /* burst = 16 */ 446 c->ccfg |= (0xf << 20) | (0xf << 24); /* burst = 16 */
447 c->ccfg |= (0x3 << 12) | (0x3 << 16); /* width = 64 bit */ 447 c->ccfg |= (0x3 << 12) | (0x3 << 16); /* width = 64 bit */
448 } 448 }
449 449
450 do { 450 do {
451 copy = min_t(size_t, len, DMA_MAX_SIZE); 451 copy = min_t(size_t, len, DMA_MAX_SIZE);
452 k3_dma_fill_desc(ds, dst, src, copy, num++, c->ccfg); 452 k3_dma_fill_desc(ds, dst, src, copy, num++, c->ccfg);
453 453
454 if (c->dir == DMA_MEM_TO_DEV) { 454 if (c->dir == DMA_MEM_TO_DEV) {
455 src += copy; 455 src += copy;
456 } else if (c->dir == DMA_DEV_TO_MEM) { 456 } else if (c->dir == DMA_DEV_TO_MEM) {
457 dst += copy; 457 dst += copy;
458 } else { 458 } else {
459 src += copy; 459 src += copy;
460 dst += copy; 460 dst += copy;
461 } 461 }
462 len -= copy; 462 len -= copy;
463 } while (len); 463 } while (len);
464 464
465 ds->desc_hw[num-1].lli = 0; /* end of link */ 465 ds->desc_hw[num-1].lli = 0; /* end of link */
466 return vchan_tx_prep(&c->vc, &ds->vd, flags); 466 return vchan_tx_prep(&c->vc, &ds->vd, flags);
467 } 467 }
468 468
469 static struct dma_async_tx_descriptor *k3_dma_prep_slave_sg( 469 static struct dma_async_tx_descriptor *k3_dma_prep_slave_sg(
470 struct dma_chan *chan, struct scatterlist *sgl, unsigned int sglen, 470 struct dma_chan *chan, struct scatterlist *sgl, unsigned int sglen,
471 enum dma_transfer_direction dir, unsigned long flags, void *context) 471 enum dma_transfer_direction dir, unsigned long flags, void *context)
472 { 472 {
473 struct k3_dma_chan *c = to_k3_chan(chan); 473 struct k3_dma_chan *c = to_k3_chan(chan);
474 struct k3_dma_desc_sw *ds; 474 struct k3_dma_desc_sw *ds;
475 size_t len, avail, total = 0; 475 size_t len, avail, total = 0;
476 struct scatterlist *sg; 476 struct scatterlist *sg;
477 dma_addr_t addr, src = 0, dst = 0; 477 dma_addr_t addr, src = 0, dst = 0;
478 int num = sglen, i; 478 int num = sglen, i;
479 479
480 if (sgl == 0) 480 if (sgl == 0)
481 return NULL; 481 return NULL;
482 482
483 for_each_sg(sgl, sg, sglen, i) { 483 for_each_sg(sgl, sg, sglen, i) {
484 avail = sg_dma_len(sg); 484 avail = sg_dma_len(sg);
485 if (avail > DMA_MAX_SIZE) 485 if (avail > DMA_MAX_SIZE)
486 num += DIV_ROUND_UP(avail, DMA_MAX_SIZE) - 1; 486 num += DIV_ROUND_UP(avail, DMA_MAX_SIZE) - 1;
487 } 487 }
488 488
489 ds = kzalloc(sizeof(*ds) + num * sizeof(ds->desc_hw[0]), GFP_ATOMIC); 489 ds = kzalloc(sizeof(*ds) + num * sizeof(ds->desc_hw[0]), GFP_ATOMIC);
490 if (!ds) { 490 if (!ds) {
491 dev_dbg(chan->device->dev, "vchan %p: kzalloc fail\n", &c->vc); 491 dev_dbg(chan->device->dev, "vchan %p: kzalloc fail\n", &c->vc);
492 return NULL; 492 return NULL;
493 } 493 }
494 ds->desc_hw_lli = __virt_to_phys((unsigned long)&ds->desc_hw[0]); 494 ds->desc_hw_lli = __virt_to_phys((unsigned long)&ds->desc_hw[0]);
495 ds->desc_num = num; 495 ds->desc_num = num;
496 num = 0; 496 num = 0;
497 497
498 for_each_sg(sgl, sg, sglen, i) { 498 for_each_sg(sgl, sg, sglen, i) {
499 addr = sg_dma_address(sg); 499 addr = sg_dma_address(sg);
500 avail = sg_dma_len(sg); 500 avail = sg_dma_len(sg);
501 total += avail; 501 total += avail;
502 502
503 do { 503 do {
504 len = min_t(size_t, avail, DMA_MAX_SIZE); 504 len = min_t(size_t, avail, DMA_MAX_SIZE);
505 505
506 if (dir == DMA_MEM_TO_DEV) { 506 if (dir == DMA_MEM_TO_DEV) {
507 src = addr; 507 src = addr;
508 dst = c->dev_addr; 508 dst = c->dev_addr;
509 } else if (dir == DMA_DEV_TO_MEM) { 509 } else if (dir == DMA_DEV_TO_MEM) {
510 src = c->dev_addr; 510 src = c->dev_addr;
511 dst = addr; 511 dst = addr;
512 } 512 }
513 513
514 k3_dma_fill_desc(ds, dst, src, len, num++, c->ccfg); 514 k3_dma_fill_desc(ds, dst, src, len, num++, c->ccfg);
515 515
516 addr += len; 516 addr += len;
517 avail -= len; 517 avail -= len;
518 } while (avail); 518 } while (avail);
519 } 519 }
520 520
521 ds->desc_hw[num-1].lli = 0; /* end of link */ 521 ds->desc_hw[num-1].lli = 0; /* end of link */
522 ds->size = total; 522 ds->size = total;
523 return vchan_tx_prep(&c->vc, &ds->vd, flags); 523 return vchan_tx_prep(&c->vc, &ds->vd, flags);
524 } 524 }
525 525
526 static int k3_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, 526 static int k3_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
527 unsigned long arg) 527 unsigned long arg)
528 { 528 {
529 struct k3_dma_chan *c = to_k3_chan(chan); 529 struct k3_dma_chan *c = to_k3_chan(chan);
530 struct k3_dma_dev *d = to_k3_dma(chan->device); 530 struct k3_dma_dev *d = to_k3_dma(chan->device);
531 struct dma_slave_config *cfg = (void *)arg; 531 struct dma_slave_config *cfg = (void *)arg;
532 struct k3_dma_phy *p = c->phy; 532 struct k3_dma_phy *p = c->phy;
533 unsigned long flags; 533 unsigned long flags;
534 u32 maxburst = 0, val = 0; 534 u32 maxburst = 0, val = 0;
535 enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED; 535 enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
536 LIST_HEAD(head); 536 LIST_HEAD(head);
537 537
538 switch (cmd) { 538 switch (cmd) {
539 case DMA_SLAVE_CONFIG: 539 case DMA_SLAVE_CONFIG:
540 if (cfg == NULL) 540 if (cfg == NULL)
541 return -EINVAL; 541 return -EINVAL;
542 c->dir = cfg->direction; 542 c->dir = cfg->direction;
543 if (c->dir == DMA_DEV_TO_MEM) { 543 if (c->dir == DMA_DEV_TO_MEM) {
544 c->ccfg = CX_CFG_DSTINCR; 544 c->ccfg = CX_CFG_DSTINCR;
545 c->dev_addr = cfg->src_addr; 545 c->dev_addr = cfg->src_addr;
546 maxburst = cfg->src_maxburst; 546 maxburst = cfg->src_maxburst;
547 width = cfg->src_addr_width; 547 width = cfg->src_addr_width;
548 } else if (c->dir == DMA_MEM_TO_DEV) { 548 } else if (c->dir == DMA_MEM_TO_DEV) {
549 c->ccfg = CX_CFG_SRCINCR; 549 c->ccfg = CX_CFG_SRCINCR;
550 c->dev_addr = cfg->dst_addr; 550 c->dev_addr = cfg->dst_addr;
551 maxburst = cfg->dst_maxburst; 551 maxburst = cfg->dst_maxburst;
552 width = cfg->dst_addr_width; 552 width = cfg->dst_addr_width;
553 } 553 }
554 switch (width) { 554 switch (width) {
555 case DMA_SLAVE_BUSWIDTH_1_BYTE: 555 case DMA_SLAVE_BUSWIDTH_1_BYTE:
556 case DMA_SLAVE_BUSWIDTH_2_BYTES: 556 case DMA_SLAVE_BUSWIDTH_2_BYTES:
557 case DMA_SLAVE_BUSWIDTH_4_BYTES: 557 case DMA_SLAVE_BUSWIDTH_4_BYTES:
558 case DMA_SLAVE_BUSWIDTH_8_BYTES: 558 case DMA_SLAVE_BUSWIDTH_8_BYTES:
559 val = __ffs(width); 559 val = __ffs(width);
560 break; 560 break;
561 default: 561 default:
562 val = 3; 562 val = 3;
563 break; 563 break;
564 } 564 }
565 c->ccfg |= (val << 12) | (val << 16); 565 c->ccfg |= (val << 12) | (val << 16);
566 566
567 if ((maxburst == 0) || (maxburst > 16)) 567 if ((maxburst == 0) || (maxburst > 16))
568 val = 16; 568 val = 16;
569 else 569 else
570 val = maxburst - 1; 570 val = maxburst - 1;
571 c->ccfg |= (val << 20) | (val << 24); 571 c->ccfg |= (val << 20) | (val << 24);
572 c->ccfg |= CX_CFG_MEM2PER | CX_CFG_EN; 572 c->ccfg |= CX_CFG_MEM2PER | CX_CFG_EN;
573 573
574 /* specific request line */ 574 /* specific request line */
575 c->ccfg |= c->vc.chan.chan_id << 4; 575 c->ccfg |= c->vc.chan.chan_id << 4;
576 break; 576 break;
577 577
578 case DMA_TERMINATE_ALL: 578 case DMA_TERMINATE_ALL:
579 dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc); 579 dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc);
580 580
581 /* Prevent this channel being scheduled */ 581 /* Prevent this channel being scheduled */
582 spin_lock(&d->lock); 582 spin_lock(&d->lock);
583 list_del_init(&c->node); 583 list_del_init(&c->node);
584 spin_unlock(&d->lock); 584 spin_unlock(&d->lock);
585 585
586 /* Clear the tx descriptor lists */ 586 /* Clear the tx descriptor lists */
587 spin_lock_irqsave(&c->vc.lock, flags); 587 spin_lock_irqsave(&c->vc.lock, flags);
588 vchan_get_all_descriptors(&c->vc, &head); 588 vchan_get_all_descriptors(&c->vc, &head);
589 if (p) { 589 if (p) {
590 /* vchan is assigned to a pchan - stop the channel */ 590 /* vchan is assigned to a pchan - stop the channel */
591 k3_dma_terminate_chan(p, d); 591 k3_dma_terminate_chan(p, d);
592 c->phy = NULL; 592 c->phy = NULL;
593 p->vchan = NULL; 593 p->vchan = NULL;
594 p->ds_run = p->ds_done = NULL; 594 p->ds_run = p->ds_done = NULL;
595 } 595 }
596 spin_unlock_irqrestore(&c->vc.lock, flags); 596 spin_unlock_irqrestore(&c->vc.lock, flags);
597 vchan_dma_desc_free_list(&c->vc, &head); 597 vchan_dma_desc_free_list(&c->vc, &head);
598 break; 598 break;
599 599
600 case DMA_PAUSE: 600 case DMA_PAUSE:
601 dev_dbg(d->slave.dev, "vchan %p: pause\n", &c->vc); 601 dev_dbg(d->slave.dev, "vchan %p: pause\n", &c->vc);
602 if (c->status == DMA_IN_PROGRESS) { 602 if (c->status == DMA_IN_PROGRESS) {
603 c->status = DMA_PAUSED; 603 c->status = DMA_PAUSED;
604 if (p) { 604 if (p) {
605 k3_dma_pause_dma(p, false); 605 k3_dma_pause_dma(p, false);
606 } else { 606 } else {
607 spin_lock(&d->lock); 607 spin_lock(&d->lock);
608 list_del_init(&c->node); 608 list_del_init(&c->node);
609 spin_unlock(&d->lock); 609 spin_unlock(&d->lock);
610 } 610 }
611 } 611 }
612 break; 612 break;
613 613
614 case DMA_RESUME: 614 case DMA_RESUME:
615 dev_dbg(d->slave.dev, "vchan %p: resume\n", &c->vc); 615 dev_dbg(d->slave.dev, "vchan %p: resume\n", &c->vc);
616 spin_lock_irqsave(&c->vc.lock, flags); 616 spin_lock_irqsave(&c->vc.lock, flags);
617 if (c->status == DMA_PAUSED) { 617 if (c->status == DMA_PAUSED) {
618 c->status = DMA_IN_PROGRESS; 618 c->status = DMA_IN_PROGRESS;
619 if (p) { 619 if (p) {
620 k3_dma_pause_dma(p, true); 620 k3_dma_pause_dma(p, true);
621 } else if (!list_empty(&c->vc.desc_issued)) { 621 } else if (!list_empty(&c->vc.desc_issued)) {
622 spin_lock(&d->lock); 622 spin_lock(&d->lock);
623 list_add_tail(&c->node, &d->chan_pending); 623 list_add_tail(&c->node, &d->chan_pending);
624 spin_unlock(&d->lock); 624 spin_unlock(&d->lock);
625 } 625 }
626 } 626 }
627 spin_unlock_irqrestore(&c->vc.lock, flags); 627 spin_unlock_irqrestore(&c->vc.lock, flags);
628 break; 628 break;
629 default: 629 default:
630 return -ENXIO; 630 return -ENXIO;
631 } 631 }
632 return 0; 632 return 0;
633 } 633 }
634 634
635 static void k3_dma_free_desc(struct virt_dma_desc *vd) 635 static void k3_dma_free_desc(struct virt_dma_desc *vd)
636 { 636 {
637 struct k3_dma_desc_sw *ds = 637 struct k3_dma_desc_sw *ds =
638 container_of(vd, struct k3_dma_desc_sw, vd); 638 container_of(vd, struct k3_dma_desc_sw, vd);
639 639
640 kfree(ds); 640 kfree(ds);
641 } 641 }
642 642
643 static struct of_device_id k3_pdma_dt_ids[] = { 643 static struct of_device_id k3_pdma_dt_ids[] = {
644 { .compatible = "hisilicon,k3-dma-1.0", }, 644 { .compatible = "hisilicon,k3-dma-1.0", },
645 {} 645 {}
646 }; 646 };
647 MODULE_DEVICE_TABLE(of, k3_pdma_dt_ids); 647 MODULE_DEVICE_TABLE(of, k3_pdma_dt_ids);
648 648
649 static struct dma_chan *k3_of_dma_simple_xlate(struct of_phandle_args *dma_spec, 649 static struct dma_chan *k3_of_dma_simple_xlate(struct of_phandle_args *dma_spec,
650 struct of_dma *ofdma) 650 struct of_dma *ofdma)
651 { 651 {
652 struct k3_dma_dev *d = ofdma->of_dma_data; 652 struct k3_dma_dev *d = ofdma->of_dma_data;
653 unsigned int request = dma_spec->args[0]; 653 unsigned int request = dma_spec->args[0];
654 654
655 if (request > d->dma_requests) 655 if (request > d->dma_requests)
656 return NULL; 656 return NULL;
657 657
658 return dma_get_slave_channel(&(d->chans[request].vc.chan)); 658 return dma_get_slave_channel(&(d->chans[request].vc.chan));
659 } 659 }
660 660
661 static int k3_dma_probe(struct platform_device *op) 661 static int k3_dma_probe(struct platform_device *op)
662 { 662 {
663 struct k3_dma_dev *d; 663 struct k3_dma_dev *d;
664 const struct of_device_id *of_id; 664 const struct of_device_id *of_id;
665 struct resource *iores; 665 struct resource *iores;
666 int i, ret, irq = 0; 666 int i, ret, irq = 0;
667 667
668 iores = platform_get_resource(op, IORESOURCE_MEM, 0); 668 iores = platform_get_resource(op, IORESOURCE_MEM, 0);
669 if (!iores) 669 if (!iores)
670 return -EINVAL; 670 return -EINVAL;
671 671
672 d = devm_kzalloc(&op->dev, sizeof(*d), GFP_KERNEL); 672 d = devm_kzalloc(&op->dev, sizeof(*d), GFP_KERNEL);
673 if (!d) 673 if (!d)
674 return -ENOMEM; 674 return -ENOMEM;
675 675
676 d->base = devm_ioremap_resource(&op->dev, iores); 676 d->base = devm_ioremap_resource(&op->dev, iores);
677 if (IS_ERR(d->base)) 677 if (IS_ERR(d->base))
678 return PTR_ERR(d->base); 678 return PTR_ERR(d->base);
679 679
680 of_id = of_match_device(k3_pdma_dt_ids, &op->dev); 680 of_id = of_match_device(k3_pdma_dt_ids, &op->dev);
681 if (of_id) { 681 if (of_id) {
682 of_property_read_u32((&op->dev)->of_node, 682 of_property_read_u32((&op->dev)->of_node,
683 "dma-channels", &d->dma_channels); 683 "dma-channels", &d->dma_channels);
684 of_property_read_u32((&op->dev)->of_node, 684 of_property_read_u32((&op->dev)->of_node,
685 "dma-requests", &d->dma_requests); 685 "dma-requests", &d->dma_requests);
686 } 686 }
687 687
688 d->clk = devm_clk_get(&op->dev, NULL); 688 d->clk = devm_clk_get(&op->dev, NULL);
689 if (IS_ERR(d->clk)) { 689 if (IS_ERR(d->clk)) {
690 dev_err(&op->dev, "no dma clk\n"); 690 dev_err(&op->dev, "no dma clk\n");
691 return PTR_ERR(d->clk); 691 return PTR_ERR(d->clk);
692 } 692 }
693 693
694 irq = platform_get_irq(op, 0); 694 irq = platform_get_irq(op, 0);
695 ret = devm_request_irq(&op->dev, irq, 695 ret = devm_request_irq(&op->dev, irq,
696 k3_dma_int_handler, 0, DRIVER_NAME, d); 696 k3_dma_int_handler, 0, DRIVER_NAME, d);
697 if (ret) 697 if (ret)
698 return ret; 698 return ret;
699 699
700 /* init phy channel */ 700 /* init phy channel */
701 d->phy = devm_kzalloc(&op->dev, 701 d->phy = devm_kzalloc(&op->dev,
702 d->dma_channels * sizeof(struct k3_dma_phy), GFP_KERNEL); 702 d->dma_channels * sizeof(struct k3_dma_phy), GFP_KERNEL);
703 if (d->phy == NULL) 703 if (d->phy == NULL)
704 return -ENOMEM; 704 return -ENOMEM;
705 705
706 for (i = 0; i < d->dma_channels; i++) { 706 for (i = 0; i < d->dma_channels; i++) {
707 struct k3_dma_phy *p = &d->phy[i]; 707 struct k3_dma_phy *p = &d->phy[i];
708 708
709 p->idx = i; 709 p->idx = i;
710 p->base = d->base + i * 0x40; 710 p->base = d->base + i * 0x40;
711 } 711 }
712 712
713 INIT_LIST_HEAD(&d->slave.channels); 713 INIT_LIST_HEAD(&d->slave.channels);
714 dma_cap_set(DMA_SLAVE, d->slave.cap_mask); 714 dma_cap_set(DMA_SLAVE, d->slave.cap_mask);
715 dma_cap_set(DMA_MEMCPY, d->slave.cap_mask); 715 dma_cap_set(DMA_MEMCPY, d->slave.cap_mask);
716 d->slave.dev = &op->dev; 716 d->slave.dev = &op->dev;
717 d->slave.device_alloc_chan_resources = k3_dma_alloc_chan_resources; 717 d->slave.device_alloc_chan_resources = k3_dma_alloc_chan_resources;
718 d->slave.device_free_chan_resources = k3_dma_free_chan_resources; 718 d->slave.device_free_chan_resources = k3_dma_free_chan_resources;
719 d->slave.device_tx_status = k3_dma_tx_status; 719 d->slave.device_tx_status = k3_dma_tx_status;
720 d->slave.device_prep_dma_memcpy = k3_dma_prep_memcpy; 720 d->slave.device_prep_dma_memcpy = k3_dma_prep_memcpy;
721 d->slave.device_prep_slave_sg = k3_dma_prep_slave_sg; 721 d->slave.device_prep_slave_sg = k3_dma_prep_slave_sg;
722 d->slave.device_issue_pending = k3_dma_issue_pending; 722 d->slave.device_issue_pending = k3_dma_issue_pending;
723 d->slave.device_control = k3_dma_control; 723 d->slave.device_control = k3_dma_control;
724 d->slave.copy_align = DMA_ALIGN; 724 d->slave.copy_align = DMA_ALIGN;
725 d->slave.chancnt = d->dma_requests; 725 d->slave.chancnt = d->dma_requests;
726 726
727 /* init virtual channel */ 727 /* init virtual channel */
728 d->chans = devm_kzalloc(&op->dev, 728 d->chans = devm_kzalloc(&op->dev,
729 d->dma_requests * sizeof(struct k3_dma_chan), GFP_KERNEL); 729 d->dma_requests * sizeof(struct k3_dma_chan), GFP_KERNEL);
730 if (d->chans == NULL) 730 if (d->chans == NULL)
731 return -ENOMEM; 731 return -ENOMEM;
732 732
733 for (i = 0; i < d->dma_requests; i++) { 733 for (i = 0; i < d->dma_requests; i++) {
734 struct k3_dma_chan *c = &d->chans[i]; 734 struct k3_dma_chan *c = &d->chans[i];
735 735
736 c->status = DMA_IN_PROGRESS; 736 c->status = DMA_IN_PROGRESS;
737 INIT_LIST_HEAD(&c->node); 737 INIT_LIST_HEAD(&c->node);
738 c->vc.desc_free = k3_dma_free_desc; 738 c->vc.desc_free = k3_dma_free_desc;
739 vchan_init(&c->vc, &d->slave); 739 vchan_init(&c->vc, &d->slave);
740 } 740 }
741 741
742 /* Enable clock before accessing registers */ 742 /* Enable clock before accessing registers */
743 ret = clk_prepare_enable(d->clk); 743 ret = clk_prepare_enable(d->clk);
744 if (ret < 0) { 744 if (ret < 0) {
745 dev_err(&op->dev, "clk_prepare_enable failed: %d\n", ret); 745 dev_err(&op->dev, "clk_prepare_enable failed: %d\n", ret);
746 return ret; 746 return ret;
747 } 747 }
748 748
749 k3_dma_enable_dma(d, true); 749 k3_dma_enable_dma(d, true);
750 750
751 ret = dma_async_device_register(&d->slave); 751 ret = dma_async_device_register(&d->slave);
752 if (ret) 752 if (ret)
753 return ret; 753 return ret;
754 754
755 ret = of_dma_controller_register((&op->dev)->of_node, 755 ret = of_dma_controller_register((&op->dev)->of_node,
756 k3_of_dma_simple_xlate, d); 756 k3_of_dma_simple_xlate, d);
757 if (ret) 757 if (ret)
758 goto of_dma_register_fail; 758 goto of_dma_register_fail;
759 759
760 spin_lock_init(&d->lock); 760 spin_lock_init(&d->lock);
761 INIT_LIST_HEAD(&d->chan_pending); 761 INIT_LIST_HEAD(&d->chan_pending);
762 tasklet_init(&d->task, k3_dma_tasklet, (unsigned long)d); 762 tasklet_init(&d->task, k3_dma_tasklet, (unsigned long)d);
763 platform_set_drvdata(op, d); 763 platform_set_drvdata(op, d);
764 dev_info(&op->dev, "initialized\n"); 764 dev_info(&op->dev, "initialized\n");
765 765
766 return 0; 766 return 0;
767 767
768 of_dma_register_fail: 768 of_dma_register_fail:
769 dma_async_device_unregister(&d->slave); 769 dma_async_device_unregister(&d->slave);
770 return ret; 770 return ret;
771 } 771 }
772 772
773 static int k3_dma_remove(struct platform_device *op) 773 static int k3_dma_remove(struct platform_device *op)
774 { 774 {
775 struct k3_dma_chan *c, *cn; 775 struct k3_dma_chan *c, *cn;
776 struct k3_dma_dev *d = platform_get_drvdata(op); 776 struct k3_dma_dev *d = platform_get_drvdata(op);
777 777
778 dma_async_device_unregister(&d->slave); 778 dma_async_device_unregister(&d->slave);
779 of_dma_controller_free((&op->dev)->of_node); 779 of_dma_controller_free((&op->dev)->of_node);
780 780
781 list_for_each_entry_safe(c, cn, &d->slave.channels, vc.chan.device_node) { 781 list_for_each_entry_safe(c, cn, &d->slave.channels, vc.chan.device_node) {
782 list_del(&c->vc.chan.device_node); 782 list_del(&c->vc.chan.device_node);
783 tasklet_kill(&c->vc.task); 783 tasklet_kill(&c->vc.task);
784 } 784 }
785 tasklet_kill(&d->task); 785 tasklet_kill(&d->task);
786 clk_disable_unprepare(d->clk); 786 clk_disable_unprepare(d->clk);
787 return 0; 787 return 0;
788 } 788 }
789 789
790 static int k3_dma_suspend(struct device *dev) 790 static int k3_dma_suspend(struct device *dev)
791 { 791 {
792 struct k3_dma_dev *d = dev_get_drvdata(dev); 792 struct k3_dma_dev *d = dev_get_drvdata(dev);
793 u32 stat = 0; 793 u32 stat = 0;
794 794
795 stat = k3_dma_get_chan_stat(d); 795 stat = k3_dma_get_chan_stat(d);
796 if (stat) { 796 if (stat) {
797 dev_warn(d->slave.dev, 797 dev_warn(d->slave.dev,
798 "chan %d is running fail to suspend\n", stat); 798 "chan %d is running fail to suspend\n", stat);
799 return -1; 799 return -1;
800 } 800 }
801 k3_dma_enable_dma(d, false); 801 k3_dma_enable_dma(d, false);
802 clk_disable_unprepare(d->clk); 802 clk_disable_unprepare(d->clk);
803 return 0; 803 return 0;
804 } 804 }
805 805
806 static int k3_dma_resume(struct device *dev) 806 static int k3_dma_resume(struct device *dev)
807 { 807 {
808 struct k3_dma_dev *d = dev_get_drvdata(dev); 808 struct k3_dma_dev *d = dev_get_drvdata(dev);
809 int ret = 0; 809 int ret = 0;
810 810
811 ret = clk_prepare_enable(d->clk); 811 ret = clk_prepare_enable(d->clk);
812 if (ret < 0) { 812 if (ret < 0) {
813 dev_err(d->slave.dev, "clk_prepare_enable failed: %d\n", ret); 813 dev_err(d->slave.dev, "clk_prepare_enable failed: %d\n", ret);
814 return ret; 814 return ret;
815 } 815 }
816 k3_dma_enable_dma(d, true); 816 k3_dma_enable_dma(d, true);
817 return 0; 817 return 0;
818 } 818 }
819 819
820 SIMPLE_DEV_PM_OPS(k3_dma_pmops, k3_dma_suspend, k3_dma_resume); 820 SIMPLE_DEV_PM_OPS(k3_dma_pmops, k3_dma_suspend, k3_dma_resume);
821 821
822 static struct platform_driver k3_pdma_driver = { 822 static struct platform_driver k3_pdma_driver = {
823 .driver = { 823 .driver = {
824 .name = DRIVER_NAME, 824 .name = DRIVER_NAME,
825 .owner = THIS_MODULE, 825 .owner = THIS_MODULE,
826 .pm = &k3_dma_pmops, 826 .pm = &k3_dma_pmops,
827 .of_match_table = k3_pdma_dt_ids, 827 .of_match_table = k3_pdma_dt_ids,
828 }, 828 },
829 .probe = k3_dma_probe, 829 .probe = k3_dma_probe,
830 .remove = k3_dma_remove, 830 .remove = k3_dma_remove,
831 }; 831 };
832 832
833 module_platform_driver(k3_pdma_driver); 833 module_platform_driver(k3_pdma_driver);
834 834
835 MODULE_DESCRIPTION("Hisilicon k3 DMA Driver"); 835 MODULE_DESCRIPTION("Hisilicon k3 DMA Driver");
836 MODULE_ALIAS("platform:k3dma"); 836 MODULE_ALIAS("platform:k3dma");
837 MODULE_LICENSE("GPL v2"); 837 MODULE_LICENSE("GPL v2");
838 838
drivers/dma/mmp_tdma.c
1 /* 1 /*
2 * Driver For Marvell Two-channel DMA Engine 2 * Driver For Marvell Two-channel DMA Engine
3 * 3 *
4 * Copyright: Marvell International Ltd. 4 * Copyright: Marvell International Ltd.
5 * 5 *
6 * The code contained herein is licensed under the GNU General Public 6 * The code contained herein is licensed under the GNU General Public
7 * License. You may obtain a copy of the GNU General Public License 7 * License. You may obtain a copy of the GNU General Public License
8 * Version 2 or later at the following locations: 8 * Version 2 or later at the following locations:
9 * 9 *
10 */ 10 */
11 11
12 #include <linux/err.h> 12 #include <linux/err.h>
13 #include <linux/module.h> 13 #include <linux/module.h>
14 #include <linux/init.h> 14 #include <linux/init.h>
15 #include <linux/types.h> 15 #include <linux/types.h>
16 #include <linux/interrupt.h> 16 #include <linux/interrupt.h>
17 #include <linux/dma-mapping.h> 17 #include <linux/dma-mapping.h>
18 #include <linux/slab.h> 18 #include <linux/slab.h>
19 #include <linux/dmaengine.h> 19 #include <linux/dmaengine.h>
20 #include <linux/platform_device.h> 20 #include <linux/platform_device.h>
21 #include <linux/device.h> 21 #include <linux/device.h>
22 #include <mach/regs-icu.h> 22 #include <mach/regs-icu.h>
23 #include <linux/platform_data/dma-mmp_tdma.h> 23 #include <linux/platform_data/dma-mmp_tdma.h>
24 #include <linux/of_device.h> 24 #include <linux/of_device.h>
25 25
26 #include "dmaengine.h" 26 #include "dmaengine.h"
27 27
28 /* 28 /*
29 * Two-Channel DMA registers 29 * Two-Channel DMA registers
30 */ 30 */
31 #define TDBCR 0x00 /* Byte Count */ 31 #define TDBCR 0x00 /* Byte Count */
32 #define TDSAR 0x10 /* Src Addr */ 32 #define TDSAR 0x10 /* Src Addr */
33 #define TDDAR 0x20 /* Dst Addr */ 33 #define TDDAR 0x20 /* Dst Addr */
34 #define TDNDPR 0x30 /* Next Desc */ 34 #define TDNDPR 0x30 /* Next Desc */
35 #define TDCR 0x40 /* Control */ 35 #define TDCR 0x40 /* Control */
36 #define TDCP 0x60 /* Priority*/ 36 #define TDCP 0x60 /* Priority*/
37 #define TDCDPR 0x70 /* Current Desc */ 37 #define TDCDPR 0x70 /* Current Desc */
38 #define TDIMR 0x80 /* Int Mask */ 38 #define TDIMR 0x80 /* Int Mask */
39 #define TDISR 0xa0 /* Int Status */ 39 #define TDISR 0xa0 /* Int Status */
40 40
41 /* Two-Channel DMA Control Register */ 41 /* Two-Channel DMA Control Register */
42 #define TDCR_SSZ_8_BITS (0x0 << 22) /* Sample Size */ 42 #define TDCR_SSZ_8_BITS (0x0 << 22) /* Sample Size */
43 #define TDCR_SSZ_12_BITS (0x1 << 22) 43 #define TDCR_SSZ_12_BITS (0x1 << 22)
44 #define TDCR_SSZ_16_BITS (0x2 << 22) 44 #define TDCR_SSZ_16_BITS (0x2 << 22)
45 #define TDCR_SSZ_20_BITS (0x3 << 22) 45 #define TDCR_SSZ_20_BITS (0x3 << 22)
46 #define TDCR_SSZ_24_BITS (0x4 << 22) 46 #define TDCR_SSZ_24_BITS (0x4 << 22)
47 #define TDCR_SSZ_32_BITS (0x5 << 22) 47 #define TDCR_SSZ_32_BITS (0x5 << 22)
48 #define TDCR_SSZ_SHIFT (0x1 << 22) 48 #define TDCR_SSZ_SHIFT (0x1 << 22)
49 #define TDCR_SSZ_MASK (0x7 << 22) 49 #define TDCR_SSZ_MASK (0x7 << 22)
50 #define TDCR_SSPMOD (0x1 << 21) /* SSP MOD */ 50 #define TDCR_SSPMOD (0x1 << 21) /* SSP MOD */
51 #define TDCR_ABR (0x1 << 20) /* Channel Abort */ 51 #define TDCR_ABR (0x1 << 20) /* Channel Abort */
52 #define TDCR_CDE (0x1 << 17) /* Close Desc Enable */ 52 #define TDCR_CDE (0x1 << 17) /* Close Desc Enable */
53 #define TDCR_PACKMOD (0x1 << 16) /* Pack Mode (ADMA Only) */ 53 #define TDCR_PACKMOD (0x1 << 16) /* Pack Mode (ADMA Only) */
54 #define TDCR_CHANACT (0x1 << 14) /* Channel Active */ 54 #define TDCR_CHANACT (0x1 << 14) /* Channel Active */
55 #define TDCR_FETCHND (0x1 << 13) /* Fetch Next Desc */ 55 #define TDCR_FETCHND (0x1 << 13) /* Fetch Next Desc */
56 #define TDCR_CHANEN (0x1 << 12) /* Channel Enable */ 56 #define TDCR_CHANEN (0x1 << 12) /* Channel Enable */
57 #define TDCR_INTMODE (0x1 << 10) /* Interrupt Mode */ 57 #define TDCR_INTMODE (0x1 << 10) /* Interrupt Mode */
58 #define TDCR_CHAINMOD (0x1 << 9) /* Chain Mode */ 58 #define TDCR_CHAINMOD (0x1 << 9) /* Chain Mode */
59 #define TDCR_BURSTSZ_MSK (0x7 << 6) /* Burst Size */ 59 #define TDCR_BURSTSZ_MSK (0x7 << 6) /* Burst Size */
60 #define TDCR_BURSTSZ_4B (0x0 << 6) 60 #define TDCR_BURSTSZ_4B (0x0 << 6)
61 #define TDCR_BURSTSZ_8B (0x1 << 6) 61 #define TDCR_BURSTSZ_8B (0x1 << 6)
62 #define TDCR_BURSTSZ_16B (0x3 << 6) 62 #define TDCR_BURSTSZ_16B (0x3 << 6)
63 #define TDCR_BURSTSZ_32B (0x6 << 6) 63 #define TDCR_BURSTSZ_32B (0x6 << 6)
64 #define TDCR_BURSTSZ_64B (0x7 << 6) 64 #define TDCR_BURSTSZ_64B (0x7 << 6)
65 #define TDCR_BURSTSZ_SQU_1B (0x5 << 6) 65 #define TDCR_BURSTSZ_SQU_1B (0x5 << 6)
66 #define TDCR_BURSTSZ_SQU_2B (0x6 << 6) 66 #define TDCR_BURSTSZ_SQU_2B (0x6 << 6)
67 #define TDCR_BURSTSZ_SQU_4B (0x0 << 6) 67 #define TDCR_BURSTSZ_SQU_4B (0x0 << 6)
68 #define TDCR_BURSTSZ_SQU_8B (0x1 << 6) 68 #define TDCR_BURSTSZ_SQU_8B (0x1 << 6)
69 #define TDCR_BURSTSZ_SQU_16B (0x3 << 6) 69 #define TDCR_BURSTSZ_SQU_16B (0x3 << 6)
70 #define TDCR_BURSTSZ_SQU_32B (0x7 << 6) 70 #define TDCR_BURSTSZ_SQU_32B (0x7 << 6)
71 #define TDCR_BURSTSZ_128B (0x5 << 6) 71 #define TDCR_BURSTSZ_128B (0x5 << 6)
72 #define TDCR_DSTDIR_MSK (0x3 << 4) /* Dst Direction */ 72 #define TDCR_DSTDIR_MSK (0x3 << 4) /* Dst Direction */
73 #define TDCR_DSTDIR_ADDR_HOLD (0x2 << 4) /* Dst Addr Hold */ 73 #define TDCR_DSTDIR_ADDR_HOLD (0x2 << 4) /* Dst Addr Hold */
74 #define TDCR_DSTDIR_ADDR_INC (0x0 << 4) /* Dst Addr Increment */ 74 #define TDCR_DSTDIR_ADDR_INC (0x0 << 4) /* Dst Addr Increment */
75 #define TDCR_SRCDIR_MSK (0x3 << 2) /* Src Direction */ 75 #define TDCR_SRCDIR_MSK (0x3 << 2) /* Src Direction */
76 #define TDCR_SRCDIR_ADDR_HOLD (0x2 << 2) /* Src Addr Hold */ 76 #define TDCR_SRCDIR_ADDR_HOLD (0x2 << 2) /* Src Addr Hold */
77 #define TDCR_SRCDIR_ADDR_INC (0x0 << 2) /* Src Addr Increment */ 77 #define TDCR_SRCDIR_ADDR_INC (0x0 << 2) /* Src Addr Increment */
78 #define TDCR_DSTDESCCONT (0x1 << 1) 78 #define TDCR_DSTDESCCONT (0x1 << 1)
79 #define TDCR_SRCDESTCONT (0x1 << 0) 79 #define TDCR_SRCDESTCONT (0x1 << 0)
80 80
81 /* Two-Channel DMA Int Mask Register */ 81 /* Two-Channel DMA Int Mask Register */
82 #define TDIMR_COMP (0x1 << 0) 82 #define TDIMR_COMP (0x1 << 0)
83 83
84 /* Two-Channel DMA Int Status Register */ 84 /* Two-Channel DMA Int Status Register */
85 #define TDISR_COMP (0x1 << 0) 85 #define TDISR_COMP (0x1 << 0)
86 86
87 /* 87 /*
88 * Two-Channel DMA Descriptor Struct 88 * Two-Channel DMA Descriptor Struct
89 * NOTE: desc's buf must be aligned to 16 bytes. 89 * NOTE: desc's buf must be aligned to 16 bytes.
90 */ 90 */
91 struct mmp_tdma_desc { 91 struct mmp_tdma_desc {
92 u32 byte_cnt; 92 u32 byte_cnt;
93 u32 src_addr; 93 u32 src_addr;
94 u32 dst_addr; 94 u32 dst_addr;
95 u32 nxt_desc; 95 u32 nxt_desc;
96 }; 96 };
97 97
98 enum mmp_tdma_type { 98 enum mmp_tdma_type {
99 MMP_AUD_TDMA = 0, 99 MMP_AUD_TDMA = 0,
100 PXA910_SQU, 100 PXA910_SQU,
101 }; 101 };
102 102
103 #define TDMA_ALIGNMENT 3 103 #define TDMA_ALIGNMENT 3
104 #define TDMA_MAX_XFER_BYTES SZ_64K 104 #define TDMA_MAX_XFER_BYTES SZ_64K
105 105
106 struct mmp_tdma_chan { 106 struct mmp_tdma_chan {
107 struct device *dev; 107 struct device *dev;
108 struct dma_chan chan; 108 struct dma_chan chan;
109 struct dma_async_tx_descriptor desc; 109 struct dma_async_tx_descriptor desc;
110 struct tasklet_struct tasklet; 110 struct tasklet_struct tasklet;
111 111
112 struct mmp_tdma_desc *desc_arr; 112 struct mmp_tdma_desc *desc_arr;
113 phys_addr_t desc_arr_phys; 113 phys_addr_t desc_arr_phys;
114 int desc_num; 114 int desc_num;
115 enum dma_transfer_direction dir; 115 enum dma_transfer_direction dir;
116 dma_addr_t dev_addr; 116 dma_addr_t dev_addr;
117 u32 burst_sz; 117 u32 burst_sz;
118 enum dma_slave_buswidth buswidth; 118 enum dma_slave_buswidth buswidth;
119 enum dma_status status; 119 enum dma_status status;
120 120
121 int idx; 121 int idx;
122 enum mmp_tdma_type type; 122 enum mmp_tdma_type type;
123 int irq; 123 int irq;
124 unsigned long reg_base; 124 unsigned long reg_base;
125 125
126 size_t buf_len; 126 size_t buf_len;
127 size_t period_len; 127 size_t period_len;
128 size_t pos; 128 size_t pos;
129 }; 129 };
130 130
131 #define TDMA_CHANNEL_NUM 2 131 #define TDMA_CHANNEL_NUM 2
132 struct mmp_tdma_device { 132 struct mmp_tdma_device {
133 struct device *dev; 133 struct device *dev;
134 void __iomem *base; 134 void __iomem *base;
135 struct dma_device device; 135 struct dma_device device;
136 struct mmp_tdma_chan *tdmac[TDMA_CHANNEL_NUM]; 136 struct mmp_tdma_chan *tdmac[TDMA_CHANNEL_NUM];
137 }; 137 };
138 138
139 #define to_mmp_tdma_chan(dchan) container_of(dchan, struct mmp_tdma_chan, chan) 139 #define to_mmp_tdma_chan(dchan) container_of(dchan, struct mmp_tdma_chan, chan)
140 140
141 static void mmp_tdma_chan_set_desc(struct mmp_tdma_chan *tdmac, dma_addr_t phys) 141 static void mmp_tdma_chan_set_desc(struct mmp_tdma_chan *tdmac, dma_addr_t phys)
142 { 142 {
143 writel(phys, tdmac->reg_base + TDNDPR); 143 writel(phys, tdmac->reg_base + TDNDPR);
144 writel(readl(tdmac->reg_base + TDCR) | TDCR_FETCHND, 144 writel(readl(tdmac->reg_base + TDCR) | TDCR_FETCHND,
145 tdmac->reg_base + TDCR); 145 tdmac->reg_base + TDCR);
146 } 146 }
147 147
148 static void mmp_tdma_enable_chan(struct mmp_tdma_chan *tdmac) 148 static void mmp_tdma_enable_chan(struct mmp_tdma_chan *tdmac)
149 { 149 {
150 /* enable irq */ 150 /* enable irq */
151 writel(TDIMR_COMP, tdmac->reg_base + TDIMR); 151 writel(TDIMR_COMP, tdmac->reg_base + TDIMR);
152 /* enable dma chan */ 152 /* enable dma chan */
153 writel(readl(tdmac->reg_base + TDCR) | TDCR_CHANEN, 153 writel(readl(tdmac->reg_base + TDCR) | TDCR_CHANEN,
154 tdmac->reg_base + TDCR); 154 tdmac->reg_base + TDCR);
155 tdmac->status = DMA_IN_PROGRESS; 155 tdmac->status = DMA_IN_PROGRESS;
156 } 156 }
157 157
158 static void mmp_tdma_disable_chan(struct mmp_tdma_chan *tdmac) 158 static void mmp_tdma_disable_chan(struct mmp_tdma_chan *tdmac)
159 { 159 {
160 writel(readl(tdmac->reg_base + TDCR) & ~TDCR_CHANEN, 160 writel(readl(tdmac->reg_base + TDCR) & ~TDCR_CHANEN,
161 tdmac->reg_base + TDCR); 161 tdmac->reg_base + TDCR);
162 162
163 /* disable irq */ 163 /* disable irq */
164 writel(0, tdmac->reg_base + TDIMR); 164 writel(0, tdmac->reg_base + TDIMR);
165 165
166 tdmac->status = DMA_SUCCESS; 166 tdmac->status = DMA_COMPLETE;
167 } 167 }
168 168
169 static void mmp_tdma_resume_chan(struct mmp_tdma_chan *tdmac) 169 static void mmp_tdma_resume_chan(struct mmp_tdma_chan *tdmac)
170 { 170 {
171 writel(readl(tdmac->reg_base + TDCR) | TDCR_CHANEN, 171 writel(readl(tdmac->reg_base + TDCR) | TDCR_CHANEN,
172 tdmac->reg_base + TDCR); 172 tdmac->reg_base + TDCR);
173 tdmac->status = DMA_IN_PROGRESS; 173 tdmac->status = DMA_IN_PROGRESS;
174 } 174 }
175 175
176 static void mmp_tdma_pause_chan(struct mmp_tdma_chan *tdmac) 176 static void mmp_tdma_pause_chan(struct mmp_tdma_chan *tdmac)
177 { 177 {
178 writel(readl(tdmac->reg_base + TDCR) & ~TDCR_CHANEN, 178 writel(readl(tdmac->reg_base + TDCR) & ~TDCR_CHANEN,
179 tdmac->reg_base + TDCR); 179 tdmac->reg_base + TDCR);
180 tdmac->status = DMA_PAUSED; 180 tdmac->status = DMA_PAUSED;
181 } 181 }
182 182
183 static int mmp_tdma_config_chan(struct mmp_tdma_chan *tdmac) 183 static int mmp_tdma_config_chan(struct mmp_tdma_chan *tdmac)
184 { 184 {
185 unsigned int tdcr; 185 unsigned int tdcr;
186 186
187 mmp_tdma_disable_chan(tdmac); 187 mmp_tdma_disable_chan(tdmac);
188 188
189 if (tdmac->dir == DMA_MEM_TO_DEV) 189 if (tdmac->dir == DMA_MEM_TO_DEV)
190 tdcr = TDCR_DSTDIR_ADDR_HOLD | TDCR_SRCDIR_ADDR_INC; 190 tdcr = TDCR_DSTDIR_ADDR_HOLD | TDCR_SRCDIR_ADDR_INC;
191 else if (tdmac->dir == DMA_DEV_TO_MEM) 191 else if (tdmac->dir == DMA_DEV_TO_MEM)
192 tdcr = TDCR_SRCDIR_ADDR_HOLD | TDCR_DSTDIR_ADDR_INC; 192 tdcr = TDCR_SRCDIR_ADDR_HOLD | TDCR_DSTDIR_ADDR_INC;
193 193
194 if (tdmac->type == MMP_AUD_TDMA) { 194 if (tdmac->type == MMP_AUD_TDMA) {
195 tdcr |= TDCR_PACKMOD; 195 tdcr |= TDCR_PACKMOD;
196 196
197 switch (tdmac->burst_sz) { 197 switch (tdmac->burst_sz) {
198 case 4: 198 case 4:
199 tdcr |= TDCR_BURSTSZ_4B; 199 tdcr |= TDCR_BURSTSZ_4B;
200 break; 200 break;
201 case 8: 201 case 8:
202 tdcr |= TDCR_BURSTSZ_8B; 202 tdcr |= TDCR_BURSTSZ_8B;
203 break; 203 break;
204 case 16: 204 case 16:
205 tdcr |= TDCR_BURSTSZ_16B; 205 tdcr |= TDCR_BURSTSZ_16B;
206 break; 206 break;
207 case 32: 207 case 32:
208 tdcr |= TDCR_BURSTSZ_32B; 208 tdcr |= TDCR_BURSTSZ_32B;
209 break; 209 break;
210 case 64: 210 case 64:
211 tdcr |= TDCR_BURSTSZ_64B; 211 tdcr |= TDCR_BURSTSZ_64B;
212 break; 212 break;
213 case 128: 213 case 128:
214 tdcr |= TDCR_BURSTSZ_128B; 214 tdcr |= TDCR_BURSTSZ_128B;
215 break; 215 break;
216 default: 216 default:
217 dev_err(tdmac->dev, "mmp_tdma: unknown burst size.\n"); 217 dev_err(tdmac->dev, "mmp_tdma: unknown burst size.\n");
218 return -EINVAL; 218 return -EINVAL;
219 } 219 }
220 220
221 switch (tdmac->buswidth) { 221 switch (tdmac->buswidth) {
222 case DMA_SLAVE_BUSWIDTH_1_BYTE: 222 case DMA_SLAVE_BUSWIDTH_1_BYTE:
223 tdcr |= TDCR_SSZ_8_BITS; 223 tdcr |= TDCR_SSZ_8_BITS;
224 break; 224 break;
225 case DMA_SLAVE_BUSWIDTH_2_BYTES: 225 case DMA_SLAVE_BUSWIDTH_2_BYTES:
226 tdcr |= TDCR_SSZ_16_BITS; 226 tdcr |= TDCR_SSZ_16_BITS;
227 break; 227 break;
228 case DMA_SLAVE_BUSWIDTH_4_BYTES: 228 case DMA_SLAVE_BUSWIDTH_4_BYTES:
229 tdcr |= TDCR_SSZ_32_BITS; 229 tdcr |= TDCR_SSZ_32_BITS;
230 break; 230 break;
231 default: 231 default:
232 dev_err(tdmac->dev, "mmp_tdma: unknown bus size.\n"); 232 dev_err(tdmac->dev, "mmp_tdma: unknown bus size.\n");
233 return -EINVAL; 233 return -EINVAL;
234 } 234 }
235 } else if (tdmac->type == PXA910_SQU) { 235 } else if (tdmac->type == PXA910_SQU) {
236 tdcr |= TDCR_SSPMOD; 236 tdcr |= TDCR_SSPMOD;
237 237
238 switch (tdmac->burst_sz) { 238 switch (tdmac->burst_sz) {
239 case 1: 239 case 1:
240 tdcr |= TDCR_BURSTSZ_SQU_1B; 240 tdcr |= TDCR_BURSTSZ_SQU_1B;
241 break; 241 break;
242 case 2: 242 case 2:
243 tdcr |= TDCR_BURSTSZ_SQU_2B; 243 tdcr |= TDCR_BURSTSZ_SQU_2B;
244 break; 244 break;
245 case 4: 245 case 4:
246 tdcr |= TDCR_BURSTSZ_SQU_4B; 246 tdcr |= TDCR_BURSTSZ_SQU_4B;
247 break; 247 break;
248 case 8: 248 case 8:
249 tdcr |= TDCR_BURSTSZ_SQU_8B; 249 tdcr |= TDCR_BURSTSZ_SQU_8B;
250 break; 250 break;
251 case 16: 251 case 16:
252 tdcr |= TDCR_BURSTSZ_SQU_16B; 252 tdcr |= TDCR_BURSTSZ_SQU_16B;
253 break; 253 break;
254 case 32: 254 case 32:
255 tdcr |= TDCR_BURSTSZ_SQU_32B; 255 tdcr |= TDCR_BURSTSZ_SQU_32B;
256 break; 256 break;
257 default: 257 default:
258 dev_err(tdmac->dev, "mmp_tdma: unknown burst size.\n"); 258 dev_err(tdmac->dev, "mmp_tdma: unknown burst size.\n");
259 return -EINVAL; 259 return -EINVAL;
260 } 260 }
261 } 261 }
262 262
263 writel(tdcr, tdmac->reg_base + TDCR); 263 writel(tdcr, tdmac->reg_base + TDCR);
264 return 0; 264 return 0;
265 } 265 }
266 266
267 static int mmp_tdma_clear_chan_irq(struct mmp_tdma_chan *tdmac) 267 static int mmp_tdma_clear_chan_irq(struct mmp_tdma_chan *tdmac)
268 { 268 {
269 u32 reg = readl(tdmac->reg_base + TDISR); 269 u32 reg = readl(tdmac->reg_base + TDISR);
270 270
271 if (reg & TDISR_COMP) { 271 if (reg & TDISR_COMP) {
272 /* clear irq */ 272 /* clear irq */
273 reg &= ~TDISR_COMP; 273 reg &= ~TDISR_COMP;
274 writel(reg, tdmac->reg_base + TDISR); 274 writel(reg, tdmac->reg_base + TDISR);
275 275
276 return 0; 276 return 0;
277 } 277 }
278 return -EAGAIN; 278 return -EAGAIN;
279 } 279 }
280 280
281 static irqreturn_t mmp_tdma_chan_handler(int irq, void *dev_id) 281 static irqreturn_t mmp_tdma_chan_handler(int irq, void *dev_id)
282 { 282 {
283 struct mmp_tdma_chan *tdmac = dev_id; 283 struct mmp_tdma_chan *tdmac = dev_id;
284 284
285 if (mmp_tdma_clear_chan_irq(tdmac) == 0) { 285 if (mmp_tdma_clear_chan_irq(tdmac) == 0) {
286 tdmac->pos = (tdmac->pos + tdmac->period_len) % tdmac->buf_len; 286 tdmac->pos = (tdmac->pos + tdmac->period_len) % tdmac->buf_len;
287 tasklet_schedule(&tdmac->tasklet); 287 tasklet_schedule(&tdmac->tasklet);
288 return IRQ_HANDLED; 288 return IRQ_HANDLED;
289 } else 289 } else
290 return IRQ_NONE; 290 return IRQ_NONE;
291 } 291 }
292 292
293 static irqreturn_t mmp_tdma_int_handler(int irq, void *dev_id) 293 static irqreturn_t mmp_tdma_int_handler(int irq, void *dev_id)
294 { 294 {
295 struct mmp_tdma_device *tdev = dev_id; 295 struct mmp_tdma_device *tdev = dev_id;
296 int i, ret; 296 int i, ret;
297 int irq_num = 0; 297 int irq_num = 0;
298 298
299 for (i = 0; i < TDMA_CHANNEL_NUM; i++) { 299 for (i = 0; i < TDMA_CHANNEL_NUM; i++) {
300 struct mmp_tdma_chan *tdmac = tdev->tdmac[i]; 300 struct mmp_tdma_chan *tdmac = tdev->tdmac[i];
301 301
302 ret = mmp_tdma_chan_handler(irq, tdmac); 302 ret = mmp_tdma_chan_handler(irq, tdmac);
303 if (ret == IRQ_HANDLED) 303 if (ret == IRQ_HANDLED)
304 irq_num++; 304 irq_num++;
305 } 305 }
306 306
307 if (irq_num) 307 if (irq_num)
308 return IRQ_HANDLED; 308 return IRQ_HANDLED;
309 else 309 else
310 return IRQ_NONE; 310 return IRQ_NONE;
311 } 311 }
312 312
313 static void dma_do_tasklet(unsigned long data) 313 static void dma_do_tasklet(unsigned long data)
314 { 314 {
315 struct mmp_tdma_chan *tdmac = (struct mmp_tdma_chan *)data; 315 struct mmp_tdma_chan *tdmac = (struct mmp_tdma_chan *)data;
316 316
317 if (tdmac->desc.callback) 317 if (tdmac->desc.callback)
318 tdmac->desc.callback(tdmac->desc.callback_param); 318 tdmac->desc.callback(tdmac->desc.callback_param);
319 319
320 } 320 }
321 321
322 static void mmp_tdma_free_descriptor(struct mmp_tdma_chan *tdmac) 322 static void mmp_tdma_free_descriptor(struct mmp_tdma_chan *tdmac)
323 { 323 {
324 struct gen_pool *gpool; 324 struct gen_pool *gpool;
325 int size = tdmac->desc_num * sizeof(struct mmp_tdma_desc); 325 int size = tdmac->desc_num * sizeof(struct mmp_tdma_desc);
326 326
327 gpool = sram_get_gpool("asram"); 327 gpool = sram_get_gpool("asram");
328 if (tdmac->desc_arr) 328 if (tdmac->desc_arr)
329 gen_pool_free(gpool, (unsigned long)tdmac->desc_arr, 329 gen_pool_free(gpool, (unsigned long)tdmac->desc_arr,
330 size); 330 size);
331 tdmac->desc_arr = NULL; 331 tdmac->desc_arr = NULL;
332 332
333 return; 333 return;
334 } 334 }
335 335
336 static dma_cookie_t mmp_tdma_tx_submit(struct dma_async_tx_descriptor *tx) 336 static dma_cookie_t mmp_tdma_tx_submit(struct dma_async_tx_descriptor *tx)
337 { 337 {
338 struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(tx->chan); 338 struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(tx->chan);
339 339
340 mmp_tdma_chan_set_desc(tdmac, tdmac->desc_arr_phys); 340 mmp_tdma_chan_set_desc(tdmac, tdmac->desc_arr_phys);
341 341
342 return 0; 342 return 0;
343 } 343 }
344 344
345 static int mmp_tdma_alloc_chan_resources(struct dma_chan *chan) 345 static int mmp_tdma_alloc_chan_resources(struct dma_chan *chan)
346 { 346 {
347 struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan); 347 struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
348 int ret; 348 int ret;
349 349
350 dma_async_tx_descriptor_init(&tdmac->desc, chan); 350 dma_async_tx_descriptor_init(&tdmac->desc, chan);
351 tdmac->desc.tx_submit = mmp_tdma_tx_submit; 351 tdmac->desc.tx_submit = mmp_tdma_tx_submit;
352 352
353 if (tdmac->irq) { 353 if (tdmac->irq) {
354 ret = devm_request_irq(tdmac->dev, tdmac->irq, 354 ret = devm_request_irq(tdmac->dev, tdmac->irq,
355 mmp_tdma_chan_handler, 0, "tdma", tdmac); 355 mmp_tdma_chan_handler, 0, "tdma", tdmac);
356 if (ret) 356 if (ret)
357 return ret; 357 return ret;
358 } 358 }
359 return 1; 359 return 1;
360 } 360 }
361 361
362 static void mmp_tdma_free_chan_resources(struct dma_chan *chan) 362 static void mmp_tdma_free_chan_resources(struct dma_chan *chan)
363 { 363 {
364 struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan); 364 struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
365 365
366 if (tdmac->irq) 366 if (tdmac->irq)
367 devm_free_irq(tdmac->dev, tdmac->irq, tdmac); 367 devm_free_irq(tdmac->dev, tdmac->irq, tdmac);
368 mmp_tdma_free_descriptor(tdmac); 368 mmp_tdma_free_descriptor(tdmac);
369 return; 369 return;
370 } 370 }
371 371
372 struct mmp_tdma_desc *mmp_tdma_alloc_descriptor(struct mmp_tdma_chan *tdmac) 372 struct mmp_tdma_desc *mmp_tdma_alloc_descriptor(struct mmp_tdma_chan *tdmac)
373 { 373 {
374 struct gen_pool *gpool; 374 struct gen_pool *gpool;
375 int size = tdmac->desc_num * sizeof(struct mmp_tdma_desc); 375 int size = tdmac->desc_num * sizeof(struct mmp_tdma_desc);
376 376
377 gpool = sram_get_gpool("asram"); 377 gpool = sram_get_gpool("asram");
378 if (!gpool) 378 if (!gpool)
379 return NULL; 379 return NULL;
380 380
381 tdmac->desc_arr = (void *)gen_pool_alloc(gpool, size); 381 tdmac->desc_arr = (void *)gen_pool_alloc(gpool, size);
382 if (!tdmac->desc_arr) 382 if (!tdmac->desc_arr)
383 return NULL; 383 return NULL;
384 384
385 tdmac->desc_arr_phys = gen_pool_virt_to_phys(gpool, 385 tdmac->desc_arr_phys = gen_pool_virt_to_phys(gpool,
386 (unsigned long)tdmac->desc_arr); 386 (unsigned long)tdmac->desc_arr);
387 387
388 return tdmac->desc_arr; 388 return tdmac->desc_arr;
389 } 389 }
390 390
391 static struct dma_async_tx_descriptor *mmp_tdma_prep_dma_cyclic( 391 static struct dma_async_tx_descriptor *mmp_tdma_prep_dma_cyclic(
392 struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len, 392 struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
393 size_t period_len, enum dma_transfer_direction direction, 393 size_t period_len, enum dma_transfer_direction direction,
394 unsigned long flags, void *context) 394 unsigned long flags, void *context)
395 { 395 {
396 struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan); 396 struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
397 struct mmp_tdma_desc *desc; 397 struct mmp_tdma_desc *desc;
398 int num_periods = buf_len / period_len; 398 int num_periods = buf_len / period_len;
399 int i = 0, buf = 0; 399 int i = 0, buf = 0;
400 400
401 if (tdmac->status != DMA_SUCCESS) 401 if (tdmac->status != DMA_COMPLETE)
402 return NULL; 402 return NULL;
403 403
404 if (period_len > TDMA_MAX_XFER_BYTES) { 404 if (period_len > TDMA_MAX_XFER_BYTES) {
405 dev_err(tdmac->dev, 405 dev_err(tdmac->dev,
406 "maximum period size exceeded: %d > %d\n", 406 "maximum period size exceeded: %d > %d\n",
407 period_len, TDMA_MAX_XFER_BYTES); 407 period_len, TDMA_MAX_XFER_BYTES);
408 goto err_out; 408 goto err_out;
409 } 409 }
410 410
411 tdmac->status = DMA_IN_PROGRESS; 411 tdmac->status = DMA_IN_PROGRESS;
412 tdmac->desc_num = num_periods; 412 tdmac->desc_num = num_periods;
413 desc = mmp_tdma_alloc_descriptor(tdmac); 413 desc = mmp_tdma_alloc_descriptor(tdmac);
414 if (!desc) 414 if (!desc)
415 goto err_out; 415 goto err_out;
416 416
417 while (buf < buf_len) { 417 while (buf < buf_len) {
418 desc = &tdmac->desc_arr[i]; 418 desc = &tdmac->desc_arr[i];
419 419
420 if (i + 1 == num_periods) 420 if (i + 1 == num_periods)
421 desc->nxt_desc = tdmac->desc_arr_phys; 421 desc->nxt_desc = tdmac->desc_arr_phys;
422 else 422 else
423 desc->nxt_desc = tdmac->desc_arr_phys + 423 desc->nxt_desc = tdmac->desc_arr_phys +
424 sizeof(*desc) * (i + 1); 424 sizeof(*desc) * (i + 1);
425 425
426 if (direction == DMA_MEM_TO_DEV) { 426 if (direction == DMA_MEM_TO_DEV) {
427 desc->src_addr = dma_addr; 427 desc->src_addr = dma_addr;
428 desc->dst_addr = tdmac->dev_addr; 428 desc->dst_addr = tdmac->dev_addr;
429 } else { 429 } else {
430 desc->src_addr = tdmac->dev_addr; 430 desc->src_addr = tdmac->dev_addr;
431 desc->dst_addr = dma_addr; 431 desc->dst_addr = dma_addr;
432 } 432 }
433 desc->byte_cnt = period_len; 433 desc->byte_cnt = period_len;
434 dma_addr += period_len; 434 dma_addr += period_len;
435 buf += period_len; 435 buf += period_len;
436 i++; 436 i++;
437 } 437 }
438 438
439 tdmac->buf_len = buf_len; 439 tdmac->buf_len = buf_len;
440 tdmac->period_len = period_len; 440 tdmac->period_len = period_len;
441 tdmac->pos = 0; 441 tdmac->pos = 0;
442 442
443 return &tdmac->desc; 443 return &tdmac->desc;
444 444
445 err_out: 445 err_out:
446 tdmac->status = DMA_ERROR; 446 tdmac->status = DMA_ERROR;
447 return NULL; 447 return NULL;
448 } 448 }
449 449
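/*
 * Illustrative sketch, not part of this driver or commit: how a dmaengine
 * client (for example an ASoC platform driver) typically reaches the cyclic
 * prep callback above.  The channel, buffer and FIFO parameters are
 * hypothetical; the prep call uses the device_prep_dma_cyclic signature
 * shown in this file.
 */
static int example_start_cyclic_tx(struct dma_chan *chan, dma_addr_t buf,
				   size_t buf_len, size_t period_len,
				   dma_addr_t dev_fifo)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_MEM_TO_DEV,
		.dst_addr	= dev_fifo,
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst	= 32,
	};
	struct dma_async_tx_descriptor *tx;
	int ret;

	/* routed to mmp_tdma_control(DMA_SLAVE_CONFIG) further below */
	ret = dmaengine_slave_config(chan, &cfg);
	if (ret)
		return ret;

	/* routed to mmp_tdma_prep_dma_cyclic() above */
	tx = chan->device->device_prep_dma_cyclic(chan, buf, buf_len,
					period_len, DMA_MEM_TO_DEV,
					DMA_PREP_INTERRUPT, NULL);
	if (!tx)
		return -ENOMEM;

	dmaengine_submit(tx);			/* mmp_tdma_tx_submit() */
	dma_async_issue_pending(chan);		/* mmp_tdma_issue_pending() */
	return 0;
}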
450 static int mmp_tdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, 450 static int mmp_tdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
451 unsigned long arg) 451 unsigned long arg)
452 { 452 {
453 struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan); 453 struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
454 struct dma_slave_config *dmaengine_cfg = (void *)arg; 454 struct dma_slave_config *dmaengine_cfg = (void *)arg;
455 int ret = 0; 455 int ret = 0;
456 456
457 switch (cmd) { 457 switch (cmd) {
458 case DMA_TERMINATE_ALL: 458 case DMA_TERMINATE_ALL:
459 mmp_tdma_disable_chan(tdmac); 459 mmp_tdma_disable_chan(tdmac);
460 break; 460 break;
461 case DMA_PAUSE: 461 case DMA_PAUSE:
462 mmp_tdma_pause_chan(tdmac); 462 mmp_tdma_pause_chan(tdmac);
463 break; 463 break;
464 case DMA_RESUME: 464 case DMA_RESUME:
465 mmp_tdma_resume_chan(tdmac); 465 mmp_tdma_resume_chan(tdmac);
466 break; 466 break;
467 case DMA_SLAVE_CONFIG: 467 case DMA_SLAVE_CONFIG:
468 if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) { 468 if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
469 tdmac->dev_addr = dmaengine_cfg->src_addr; 469 tdmac->dev_addr = dmaengine_cfg->src_addr;
470 tdmac->burst_sz = dmaengine_cfg->src_maxburst; 470 tdmac->burst_sz = dmaengine_cfg->src_maxburst;
471 tdmac->buswidth = dmaengine_cfg->src_addr_width; 471 tdmac->buswidth = dmaengine_cfg->src_addr_width;
472 } else { 472 } else {
473 tdmac->dev_addr = dmaengine_cfg->dst_addr; 473 tdmac->dev_addr = dmaengine_cfg->dst_addr;
474 tdmac->burst_sz = dmaengine_cfg->dst_maxburst; 474 tdmac->burst_sz = dmaengine_cfg->dst_maxburst;
475 tdmac->buswidth = dmaengine_cfg->dst_addr_width; 475 tdmac->buswidth = dmaengine_cfg->dst_addr_width;
476 } 476 }
477 tdmac->dir = dmaengine_cfg->direction; 477 tdmac->dir = dmaengine_cfg->direction;
478 return mmp_tdma_config_chan(tdmac); 478 return mmp_tdma_config_chan(tdmac);
479 default: 479 default:
480 ret = -ENOSYS; 480 ret = -ENOSYS;
481 } 481 }
482 482
483 return ret; 483 return ret;
484 } 484 }
485 485
486 static enum dma_status mmp_tdma_tx_status(struct dma_chan *chan, 486 static enum dma_status mmp_tdma_tx_status(struct dma_chan *chan,
487 dma_cookie_t cookie, struct dma_tx_state *txstate) 487 dma_cookie_t cookie, struct dma_tx_state *txstate)
488 { 488 {
489 struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan); 489 struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
490 490
491 dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie, 491 dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie,
492 tdmac->buf_len - tdmac->pos); 492 tdmac->buf_len - tdmac->pos);
493 493
494 return tdmac->status; 494 return tdmac->status;
495 } 495 }
496 496
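/*
 * Illustrative sketch (not part of this commit): reading back the position
 * reported by mmp_tdma_tx_status() above.  Since the driver reports a
 * residue of buf_len - pos, a cyclic user such as an audio pointer callback
 * can recover the current offset into the ring buffer like this.
 */
static size_t example_cyclic_offset(struct dma_chan *chan, size_t buf_len)
{
	struct dma_tx_state state;

	dmaengine_tx_status(chan, chan->cookie, &state);
	return buf_len - state.residue;		/* == tdmac->pos */
}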
497 static void mmp_tdma_issue_pending(struct dma_chan *chan) 497 static void mmp_tdma_issue_pending(struct dma_chan *chan)
498 { 498 {
499 struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan); 499 struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
500 500
501 mmp_tdma_enable_chan(tdmac); 501 mmp_tdma_enable_chan(tdmac);
502 } 502 }
503 503
504 static int mmp_tdma_remove(struct platform_device *pdev) 504 static int mmp_tdma_remove(struct platform_device *pdev)
505 { 505 {
506 struct mmp_tdma_device *tdev = platform_get_drvdata(pdev); 506 struct mmp_tdma_device *tdev = platform_get_drvdata(pdev);
507 507
508 dma_async_device_unregister(&tdev->device); 508 dma_async_device_unregister(&tdev->device);
509 return 0; 509 return 0;
510 } 510 }
511 511
512 static int mmp_tdma_chan_init(struct mmp_tdma_device *tdev, 512 static int mmp_tdma_chan_init(struct mmp_tdma_device *tdev,
513 int idx, int irq, int type) 513 int idx, int irq, int type)
514 { 514 {
515 struct mmp_tdma_chan *tdmac; 515 struct mmp_tdma_chan *tdmac;
516 516
517 if (idx >= TDMA_CHANNEL_NUM) { 517 if (idx >= TDMA_CHANNEL_NUM) {
518 dev_err(tdev->dev, "too many channels for device!\n"); 518 dev_err(tdev->dev, "too many channels for device!\n");
519 return -EINVAL; 519 return -EINVAL;
520 } 520 }
521 521
522 /* alloc channel */ 522 /* alloc channel */
523 tdmac = devm_kzalloc(tdev->dev, sizeof(*tdmac), GFP_KERNEL); 523 tdmac = devm_kzalloc(tdev->dev, sizeof(*tdmac), GFP_KERNEL);
524 if (!tdmac) { 524 if (!tdmac) {
525 dev_err(tdev->dev, "no free memory for DMA channels!\n"); 525 dev_err(tdev->dev, "no free memory for DMA channels!\n");
526 return -ENOMEM; 526 return -ENOMEM;
527 } 527 }
528 if (irq) 528 if (irq)
529 tdmac->irq = irq; 529 tdmac->irq = irq;
530 tdmac->dev = tdev->dev; 530 tdmac->dev = tdev->dev;
531 tdmac->chan.device = &tdev->device; 531 tdmac->chan.device = &tdev->device;
532 tdmac->idx = idx; 532 tdmac->idx = idx;
533 tdmac->type = type; 533 tdmac->type = type;
534 tdmac->reg_base = (unsigned long)tdev->base + idx * 4; 534 tdmac->reg_base = (unsigned long)tdev->base + idx * 4;
535 tdmac->status = DMA_SUCCESS; 535 tdmac->status = DMA_COMPLETE;
536 tdev->tdmac[tdmac->idx] = tdmac; 536 tdev->tdmac[tdmac->idx] = tdmac;
537 tasklet_init(&tdmac->tasklet, dma_do_tasklet, (unsigned long)tdmac); 537 tasklet_init(&tdmac->tasklet, dma_do_tasklet, (unsigned long)tdmac);
538 538
539 /* add the channel to tdma_chan list */ 539 /* add the channel to tdma_chan list */
540 list_add_tail(&tdmac->chan.device_node, 540 list_add_tail(&tdmac->chan.device_node,
541 &tdev->device.channels); 541 &tdev->device.channels);
542 return 0; 542 return 0;
543 } 543 }
544 544
545 static struct of_device_id mmp_tdma_dt_ids[] = { 545 static struct of_device_id mmp_tdma_dt_ids[] = {
546 { .compatible = "marvell,adma-1.0", .data = (void *)MMP_AUD_TDMA}, 546 { .compatible = "marvell,adma-1.0", .data = (void *)MMP_AUD_TDMA},
547 { .compatible = "marvell,pxa910-squ", .data = (void *)PXA910_SQU}, 547 { .compatible = "marvell,pxa910-squ", .data = (void *)PXA910_SQU},
548 {} 548 {}
549 }; 549 };
550 MODULE_DEVICE_TABLE(of, mmp_tdma_dt_ids); 550 MODULE_DEVICE_TABLE(of, mmp_tdma_dt_ids);
551 551
552 static int mmp_tdma_probe(struct platform_device *pdev) 552 static int mmp_tdma_probe(struct platform_device *pdev)
553 { 553 {
554 enum mmp_tdma_type type; 554 enum mmp_tdma_type type;
555 const struct of_device_id *of_id; 555 const struct of_device_id *of_id;
556 struct mmp_tdma_device *tdev; 556 struct mmp_tdma_device *tdev;
557 struct resource *iores; 557 struct resource *iores;
558 int i, ret; 558 int i, ret;
559 int irq = 0, irq_num = 0; 559 int irq = 0, irq_num = 0;
560 int chan_num = TDMA_CHANNEL_NUM; 560 int chan_num = TDMA_CHANNEL_NUM;
561 561
562 of_id = of_match_device(mmp_tdma_dt_ids, &pdev->dev); 562 of_id = of_match_device(mmp_tdma_dt_ids, &pdev->dev);
563 if (of_id) 563 if (of_id)
564 type = (enum mmp_tdma_type) of_id->data; 564 type = (enum mmp_tdma_type) of_id->data;
565 else 565 else
566 type = platform_get_device_id(pdev)->driver_data; 566 type = platform_get_device_id(pdev)->driver_data;
567 567
568 /* always have a couple of channels */ 568 /* always have a couple of channels */
569 tdev = devm_kzalloc(&pdev->dev, sizeof(*tdev), GFP_KERNEL); 569 tdev = devm_kzalloc(&pdev->dev, sizeof(*tdev), GFP_KERNEL);
570 if (!tdev) 570 if (!tdev)
571 return -ENOMEM; 571 return -ENOMEM;
572 572
573 tdev->dev = &pdev->dev; 573 tdev->dev = &pdev->dev;
574 574
575 for (i = 0; i < chan_num; i++) { 575 for (i = 0; i < chan_num; i++) {
576 if (platform_get_irq(pdev, i) > 0) 576 if (platform_get_irq(pdev, i) > 0)
577 irq_num++; 577 irq_num++;
578 } 578 }
579 579
580 iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); 580 iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
581 tdev->base = devm_ioremap_resource(&pdev->dev, iores); 581 tdev->base = devm_ioremap_resource(&pdev->dev, iores);
582 if (IS_ERR(tdev->base)) 582 if (IS_ERR(tdev->base))
583 return PTR_ERR(tdev->base); 583 return PTR_ERR(tdev->base);
584 584
585 INIT_LIST_HEAD(&tdev->device.channels); 585 INIT_LIST_HEAD(&tdev->device.channels);
586 586
587 if (irq_num != chan_num) { 587 if (irq_num != chan_num) {
588 irq = platform_get_irq(pdev, 0); 588 irq = platform_get_irq(pdev, 0);
589 ret = devm_request_irq(&pdev->dev, irq, 589 ret = devm_request_irq(&pdev->dev, irq,
590 mmp_tdma_int_handler, 0, "tdma", tdev); 590 mmp_tdma_int_handler, 0, "tdma", tdev);
591 if (ret) 591 if (ret)
592 return ret; 592 return ret;
593 } 593 }
594 594
595 /* initialize channel parameters */ 595 /* initialize channel parameters */
596 for (i = 0; i < chan_num; i++) { 596 for (i = 0; i < chan_num; i++) {
597 irq = (irq_num != chan_num) ? 0 : platform_get_irq(pdev, i); 597 irq = (irq_num != chan_num) ? 0 : platform_get_irq(pdev, i);
598 ret = mmp_tdma_chan_init(tdev, i, irq, type); 598 ret = mmp_tdma_chan_init(tdev, i, irq, type);
599 if (ret) 599 if (ret)
600 return ret; 600 return ret;
601 } 601 }
602 602
603 dma_cap_set(DMA_SLAVE, tdev->device.cap_mask); 603 dma_cap_set(DMA_SLAVE, tdev->device.cap_mask);
604 dma_cap_set(DMA_CYCLIC, tdev->device.cap_mask); 604 dma_cap_set(DMA_CYCLIC, tdev->device.cap_mask);
605 tdev->device.dev = &pdev->dev; 605 tdev->device.dev = &pdev->dev;
606 tdev->device.device_alloc_chan_resources = 606 tdev->device.device_alloc_chan_resources =
607 mmp_tdma_alloc_chan_resources; 607 mmp_tdma_alloc_chan_resources;
608 tdev->device.device_free_chan_resources = 608 tdev->device.device_free_chan_resources =
609 mmp_tdma_free_chan_resources; 609 mmp_tdma_free_chan_resources;
610 tdev->device.device_prep_dma_cyclic = mmp_tdma_prep_dma_cyclic; 610 tdev->device.device_prep_dma_cyclic = mmp_tdma_prep_dma_cyclic;
611 tdev->device.device_tx_status = mmp_tdma_tx_status; 611 tdev->device.device_tx_status = mmp_tdma_tx_status;
612 tdev->device.device_issue_pending = mmp_tdma_issue_pending; 612 tdev->device.device_issue_pending = mmp_tdma_issue_pending;
613 tdev->device.device_control = mmp_tdma_control; 613 tdev->device.device_control = mmp_tdma_control;
614 tdev->device.copy_align = TDMA_ALIGNMENT; 614 tdev->device.copy_align = TDMA_ALIGNMENT;
615 615
616 dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); 616 dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
617 platform_set_drvdata(pdev, tdev); 617 platform_set_drvdata(pdev, tdev);
618 618
619 ret = dma_async_device_register(&tdev->device); 619 ret = dma_async_device_register(&tdev->device);
620 if (ret) { 620 if (ret) {
621 dev_err(tdev->device.dev, "unable to register\n"); 621 dev_err(tdev->device.dev, "unable to register\n");
622 return ret; 622 return ret;
623 } 623 }
624 624
625 dev_info(tdev->device.dev, "initialized\n"); 625 dev_info(tdev->device.dev, "initialized\n");
626 return 0; 626 return 0;
627 } 627 }
628 628
629 static const struct platform_device_id mmp_tdma_id_table[] = { 629 static const struct platform_device_id mmp_tdma_id_table[] = {
630 { "mmp-adma", MMP_AUD_TDMA }, 630 { "mmp-adma", MMP_AUD_TDMA },
631 { "pxa910-squ", PXA910_SQU }, 631 { "pxa910-squ", PXA910_SQU },
632 { }, 632 { },
633 }; 633 };
634 634
635 static struct platform_driver mmp_tdma_driver = { 635 static struct platform_driver mmp_tdma_driver = {
636 .driver = { 636 .driver = {
637 .name = "mmp-tdma", 637 .name = "mmp-tdma",
638 .owner = THIS_MODULE, 638 .owner = THIS_MODULE,
639 .of_match_table = mmp_tdma_dt_ids, 639 .of_match_table = mmp_tdma_dt_ids,
640 }, 640 },
641 .id_table = mmp_tdma_id_table, 641 .id_table = mmp_tdma_id_table,
642 .probe = mmp_tdma_probe, 642 .probe = mmp_tdma_probe,
643 .remove = mmp_tdma_remove, 643 .remove = mmp_tdma_remove,
644 }; 644 };
645 645
646 module_platform_driver(mmp_tdma_driver); 646 module_platform_driver(mmp_tdma_driver);
647 647
648 MODULE_LICENSE("GPL"); 648 MODULE_LICENSE("GPL");
649 MODULE_DESCRIPTION("MMP Two-Channel DMA Driver"); 649 MODULE_DESCRIPTION("MMP Two-Channel DMA Driver");
650 MODULE_ALIAS("platform:mmp-tdma"); 650 MODULE_ALIAS("platform:mmp-tdma");
651 MODULE_AUTHOR("Leo Yan <leoy@marvell.com>"); 651 MODULE_AUTHOR("Leo Yan <leoy@marvell.com>");
652 MODULE_AUTHOR("Zhangfei Gao <zhangfei.gao@marvell.com>"); 652 MODULE_AUTHOR("Zhangfei Gao <zhangfei.gao@marvell.com>");
653 653
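/*
 * Illustrative sketch, not part of this file: the two hunks above replace
 * DMA_SUCCESS with DMA_COMPLETE in the channel status handling.  A
 * caller-side completion check against the renamed status value would look
 * like this (the cookie handling is hypothetical):
 */
static bool example_tx_done(struct dma_chan *chan, dma_cookie_t cookie)
{
	return dma_async_is_tx_complete(chan, cookie, NULL, NULL) ==
	       DMA_COMPLETE;
}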
drivers/dma/mv_xor.c
1 /* 1 /*
2 * offload engine driver for the Marvell XOR engine 2 * offload engine driver for the Marvell XOR engine
3 * Copyright (C) 2007, 2008, Marvell International Ltd. 3 * Copyright (C) 2007, 2008, Marvell International Ltd.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License, 6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation. 7 * version 2, as published by the Free Software Foundation.
8 * 8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT 9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details. 12 * more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License along with 14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc., 15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17 */ 17 */
18 18
19 #include <linux/init.h> 19 #include <linux/init.h>
20 #include <linux/module.h> 20 #include <linux/module.h>
21 #include <linux/slab.h> 21 #include <linux/slab.h>
22 #include <linux/delay.h> 22 #include <linux/delay.h>
23 #include <linux/dma-mapping.h> 23 #include <linux/dma-mapping.h>
24 #include <linux/spinlock.h> 24 #include <linux/spinlock.h>
25 #include <linux/interrupt.h> 25 #include <linux/interrupt.h>
26 #include <linux/platform_device.h> 26 #include <linux/platform_device.h>
27 #include <linux/memory.h> 27 #include <linux/memory.h>
28 #include <linux/clk.h> 28 #include <linux/clk.h>
29 #include <linux/of.h> 29 #include <linux/of.h>
30 #include <linux/of_irq.h> 30 #include <linux/of_irq.h>
31 #include <linux/irqdomain.h> 31 #include <linux/irqdomain.h>
32 #include <linux/platform_data/dma-mv_xor.h> 32 #include <linux/platform_data/dma-mv_xor.h>
33 33
34 #include "dmaengine.h" 34 #include "dmaengine.h"
35 #include "mv_xor.h" 35 #include "mv_xor.h"
36 36
37 static void mv_xor_issue_pending(struct dma_chan *chan); 37 static void mv_xor_issue_pending(struct dma_chan *chan);
38 38
39 #define to_mv_xor_chan(chan) \ 39 #define to_mv_xor_chan(chan) \
40 container_of(chan, struct mv_xor_chan, dmachan) 40 container_of(chan, struct mv_xor_chan, dmachan)
41 41
42 #define to_mv_xor_slot(tx) \ 42 #define to_mv_xor_slot(tx) \
43 container_of(tx, struct mv_xor_desc_slot, async_tx) 43 container_of(tx, struct mv_xor_desc_slot, async_tx)
44 44
45 #define mv_chan_to_devp(chan) \ 45 #define mv_chan_to_devp(chan) \
46 ((chan)->dmadev.dev) 46 ((chan)->dmadev.dev)
47 47
48 static void mv_desc_init(struct mv_xor_desc_slot *desc, unsigned long flags) 48 static void mv_desc_init(struct mv_xor_desc_slot *desc, unsigned long flags)
49 { 49 {
50 struct mv_xor_desc *hw_desc = desc->hw_desc; 50 struct mv_xor_desc *hw_desc = desc->hw_desc;
51 51
52 hw_desc->status = (1 << 31); 52 hw_desc->status = (1 << 31);
53 hw_desc->phy_next_desc = 0; 53 hw_desc->phy_next_desc = 0;
54 hw_desc->desc_command = (1 << 31); 54 hw_desc->desc_command = (1 << 31);
55 } 55 }
56 56
57 static u32 mv_desc_get_dest_addr(struct mv_xor_desc_slot *desc) 57 static u32 mv_desc_get_dest_addr(struct mv_xor_desc_slot *desc)
58 { 58 {
59 struct mv_xor_desc *hw_desc = desc->hw_desc; 59 struct mv_xor_desc *hw_desc = desc->hw_desc;
60 return hw_desc->phy_dest_addr; 60 return hw_desc->phy_dest_addr;
61 } 61 }
62 62
63 static u32 mv_desc_get_src_addr(struct mv_xor_desc_slot *desc, 63 static u32 mv_desc_get_src_addr(struct mv_xor_desc_slot *desc,
64 int src_idx) 64 int src_idx)
65 { 65 {
66 struct mv_xor_desc *hw_desc = desc->hw_desc; 66 struct mv_xor_desc *hw_desc = desc->hw_desc;
67 return hw_desc->phy_src_addr[mv_phy_src_idx(src_idx)]; 67 return hw_desc->phy_src_addr[mv_phy_src_idx(src_idx)];
68 } 68 }
69 69
70 70
71 static void mv_desc_set_byte_count(struct mv_xor_desc_slot *desc, 71 static void mv_desc_set_byte_count(struct mv_xor_desc_slot *desc,
72 u32 byte_count) 72 u32 byte_count)
73 { 73 {
74 struct mv_xor_desc *hw_desc = desc->hw_desc; 74 struct mv_xor_desc *hw_desc = desc->hw_desc;
75 hw_desc->byte_count = byte_count; 75 hw_desc->byte_count = byte_count;
76 } 76 }
77 77
78 static void mv_desc_set_next_desc(struct mv_xor_desc_slot *desc, 78 static void mv_desc_set_next_desc(struct mv_xor_desc_slot *desc,
79 u32 next_desc_addr) 79 u32 next_desc_addr)
80 { 80 {
81 struct mv_xor_desc *hw_desc = desc->hw_desc; 81 struct mv_xor_desc *hw_desc = desc->hw_desc;
82 BUG_ON(hw_desc->phy_next_desc); 82 BUG_ON(hw_desc->phy_next_desc);
83 hw_desc->phy_next_desc = next_desc_addr; 83 hw_desc->phy_next_desc = next_desc_addr;
84 } 84 }
85 85
86 static void mv_desc_clear_next_desc(struct mv_xor_desc_slot *desc) 86 static void mv_desc_clear_next_desc(struct mv_xor_desc_slot *desc)
87 { 87 {
88 struct mv_xor_desc *hw_desc = desc->hw_desc; 88 struct mv_xor_desc *hw_desc = desc->hw_desc;
89 hw_desc->phy_next_desc = 0; 89 hw_desc->phy_next_desc = 0;
90 } 90 }
91 91
92 static void mv_desc_set_dest_addr(struct mv_xor_desc_slot *desc, 92 static void mv_desc_set_dest_addr(struct mv_xor_desc_slot *desc,
93 dma_addr_t addr) 93 dma_addr_t addr)
94 { 94 {
95 struct mv_xor_desc *hw_desc = desc->hw_desc; 95 struct mv_xor_desc *hw_desc = desc->hw_desc;
96 hw_desc->phy_dest_addr = addr; 96 hw_desc->phy_dest_addr = addr;
97 } 97 }
98 98
99 static int mv_chan_memset_slot_count(size_t len) 99 static int mv_chan_memset_slot_count(size_t len)
100 { 100 {
101 return 1; 101 return 1;
102 } 102 }
103 103
104 #define mv_chan_memcpy_slot_count(c) mv_chan_memset_slot_count(c) 104 #define mv_chan_memcpy_slot_count(c) mv_chan_memset_slot_count(c)
105 105
106 static void mv_desc_set_src_addr(struct mv_xor_desc_slot *desc, 106 static void mv_desc_set_src_addr(struct mv_xor_desc_slot *desc,
107 int index, dma_addr_t addr) 107 int index, dma_addr_t addr)
108 { 108 {
109 struct mv_xor_desc *hw_desc = desc->hw_desc; 109 struct mv_xor_desc *hw_desc = desc->hw_desc;
110 hw_desc->phy_src_addr[mv_phy_src_idx(index)] = addr; 110 hw_desc->phy_src_addr[mv_phy_src_idx(index)] = addr;
111 if (desc->type == DMA_XOR) 111 if (desc->type == DMA_XOR)
112 hw_desc->desc_command |= (1 << index); 112 hw_desc->desc_command |= (1 << index);
113 } 113 }
114 114
115 static u32 mv_chan_get_current_desc(struct mv_xor_chan *chan) 115 static u32 mv_chan_get_current_desc(struct mv_xor_chan *chan)
116 { 116 {
117 return readl_relaxed(XOR_CURR_DESC(chan)); 117 return readl_relaxed(XOR_CURR_DESC(chan));
118 } 118 }
119 119
120 static void mv_chan_set_next_descriptor(struct mv_xor_chan *chan, 120 static void mv_chan_set_next_descriptor(struct mv_xor_chan *chan,
121 u32 next_desc_addr) 121 u32 next_desc_addr)
122 { 122 {
123 writel_relaxed(next_desc_addr, XOR_NEXT_DESC(chan)); 123 writel_relaxed(next_desc_addr, XOR_NEXT_DESC(chan));
124 } 124 }
125 125
126 static void mv_chan_unmask_interrupts(struct mv_xor_chan *chan) 126 static void mv_chan_unmask_interrupts(struct mv_xor_chan *chan)
127 { 127 {
128 u32 val = readl_relaxed(XOR_INTR_MASK(chan)); 128 u32 val = readl_relaxed(XOR_INTR_MASK(chan));
129 val |= XOR_INTR_MASK_VALUE << (chan->idx * 16); 129 val |= XOR_INTR_MASK_VALUE << (chan->idx * 16);
130 writel_relaxed(val, XOR_INTR_MASK(chan)); 130 writel_relaxed(val, XOR_INTR_MASK(chan));
131 } 131 }
132 132
133 static u32 mv_chan_get_intr_cause(struct mv_xor_chan *chan) 133 static u32 mv_chan_get_intr_cause(struct mv_xor_chan *chan)
134 { 134 {
135 u32 intr_cause = readl_relaxed(XOR_INTR_CAUSE(chan)); 135 u32 intr_cause = readl_relaxed(XOR_INTR_CAUSE(chan));
136 intr_cause = (intr_cause >> (chan->idx * 16)) & 0xFFFF; 136 intr_cause = (intr_cause >> (chan->idx * 16)) & 0xFFFF;
137 return intr_cause; 137 return intr_cause;
138 } 138 }
139 139
140 static int mv_is_err_intr(u32 intr_cause) 140 static int mv_is_err_intr(u32 intr_cause)
141 { 141 {
142 if (intr_cause & ((1<<4)|(1<<5)|(1<<6)|(1<<7)|(1<<8)|(1<<9))) 142 if (intr_cause & ((1<<4)|(1<<5)|(1<<6)|(1<<7)|(1<<8)|(1<<9)))
143 return 1; 143 return 1;
144 144
145 return 0; 145 return 0;
146 } 146 }
147 147
148 static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan) 148 static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan)
149 { 149 {
150 u32 val = ~(1 << (chan->idx * 16)); 150 u32 val = ~(1 << (chan->idx * 16));
151 dev_dbg(mv_chan_to_devp(chan), "%s, val 0x%08x\n", __func__, val); 151 dev_dbg(mv_chan_to_devp(chan), "%s, val 0x%08x\n", __func__, val);
152 writel_relaxed(val, XOR_INTR_CAUSE(chan)); 152 writel_relaxed(val, XOR_INTR_CAUSE(chan));
153 } 153 }
154 154
155 static void mv_xor_device_clear_err_status(struct mv_xor_chan *chan) 155 static void mv_xor_device_clear_err_status(struct mv_xor_chan *chan)
156 { 156 {
157 u32 val = 0xFFFF0000 >> (chan->idx * 16); 157 u32 val = 0xFFFF0000 >> (chan->idx * 16);
158 writel_relaxed(val, XOR_INTR_CAUSE(chan)); 158 writel_relaxed(val, XOR_INTR_CAUSE(chan));
159 } 159 }
160 160
161 static int mv_can_chain(struct mv_xor_desc_slot *desc) 161 static int mv_can_chain(struct mv_xor_desc_slot *desc)
162 { 162 {
163 struct mv_xor_desc_slot *chain_old_tail = list_entry( 163 struct mv_xor_desc_slot *chain_old_tail = list_entry(
164 desc->chain_node.prev, struct mv_xor_desc_slot, chain_node); 164 desc->chain_node.prev, struct mv_xor_desc_slot, chain_node);
165 165
166 if (chain_old_tail->type != desc->type) 166 if (chain_old_tail->type != desc->type)
167 return 0; 167 return 0;
168 168
169 return 1; 169 return 1;
170 } 170 }
171 171
172 static void mv_set_mode(struct mv_xor_chan *chan, 172 static void mv_set_mode(struct mv_xor_chan *chan,
173 enum dma_transaction_type type) 173 enum dma_transaction_type type)
174 { 174 {
175 u32 op_mode; 175 u32 op_mode;
176 u32 config = readl_relaxed(XOR_CONFIG(chan)); 176 u32 config = readl_relaxed(XOR_CONFIG(chan));
177 177
178 switch (type) { 178 switch (type) {
179 case DMA_XOR: 179 case DMA_XOR:
180 op_mode = XOR_OPERATION_MODE_XOR; 180 op_mode = XOR_OPERATION_MODE_XOR;
181 break; 181 break;
182 case DMA_MEMCPY: 182 case DMA_MEMCPY:
183 op_mode = XOR_OPERATION_MODE_MEMCPY; 183 op_mode = XOR_OPERATION_MODE_MEMCPY;
184 break; 184 break;
185 default: 185 default:
186 dev_err(mv_chan_to_devp(chan), 186 dev_err(mv_chan_to_devp(chan),
187 "error: unsupported operation %d\n", 187 "error: unsupported operation %d\n",
188 type); 188 type);
189 BUG(); 189 BUG();
190 return; 190 return;
191 } 191 }
192 192
193 config &= ~0x7; 193 config &= ~0x7;
194 config |= op_mode; 194 config |= op_mode;
195 195
196 #if defined(__BIG_ENDIAN) 196 #if defined(__BIG_ENDIAN)
197 config |= XOR_DESCRIPTOR_SWAP; 197 config |= XOR_DESCRIPTOR_SWAP;
198 #else 198 #else
199 config &= ~XOR_DESCRIPTOR_SWAP; 199 config &= ~XOR_DESCRIPTOR_SWAP;
200 #endif 200 #endif
201 201
202 writel_relaxed(config, XOR_CONFIG(chan)); 202 writel_relaxed(config, XOR_CONFIG(chan));
203 chan->current_type = type; 203 chan->current_type = type;
204 } 204 }
205 205
206 static void mv_chan_activate(struct mv_xor_chan *chan) 206 static void mv_chan_activate(struct mv_xor_chan *chan)
207 { 207 {
208 u32 activation; 208 u32 activation;
209 209
210 dev_dbg(mv_chan_to_devp(chan), " activate chan.\n"); 210 dev_dbg(mv_chan_to_devp(chan), " activate chan.\n");
211 activation = readl_relaxed(XOR_ACTIVATION(chan)); 211 activation = readl_relaxed(XOR_ACTIVATION(chan));
212 activation |= 0x1; 212 activation |= 0x1;
213 writel_relaxed(activation, XOR_ACTIVATION(chan)); 213 writel_relaxed(activation, XOR_ACTIVATION(chan));
214 } 214 }
215 215
216 static char mv_chan_is_busy(struct mv_xor_chan *chan) 216 static char mv_chan_is_busy(struct mv_xor_chan *chan)
217 { 217 {
218 u32 state = readl_relaxed(XOR_ACTIVATION(chan)); 218 u32 state = readl_relaxed(XOR_ACTIVATION(chan));
219 219
220 state = (state >> 4) & 0x3; 220 state = (state >> 4) & 0x3;
221 221
222 return (state == 1) ? 1 : 0; 222 return (state == 1) ? 1 : 0;
223 } 223 }
224 224
225 static int mv_chan_xor_slot_count(size_t len, int src_cnt) 225 static int mv_chan_xor_slot_count(size_t len, int src_cnt)
226 { 226 {
227 return 1; 227 return 1;
228 } 228 }
229 229
230 /** 230 /**
231 * mv_xor_free_slots - flags descriptor slots for reuse 231 * mv_xor_free_slots - flags descriptor slots for reuse
232 * @slot: Slot to free 232 * @slot: Slot to free
233 * Caller must hold &mv_chan->lock while calling this function 233 * Caller must hold &mv_chan->lock while calling this function
234 */ 234 */
235 static void mv_xor_free_slots(struct mv_xor_chan *mv_chan, 235 static void mv_xor_free_slots(struct mv_xor_chan *mv_chan,
236 struct mv_xor_desc_slot *slot) 236 struct mv_xor_desc_slot *slot)
237 { 237 {
238 dev_dbg(mv_chan_to_devp(mv_chan), "%s %d slot %p\n", 238 dev_dbg(mv_chan_to_devp(mv_chan), "%s %d slot %p\n",
239 __func__, __LINE__, slot); 239 __func__, __LINE__, slot);
240 240
241 slot->slots_per_op = 0; 241 slot->slots_per_op = 0;
242 242
243 } 243 }
244 244
245 /* 245 /*
246 * mv_xor_start_new_chain - program the engine to operate on new chain headed by 246 * mv_xor_start_new_chain - program the engine to operate on new chain headed by
247 * sw_desc 247 * sw_desc
248 * Caller must hold &mv_chan->lock while calling this function 248 * Caller must hold &mv_chan->lock while calling this function
249 */ 249 */
250 static void mv_xor_start_new_chain(struct mv_xor_chan *mv_chan, 250 static void mv_xor_start_new_chain(struct mv_xor_chan *mv_chan,
251 struct mv_xor_desc_slot *sw_desc) 251 struct mv_xor_desc_slot *sw_desc)
252 { 252 {
253 dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: sw_desc %p\n", 253 dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: sw_desc %p\n",
254 __func__, __LINE__, sw_desc); 254 __func__, __LINE__, sw_desc);
255 if (sw_desc->type != mv_chan->current_type) 255 if (sw_desc->type != mv_chan->current_type)
256 mv_set_mode(mv_chan, sw_desc->type); 256 mv_set_mode(mv_chan, sw_desc->type);
257 257
258 /* set the hardware chain */ 258 /* set the hardware chain */
259 mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys); 259 mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys);
260 260
261 mv_chan->pending += sw_desc->slot_cnt; 261 mv_chan->pending += sw_desc->slot_cnt;
262 mv_xor_issue_pending(&mv_chan->dmachan); 262 mv_xor_issue_pending(&mv_chan->dmachan);
263 } 263 }
264 264
265 static dma_cookie_t 265 static dma_cookie_t
266 mv_xor_run_tx_complete_actions(struct mv_xor_desc_slot *desc, 266 mv_xor_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
267 struct mv_xor_chan *mv_chan, dma_cookie_t cookie) 267 struct mv_xor_chan *mv_chan, dma_cookie_t cookie)
268 { 268 {
269 BUG_ON(desc->async_tx.cookie < 0); 269 BUG_ON(desc->async_tx.cookie < 0);
270 270
271 if (desc->async_tx.cookie > 0) { 271 if (desc->async_tx.cookie > 0) {
272 cookie = desc->async_tx.cookie; 272 cookie = desc->async_tx.cookie;
273 273
274 /* call the callback (must not sleep or submit new 274 /* call the callback (must not sleep or submit new
275 * operations to this channel) 275 * operations to this channel)
276 */ 276 */
277 if (desc->async_tx.callback) 277 if (desc->async_tx.callback)
278 desc->async_tx.callback( 278 desc->async_tx.callback(
279 desc->async_tx.callback_param); 279 desc->async_tx.callback_param);
280 280
281 /* unmap dma addresses 281 /* unmap dma addresses
282 * (unmap_single vs unmap_page?) 282 * (unmap_single vs unmap_page?)
283 */ 283 */
284 if (desc->group_head && desc->unmap_len) { 284 if (desc->group_head && desc->unmap_len) {
285 struct mv_xor_desc_slot *unmap = desc->group_head; 285 struct mv_xor_desc_slot *unmap = desc->group_head;
286 struct device *dev = mv_chan_to_devp(mv_chan); 286 struct device *dev = mv_chan_to_devp(mv_chan);
287 u32 len = unmap->unmap_len; 287 u32 len = unmap->unmap_len;
288 enum dma_ctrl_flags flags = desc->async_tx.flags; 288 enum dma_ctrl_flags flags = desc->async_tx.flags;
289 u32 src_cnt; 289 u32 src_cnt;
290 dma_addr_t addr; 290 dma_addr_t addr;
291 dma_addr_t dest; 291 dma_addr_t dest;
292 292
293 src_cnt = unmap->unmap_src_cnt; 293 src_cnt = unmap->unmap_src_cnt;
294 dest = mv_desc_get_dest_addr(unmap); 294 dest = mv_desc_get_dest_addr(unmap);
295 if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) { 295 if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
296 enum dma_data_direction dir; 296 enum dma_data_direction dir;
297 297
298 if (src_cnt > 1) /* is xor ? */ 298 if (src_cnt > 1) /* is xor ? */
299 dir = DMA_BIDIRECTIONAL; 299 dir = DMA_BIDIRECTIONAL;
300 else 300 else
301 dir = DMA_FROM_DEVICE; 301 dir = DMA_FROM_DEVICE;
302 dma_unmap_page(dev, dest, len, dir); 302 dma_unmap_page(dev, dest, len, dir);
303 } 303 }
304 304
305 if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) { 305 if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
306 while (src_cnt--) { 306 while (src_cnt--) {
307 addr = mv_desc_get_src_addr(unmap, 307 addr = mv_desc_get_src_addr(unmap,
308 src_cnt); 308 src_cnt);
309 if (addr == dest) 309 if (addr == dest)
310 continue; 310 continue;
311 dma_unmap_page(dev, addr, len, 311 dma_unmap_page(dev, addr, len,
312 DMA_TO_DEVICE); 312 DMA_TO_DEVICE);
313 } 313 }
314 } 314 }
315 desc->group_head = NULL; 315 desc->group_head = NULL;
316 } 316 }
317 } 317 }
318 318
319 /* run dependent operations */ 319 /* run dependent operations */
320 dma_run_dependencies(&desc->async_tx); 320 dma_run_dependencies(&desc->async_tx);
321 321
322 return cookie; 322 return cookie;
323 } 323 }
324 324
325 static int 325 static int
326 mv_xor_clean_completed_slots(struct mv_xor_chan *mv_chan) 326 mv_xor_clean_completed_slots(struct mv_xor_chan *mv_chan)
327 { 327 {
328 struct mv_xor_desc_slot *iter, *_iter; 328 struct mv_xor_desc_slot *iter, *_iter;
329 329
330 dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__); 330 dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
331 list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots, 331 list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
332 completed_node) { 332 completed_node) {
333 333
334 if (async_tx_test_ack(&iter->async_tx)) { 334 if (async_tx_test_ack(&iter->async_tx)) {
335 list_del(&iter->completed_node); 335 list_del(&iter->completed_node);
336 mv_xor_free_slots(mv_chan, iter); 336 mv_xor_free_slots(mv_chan, iter);
337 } 337 }
338 } 338 }
339 return 0; 339 return 0;
340 } 340 }
341 341
342 static int 342 static int
343 mv_xor_clean_slot(struct mv_xor_desc_slot *desc, 343 mv_xor_clean_slot(struct mv_xor_desc_slot *desc,
344 struct mv_xor_chan *mv_chan) 344 struct mv_xor_chan *mv_chan)
345 { 345 {
346 dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: desc %p flags %d\n", 346 dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: desc %p flags %d\n",
347 __func__, __LINE__, desc, desc->async_tx.flags); 347 __func__, __LINE__, desc, desc->async_tx.flags);
348 list_del(&desc->chain_node); 348 list_del(&desc->chain_node);
349 /* the client is allowed to attach dependent operations 349 /* the client is allowed to attach dependent operations
350 * until 'ack' is set 350 * until 'ack' is set
351 */ 351 */
352 if (!async_tx_test_ack(&desc->async_tx)) { 352 if (!async_tx_test_ack(&desc->async_tx)) {
353 /* move this slot to the completed_slots */ 353 /* move this slot to the completed_slots */
354 list_add_tail(&desc->completed_node, &mv_chan->completed_slots); 354 list_add_tail(&desc->completed_node, &mv_chan->completed_slots);
355 return 0; 355 return 0;
356 } 356 }
357 357
358 mv_xor_free_slots(mv_chan, desc); 358 mv_xor_free_slots(mv_chan, desc);
359 return 0; 359 return 0;
360 } 360 }
361 361
362 static void __mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan) 362 static void __mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
363 { 363 {
364 struct mv_xor_desc_slot *iter, *_iter; 364 struct mv_xor_desc_slot *iter, *_iter;
365 dma_cookie_t cookie = 0; 365 dma_cookie_t cookie = 0;
366 int busy = mv_chan_is_busy(mv_chan); 366 int busy = mv_chan_is_busy(mv_chan);
367 u32 current_desc = mv_chan_get_current_desc(mv_chan); 367 u32 current_desc = mv_chan_get_current_desc(mv_chan);
368 int seen_current = 0; 368 int seen_current = 0;
369 369
370 dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__); 370 dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
371 dev_dbg(mv_chan_to_devp(mv_chan), "current_desc %x\n", current_desc); 371 dev_dbg(mv_chan_to_devp(mv_chan), "current_desc %x\n", current_desc);
372 mv_xor_clean_completed_slots(mv_chan); 372 mv_xor_clean_completed_slots(mv_chan);
373 373
374 /* free completed slots from the chain starting with 374 /* free completed slots from the chain starting with
375 * the oldest descriptor 375 * the oldest descriptor
376 */ 376 */
377 377
378 list_for_each_entry_safe(iter, _iter, &mv_chan->chain, 378 list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
379 chain_node) { 379 chain_node) {
380 prefetch(_iter); 380 prefetch(_iter);
381 prefetch(&_iter->async_tx); 381 prefetch(&_iter->async_tx);
382 382
383 /* do not advance past the current descriptor loaded into the 383 /* do not advance past the current descriptor loaded into the
384 * hardware channel, subsequent descriptors are either in 384 * hardware channel, subsequent descriptors are either in
385 * process or have not been submitted 385 * process or have not been submitted
386 */ 386 */
387 if (seen_current) 387 if (seen_current)
388 break; 388 break;
389 389
390 /* stop the search if we reach the current descriptor and the 390 /* stop the search if we reach the current descriptor and the
391 * channel is busy 391 * channel is busy
392 */ 392 */
393 if (iter->async_tx.phys == current_desc) { 393 if (iter->async_tx.phys == current_desc) {
394 seen_current = 1; 394 seen_current = 1;
395 if (busy) 395 if (busy)
396 break; 396 break;
397 } 397 }
398 398
399 cookie = mv_xor_run_tx_complete_actions(iter, mv_chan, cookie); 399 cookie = mv_xor_run_tx_complete_actions(iter, mv_chan, cookie);
400 400
401 if (mv_xor_clean_slot(iter, mv_chan)) 401 if (mv_xor_clean_slot(iter, mv_chan))
402 break; 402 break;
403 } 403 }
404 404
405 if ((busy == 0) && !list_empty(&mv_chan->chain)) { 405 if ((busy == 0) && !list_empty(&mv_chan->chain)) {
406 struct mv_xor_desc_slot *chain_head; 406 struct mv_xor_desc_slot *chain_head;
407 chain_head = list_entry(mv_chan->chain.next, 407 chain_head = list_entry(mv_chan->chain.next,
408 struct mv_xor_desc_slot, 408 struct mv_xor_desc_slot,
409 chain_node); 409 chain_node);
410 410
411 mv_xor_start_new_chain(mv_chan, chain_head); 411 mv_xor_start_new_chain(mv_chan, chain_head);
412 } 412 }
413 413
414 if (cookie > 0) 414 if (cookie > 0)
415 mv_chan->dmachan.completed_cookie = cookie; 415 mv_chan->dmachan.completed_cookie = cookie;
416 } 416 }
417 417
418 static void 418 static void
419 mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan) 419 mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
420 { 420 {
421 spin_lock_bh(&mv_chan->lock); 421 spin_lock_bh(&mv_chan->lock);
422 __mv_xor_slot_cleanup(mv_chan); 422 __mv_xor_slot_cleanup(mv_chan);
423 spin_unlock_bh(&mv_chan->lock); 423 spin_unlock_bh(&mv_chan->lock);
424 } 424 }
425 425
426 static void mv_xor_tasklet(unsigned long data) 426 static void mv_xor_tasklet(unsigned long data)
427 { 427 {
428 struct mv_xor_chan *chan = (struct mv_xor_chan *) data; 428 struct mv_xor_chan *chan = (struct mv_xor_chan *) data;
429 mv_xor_slot_cleanup(chan); 429 mv_xor_slot_cleanup(chan);
430 } 430 }
431 431
432 static struct mv_xor_desc_slot * 432 static struct mv_xor_desc_slot *
433 mv_xor_alloc_slots(struct mv_xor_chan *mv_chan, int num_slots, 433 mv_xor_alloc_slots(struct mv_xor_chan *mv_chan, int num_slots,
434 int slots_per_op) 434 int slots_per_op)
435 { 435 {
436 struct mv_xor_desc_slot *iter, *_iter, *alloc_start = NULL; 436 struct mv_xor_desc_slot *iter, *_iter, *alloc_start = NULL;
437 LIST_HEAD(chain); 437 LIST_HEAD(chain);
438 int slots_found, retry = 0; 438 int slots_found, retry = 0;
439 439
440 /* start search from the last allocated descriptor 440 /* start search from the last allocated descriptor
441 * if a contiguous allocation cannot be found start searching 441 * if a contiguous allocation cannot be found start searching
442 * from the beginning of the list 442 * from the beginning of the list
443 */ 443 */
444 retry: 444 retry:
445 slots_found = 0; 445 slots_found = 0;
446 if (retry == 0) 446 if (retry == 0)
447 iter = mv_chan->last_used; 447 iter = mv_chan->last_used;
448 else 448 else
449 iter = list_entry(&mv_chan->all_slots, 449 iter = list_entry(&mv_chan->all_slots,
450 struct mv_xor_desc_slot, 450 struct mv_xor_desc_slot,
451 slot_node); 451 slot_node);
452 452
453 list_for_each_entry_safe_continue( 453 list_for_each_entry_safe_continue(
454 iter, _iter, &mv_chan->all_slots, slot_node) { 454 iter, _iter, &mv_chan->all_slots, slot_node) {
455 prefetch(_iter); 455 prefetch(_iter);
456 prefetch(&_iter->async_tx); 456 prefetch(&_iter->async_tx);
457 if (iter->slots_per_op) { 457 if (iter->slots_per_op) {
458 /* give up after finding the first busy slot 458 /* give up after finding the first busy slot
459 * on the second pass through the list 459 * on the second pass through the list
460 */ 460 */
461 if (retry) 461 if (retry)
462 break; 462 break;
463 463
464 slots_found = 0; 464 slots_found = 0;
465 continue; 465 continue;
466 } 466 }
467 467
468 /* start the allocation if the slot is correctly aligned */ 468 /* start the allocation if the slot is correctly aligned */
469 if (!slots_found++) 469 if (!slots_found++)
470 alloc_start = iter; 470 alloc_start = iter;
471 471
472 if (slots_found == num_slots) { 472 if (slots_found == num_slots) {
473 struct mv_xor_desc_slot *alloc_tail = NULL; 473 struct mv_xor_desc_slot *alloc_tail = NULL;
474 struct mv_xor_desc_slot *last_used = NULL; 474 struct mv_xor_desc_slot *last_used = NULL;
475 iter = alloc_start; 475 iter = alloc_start;
476 while (num_slots) { 476 while (num_slots) {
477 int i; 477 int i;
478 478
479 /* pre-ack all but the last descriptor */ 479 /* pre-ack all but the last descriptor */
480 async_tx_ack(&iter->async_tx); 480 async_tx_ack(&iter->async_tx);
481 481
482 list_add_tail(&iter->chain_node, &chain); 482 list_add_tail(&iter->chain_node, &chain);
483 alloc_tail = iter; 483 alloc_tail = iter;
484 iter->async_tx.cookie = 0; 484 iter->async_tx.cookie = 0;
485 iter->slot_cnt = num_slots; 485 iter->slot_cnt = num_slots;
486 iter->xor_check_result = NULL; 486 iter->xor_check_result = NULL;
487 for (i = 0; i < slots_per_op; i++) { 487 for (i = 0; i < slots_per_op; i++) {
488 iter->slots_per_op = slots_per_op - i; 488 iter->slots_per_op = slots_per_op - i;
489 last_used = iter; 489 last_used = iter;
490 iter = list_entry(iter->slot_node.next, 490 iter = list_entry(iter->slot_node.next,
491 struct mv_xor_desc_slot, 491 struct mv_xor_desc_slot,
492 slot_node); 492 slot_node);
493 } 493 }
494 num_slots -= slots_per_op; 494 num_slots -= slots_per_op;
495 } 495 }
496 alloc_tail->group_head = alloc_start; 496 alloc_tail->group_head = alloc_start;
497 alloc_tail->async_tx.cookie = -EBUSY; 497 alloc_tail->async_tx.cookie = -EBUSY;
498 list_splice(&chain, &alloc_tail->tx_list); 498 list_splice(&chain, &alloc_tail->tx_list);
499 mv_chan->last_used = last_used; 499 mv_chan->last_used = last_used;
500 mv_desc_clear_next_desc(alloc_start); 500 mv_desc_clear_next_desc(alloc_start);
501 mv_desc_clear_next_desc(alloc_tail); 501 mv_desc_clear_next_desc(alloc_tail);
502 return alloc_tail; 502 return alloc_tail;
503 } 503 }
504 } 504 }
505 if (!retry++) 505 if (!retry++)
506 goto retry; 506 goto retry;
507 507
508 /* try to free some slots if the allocation fails */ 508 /* try to free some slots if the allocation fails */
509 tasklet_schedule(&mv_chan->irq_tasklet); 509 tasklet_schedule(&mv_chan->irq_tasklet);
510 510
511 return NULL; 511 return NULL;
512 } 512 }
513 513
514 /************************ DMA engine API functions ****************************/ 514 /************************ DMA engine API functions ****************************/
515 static dma_cookie_t 515 static dma_cookie_t
516 mv_xor_tx_submit(struct dma_async_tx_descriptor *tx) 516 mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
517 { 517 {
518 struct mv_xor_desc_slot *sw_desc = to_mv_xor_slot(tx); 518 struct mv_xor_desc_slot *sw_desc = to_mv_xor_slot(tx);
519 struct mv_xor_chan *mv_chan = to_mv_xor_chan(tx->chan); 519 struct mv_xor_chan *mv_chan = to_mv_xor_chan(tx->chan);
520 struct mv_xor_desc_slot *grp_start, *old_chain_tail; 520 struct mv_xor_desc_slot *grp_start, *old_chain_tail;
521 dma_cookie_t cookie; 521 dma_cookie_t cookie;
522 int new_hw_chain = 1; 522 int new_hw_chain = 1;
523 523
524 dev_dbg(mv_chan_to_devp(mv_chan), 524 dev_dbg(mv_chan_to_devp(mv_chan),
525 "%s sw_desc %p: async_tx %p\n", 525 "%s sw_desc %p: async_tx %p\n",
526 __func__, sw_desc, &sw_desc->async_tx); 526 __func__, sw_desc, &sw_desc->async_tx);
527 527
528 grp_start = sw_desc->group_head; 528 grp_start = sw_desc->group_head;
529 529
530 spin_lock_bh(&mv_chan->lock); 530 spin_lock_bh(&mv_chan->lock);
531 cookie = dma_cookie_assign(tx); 531 cookie = dma_cookie_assign(tx);
532 532
533 if (list_empty(&mv_chan->chain)) 533 if (list_empty(&mv_chan->chain))
534 list_splice_init(&sw_desc->tx_list, &mv_chan->chain); 534 list_splice_init(&sw_desc->tx_list, &mv_chan->chain);
535 else { 535 else {
536 new_hw_chain = 0; 536 new_hw_chain = 0;
537 537
538 old_chain_tail = list_entry(mv_chan->chain.prev, 538 old_chain_tail = list_entry(mv_chan->chain.prev,
539 struct mv_xor_desc_slot, 539 struct mv_xor_desc_slot,
540 chain_node); 540 chain_node);
541 list_splice_init(&grp_start->tx_list, 541 list_splice_init(&grp_start->tx_list,
542 &old_chain_tail->chain_node); 542 &old_chain_tail->chain_node);
543 543
544 if (!mv_can_chain(grp_start)) 544 if (!mv_can_chain(grp_start))
545 goto submit_done; 545 goto submit_done;
546 546
547 dev_dbg(mv_chan_to_devp(mv_chan), "Append to last desc %x\n", 547 dev_dbg(mv_chan_to_devp(mv_chan), "Append to last desc %x\n",
548 old_chain_tail->async_tx.phys); 548 old_chain_tail->async_tx.phys);
549 549
550 /* fix up the hardware chain */ 550 /* fix up the hardware chain */
551 mv_desc_set_next_desc(old_chain_tail, grp_start->async_tx.phys); 551 mv_desc_set_next_desc(old_chain_tail, grp_start->async_tx.phys);
552 552
553 /* if the channel is not busy */ 553 /* if the channel is not busy */
554 if (!mv_chan_is_busy(mv_chan)) { 554 if (!mv_chan_is_busy(mv_chan)) {
555 u32 current_desc = mv_chan_get_current_desc(mv_chan); 555 u32 current_desc = mv_chan_get_current_desc(mv_chan);
556 /* 556 /*
557 * and the current desc is the end of the chain before 557 * and the current desc is the end of the chain before
558 * the append, then we need to start the channel 558 * the append, then we need to start the channel
559 */ 559 */
560 if (current_desc == old_chain_tail->async_tx.phys) 560 if (current_desc == old_chain_tail->async_tx.phys)
561 new_hw_chain = 1; 561 new_hw_chain = 1;
562 } 562 }
563 } 563 }
564 564
565 if (new_hw_chain) 565 if (new_hw_chain)
566 mv_xor_start_new_chain(mv_chan, grp_start); 566 mv_xor_start_new_chain(mv_chan, grp_start);
567 567
568 submit_done: 568 submit_done:
569 spin_unlock_bh(&mv_chan->lock); 569 spin_unlock_bh(&mv_chan->lock);
570 570
571 return cookie; 571 return cookie;
572 } 572 }
573 573
574 /* returns the number of allocated descriptors */ 574 /* returns the number of allocated descriptors */
575 static int mv_xor_alloc_chan_resources(struct dma_chan *chan) 575 static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
576 { 576 {
577 char *hw_desc; 577 char *hw_desc;
578 int idx; 578 int idx;
579 struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan); 579 struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
580 struct mv_xor_desc_slot *slot = NULL; 580 struct mv_xor_desc_slot *slot = NULL;
581 int num_descs_in_pool = MV_XOR_POOL_SIZE/MV_XOR_SLOT_SIZE; 581 int num_descs_in_pool = MV_XOR_POOL_SIZE/MV_XOR_SLOT_SIZE;
582 582
583 /* Allocate descriptor slots */ 583 /* Allocate descriptor slots */
584 idx = mv_chan->slots_allocated; 584 idx = mv_chan->slots_allocated;
585 while (idx < num_descs_in_pool) { 585 while (idx < num_descs_in_pool) {
586 slot = kzalloc(sizeof(*slot), GFP_KERNEL); 586 slot = kzalloc(sizeof(*slot), GFP_KERNEL);
587 if (!slot) { 587 if (!slot) {
588 printk(KERN_INFO "MV XOR Channel only initialized" 588 printk(KERN_INFO "MV XOR Channel only initialized"
589 " %d descriptor slots", idx); 589 " %d descriptor slots", idx);
590 break; 590 break;
591 } 591 }
592 hw_desc = (char *) mv_chan->dma_desc_pool_virt; 592 hw_desc = (char *) mv_chan->dma_desc_pool_virt;
593 slot->hw_desc = (void *) &hw_desc[idx * MV_XOR_SLOT_SIZE]; 593 slot->hw_desc = (void *) &hw_desc[idx * MV_XOR_SLOT_SIZE];
594 594
595 dma_async_tx_descriptor_init(&slot->async_tx, chan); 595 dma_async_tx_descriptor_init(&slot->async_tx, chan);
596 slot->async_tx.tx_submit = mv_xor_tx_submit; 596 slot->async_tx.tx_submit = mv_xor_tx_submit;
597 INIT_LIST_HEAD(&slot->chain_node); 597 INIT_LIST_HEAD(&slot->chain_node);
598 INIT_LIST_HEAD(&slot->slot_node); 598 INIT_LIST_HEAD(&slot->slot_node);
599 INIT_LIST_HEAD(&slot->tx_list); 599 INIT_LIST_HEAD(&slot->tx_list);
600 hw_desc = (char *) mv_chan->dma_desc_pool; 600 hw_desc = (char *) mv_chan->dma_desc_pool;
601 slot->async_tx.phys = 601 slot->async_tx.phys =
602 (dma_addr_t) &hw_desc[idx * MV_XOR_SLOT_SIZE]; 602 (dma_addr_t) &hw_desc[idx * MV_XOR_SLOT_SIZE];
603 slot->idx = idx++; 603 slot->idx = idx++;
604 604
605 spin_lock_bh(&mv_chan->lock); 605 spin_lock_bh(&mv_chan->lock);
606 mv_chan->slots_allocated = idx; 606 mv_chan->slots_allocated = idx;
607 list_add_tail(&slot->slot_node, &mv_chan->all_slots); 607 list_add_tail(&slot->slot_node, &mv_chan->all_slots);
608 spin_unlock_bh(&mv_chan->lock); 608 spin_unlock_bh(&mv_chan->lock);
609 } 609 }
610 610
611 if (mv_chan->slots_allocated && !mv_chan->last_used) 611 if (mv_chan->slots_allocated && !mv_chan->last_used)
612 mv_chan->last_used = list_entry(mv_chan->all_slots.next, 612 mv_chan->last_used = list_entry(mv_chan->all_slots.next,
613 struct mv_xor_desc_slot, 613 struct mv_xor_desc_slot,
614 slot_node); 614 slot_node);
615 615
616 dev_dbg(mv_chan_to_devp(mv_chan), 616 dev_dbg(mv_chan_to_devp(mv_chan),
617 "allocated %d descriptor slots last_used: %p\n", 617 "allocated %d descriptor slots last_used: %p\n",
618 mv_chan->slots_allocated, mv_chan->last_used); 618 mv_chan->slots_allocated, mv_chan->last_used);
619 619
620 return mv_chan->slots_allocated ? : -ENOMEM; 620 return mv_chan->slots_allocated ? : -ENOMEM;
621 } 621 }
622 622
623 static struct dma_async_tx_descriptor * 623 static struct dma_async_tx_descriptor *
624 mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, 624 mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
625 size_t len, unsigned long flags) 625 size_t len, unsigned long flags)
626 { 626 {
627 struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan); 627 struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
628 struct mv_xor_desc_slot *sw_desc, *grp_start; 628 struct mv_xor_desc_slot *sw_desc, *grp_start;
629 int slot_cnt; 629 int slot_cnt;
630 630
631 dev_dbg(mv_chan_to_devp(mv_chan), 631 dev_dbg(mv_chan_to_devp(mv_chan),
632 "%s dest: %x src %x len: %u flags: %ld\n", 632 "%s dest: %x src %x len: %u flags: %ld\n",
633 __func__, dest, src, len, flags); 633 __func__, dest, src, len, flags);
634 if (unlikely(len < MV_XOR_MIN_BYTE_COUNT)) 634 if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
635 return NULL; 635 return NULL;
636 636
637 BUG_ON(len > MV_XOR_MAX_BYTE_COUNT); 637 BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);
638 638
639 spin_lock_bh(&mv_chan->lock); 639 spin_lock_bh(&mv_chan->lock);
640 slot_cnt = mv_chan_memcpy_slot_count(len); 640 slot_cnt = mv_chan_memcpy_slot_count(len);
641 sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1); 641 sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
642 if (sw_desc) { 642 if (sw_desc) {
643 sw_desc->type = DMA_MEMCPY; 643 sw_desc->type = DMA_MEMCPY;
644 sw_desc->async_tx.flags = flags; 644 sw_desc->async_tx.flags = flags;
645 grp_start = sw_desc->group_head; 645 grp_start = sw_desc->group_head;
646 mv_desc_init(grp_start, flags); 646 mv_desc_init(grp_start, flags);
647 mv_desc_set_byte_count(grp_start, len); 647 mv_desc_set_byte_count(grp_start, len);
648 mv_desc_set_dest_addr(sw_desc->group_head, dest); 648 mv_desc_set_dest_addr(sw_desc->group_head, dest);
649 mv_desc_set_src_addr(grp_start, 0, src); 649 mv_desc_set_src_addr(grp_start, 0, src);
650 sw_desc->unmap_src_cnt = 1; 650 sw_desc->unmap_src_cnt = 1;
651 sw_desc->unmap_len = len; 651 sw_desc->unmap_len = len;
652 } 652 }
653 spin_unlock_bh(&mv_chan->lock); 653 spin_unlock_bh(&mv_chan->lock);
654 654
655 dev_dbg(mv_chan_to_devp(mv_chan), 655 dev_dbg(mv_chan_to_devp(mv_chan),
656 "%s sw_desc %p async_tx %p\n", 656 "%s sw_desc %p async_tx %p\n",
657 __func__, sw_desc, sw_desc ? &sw_desc->async_tx : NULL); 657 __func__, sw_desc, sw_desc ? &sw_desc->async_tx : NULL);
658 658
659 return sw_desc ? &sw_desc->async_tx : NULL; 659 return sw_desc ? &sw_desc->async_tx : NULL;
660 } 660 }
661 661
662 static struct dma_async_tx_descriptor * 662 static struct dma_async_tx_descriptor *
663 mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src, 663 mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
664 unsigned int src_cnt, size_t len, unsigned long flags) 664 unsigned int src_cnt, size_t len, unsigned long flags)
665 { 665 {
666 struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan); 666 struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
667 struct mv_xor_desc_slot *sw_desc, *grp_start; 667 struct mv_xor_desc_slot *sw_desc, *grp_start;
668 int slot_cnt; 668 int slot_cnt;
669 669
670 if (unlikely(len < MV_XOR_MIN_BYTE_COUNT)) 670 if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
671 return NULL; 671 return NULL;
672 672
673 BUG_ON(len > MV_XOR_MAX_BYTE_COUNT); 673 BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);
674 674
675 dev_dbg(mv_chan_to_devp(mv_chan), 675 dev_dbg(mv_chan_to_devp(mv_chan),
676 "%s src_cnt: %d len: dest %x %u flags: %ld\n", 676 "%s src_cnt: %d len: dest %x %u flags: %ld\n",
677 __func__, src_cnt, len, dest, flags); 677 __func__, src_cnt, len, dest, flags);
678 678
679 spin_lock_bh(&mv_chan->lock); 679 spin_lock_bh(&mv_chan->lock);
680 slot_cnt = mv_chan_xor_slot_count(len, src_cnt); 680 slot_cnt = mv_chan_xor_slot_count(len, src_cnt);
681 sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1); 681 sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
682 if (sw_desc) { 682 if (sw_desc) {
683 sw_desc->type = DMA_XOR; 683 sw_desc->type = DMA_XOR;
684 sw_desc->async_tx.flags = flags; 684 sw_desc->async_tx.flags = flags;
685 grp_start = sw_desc->group_head; 685 grp_start = sw_desc->group_head;
686 mv_desc_init(grp_start, flags); 686 mv_desc_init(grp_start, flags);
687 /* the byte count field is the same as in memcpy desc */ 687 /* the byte count field is the same as in memcpy desc */
688 mv_desc_set_byte_count(grp_start, len); 688 mv_desc_set_byte_count(grp_start, len);
689 mv_desc_set_dest_addr(sw_desc->group_head, dest); 689 mv_desc_set_dest_addr(sw_desc->group_head, dest);
690 sw_desc->unmap_src_cnt = src_cnt; 690 sw_desc->unmap_src_cnt = src_cnt;
691 sw_desc->unmap_len = len; 691 sw_desc->unmap_len = len;
692 while (src_cnt--) 692 while (src_cnt--)
693 mv_desc_set_src_addr(grp_start, src_cnt, src[src_cnt]); 693 mv_desc_set_src_addr(grp_start, src_cnt, src[src_cnt]);
694 } 694 }
695 spin_unlock_bh(&mv_chan->lock); 695 spin_unlock_bh(&mv_chan->lock);
696 dev_dbg(mv_chan_to_devp(mv_chan), 696 dev_dbg(mv_chan_to_devp(mv_chan),
697 "%s sw_desc %p async_tx %p \n", 697 "%s sw_desc %p async_tx %p \n",
698 __func__, sw_desc, &sw_desc->async_tx); 698 __func__, sw_desc, &sw_desc->async_tx);
699 return sw_desc ? &sw_desc->async_tx : NULL; 699 return sw_desc ? &sw_desc->async_tx : NULL;
700 } 700 }
701 701
702 static void mv_xor_free_chan_resources(struct dma_chan *chan) 702 static void mv_xor_free_chan_resources(struct dma_chan *chan)
703 { 703 {
704 struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan); 704 struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
705 struct mv_xor_desc_slot *iter, *_iter; 705 struct mv_xor_desc_slot *iter, *_iter;
706 int in_use_descs = 0; 706 int in_use_descs = 0;
707 707
708 mv_xor_slot_cleanup(mv_chan); 708 mv_xor_slot_cleanup(mv_chan);
709 709
710 spin_lock_bh(&mv_chan->lock); 710 spin_lock_bh(&mv_chan->lock);
711 list_for_each_entry_safe(iter, _iter, &mv_chan->chain, 711 list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
712 chain_node) { 712 chain_node) {
713 in_use_descs++; 713 in_use_descs++;
714 list_del(&iter->chain_node); 714 list_del(&iter->chain_node);
715 } 715 }
716 list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots, 716 list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
717 completed_node) { 717 completed_node) {
718 in_use_descs++; 718 in_use_descs++;
719 list_del(&iter->completed_node); 719 list_del(&iter->completed_node);
720 } 720 }
721 list_for_each_entry_safe_reverse( 721 list_for_each_entry_safe_reverse(
722 iter, _iter, &mv_chan->all_slots, slot_node) { 722 iter, _iter, &mv_chan->all_slots, slot_node) {
723 list_del(&iter->slot_node); 723 list_del(&iter->slot_node);
724 kfree(iter); 724 kfree(iter);
725 mv_chan->slots_allocated--; 725 mv_chan->slots_allocated--;
726 } 726 }
727 mv_chan->last_used = NULL; 727 mv_chan->last_used = NULL;
728 728
729 dev_dbg(mv_chan_to_devp(mv_chan), "%s slots_allocated %d\n", 729 dev_dbg(mv_chan_to_devp(mv_chan), "%s slots_allocated %d\n",
730 __func__, mv_chan->slots_allocated); 730 __func__, mv_chan->slots_allocated);
731 spin_unlock_bh(&mv_chan->lock); 731 spin_unlock_bh(&mv_chan->lock);
732 732
733 if (in_use_descs) 733 if (in_use_descs)
734 dev_err(mv_chan_to_devp(mv_chan), 734 dev_err(mv_chan_to_devp(mv_chan),
735 "freeing %d in use descriptors!\n", in_use_descs); 735 "freeing %d in use descriptors!\n", in_use_descs);
736 } 736 }
737 737
738 /** 738 /**
739 * mv_xor_status - poll the status of an XOR transaction 739 * mv_xor_status - poll the status of an XOR transaction
740 * @chan: XOR channel handle 740 * @chan: XOR channel handle
741 * @cookie: XOR transaction identifier 741 * @cookie: XOR transaction identifier
742 * @txstate: XOR transaction's state holder (or NULL) 742 * @txstate: XOR transaction's state holder (or NULL)
743 */ 743 */
744 static enum dma_status mv_xor_status(struct dma_chan *chan, 744 static enum dma_status mv_xor_status(struct dma_chan *chan,
745 dma_cookie_t cookie, 745 dma_cookie_t cookie,
746 struct dma_tx_state *txstate) 746 struct dma_tx_state *txstate)
747 { 747 {
748 struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan); 748 struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
749 enum dma_status ret; 749 enum dma_status ret;
750 750
751 ret = dma_cookie_status(chan, cookie, txstate); 751 ret = dma_cookie_status(chan, cookie, txstate);
752 if (ret == DMA_SUCCESS) { 752 if (ret == DMA_COMPLETE) {
753 mv_xor_clean_completed_slots(mv_chan); 753 mv_xor_clean_completed_slots(mv_chan);
754 return ret; 754 return ret;
755 } 755 }
756 mv_xor_slot_cleanup(mv_chan); 756 mv_xor_slot_cleanup(mv_chan);
757 757
758 return dma_cookie_status(chan, cookie, txstate); 758 return dma_cookie_status(chan, cookie, txstate);
759 } 759 }
760 760
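As an illustration only (not part of this driver), a dmaengine client would normally wait on the cookie returned at submit time through the generic helper, which reaches mv_xor_status() via the device_tx_status hook. A minimal polling sketch, assuming a valid dma_chan and cookie:

	dma_cookie_t last, used;
	enum dma_status status;

	/* Spin until the transaction identified by 'cookie' leaves the in-progress state. */
	do {
		status = dma_async_is_tx_complete(dma_chan, cookie, &last, &used);
	} while (status == DMA_IN_PROGRESS);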
761 static void mv_dump_xor_regs(struct mv_xor_chan *chan) 761 static void mv_dump_xor_regs(struct mv_xor_chan *chan)
762 { 762 {
763 u32 val; 763 u32 val;
764 764
765 val = readl_relaxed(XOR_CONFIG(chan)); 765 val = readl_relaxed(XOR_CONFIG(chan));
766 dev_err(mv_chan_to_devp(chan), "config 0x%08x\n", val); 766 dev_err(mv_chan_to_devp(chan), "config 0x%08x\n", val);
767 767
768 val = readl_relaxed(XOR_ACTIVATION(chan)); 768 val = readl_relaxed(XOR_ACTIVATION(chan));
769 dev_err(mv_chan_to_devp(chan), "activation 0x%08x\n", val); 769 dev_err(mv_chan_to_devp(chan), "activation 0x%08x\n", val);
770 770
771 val = readl_relaxed(XOR_INTR_CAUSE(chan)); 771 val = readl_relaxed(XOR_INTR_CAUSE(chan));
772 dev_err(mv_chan_to_devp(chan), "intr cause 0x%08x\n", val); 772 dev_err(mv_chan_to_devp(chan), "intr cause 0x%08x\n", val);
773 773
774 val = readl_relaxed(XOR_INTR_MASK(chan)); 774 val = readl_relaxed(XOR_INTR_MASK(chan));
775 dev_err(mv_chan_to_devp(chan), "intr mask 0x%08x\n", val); 775 dev_err(mv_chan_to_devp(chan), "intr mask 0x%08x\n", val);
776 776
777 val = readl_relaxed(XOR_ERROR_CAUSE(chan)); 777 val = readl_relaxed(XOR_ERROR_CAUSE(chan));
778 dev_err(mv_chan_to_devp(chan), "error cause 0x%08x\n", val); 778 dev_err(mv_chan_to_devp(chan), "error cause 0x%08x\n", val);
779 779
780 val = readl_relaxed(XOR_ERROR_ADDR(chan)); 780 val = readl_relaxed(XOR_ERROR_ADDR(chan));
781 dev_err(mv_chan_to_devp(chan), "error addr 0x%08x\n", val); 781 dev_err(mv_chan_to_devp(chan), "error addr 0x%08x\n", val);
782 } 782 }
783 783
784 static void mv_xor_err_interrupt_handler(struct mv_xor_chan *chan, 784 static void mv_xor_err_interrupt_handler(struct mv_xor_chan *chan,
785 u32 intr_cause) 785 u32 intr_cause)
786 { 786 {
787 if (intr_cause & (1 << 4)) { 787 if (intr_cause & (1 << 4)) {
788 dev_dbg(mv_chan_to_devp(chan), 788 dev_dbg(mv_chan_to_devp(chan),
789 "ignore this error\n"); 789 "ignore this error\n");
790 return; 790 return;
791 } 791 }
792 792
793 dev_err(mv_chan_to_devp(chan), 793 dev_err(mv_chan_to_devp(chan),
794 "error on chan %d. intr cause 0x%08x\n", 794 "error on chan %d. intr cause 0x%08x\n",
795 chan->idx, intr_cause); 795 chan->idx, intr_cause);
796 796
797 mv_dump_xor_regs(chan); 797 mv_dump_xor_regs(chan);
798 BUG(); 798 BUG();
799 } 799 }
800 800
801 static irqreturn_t mv_xor_interrupt_handler(int irq, void *data) 801 static irqreturn_t mv_xor_interrupt_handler(int irq, void *data)
802 { 802 {
803 struct mv_xor_chan *chan = data; 803 struct mv_xor_chan *chan = data;
804 u32 intr_cause = mv_chan_get_intr_cause(chan); 804 u32 intr_cause = mv_chan_get_intr_cause(chan);
805 805
806 dev_dbg(mv_chan_to_devp(chan), "intr cause %x\n", intr_cause); 806 dev_dbg(mv_chan_to_devp(chan), "intr cause %x\n", intr_cause);
807 807
808 if (mv_is_err_intr(intr_cause)) 808 if (mv_is_err_intr(intr_cause))
809 mv_xor_err_interrupt_handler(chan, intr_cause); 809 mv_xor_err_interrupt_handler(chan, intr_cause);
810 810
811 tasklet_schedule(&chan->irq_tasklet); 811 tasklet_schedule(&chan->irq_tasklet);
812 812
813 mv_xor_device_clear_eoc_cause(chan); 813 mv_xor_device_clear_eoc_cause(chan);
814 814
815 return IRQ_HANDLED; 815 return IRQ_HANDLED;
816 } 816 }
817 817
818 static void mv_xor_issue_pending(struct dma_chan *chan) 818 static void mv_xor_issue_pending(struct dma_chan *chan)
819 { 819 {
820 struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan); 820 struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
821 821
822 if (mv_chan->pending >= MV_XOR_THRESHOLD) { 822 if (mv_chan->pending >= MV_XOR_THRESHOLD) {
823 mv_chan->pending = 0; 823 mv_chan->pending = 0;
824 mv_chan_activate(mv_chan); 824 mv_chan_activate(mv_chan);
825 } 825 }
826 } 826 }
827 827
828 /* 828 /*
829 * Perform a transaction to verify the HW works. 829 * Perform a transaction to verify the HW works.
830 */ 830 */
831 #define MV_XOR_TEST_SIZE 2000 831 #define MV_XOR_TEST_SIZE 2000
832 832
833 static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan) 833 static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
834 { 834 {
835 int i; 835 int i;
836 void *src, *dest; 836 void *src, *dest;
837 dma_addr_t src_dma, dest_dma; 837 dma_addr_t src_dma, dest_dma;
838 struct dma_chan *dma_chan; 838 struct dma_chan *dma_chan;
839 dma_cookie_t cookie; 839 dma_cookie_t cookie;
840 struct dma_async_tx_descriptor *tx; 840 struct dma_async_tx_descriptor *tx;
841 int err = 0; 841 int err = 0;
842 842
843 src = kmalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL); 843 src = kmalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL);
844 if (!src) 844 if (!src)
845 return -ENOMEM; 845 return -ENOMEM;
846 846
847 dest = kzalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL); 847 dest = kzalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL);
848 if (!dest) { 848 if (!dest) {
849 kfree(src); 849 kfree(src);
850 return -ENOMEM; 850 return -ENOMEM;
851 } 851 }
852 852
853 /* Fill in src buffer */ 853 /* Fill in src buffer */
854 for (i = 0; i < MV_XOR_TEST_SIZE; i++) 854 for (i = 0; i < MV_XOR_TEST_SIZE; i++)
855 ((u8 *) src)[i] = (u8)i; 855 ((u8 *) src)[i] = (u8)i;
856 856
857 dma_chan = &mv_chan->dmachan; 857 dma_chan = &mv_chan->dmachan;
858 if (mv_xor_alloc_chan_resources(dma_chan) < 1) { 858 if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
859 err = -ENODEV; 859 err = -ENODEV;
860 goto out; 860 goto out;
861 } 861 }
862 862
863 dest_dma = dma_map_single(dma_chan->device->dev, dest, 863 dest_dma = dma_map_single(dma_chan->device->dev, dest,
864 MV_XOR_TEST_SIZE, DMA_FROM_DEVICE); 864 MV_XOR_TEST_SIZE, DMA_FROM_DEVICE);
865 865
866 src_dma = dma_map_single(dma_chan->device->dev, src, 866 src_dma = dma_map_single(dma_chan->device->dev, src,
867 MV_XOR_TEST_SIZE, DMA_TO_DEVICE); 867 MV_XOR_TEST_SIZE, DMA_TO_DEVICE);
868 868
869 tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma, 869 tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
870 MV_XOR_TEST_SIZE, 0); 870 MV_XOR_TEST_SIZE, 0);
871 cookie = mv_xor_tx_submit(tx); 871 cookie = mv_xor_tx_submit(tx);
872 mv_xor_issue_pending(dma_chan); 872 mv_xor_issue_pending(dma_chan);
873 async_tx_ack(tx); 873 async_tx_ack(tx);
874 msleep(1); 874 msleep(1);
875 875
876 if (mv_xor_status(dma_chan, cookie, NULL) != 876 if (mv_xor_status(dma_chan, cookie, NULL) !=
877 DMA_SUCCESS) { 877 DMA_COMPLETE) {
878 dev_err(dma_chan->device->dev, 878 dev_err(dma_chan->device->dev,
879 "Self-test copy timed out, disabling\n"); 879 "Self-test copy timed out, disabling\n");
880 err = -ENODEV; 880 err = -ENODEV;
881 goto free_resources; 881 goto free_resources;
882 } 882 }
883 883
884 dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma, 884 dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
885 MV_XOR_TEST_SIZE, DMA_FROM_DEVICE); 885 MV_XOR_TEST_SIZE, DMA_FROM_DEVICE);
886 if (memcmp(src, dest, MV_XOR_TEST_SIZE)) { 886 if (memcmp(src, dest, MV_XOR_TEST_SIZE)) {
887 dev_err(dma_chan->device->dev, 887 dev_err(dma_chan->device->dev,
888 "Self-test copy failed compare, disabling\n"); 888 "Self-test copy failed compare, disabling\n");
889 err = -ENODEV; 889 err = -ENODEV;
890 goto free_resources; 890 goto free_resources;
891 } 891 }
892 892
893 free_resources: 893 free_resources:
894 mv_xor_free_chan_resources(dma_chan); 894 mv_xor_free_chan_resources(dma_chan);
895 out: 895 out:
896 kfree(src); 896 kfree(src);
897 kfree(dest); 897 kfree(dest);
898 return err; 898 return err;
899 } 899 }
900 900
901 #define MV_XOR_NUM_SRC_TEST 4 /* must be <= 15 */ 901 #define MV_XOR_NUM_SRC_TEST 4 /* must be <= 15 */
902 static int 902 static int
903 mv_xor_xor_self_test(struct mv_xor_chan *mv_chan) 903 mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
904 { 904 {
905 int i, src_idx; 905 int i, src_idx;
906 struct page *dest; 906 struct page *dest;
907 struct page *xor_srcs[MV_XOR_NUM_SRC_TEST]; 907 struct page *xor_srcs[MV_XOR_NUM_SRC_TEST];
908 dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST]; 908 dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST];
909 dma_addr_t dest_dma; 909 dma_addr_t dest_dma;
910 struct dma_async_tx_descriptor *tx; 910 struct dma_async_tx_descriptor *tx;
911 struct dma_chan *dma_chan; 911 struct dma_chan *dma_chan;
912 dma_cookie_t cookie; 912 dma_cookie_t cookie;
913 u8 cmp_byte = 0; 913 u8 cmp_byte = 0;
914 u32 cmp_word; 914 u32 cmp_word;
915 int err = 0; 915 int err = 0;
916 916
917 for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) { 917 for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
918 xor_srcs[src_idx] = alloc_page(GFP_KERNEL); 918 xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
919 if (!xor_srcs[src_idx]) { 919 if (!xor_srcs[src_idx]) {
920 while (src_idx--) 920 while (src_idx--)
921 __free_page(xor_srcs[src_idx]); 921 __free_page(xor_srcs[src_idx]);
922 return -ENOMEM; 922 return -ENOMEM;
923 } 923 }
924 } 924 }
925 925
926 dest = alloc_page(GFP_KERNEL); 926 dest = alloc_page(GFP_KERNEL);
927 if (!dest) { 927 if (!dest) {
928 while (src_idx--) 928 while (src_idx--)
929 __free_page(xor_srcs[src_idx]); 929 __free_page(xor_srcs[src_idx]);
930 return -ENOMEM; 930 return -ENOMEM;
931 } 931 }
932 932
933 /* Fill in src buffers */ 933 /* Fill in src buffers */
934 for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) { 934 for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
935 u8 *ptr = page_address(xor_srcs[src_idx]); 935 u8 *ptr = page_address(xor_srcs[src_idx]);
936 for (i = 0; i < PAGE_SIZE; i++) 936 for (i = 0; i < PAGE_SIZE; i++)
937 ptr[i] = (1 << src_idx); 937 ptr[i] = (1 << src_idx);
938 } 938 }
939 939
940 for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) 940 for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++)
941 cmp_byte ^= (u8) (1 << src_idx); 941 cmp_byte ^= (u8) (1 << src_idx);
942 942
943 cmp_word = (cmp_byte << 24) | (cmp_byte << 16) | 943 cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
944 (cmp_byte << 8) | cmp_byte; 944 (cmp_byte << 8) | cmp_byte;
945 945
946 memset(page_address(dest), 0, PAGE_SIZE); 946 memset(page_address(dest), 0, PAGE_SIZE);
947 947
948 dma_chan = &mv_chan->dmachan; 948 dma_chan = &mv_chan->dmachan;
949 if (mv_xor_alloc_chan_resources(dma_chan) < 1) { 949 if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
950 err = -ENODEV; 950 err = -ENODEV;
951 goto out; 951 goto out;
952 } 952 }
953 953
954 /* test xor */ 954 /* test xor */
955 dest_dma = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE, 955 dest_dma = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
956 DMA_FROM_DEVICE); 956 DMA_FROM_DEVICE);
957 957
958 for (i = 0; i < MV_XOR_NUM_SRC_TEST; i++) 958 for (i = 0; i < MV_XOR_NUM_SRC_TEST; i++)
959 dma_srcs[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i], 959 dma_srcs[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
960 0, PAGE_SIZE, DMA_TO_DEVICE); 960 0, PAGE_SIZE, DMA_TO_DEVICE);
961 961
962 tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs, 962 tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
963 MV_XOR_NUM_SRC_TEST, PAGE_SIZE, 0); 963 MV_XOR_NUM_SRC_TEST, PAGE_SIZE, 0);
964 964
965 cookie = mv_xor_tx_submit(tx); 965 cookie = mv_xor_tx_submit(tx);
966 mv_xor_issue_pending(dma_chan); 966 mv_xor_issue_pending(dma_chan);
967 async_tx_ack(tx); 967 async_tx_ack(tx);
968 msleep(8); 968 msleep(8);
969 969
970 if (mv_xor_status(dma_chan, cookie, NULL) != 970 if (mv_xor_status(dma_chan, cookie, NULL) !=
971 DMA_SUCCESS) { 971 DMA_COMPLETE) {
972 dev_err(dma_chan->device->dev, 972 dev_err(dma_chan->device->dev,
973 "Self-test xor timed out, disabling\n"); 973 "Self-test xor timed out, disabling\n");
974 err = -ENODEV; 974 err = -ENODEV;
975 goto free_resources; 975 goto free_resources;
976 } 976 }
977 977
978 dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma, 978 dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
979 PAGE_SIZE, DMA_FROM_DEVICE); 979 PAGE_SIZE, DMA_FROM_DEVICE);
980 for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) { 980 for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
981 u32 *ptr = page_address(dest); 981 u32 *ptr = page_address(dest);
982 if (ptr[i] != cmp_word) { 982 if (ptr[i] != cmp_word) {
983 dev_err(dma_chan->device->dev, 983 dev_err(dma_chan->device->dev,
984 "Self-test xor failed compare, disabling. index %d, data %x, expected %x\n", 984 "Self-test xor failed compare, disabling. index %d, data %x, expected %x\n",
985 i, ptr[i], cmp_word); 985 i, ptr[i], cmp_word);
986 err = -ENODEV; 986 err = -ENODEV;
987 goto free_resources; 987 goto free_resources;
988 } 988 }
989 } 989 }
990 990
991 free_resources: 991 free_resources:
992 mv_xor_free_chan_resources(dma_chan); 992 mv_xor_free_chan_resources(dma_chan);
993 out: 993 out:
994 src_idx = MV_XOR_NUM_SRC_TEST; 994 src_idx = MV_XOR_NUM_SRC_TEST;
995 while (src_idx--) 995 while (src_idx--)
996 __free_page(xor_srcs[src_idx]); 996 __free_page(xor_srcs[src_idx]);
997 __free_page(dest); 997 __free_page(dest);
998 return err; 998 return err;
999 } 999 }
1000 1000
1001 /* This driver does not implement any of the optional DMA operations. */ 1001 /* This driver does not implement any of the optional DMA operations. */
1002 static int 1002 static int
1003 mv_xor_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, 1003 mv_xor_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
1004 unsigned long arg) 1004 unsigned long arg)
1005 { 1005 {
1006 return -ENOSYS; 1006 return -ENOSYS;
1007 } 1007 }
1008 1008
1009 static int mv_xor_channel_remove(struct mv_xor_chan *mv_chan) 1009 static int mv_xor_channel_remove(struct mv_xor_chan *mv_chan)
1010 { 1010 {
1011 struct dma_chan *chan, *_chan; 1011 struct dma_chan *chan, *_chan;
1012 struct device *dev = mv_chan->dmadev.dev; 1012 struct device *dev = mv_chan->dmadev.dev;
1013 1013
1014 dma_async_device_unregister(&mv_chan->dmadev); 1014 dma_async_device_unregister(&mv_chan->dmadev);
1015 1015
1016 dma_free_coherent(dev, MV_XOR_POOL_SIZE, 1016 dma_free_coherent(dev, MV_XOR_POOL_SIZE,
1017 mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool); 1017 mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
1018 1018
1019 list_for_each_entry_safe(chan, _chan, &mv_chan->dmadev.channels, 1019 list_for_each_entry_safe(chan, _chan, &mv_chan->dmadev.channels,
1020 device_node) { 1020 device_node) {
1021 list_del(&chan->device_node); 1021 list_del(&chan->device_node);
1022 } 1022 }
1023 1023
1024 free_irq(mv_chan->irq, mv_chan); 1024 free_irq(mv_chan->irq, mv_chan);
1025 1025
1026 return 0; 1026 return 0;
1027 } 1027 }
1028 1028
1029 static struct mv_xor_chan * 1029 static struct mv_xor_chan *
1030 mv_xor_channel_add(struct mv_xor_device *xordev, 1030 mv_xor_channel_add(struct mv_xor_device *xordev,
1031 struct platform_device *pdev, 1031 struct platform_device *pdev,
1032 int idx, dma_cap_mask_t cap_mask, int irq) 1032 int idx, dma_cap_mask_t cap_mask, int irq)
1033 { 1033 {
1034 int ret = 0; 1034 int ret = 0;
1035 struct mv_xor_chan *mv_chan; 1035 struct mv_xor_chan *mv_chan;
1036 struct dma_device *dma_dev; 1036 struct dma_device *dma_dev;
1037 1037
1038 mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL); 1038 mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL);
1039 if (!mv_chan) 1039 if (!mv_chan)
1040 return ERR_PTR(-ENOMEM); 1040 return ERR_PTR(-ENOMEM);
1041 1041
1042 mv_chan->idx = idx; 1042 mv_chan->idx = idx;
1043 mv_chan->irq = irq; 1043 mv_chan->irq = irq;
1044 1044
1045 dma_dev = &mv_chan->dmadev; 1045 dma_dev = &mv_chan->dmadev;
1046 1046
1047 /* allocate coherent memory for hardware descriptors 1047 /* allocate coherent memory for hardware descriptors
1048 * note: writecombine gives slightly better performance, but 1048 * note: writecombine gives slightly better performance, but
1049 * requires that we explicitly flush the writes 1049 * requires that we explicitly flush the writes
1050 */ 1050 */
1051 mv_chan->dma_desc_pool_virt = 1051 mv_chan->dma_desc_pool_virt =
1052 dma_alloc_writecombine(&pdev->dev, MV_XOR_POOL_SIZE, 1052 dma_alloc_writecombine(&pdev->dev, MV_XOR_POOL_SIZE,
1053 &mv_chan->dma_desc_pool, GFP_KERNEL); 1053 &mv_chan->dma_desc_pool, GFP_KERNEL);
1054 if (!mv_chan->dma_desc_pool_virt) 1054 if (!mv_chan->dma_desc_pool_virt)
1055 return ERR_PTR(-ENOMEM); 1055 return ERR_PTR(-ENOMEM);
1056 1056
1057 /* discover transaction capabilities from the platform data */ 1057 /* discover transaction capabilities from the platform data */
1058 dma_dev->cap_mask = cap_mask; 1058 dma_dev->cap_mask = cap_mask;
1059 1059
1060 INIT_LIST_HEAD(&dma_dev->channels); 1060 INIT_LIST_HEAD(&dma_dev->channels);
1061 1061
1062 /* set base routines */ 1062 /* set base routines */
1063 dma_dev->device_alloc_chan_resources = mv_xor_alloc_chan_resources; 1063 dma_dev->device_alloc_chan_resources = mv_xor_alloc_chan_resources;
1064 dma_dev->device_free_chan_resources = mv_xor_free_chan_resources; 1064 dma_dev->device_free_chan_resources = mv_xor_free_chan_resources;
1065 dma_dev->device_tx_status = mv_xor_status; 1065 dma_dev->device_tx_status = mv_xor_status;
1066 dma_dev->device_issue_pending = mv_xor_issue_pending; 1066 dma_dev->device_issue_pending = mv_xor_issue_pending;
1067 dma_dev->device_control = mv_xor_control; 1067 dma_dev->device_control = mv_xor_control;
1068 dma_dev->dev = &pdev->dev; 1068 dma_dev->dev = &pdev->dev;
1069 1069
1070 /* set prep routines based on capability */ 1070 /* set prep routines based on capability */
1071 if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) 1071 if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
1072 dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy; 1072 dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy;
1073 if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) { 1073 if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
1074 dma_dev->max_xor = 8; 1074 dma_dev->max_xor = 8;
1075 dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor; 1075 dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor;
1076 } 1076 }
1077 1077
1078 mv_chan->mmr_base = xordev->xor_base; 1078 mv_chan->mmr_base = xordev->xor_base;
1079 if (!mv_chan->mmr_base) { 1079 if (!mv_chan->mmr_base) {
1080 ret = -ENOMEM; 1080 ret = -ENOMEM;
1081 goto err_free_dma; 1081 goto err_free_dma;
1082 } 1082 }
1083 tasklet_init(&mv_chan->irq_tasklet, mv_xor_tasklet, (unsigned long) 1083 tasklet_init(&mv_chan->irq_tasklet, mv_xor_tasklet, (unsigned long)
1084 mv_chan); 1084 mv_chan);
1085 1085
1086 /* clear errors before enabling interrupts */ 1086 /* clear errors before enabling interrupts */
1087 mv_xor_device_clear_err_status(mv_chan); 1087 mv_xor_device_clear_err_status(mv_chan);
1088 1088
1089 ret = request_irq(mv_chan->irq, mv_xor_interrupt_handler, 1089 ret = request_irq(mv_chan->irq, mv_xor_interrupt_handler,
1090 0, dev_name(&pdev->dev), mv_chan); 1090 0, dev_name(&pdev->dev), mv_chan);
1091 if (ret) 1091 if (ret)
1092 goto err_free_dma; 1092 goto err_free_dma;
1093 1093
1094 mv_chan_unmask_interrupts(mv_chan); 1094 mv_chan_unmask_interrupts(mv_chan);
1095 1095
1096 mv_set_mode(mv_chan, DMA_MEMCPY); 1096 mv_set_mode(mv_chan, DMA_MEMCPY);
1097 1097
1098 spin_lock_init(&mv_chan->lock); 1098 spin_lock_init(&mv_chan->lock);
1099 INIT_LIST_HEAD(&mv_chan->chain); 1099 INIT_LIST_HEAD(&mv_chan->chain);
1100 INIT_LIST_HEAD(&mv_chan->completed_slots); 1100 INIT_LIST_HEAD(&mv_chan->completed_slots);
1101 INIT_LIST_HEAD(&mv_chan->all_slots); 1101 INIT_LIST_HEAD(&mv_chan->all_slots);
1102 mv_chan->dmachan.device = dma_dev; 1102 mv_chan->dmachan.device = dma_dev;
1103 dma_cookie_init(&mv_chan->dmachan); 1103 dma_cookie_init(&mv_chan->dmachan);
1104 1104
1105 list_add_tail(&mv_chan->dmachan.device_node, &dma_dev->channels); 1105 list_add_tail(&mv_chan->dmachan.device_node, &dma_dev->channels);
1106 1106
1107 if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) { 1107 if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
1108 ret = mv_xor_memcpy_self_test(mv_chan); 1108 ret = mv_xor_memcpy_self_test(mv_chan);
1109 dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret); 1109 dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret);
1110 if (ret) 1110 if (ret)
1111 goto err_free_irq; 1111 goto err_free_irq;
1112 } 1112 }
1113 1113
1114 if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) { 1114 if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
1115 ret = mv_xor_xor_self_test(mv_chan); 1115 ret = mv_xor_xor_self_test(mv_chan);
1116 dev_dbg(&pdev->dev, "xor self test returned %d\n", ret); 1116 dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);
1117 if (ret) 1117 if (ret)
1118 goto err_free_irq; 1118 goto err_free_irq;
1119 } 1119 }
1120 1120
1121 dev_info(&pdev->dev, "Marvell XOR: ( %s%s%s)\n", 1121 dev_info(&pdev->dev, "Marvell XOR: ( %s%s%s)\n",
1122 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "", 1122 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
1123 dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "", 1123 dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
1124 dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : ""); 1124 dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");
1125 1125
1126 dma_async_device_register(dma_dev); 1126 dma_async_device_register(dma_dev);
1127 return mv_chan; 1127 return mv_chan;
1128 1128
1129 err_free_irq: 1129 err_free_irq:
1130 free_irq(mv_chan->irq, mv_chan); 1130 free_irq(mv_chan->irq, mv_chan);
1131 err_free_dma: 1131 err_free_dma:
1132 dma_free_coherent(&pdev->dev, MV_XOR_POOL_SIZE, 1132 dma_free_coherent(&pdev->dev, MV_XOR_POOL_SIZE,
1133 mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool); 1133 mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
1134 return ERR_PTR(ret); 1134 return ERR_PTR(ret);
1135 } 1135 }
1136 1136
1137 static void 1137 static void
1138 mv_xor_conf_mbus_windows(struct mv_xor_device *xordev, 1138 mv_xor_conf_mbus_windows(struct mv_xor_device *xordev,
1139 const struct mbus_dram_target_info *dram) 1139 const struct mbus_dram_target_info *dram)
1140 { 1140 {
1141 void __iomem *base = xordev->xor_base; 1141 void __iomem *base = xordev->xor_base;
1142 u32 win_enable = 0; 1142 u32 win_enable = 0;
1143 int i; 1143 int i;
1144 1144
1145 for (i = 0; i < 8; i++) { 1145 for (i = 0; i < 8; i++) {
1146 writel(0, base + WINDOW_BASE(i)); 1146 writel(0, base + WINDOW_BASE(i));
1147 writel(0, base + WINDOW_SIZE(i)); 1147 writel(0, base + WINDOW_SIZE(i));
1148 if (i < 4) 1148 if (i < 4)
1149 writel(0, base + WINDOW_REMAP_HIGH(i)); 1149 writel(0, base + WINDOW_REMAP_HIGH(i));
1150 } 1150 }
1151 1151
1152 for (i = 0; i < dram->num_cs; i++) { 1152 for (i = 0; i < dram->num_cs; i++) {
1153 const struct mbus_dram_window *cs = dram->cs + i; 1153 const struct mbus_dram_window *cs = dram->cs + i;
1154 1154
1155 writel((cs->base & 0xffff0000) | 1155 writel((cs->base & 0xffff0000) |
1156 (cs->mbus_attr << 8) | 1156 (cs->mbus_attr << 8) |
1157 dram->mbus_dram_target_id, base + WINDOW_BASE(i)); 1157 dram->mbus_dram_target_id, base + WINDOW_BASE(i));
1158 writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i)); 1158 writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));
1159 1159
1160 win_enable |= (1 << i); 1160 win_enable |= (1 << i);
1161 win_enable |= 3 << (16 + (2 * i)); 1161 win_enable |= 3 << (16 + (2 * i));
1162 } 1162 }
1163 1163
1164 writel(win_enable, base + WINDOW_BAR_ENABLE(0)); 1164 writel(win_enable, base + WINDOW_BAR_ENABLE(0));
1165 writel(win_enable, base + WINDOW_BAR_ENABLE(1)); 1165 writel(win_enable, base + WINDOW_BAR_ENABLE(1));
1166 writel(0, base + WINDOW_OVERRIDE_CTRL(0)); 1166 writel(0, base + WINDOW_OVERRIDE_CTRL(0));
1167 writel(0, base + WINDOW_OVERRIDE_CTRL(1)); 1167 writel(0, base + WINDOW_OVERRIDE_CTRL(1));
1168 } 1168 }
1169 1169
1170 static int mv_xor_probe(struct platform_device *pdev) 1170 static int mv_xor_probe(struct platform_device *pdev)
1171 { 1171 {
1172 const struct mbus_dram_target_info *dram; 1172 const struct mbus_dram_target_info *dram;
1173 struct mv_xor_device *xordev; 1173 struct mv_xor_device *xordev;
1174 struct mv_xor_platform_data *pdata = dev_get_platdata(&pdev->dev); 1174 struct mv_xor_platform_data *pdata = dev_get_platdata(&pdev->dev);
1175 struct resource *res; 1175 struct resource *res;
1176 int i, ret; 1176 int i, ret;
1177 1177
1178 dev_notice(&pdev->dev, "Marvell shared XOR driver\n"); 1178 dev_notice(&pdev->dev, "Marvell shared XOR driver\n");
1179 1179
1180 xordev = devm_kzalloc(&pdev->dev, sizeof(*xordev), GFP_KERNEL); 1180 xordev = devm_kzalloc(&pdev->dev, sizeof(*xordev), GFP_KERNEL);
1181 if (!xordev) 1181 if (!xordev)
1182 return -ENOMEM; 1182 return -ENOMEM;
1183 1183
1184 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1184 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1185 if (!res) 1185 if (!res)
1186 return -ENODEV; 1186 return -ENODEV;
1187 1187
1188 xordev->xor_base = devm_ioremap(&pdev->dev, res->start, 1188 xordev->xor_base = devm_ioremap(&pdev->dev, res->start,
1189 resource_size(res)); 1189 resource_size(res));
1190 if (!xordev->xor_base) 1190 if (!xordev->xor_base)
1191 return -EBUSY; 1191 return -EBUSY;
1192 1192
1193 res = platform_get_resource(pdev, IORESOURCE_MEM, 1); 1193 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1194 if (!res) 1194 if (!res)
1195 return -ENODEV; 1195 return -ENODEV;
1196 1196
1197 xordev->xor_high_base = devm_ioremap(&pdev->dev, res->start, 1197 xordev->xor_high_base = devm_ioremap(&pdev->dev, res->start,
1198 resource_size(res)); 1198 resource_size(res));
1199 if (!xordev->xor_high_base) 1199 if (!xordev->xor_high_base)
1200 return -EBUSY; 1200 return -EBUSY;
1201 1201
1202 platform_set_drvdata(pdev, xordev); 1202 platform_set_drvdata(pdev, xordev);
1203 1203
1204 /* 1204 /*
1205 * (Re-)program MBUS remapping windows if we are asked to. 1205 * (Re-)program MBUS remapping windows if we are asked to.
1206 */ 1206 */
1207 dram = mv_mbus_dram_info(); 1207 dram = mv_mbus_dram_info();
1208 if (dram) 1208 if (dram)
1209 mv_xor_conf_mbus_windows(xordev, dram); 1209 mv_xor_conf_mbus_windows(xordev, dram);
1210 1210
1211 /* Not all platforms can gate the clock, so it is not 1211 /* Not all platforms can gate the clock, so it is not
1212 * an error if the clock does not exist. 1212 * an error if the clock does not exist.
1213 */ 1213 */
1214 xordev->clk = clk_get(&pdev->dev, NULL); 1214 xordev->clk = clk_get(&pdev->dev, NULL);
1215 if (!IS_ERR(xordev->clk)) 1215 if (!IS_ERR(xordev->clk))
1216 clk_prepare_enable(xordev->clk); 1216 clk_prepare_enable(xordev->clk);
1217 1217
1218 if (pdev->dev.of_node) { 1218 if (pdev->dev.of_node) {
1219 struct device_node *np; 1219 struct device_node *np;
1220 int i = 0; 1220 int i = 0;
1221 1221
1222 for_each_child_of_node(pdev->dev.of_node, np) { 1222 for_each_child_of_node(pdev->dev.of_node, np) {
1223 dma_cap_mask_t cap_mask; 1223 dma_cap_mask_t cap_mask;
1224 int irq; 1224 int irq;
1225 1225
1226 dma_cap_zero(cap_mask); 1226 dma_cap_zero(cap_mask);
1227 if (of_property_read_bool(np, "dmacap,memcpy")) 1227 if (of_property_read_bool(np, "dmacap,memcpy"))
1228 dma_cap_set(DMA_MEMCPY, cap_mask); 1228 dma_cap_set(DMA_MEMCPY, cap_mask);
1229 if (of_property_read_bool(np, "dmacap,xor")) 1229 if (of_property_read_bool(np, "dmacap,xor"))
1230 dma_cap_set(DMA_XOR, cap_mask); 1230 dma_cap_set(DMA_XOR, cap_mask);
1231 if (of_property_read_bool(np, "dmacap,interrupt")) 1231 if (of_property_read_bool(np, "dmacap,interrupt"))
1232 dma_cap_set(DMA_INTERRUPT, cap_mask); 1232 dma_cap_set(DMA_INTERRUPT, cap_mask);
1233 1233
1234 irq = irq_of_parse_and_map(np, 0); 1234 irq = irq_of_parse_and_map(np, 0);
1235 if (!irq) { 1235 if (!irq) {
1236 ret = -ENODEV; 1236 ret = -ENODEV;
1237 goto err_channel_add; 1237 goto err_channel_add;
1238 } 1238 }
1239 1239
1240 xordev->channels[i] = 1240 xordev->channels[i] =
1241 mv_xor_channel_add(xordev, pdev, i, 1241 mv_xor_channel_add(xordev, pdev, i,
1242 cap_mask, irq); 1242 cap_mask, irq);
1243 if (IS_ERR(xordev->channels[i])) { 1243 if (IS_ERR(xordev->channels[i])) {
1244 ret = PTR_ERR(xordev->channels[i]); 1244 ret = PTR_ERR(xordev->channels[i]);
1245 xordev->channels[i] = NULL; 1245 xordev->channels[i] = NULL;
1246 irq_dispose_mapping(irq); 1246 irq_dispose_mapping(irq);
1247 goto err_channel_add; 1247 goto err_channel_add;
1248 } 1248 }
1249 1249
1250 i++; 1250 i++;
1251 } 1251 }
1252 } else if (pdata && pdata->channels) { 1252 } else if (pdata && pdata->channels) {
1253 for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) { 1253 for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
1254 struct mv_xor_channel_data *cd; 1254 struct mv_xor_channel_data *cd;
1255 int irq; 1255 int irq;
1256 1256
1257 cd = &pdata->channels[i]; 1257 cd = &pdata->channels[i];
1258 if (!cd) { 1258 if (!cd) {
1259 ret = -ENODEV; 1259 ret = -ENODEV;
1260 goto err_channel_add; 1260 goto err_channel_add;
1261 } 1261 }
1262 1262
1263 irq = platform_get_irq(pdev, i); 1263 irq = platform_get_irq(pdev, i);
1264 if (irq < 0) { 1264 if (irq < 0) {
1265 ret = irq; 1265 ret = irq;
1266 goto err_channel_add; 1266 goto err_channel_add;
1267 } 1267 }
1268 1268
1269 xordev->channels[i] = 1269 xordev->channels[i] =
1270 mv_xor_channel_add(xordev, pdev, i, 1270 mv_xor_channel_add(xordev, pdev, i,
1271 cd->cap_mask, irq); 1271 cd->cap_mask, irq);
1272 if (IS_ERR(xordev->channels[i])) { 1272 if (IS_ERR(xordev->channels[i])) {
1273 ret = PTR_ERR(xordev->channels[i]); 1273 ret = PTR_ERR(xordev->channels[i]);
1274 goto err_channel_add; 1274 goto err_channel_add;
1275 } 1275 }
1276 } 1276 }
1277 } 1277 }
1278 1278
1279 return 0; 1279 return 0;
1280 1280
1281 err_channel_add: 1281 err_channel_add:
1282 for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) 1282 for (i = 0; i < MV_XOR_MAX_CHANNELS; i++)
1283 if (xordev->channels[i]) { 1283 if (xordev->channels[i]) {
1284 mv_xor_channel_remove(xordev->channels[i]); 1284 mv_xor_channel_remove(xordev->channels[i]);
1285 if (pdev->dev.of_node) 1285 if (pdev->dev.of_node)
1286 irq_dispose_mapping(xordev->channels[i]->irq); 1286 irq_dispose_mapping(xordev->channels[i]->irq);
1287 } 1287 }
1288 1288
1289 if (!IS_ERR(xordev->clk)) { 1289 if (!IS_ERR(xordev->clk)) {
1290 clk_disable_unprepare(xordev->clk); 1290 clk_disable_unprepare(xordev->clk);
1291 clk_put(xordev->clk); 1291 clk_put(xordev->clk);
1292 } 1292 }
1293 1293
1294 return ret; 1294 return ret;
1295 } 1295 }
1296 1296
1297 static int mv_xor_remove(struct platform_device *pdev) 1297 static int mv_xor_remove(struct platform_device *pdev)
1298 { 1298 {
1299 struct mv_xor_device *xordev = platform_get_drvdata(pdev); 1299 struct mv_xor_device *xordev = platform_get_drvdata(pdev);
1300 int i; 1300 int i;
1301 1301
1302 for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) { 1302 for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
1303 if (xordev->channels[i]) 1303 if (xordev->channels[i])
1304 mv_xor_channel_remove(xordev->channels[i]); 1304 mv_xor_channel_remove(xordev->channels[i]);
1305 } 1305 }
1306 1306
1307 if (!IS_ERR(xordev->clk)) { 1307 if (!IS_ERR(xordev->clk)) {
1308 clk_disable_unprepare(xordev->clk); 1308 clk_disable_unprepare(xordev->clk);
1309 clk_put(xordev->clk); 1309 clk_put(xordev->clk);
1310 } 1310 }
1311 1311
1312 return 0; 1312 return 0;
1313 } 1313 }
1314 1314
1315 #ifdef CONFIG_OF 1315 #ifdef CONFIG_OF
1316 static struct of_device_id mv_xor_dt_ids[] = { 1316 static struct of_device_id mv_xor_dt_ids[] = {
1317 { .compatible = "marvell,orion-xor", }, 1317 { .compatible = "marvell,orion-xor", },
1318 {}, 1318 {},
1319 }; 1319 };
1320 MODULE_DEVICE_TABLE(of, mv_xor_dt_ids); 1320 MODULE_DEVICE_TABLE(of, mv_xor_dt_ids);
1321 #endif 1321 #endif
1322 1322
1323 static struct platform_driver mv_xor_driver = { 1323 static struct platform_driver mv_xor_driver = {
1324 .probe = mv_xor_probe, 1324 .probe = mv_xor_probe,
1325 .remove = mv_xor_remove, 1325 .remove = mv_xor_remove,
1326 .driver = { 1326 .driver = {
1327 .owner = THIS_MODULE, 1327 .owner = THIS_MODULE,
1328 .name = MV_XOR_NAME, 1328 .name = MV_XOR_NAME,
1329 .of_match_table = of_match_ptr(mv_xor_dt_ids), 1329 .of_match_table = of_match_ptr(mv_xor_dt_ids),
1330 }, 1330 },
1331 }; 1331 };
1332 1332
1333 1333
1334 static int __init mv_xor_init(void) 1334 static int __init mv_xor_init(void)
1335 { 1335 {
1336 return platform_driver_register(&mv_xor_driver); 1336 return platform_driver_register(&mv_xor_driver);
1337 } 1337 }
1338 module_init(mv_xor_init); 1338 module_init(mv_xor_init);
1339 1339
1340 /* it's currently unsafe to unload this module */ 1340 /* it's currently unsafe to unload this module */
1341 #if 0 1341 #if 0
1342 static void __exit mv_xor_exit(void) 1342 static void __exit mv_xor_exit(void)
1343 { 1343 {
1344 platform_driver_unregister(&mv_xor_driver); 1344 platform_driver_unregister(&mv_xor_driver);
1345 return; 1345 return;
1346 } 1346 }
1347 1347
1348 module_exit(mv_xor_exit); 1348 module_exit(mv_xor_exit);
1349 #endif 1349 #endif
1350 1350
1351 MODULE_AUTHOR("Saeed Bishara <saeed@marvell.com>"); 1351 MODULE_AUTHOR("Saeed Bishara <saeed@marvell.com>");
1352 MODULE_DESCRIPTION("DMA engine driver for Marvell's XOR engine"); 1352 MODULE_DESCRIPTION("DMA engine driver for Marvell's XOR engine");
1353 MODULE_LICENSE("GPL"); 1353 MODULE_LICENSE("GPL");
1354 1354
drivers/dma/mxs-dma.c
1 /* 1 /*
2 * Copyright 2011 Freescale Semiconductor, Inc. All Rights Reserved. 2 * Copyright 2011 Freescale Semiconductor, Inc. All Rights Reserved.
3 * 3 *
4 * Refer to drivers/dma/imx-sdma.c 4 * Refer to drivers/dma/imx-sdma.c
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as 7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation. 8 * published by the Free Software Foundation.
9 */ 9 */
10 10
11 #include <linux/init.h> 11 #include <linux/init.h>
12 #include <linux/types.h> 12 #include <linux/types.h>
13 #include <linux/mm.h> 13 #include <linux/mm.h>
14 #include <linux/interrupt.h> 14 #include <linux/interrupt.h>
15 #include <linux/clk.h> 15 #include <linux/clk.h>
16 #include <linux/wait.h> 16 #include <linux/wait.h>
17 #include <linux/sched.h> 17 #include <linux/sched.h>
18 #include <linux/semaphore.h> 18 #include <linux/semaphore.h>
19 #include <linux/device.h> 19 #include <linux/device.h>
20 #include <linux/dma-mapping.h> 20 #include <linux/dma-mapping.h>
21 #include <linux/slab.h> 21 #include <linux/slab.h>
22 #include <linux/platform_device.h> 22 #include <linux/platform_device.h>
23 #include <linux/dmaengine.h> 23 #include <linux/dmaengine.h>
24 #include <linux/delay.h> 24 #include <linux/delay.h>
25 #include <linux/module.h> 25 #include <linux/module.h>
26 #include <linux/stmp_device.h> 26 #include <linux/stmp_device.h>
27 #include <linux/of.h> 27 #include <linux/of.h>
28 #include <linux/of_device.h> 28 #include <linux/of_device.h>
29 #include <linux/of_dma.h> 29 #include <linux/of_dma.h>
30 30
31 #include <asm/irq.h> 31 #include <asm/irq.h>
32 32
33 #include "dmaengine.h" 33 #include "dmaengine.h"
34 34
35 /* 35 /*
36 * NOTE: The term "PIO" throughout the mxs-dma implementation means 36 * NOTE: The term "PIO" throughout the mxs-dma implementation means
37 * PIO mode of mxs apbh-dma and apbx-dma. In this mode, the 37 * PIO mode of mxs apbh-dma and apbx-dma. In this mode, the
38 * DMA can program the controller registers of peripheral devices. 38 * DMA can program the controller registers of peripheral devices.
39 */ 39 */
40 40
41 #define dma_is_apbh(mxs_dma) ((mxs_dma)->type == MXS_DMA_APBH) 41 #define dma_is_apbh(mxs_dma) ((mxs_dma)->type == MXS_DMA_APBH)
42 #define apbh_is_old(mxs_dma) ((mxs_dma)->dev_id == IMX23_DMA) 42 #define apbh_is_old(mxs_dma) ((mxs_dma)->dev_id == IMX23_DMA)
43 43
44 #define HW_APBHX_CTRL0 0x000 44 #define HW_APBHX_CTRL0 0x000
45 #define BM_APBH_CTRL0_APB_BURST8_EN (1 << 29) 45 #define BM_APBH_CTRL0_APB_BURST8_EN (1 << 29)
46 #define BM_APBH_CTRL0_APB_BURST_EN (1 << 28) 46 #define BM_APBH_CTRL0_APB_BURST_EN (1 << 28)
47 #define BP_APBH_CTRL0_RESET_CHANNEL 16 47 #define BP_APBH_CTRL0_RESET_CHANNEL 16
48 #define HW_APBHX_CTRL1 0x010 48 #define HW_APBHX_CTRL1 0x010
49 #define HW_APBHX_CTRL2 0x020 49 #define HW_APBHX_CTRL2 0x020
50 #define HW_APBHX_CHANNEL_CTRL 0x030 50 #define HW_APBHX_CHANNEL_CTRL 0x030
51 #define BP_APBHX_CHANNEL_CTRL_RESET_CHANNEL 16 51 #define BP_APBHX_CHANNEL_CTRL_RESET_CHANNEL 16
52 /* 52 /*
53 * The offset of the NXTCMDAR register differs per dma type and version, 53 * The offset of the NXTCMDAR register differs per dma type and version,
54 * while the stride for each channel is always 0x70. 54 * while the stride for each channel is always 0x70.
55 */ 55 */
56 #define HW_APBHX_CHn_NXTCMDAR(d, n) \ 56 #define HW_APBHX_CHn_NXTCMDAR(d, n) \
57 (((dma_is_apbh(d) && apbh_is_old(d)) ? 0x050 : 0x110) + (n) * 0x70) 57 (((dma_is_apbh(d) && apbh_is_old(d)) ? 0x050 : 0x110) + (n) * 0x70)
58 #define HW_APBHX_CHn_SEMA(d, n) \ 58 #define HW_APBHX_CHn_SEMA(d, n) \
59 (((dma_is_apbh(d) && apbh_is_old(d)) ? 0x080 : 0x140) + (n) * 0x70) 59 (((dma_is_apbh(d) && apbh_is_old(d)) ? 0x080 : 0x140) + (n) * 0x70)
60 60
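As a worked example of the layout these macros encode (illustrative arithmetic, not code from this driver): on a newer controller such as imx28, channel 3's NXTCMDAR register sits at 0x110 + 3 * 0x70 = 0x260, while an old imx23 apbh controller would use 0x050 + 3 * 0x70 = 0x1a0.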
61 /* 61 /*
62 * ccw bits definitions 62 * ccw bits definitions
63 * 63 *
64 * COMMAND: 0..1 (2) 64 * COMMAND: 0..1 (2)
65 * CHAIN: 2 (1) 65 * CHAIN: 2 (1)
66 * IRQ: 3 (1) 66 * IRQ: 3 (1)
67 * NAND_LOCK: 4 (1) - not implemented 67 * NAND_LOCK: 4 (1) - not implemented
68 * NAND_WAIT4READY: 5 (1) - not implemented 68 * NAND_WAIT4READY: 5 (1) - not implemented
69 * DEC_SEM: 6 (1) 69 * DEC_SEM: 6 (1)
70 * WAIT4END: 7 (1) 70 * WAIT4END: 7 (1)
71 * HALT_ON_TERMINATE: 8 (1) 71 * HALT_ON_TERMINATE: 8 (1)
72 * TERMINATE_FLUSH: 9 (1) 72 * TERMINATE_FLUSH: 9 (1)
73 * RESERVED: 10..11 (2) 73 * RESERVED: 10..11 (2)
74 * PIO_NUM: 12..15 (4) 74 * PIO_NUM: 12..15 (4)
75 */ 75 */
76 #define BP_CCW_COMMAND 0 76 #define BP_CCW_COMMAND 0
77 #define BM_CCW_COMMAND (3 << 0) 77 #define BM_CCW_COMMAND (3 << 0)
78 #define CCW_CHAIN (1 << 2) 78 #define CCW_CHAIN (1 << 2)
79 #define CCW_IRQ (1 << 3) 79 #define CCW_IRQ (1 << 3)
80 #define CCW_DEC_SEM (1 << 6) 80 #define CCW_DEC_SEM (1 << 6)
81 #define CCW_WAIT4END (1 << 7) 81 #define CCW_WAIT4END (1 << 7)
82 #define CCW_HALT_ON_TERM (1 << 8) 82 #define CCW_HALT_ON_TERM (1 << 8)
83 #define CCW_TERM_FLUSH (1 << 9) 83 #define CCW_TERM_FLUSH (1 << 9)
84 #define BP_CCW_PIO_NUM 12 84 #define BP_CCW_PIO_NUM 12
85 #define BM_CCW_PIO_NUM (0xf << 12) 85 #define BM_CCW_PIO_NUM (0xf << 12)
86 86
87 #define BF_CCW(value, field) (((value) << BP_CCW_##field) & BM_CCW_##field) 87 #define BF_CCW(value, field) (((value) << BP_CCW_##field) & BM_CCW_##field)
88 88
89 #define MXS_DMA_CMD_NO_XFER 0 89 #define MXS_DMA_CMD_NO_XFER 0
90 #define MXS_DMA_CMD_WRITE 1 90 #define MXS_DMA_CMD_WRITE 1
91 #define MXS_DMA_CMD_READ 2 91 #define MXS_DMA_CMD_READ 2
92 #define MXS_DMA_CMD_DMA_SENSE 3 /* not implemented */ 92 #define MXS_DMA_CMD_DMA_SENSE 3 /* not implemented */
93 93
94 struct mxs_dma_ccw { 94 struct mxs_dma_ccw {
95 u32 next; 95 u32 next;
96 u16 bits; 96 u16 bits;
97 u16 xfer_bytes; 97 u16 xfer_bytes;
98 #define MAX_XFER_BYTES 0xff00 98 #define MAX_XFER_BYTES 0xff00
99 u32 bufaddr; 99 u32 bufaddr;
100 #define MXS_PIO_WORDS 16 100 #define MXS_PIO_WORDS 16
101 u32 pio_words[MXS_PIO_WORDS]; 101 u32 pio_words[MXS_PIO_WORDS];
102 }; 102 };
103 103
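Purely as a hypothetical sketch of how the bit macros above compose a command word (not a line taken from this driver, and assuming ccw points at a struct mxs_dma_ccw), a chained peripheral write that decrements the semaphore, waits for the transfer to end and raises an IRQ could be assembled as:

	/* Illustrative only: build a ccw command word from the CCW_* and BF_CCW helpers. */
	ccw->bits = CCW_CHAIN | CCW_IRQ | CCW_DEC_SEM | CCW_WAIT4END |
		    BF_CCW(MXS_DMA_CMD_WRITE, COMMAND);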
104 #define CCW_BLOCK_SIZE (4 * PAGE_SIZE) 104 #define CCW_BLOCK_SIZE (4 * PAGE_SIZE)
105 #define NUM_CCW (int)(CCW_BLOCK_SIZE / sizeof(struct mxs_dma_ccw)) 105 #define NUM_CCW (int)(CCW_BLOCK_SIZE / sizeof(struct mxs_dma_ccw))
106 106
107 struct mxs_dma_chan { 107 struct mxs_dma_chan {
108 struct mxs_dma_engine *mxs_dma; 108 struct mxs_dma_engine *mxs_dma;
109 struct dma_chan chan; 109 struct dma_chan chan;
110 struct dma_async_tx_descriptor desc; 110 struct dma_async_tx_descriptor desc;
111 struct tasklet_struct tasklet; 111 struct tasklet_struct tasklet;
112 unsigned int chan_irq; 112 unsigned int chan_irq;
113 struct mxs_dma_ccw *ccw; 113 struct mxs_dma_ccw *ccw;
114 dma_addr_t ccw_phys; 114 dma_addr_t ccw_phys;
115 int desc_count; 115 int desc_count;
116 enum dma_status status; 116 enum dma_status status;
117 unsigned int flags; 117 unsigned int flags;
118 #define MXS_DMA_SG_LOOP (1 << 0) 118 #define MXS_DMA_SG_LOOP (1 << 0)
119 }; 119 };
120 120
121 #define MXS_DMA_CHANNELS 16 121 #define MXS_DMA_CHANNELS 16
122 #define MXS_DMA_CHANNELS_MASK 0xffff 122 #define MXS_DMA_CHANNELS_MASK 0xffff
123 123
124 enum mxs_dma_devtype { 124 enum mxs_dma_devtype {
125 MXS_DMA_APBH, 125 MXS_DMA_APBH,
126 MXS_DMA_APBX, 126 MXS_DMA_APBX,
127 }; 127 };
128 128
129 enum mxs_dma_id { 129 enum mxs_dma_id {
130 IMX23_DMA, 130 IMX23_DMA,
131 IMX28_DMA, 131 IMX28_DMA,
132 }; 132 };
133 133
134 struct mxs_dma_engine { 134 struct mxs_dma_engine {
135 enum mxs_dma_id dev_id; 135 enum mxs_dma_id dev_id;
136 enum mxs_dma_devtype type; 136 enum mxs_dma_devtype type;
137 void __iomem *base; 137 void __iomem *base;
138 struct clk *clk; 138 struct clk *clk;
139 struct dma_device dma_device; 139 struct dma_device dma_device;
140 struct device_dma_parameters dma_parms; 140 struct device_dma_parameters dma_parms;
141 struct mxs_dma_chan mxs_chans[MXS_DMA_CHANNELS]; 141 struct mxs_dma_chan mxs_chans[MXS_DMA_CHANNELS];
142 struct platform_device *pdev; 142 struct platform_device *pdev;
143 unsigned int nr_channels; 143 unsigned int nr_channels;
144 }; 144 };
145 145
146 struct mxs_dma_type { 146 struct mxs_dma_type {
147 enum mxs_dma_id id; 147 enum mxs_dma_id id;
148 enum mxs_dma_devtype type; 148 enum mxs_dma_devtype type;
149 }; 149 };
150 150
151 static struct mxs_dma_type mxs_dma_types[] = { 151 static struct mxs_dma_type mxs_dma_types[] = {
152 { 152 {
153 .id = IMX23_DMA, 153 .id = IMX23_DMA,
154 .type = MXS_DMA_APBH, 154 .type = MXS_DMA_APBH,
155 }, { 155 }, {
156 .id = IMX23_DMA, 156 .id = IMX23_DMA,
157 .type = MXS_DMA_APBX, 157 .type = MXS_DMA_APBX,
158 }, { 158 }, {
159 .id = IMX28_DMA, 159 .id = IMX28_DMA,
160 .type = MXS_DMA_APBH, 160 .type = MXS_DMA_APBH,
161 }, { 161 }, {
162 .id = IMX28_DMA, 162 .id = IMX28_DMA,
163 .type = MXS_DMA_APBX, 163 .type = MXS_DMA_APBX,
164 } 164 }
165 }; 165 };
166 166
167 static struct platform_device_id mxs_dma_ids[] = { 167 static struct platform_device_id mxs_dma_ids[] = {
168 { 168 {
169 .name = "imx23-dma-apbh", 169 .name = "imx23-dma-apbh",
170 .driver_data = (kernel_ulong_t) &mxs_dma_types[0], 170 .driver_data = (kernel_ulong_t) &mxs_dma_types[0],
171 }, { 171 }, {
172 .name = "imx23-dma-apbx", 172 .name = "imx23-dma-apbx",
173 .driver_data = (kernel_ulong_t) &mxs_dma_types[1], 173 .driver_data = (kernel_ulong_t) &mxs_dma_types[1],
174 }, { 174 }, {
175 .name = "imx28-dma-apbh", 175 .name = "imx28-dma-apbh",
176 .driver_data = (kernel_ulong_t) &mxs_dma_types[2], 176 .driver_data = (kernel_ulong_t) &mxs_dma_types[2],
177 }, { 177 }, {
178 .name = "imx28-dma-apbx", 178 .name = "imx28-dma-apbx",
179 .driver_data = (kernel_ulong_t) &mxs_dma_types[3], 179 .driver_data = (kernel_ulong_t) &mxs_dma_types[3],
180 }, { 180 }, {
181 /* end of list */ 181 /* end of list */
182 } 182 }
183 }; 183 };
184 184
185 static const struct of_device_id mxs_dma_dt_ids[] = { 185 static const struct of_device_id mxs_dma_dt_ids[] = {
186 { .compatible = "fsl,imx23-dma-apbh", .data = &mxs_dma_ids[0], }, 186 { .compatible = "fsl,imx23-dma-apbh", .data = &mxs_dma_ids[0], },
187 { .compatible = "fsl,imx23-dma-apbx", .data = &mxs_dma_ids[1], }, 187 { .compatible = "fsl,imx23-dma-apbx", .data = &mxs_dma_ids[1], },
188 { .compatible = "fsl,imx28-dma-apbh", .data = &mxs_dma_ids[2], }, 188 { .compatible = "fsl,imx28-dma-apbh", .data = &mxs_dma_ids[2], },
189 { .compatible = "fsl,imx28-dma-apbx", .data = &mxs_dma_ids[3], }, 189 { .compatible = "fsl,imx28-dma-apbx", .data = &mxs_dma_ids[3], },
190 { /* sentinel */ } 190 { /* sentinel */ }
191 }; 191 };
192 MODULE_DEVICE_TABLE(of, mxs_dma_dt_ids); 192 MODULE_DEVICE_TABLE(of, mxs_dma_dt_ids);
193 193
194 static struct mxs_dma_chan *to_mxs_dma_chan(struct dma_chan *chan) 194 static struct mxs_dma_chan *to_mxs_dma_chan(struct dma_chan *chan)
195 { 195 {
196 return container_of(chan, struct mxs_dma_chan, chan); 196 return container_of(chan, struct mxs_dma_chan, chan);
197 } 197 }
198 198
199 static void mxs_dma_reset_chan(struct mxs_dma_chan *mxs_chan) 199 static void mxs_dma_reset_chan(struct mxs_dma_chan *mxs_chan)
200 { 200 {
201 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; 201 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
202 int chan_id = mxs_chan->chan.chan_id; 202 int chan_id = mxs_chan->chan.chan_id;
203 203
204 if (dma_is_apbh(mxs_dma) && apbh_is_old(mxs_dma)) 204 if (dma_is_apbh(mxs_dma) && apbh_is_old(mxs_dma))
205 writel(1 << (chan_id + BP_APBH_CTRL0_RESET_CHANNEL), 205 writel(1 << (chan_id + BP_APBH_CTRL0_RESET_CHANNEL),
206 mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET); 206 mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET);
207 else 207 else
208 writel(1 << (chan_id + BP_APBHX_CHANNEL_CTRL_RESET_CHANNEL), 208 writel(1 << (chan_id + BP_APBHX_CHANNEL_CTRL_RESET_CHANNEL),
209 mxs_dma->base + HW_APBHX_CHANNEL_CTRL + STMP_OFFSET_REG_SET); 209 mxs_dma->base + HW_APBHX_CHANNEL_CTRL + STMP_OFFSET_REG_SET);
210 } 210 }
211 211
212 static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan) 212 static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan)
213 { 213 {
214 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; 214 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
215 int chan_id = mxs_chan->chan.chan_id; 215 int chan_id = mxs_chan->chan.chan_id;
216 216
217 /* set cmd_addr up */ 217 /* set cmd_addr up */
218 writel(mxs_chan->ccw_phys, 218 writel(mxs_chan->ccw_phys,
219 mxs_dma->base + HW_APBHX_CHn_NXTCMDAR(mxs_dma, chan_id)); 219 mxs_dma->base + HW_APBHX_CHn_NXTCMDAR(mxs_dma, chan_id));
220 220
221 /* write 1 to SEMA to kick off the channel */ 221 /* write 1 to SEMA to kick off the channel */
222 writel(1, mxs_dma->base + HW_APBHX_CHn_SEMA(mxs_dma, chan_id)); 222 writel(1, mxs_dma->base + HW_APBHX_CHn_SEMA(mxs_dma, chan_id));
223 } 223 }
224 224
225 static void mxs_dma_disable_chan(struct mxs_dma_chan *mxs_chan) 225 static void mxs_dma_disable_chan(struct mxs_dma_chan *mxs_chan)
226 { 226 {
227 mxs_chan->status = DMA_SUCCESS; 227 mxs_chan->status = DMA_COMPLETE;
228 } 228 }
229 229
230 static void mxs_dma_pause_chan(struct mxs_dma_chan *mxs_chan) 230 static void mxs_dma_pause_chan(struct mxs_dma_chan *mxs_chan)
231 { 231 {
232 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; 232 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
233 int chan_id = mxs_chan->chan.chan_id; 233 int chan_id = mxs_chan->chan.chan_id;
234 234
235 /* freeze the channel */ 235 /* freeze the channel */
236 if (dma_is_apbh(mxs_dma) && apbh_is_old(mxs_dma)) 236 if (dma_is_apbh(mxs_dma) && apbh_is_old(mxs_dma))
237 writel(1 << chan_id, 237 writel(1 << chan_id,
238 mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET); 238 mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET);
239 else 239 else
240 writel(1 << chan_id, 240 writel(1 << chan_id,
241 mxs_dma->base + HW_APBHX_CHANNEL_CTRL + STMP_OFFSET_REG_SET); 241 mxs_dma->base + HW_APBHX_CHANNEL_CTRL + STMP_OFFSET_REG_SET);
242 242
243 mxs_chan->status = DMA_PAUSED; 243 mxs_chan->status = DMA_PAUSED;
244 } 244 }
245 245
246 static void mxs_dma_resume_chan(struct mxs_dma_chan *mxs_chan) 246 static void mxs_dma_resume_chan(struct mxs_dma_chan *mxs_chan)
247 { 247 {
248 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; 248 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
249 int chan_id = mxs_chan->chan.chan_id; 249 int chan_id = mxs_chan->chan.chan_id;
250 250
251 /* unfreeze the channel */ 251 /* unfreeze the channel */
252 if (dma_is_apbh(mxs_dma) && apbh_is_old(mxs_dma)) 252 if (dma_is_apbh(mxs_dma) && apbh_is_old(mxs_dma))
253 writel(1 << chan_id, 253 writel(1 << chan_id,
254 mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_CLR); 254 mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_CLR);
255 else 255 else
256 writel(1 << chan_id, 256 writel(1 << chan_id,
257 mxs_dma->base + HW_APBHX_CHANNEL_CTRL + STMP_OFFSET_REG_CLR); 257 mxs_dma->base + HW_APBHX_CHANNEL_CTRL + STMP_OFFSET_REG_CLR);
258 258
259 mxs_chan->status = DMA_IN_PROGRESS; 259 mxs_chan->status = DMA_IN_PROGRESS;
260 } 260 }
261 261
262 static dma_cookie_t mxs_dma_tx_submit(struct dma_async_tx_descriptor *tx) 262 static dma_cookie_t mxs_dma_tx_submit(struct dma_async_tx_descriptor *tx)
263 { 263 {
264 return dma_cookie_assign(tx); 264 return dma_cookie_assign(tx);
265 } 265 }
266 266
267 static void mxs_dma_tasklet(unsigned long data) 267 static void mxs_dma_tasklet(unsigned long data)
268 { 268 {
269 struct mxs_dma_chan *mxs_chan = (struct mxs_dma_chan *) data; 269 struct mxs_dma_chan *mxs_chan = (struct mxs_dma_chan *) data;
270 270
271 if (mxs_chan->desc.callback) 271 if (mxs_chan->desc.callback)
272 mxs_chan->desc.callback(mxs_chan->desc.callback_param); 272 mxs_chan->desc.callback(mxs_chan->desc.callback_param);
273 } 273 }
274 274
275 static irqreturn_t mxs_dma_int_handler(int irq, void *dev_id) 275 static irqreturn_t mxs_dma_int_handler(int irq, void *dev_id)
276 { 276 {
277 struct mxs_dma_engine *mxs_dma = dev_id; 277 struct mxs_dma_engine *mxs_dma = dev_id;
278 u32 stat1, stat2; 278 u32 stat1, stat2;
279 279
280 /* completion status */ 280 /* completion status */
281 stat1 = readl(mxs_dma->base + HW_APBHX_CTRL1); 281 stat1 = readl(mxs_dma->base + HW_APBHX_CTRL1);
282 stat1 &= MXS_DMA_CHANNELS_MASK; 282 stat1 &= MXS_DMA_CHANNELS_MASK;
283 writel(stat1, mxs_dma->base + HW_APBHX_CTRL1 + STMP_OFFSET_REG_CLR); 283 writel(stat1, mxs_dma->base + HW_APBHX_CTRL1 + STMP_OFFSET_REG_CLR);
284 284
285 /* error status */ 285 /* error status */
286 stat2 = readl(mxs_dma->base + HW_APBHX_CTRL2); 286 stat2 = readl(mxs_dma->base + HW_APBHX_CTRL2);
287 writel(stat2, mxs_dma->base + HW_APBHX_CTRL2 + STMP_OFFSET_REG_CLR); 287 writel(stat2, mxs_dma->base + HW_APBHX_CTRL2 + STMP_OFFSET_REG_CLR);
288 288
289 /* 289 /*
290 * When both the completion and the termination error bits are set at 290 * When both the completion and the termination error bits are set at
291 * the same time, we do not treat it as an error. IOW, it only becomes 291 * the same time, we do not treat it as an error. IOW, it only becomes
292 * an error we need to handle here if it is either (1) a bus error or 292 * an error we need to handle here if it is either (1) a bus error or
293 * (2) a termination error with no completion. 293 * (2) a termination error with no completion.
294 */ 294 */
295 stat2 = ((stat2 >> MXS_DMA_CHANNELS) & stat2) | /* (1) */ 295 stat2 = ((stat2 >> MXS_DMA_CHANNELS) & stat2) | /* (1) */
296 (~(stat2 >> MXS_DMA_CHANNELS) & stat2 & ~stat1); /* (2) */ 296 (~(stat2 >> MXS_DMA_CHANNELS) & stat2 & ~stat1); /* (2) */
297 297
298 /* combine error and completion status for checking */ 298 /* combine error and completion status for checking */
299 stat1 = (stat2 << MXS_DMA_CHANNELS) | stat1; 299 stat1 = (stat2 << MXS_DMA_CHANNELS) | stat1;
300 while (stat1) { 300 while (stat1) {
301 int channel = fls(stat1) - 1; 301 int channel = fls(stat1) - 1;
302 struct mxs_dma_chan *mxs_chan = 302 struct mxs_dma_chan *mxs_chan =
303 &mxs_dma->mxs_chans[channel % MXS_DMA_CHANNELS]; 303 &mxs_dma->mxs_chans[channel % MXS_DMA_CHANNELS];
304 304
305 if (channel >= MXS_DMA_CHANNELS) { 305 if (channel >= MXS_DMA_CHANNELS) {
306 dev_dbg(mxs_dma->dma_device.dev, 306 dev_dbg(mxs_dma->dma_device.dev,
307 "%s: error in channel %d\n", __func__, 307 "%s: error in channel %d\n", __func__,
308 channel - MXS_DMA_CHANNELS); 308 channel - MXS_DMA_CHANNELS);
309 mxs_chan->status = DMA_ERROR; 309 mxs_chan->status = DMA_ERROR;
310 mxs_dma_reset_chan(mxs_chan); 310 mxs_dma_reset_chan(mxs_chan);
311 } else { 311 } else {
312 if (mxs_chan->flags & MXS_DMA_SG_LOOP) 312 if (mxs_chan->flags & MXS_DMA_SG_LOOP)
313 mxs_chan->status = DMA_IN_PROGRESS; 313 mxs_chan->status = DMA_IN_PROGRESS;
314 else 314 else
315 mxs_chan->status = DMA_SUCCESS; 315 mxs_chan->status = DMA_COMPLETE;
316 } 316 }
317 317
318 stat1 &= ~(1 << channel); 318 stat1 &= ~(1 << channel);
319 319
320 if (mxs_chan->status == DMA_SUCCESS) 320 if (mxs_chan->status == DMA_COMPLETE)
321 dma_cookie_complete(&mxs_chan->desc); 321 dma_cookie_complete(&mxs_chan->desc);
322 322
323 /* schedule tasklet on this channel */ 323 /* schedule tasklet on this channel */
324 tasklet_schedule(&mxs_chan->tasklet); 324 tasklet_schedule(&mxs_chan->tasklet);
325 } 325 }
326 326
327 return IRQ_HANDLED; 327 return IRQ_HANDLED;
328 } 328 }
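/*
 * Illustrative worked example (not part of the driver) of the status
 * folding in mxs_dma_int_handler() above, assuming MXS_DMA_CHANNELS == 16;
 * the concrete bit values are for illustration only. Suppose channel 2
 * completed normally and channel 5 took a bus error:
 *
 *	stat1 = 0x00000004                    completion IRQ for channel 2
 *	stat2 = 0x00200020                    error IRQ (bit 5) + status (bit 21)
 *
 *	(1) (stat2 >> 16) & stat2             = 0x00000020   bus error on 5
 *	(2) ~(stat2 >> 16) & stat2 & ~stat1   = 0x00200000   (shifted out below)
 *	stat2 = (1) | (2)                     = 0x00200020
 *	stat1 = (stat2 << 16) | stat1         = 0x00200004   (bit 37 is dropped)
 *
 * The loop then sees fls() == 22 first, i.e. "channel" 21 >= 16, marks
 * channel 21 - 16 = 5 (mxs_chans[21 % 16]) as DMA_ERROR and resets it;
 * the next pass clears bit 2 and completes channel 2 with DMA_COMPLETE.
 */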
329 329
330 static int mxs_dma_alloc_chan_resources(struct dma_chan *chan) 330 static int mxs_dma_alloc_chan_resources(struct dma_chan *chan)
331 { 331 {
332 struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); 332 struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
333 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; 333 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
334 int ret; 334 int ret;
335 335
336 mxs_chan->ccw = dma_alloc_coherent(mxs_dma->dma_device.dev, 336 mxs_chan->ccw = dma_alloc_coherent(mxs_dma->dma_device.dev,
337 CCW_BLOCK_SIZE, &mxs_chan->ccw_phys, 337 CCW_BLOCK_SIZE, &mxs_chan->ccw_phys,
338 GFP_KERNEL); 338 GFP_KERNEL);
339 if (!mxs_chan->ccw) { 339 if (!mxs_chan->ccw) {
340 ret = -ENOMEM; 340 ret = -ENOMEM;
341 goto err_alloc; 341 goto err_alloc;
342 } 342 }
343 343
344 memset(mxs_chan->ccw, 0, CCW_BLOCK_SIZE); 344 memset(mxs_chan->ccw, 0, CCW_BLOCK_SIZE);
345 345
346 if (mxs_chan->chan_irq != NO_IRQ) { 346 if (mxs_chan->chan_irq != NO_IRQ) {
347 ret = request_irq(mxs_chan->chan_irq, mxs_dma_int_handler, 347 ret = request_irq(mxs_chan->chan_irq, mxs_dma_int_handler,
348 0, "mxs-dma", mxs_dma); 348 0, "mxs-dma", mxs_dma);
349 if (ret) 349 if (ret)
350 goto err_irq; 350 goto err_irq;
351 } 351 }
352 352
353 ret = clk_prepare_enable(mxs_dma->clk); 353 ret = clk_prepare_enable(mxs_dma->clk);
354 if (ret) 354 if (ret)
355 goto err_clk; 355 goto err_clk;
356 356
357 mxs_dma_reset_chan(mxs_chan); 357 mxs_dma_reset_chan(mxs_chan);
358 358
359 dma_async_tx_descriptor_init(&mxs_chan->desc, chan); 359 dma_async_tx_descriptor_init(&mxs_chan->desc, chan);
360 mxs_chan->desc.tx_submit = mxs_dma_tx_submit; 360 mxs_chan->desc.tx_submit = mxs_dma_tx_submit;
361 361
362 /* the descriptor is ready */ 362 /* the descriptor is ready */
363 async_tx_ack(&mxs_chan->desc); 363 async_tx_ack(&mxs_chan->desc);
364 364
365 return 0; 365 return 0;
366 366
367 err_clk: 367 err_clk:
368 free_irq(mxs_chan->chan_irq, mxs_dma); 368 free_irq(mxs_chan->chan_irq, mxs_dma);
369 err_irq: 369 err_irq:
370 dma_free_coherent(mxs_dma->dma_device.dev, CCW_BLOCK_SIZE, 370 dma_free_coherent(mxs_dma->dma_device.dev, CCW_BLOCK_SIZE,
371 mxs_chan->ccw, mxs_chan->ccw_phys); 371 mxs_chan->ccw, mxs_chan->ccw_phys);
372 err_alloc: 372 err_alloc:
373 return ret; 373 return ret;
374 } 374 }
375 375
376 static void mxs_dma_free_chan_resources(struct dma_chan *chan) 376 static void mxs_dma_free_chan_resources(struct dma_chan *chan)
377 { 377 {
378 struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); 378 struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
379 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; 379 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
380 380
381 mxs_dma_disable_chan(mxs_chan); 381 mxs_dma_disable_chan(mxs_chan);
382 382
383 free_irq(mxs_chan->chan_irq, mxs_dma); 383 free_irq(mxs_chan->chan_irq, mxs_dma);
384 384
385 dma_free_coherent(mxs_dma->dma_device.dev, CCW_BLOCK_SIZE, 385 dma_free_coherent(mxs_dma->dma_device.dev, CCW_BLOCK_SIZE,
386 mxs_chan->ccw, mxs_chan->ccw_phys); 386 mxs_chan->ccw, mxs_chan->ccw_phys);
387 387
388 clk_disable_unprepare(mxs_dma->clk); 388 clk_disable_unprepare(mxs_dma->clk);
389 } 389 }
390 390
391 /* 391 /*
392 * How to use the flags for ->device_prep_slave_sg() : 392 * How to use the flags for ->device_prep_slave_sg() :
393 * [1] If there is only one DMA command in the DMA chain, the code should be: 393 * [1] If there is only one DMA command in the DMA chain, the code should be:
394 * ...... 394 * ......
395 * ->device_prep_slave_sg(DMA_CTRL_ACK); 395 * ->device_prep_slave_sg(DMA_CTRL_ACK);
396 * ...... 396 * ......
397 * [2] If there are two DMA commands in the DMA chain, the code should be: 397 * [2] If there are two DMA commands in the DMA chain, the code should be:
398 * ...... 398 * ......
399 * ->device_prep_slave_sg(0); 399 * ->device_prep_slave_sg(0);
400 * ...... 400 * ......
401 * ->device_prep_slave_sg(DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 401 * ->device_prep_slave_sg(DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
402 * ...... 402 * ......
403 * [3] If there are more than two DMA commands in the DMA chain, the code 403 * [3] If there are more than two DMA commands in the DMA chain, the code
404 * should be: 404 * should be:
405 * ...... 405 * ......
406 * ->device_prep_slave_sg(0); // First 406 * ->device_prep_slave_sg(0); // First
407 * ...... 407 * ......
408 * ->device_prep_slave_sg(DMA_PREP_INTERRUPT [| DMA_CTRL_ACK]); 408 * ->device_prep_slave_sg(DMA_PREP_INTERRUPT [| DMA_CTRL_ACK]);
409 * ...... 409 * ......
410 * ->device_prep_slave_sg(DMA_PREP_INTERRUPT | DMA_CTRL_ACK); // Last 410 * ->device_prep_slave_sg(DMA_PREP_INTERRUPT | DMA_CTRL_ACK); // Last
411 * ...... 411 * ......
412 */ 412 */
413 static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg( 413 static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
414 struct dma_chan *chan, struct scatterlist *sgl, 414 struct dma_chan *chan, struct scatterlist *sgl,
415 unsigned int sg_len, enum dma_transfer_direction direction, 415 unsigned int sg_len, enum dma_transfer_direction direction,
416 unsigned long flags, void *context) 416 unsigned long flags, void *context)
417 { 417 {
418 struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); 418 struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
419 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; 419 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
420 struct mxs_dma_ccw *ccw; 420 struct mxs_dma_ccw *ccw;
421 struct scatterlist *sg; 421 struct scatterlist *sg;
422 u32 i, j; 422 u32 i, j;
423 u32 *pio; 423 u32 *pio;
424 bool append = flags & DMA_PREP_INTERRUPT; 424 bool append = flags & DMA_PREP_INTERRUPT;
425 int idx = append ? mxs_chan->desc_count : 0; 425 int idx = append ? mxs_chan->desc_count : 0;
426 426
427 if (mxs_chan->status == DMA_IN_PROGRESS && !append) 427 if (mxs_chan->status == DMA_IN_PROGRESS && !append)
428 return NULL; 428 return NULL;
429 429
430 if (sg_len + (append ? idx : 0) > NUM_CCW) { 430 if (sg_len + (append ? idx : 0) > NUM_CCW) {
431 dev_err(mxs_dma->dma_device.dev, 431 dev_err(mxs_dma->dma_device.dev,
432 "maximum number of sg exceeded: %d > %d\n", 432 "maximum number of sg exceeded: %d > %d\n",
433 sg_len, NUM_CCW); 433 sg_len, NUM_CCW);
434 goto err_out; 434 goto err_out;
435 } 435 }
436 436
437 mxs_chan->status = DMA_IN_PROGRESS; 437 mxs_chan->status = DMA_IN_PROGRESS;
438 mxs_chan->flags = 0; 438 mxs_chan->flags = 0;
439 439
440 /* 440 /*
441 * If the sg list is prepared with the append flag set, it will be 441 * If the sg list is prepared with the append flag set, it will be
442 * appended to the last prepared sg list. 442 * appended to the last prepared sg list.
443 */ 443 */
444 if (append) { 444 if (append) {
445 BUG_ON(idx < 1); 445 BUG_ON(idx < 1);
446 ccw = &mxs_chan->ccw[idx - 1]; 446 ccw = &mxs_chan->ccw[idx - 1];
447 ccw->next = mxs_chan->ccw_phys + sizeof(*ccw) * idx; 447 ccw->next = mxs_chan->ccw_phys + sizeof(*ccw) * idx;
448 ccw->bits |= CCW_CHAIN; 448 ccw->bits |= CCW_CHAIN;
449 ccw->bits &= ~CCW_IRQ; 449 ccw->bits &= ~CCW_IRQ;
450 ccw->bits &= ~CCW_DEC_SEM; 450 ccw->bits &= ~CCW_DEC_SEM;
451 } else { 451 } else {
452 idx = 0; 452 idx = 0;
453 } 453 }
454 454
455 if (direction == DMA_TRANS_NONE) { 455 if (direction == DMA_TRANS_NONE) {
456 ccw = &mxs_chan->ccw[idx++]; 456 ccw = &mxs_chan->ccw[idx++];
457 pio = (u32 *) sgl; 457 pio = (u32 *) sgl;
458 458
459 for (j = 0; j < sg_len;) 459 for (j = 0; j < sg_len;)
460 ccw->pio_words[j++] = *pio++; 460 ccw->pio_words[j++] = *pio++;
461 461
462 ccw->bits = 0; 462 ccw->bits = 0;
463 ccw->bits |= CCW_IRQ; 463 ccw->bits |= CCW_IRQ;
464 ccw->bits |= CCW_DEC_SEM; 464 ccw->bits |= CCW_DEC_SEM;
465 if (flags & DMA_CTRL_ACK) 465 if (flags & DMA_CTRL_ACK)
466 ccw->bits |= CCW_WAIT4END; 466 ccw->bits |= CCW_WAIT4END;
467 ccw->bits |= CCW_HALT_ON_TERM; 467 ccw->bits |= CCW_HALT_ON_TERM;
468 ccw->bits |= CCW_TERM_FLUSH; 468 ccw->bits |= CCW_TERM_FLUSH;
469 ccw->bits |= BF_CCW(sg_len, PIO_NUM); 469 ccw->bits |= BF_CCW(sg_len, PIO_NUM);
470 ccw->bits |= BF_CCW(MXS_DMA_CMD_NO_XFER, COMMAND); 470 ccw->bits |= BF_CCW(MXS_DMA_CMD_NO_XFER, COMMAND);
471 } else { 471 } else {
472 for_each_sg(sgl, sg, sg_len, i) { 472 for_each_sg(sgl, sg, sg_len, i) {
473 if (sg_dma_len(sg) > MAX_XFER_BYTES) { 473 if (sg_dma_len(sg) > MAX_XFER_BYTES) {
474 dev_err(mxs_dma->dma_device.dev, "maximum bytes for sg entry exceeded: %d > %d\n", 474 dev_err(mxs_dma->dma_device.dev, "maximum bytes for sg entry exceeded: %d > %d\n",
475 sg_dma_len(sg), MAX_XFER_BYTES); 475 sg_dma_len(sg), MAX_XFER_BYTES);
476 goto err_out; 476 goto err_out;
477 } 477 }
478 478
479 ccw = &mxs_chan->ccw[idx++]; 479 ccw = &mxs_chan->ccw[idx++];
480 480
481 ccw->next = mxs_chan->ccw_phys + sizeof(*ccw) * idx; 481 ccw->next = mxs_chan->ccw_phys + sizeof(*ccw) * idx;
482 ccw->bufaddr = sg->dma_address; 482 ccw->bufaddr = sg->dma_address;
483 ccw->xfer_bytes = sg_dma_len(sg); 483 ccw->xfer_bytes = sg_dma_len(sg);
484 484
485 ccw->bits = 0; 485 ccw->bits = 0;
486 ccw->bits |= CCW_CHAIN; 486 ccw->bits |= CCW_CHAIN;
487 ccw->bits |= CCW_HALT_ON_TERM; 487 ccw->bits |= CCW_HALT_ON_TERM;
488 ccw->bits |= CCW_TERM_FLUSH; 488 ccw->bits |= CCW_TERM_FLUSH;
489 ccw->bits |= BF_CCW(direction == DMA_DEV_TO_MEM ? 489 ccw->bits |= BF_CCW(direction == DMA_DEV_TO_MEM ?
490 MXS_DMA_CMD_WRITE : MXS_DMA_CMD_READ, 490 MXS_DMA_CMD_WRITE : MXS_DMA_CMD_READ,
491 COMMAND); 491 COMMAND);
492 492
493 if (i + 1 == sg_len) { 493 if (i + 1 == sg_len) {
494 ccw->bits &= ~CCW_CHAIN; 494 ccw->bits &= ~CCW_CHAIN;
495 ccw->bits |= CCW_IRQ; 495 ccw->bits |= CCW_IRQ;
496 ccw->bits |= CCW_DEC_SEM; 496 ccw->bits |= CCW_DEC_SEM;
497 if (flags & DMA_CTRL_ACK) 497 if (flags & DMA_CTRL_ACK)
498 ccw->bits |= CCW_WAIT4END; 498 ccw->bits |= CCW_WAIT4END;
499 } 499 }
500 } 500 }
501 } 501 }
502 mxs_chan->desc_count = idx; 502 mxs_chan->desc_count = idx;
503 503
504 return &mxs_chan->desc; 504 return &mxs_chan->desc;
505 505
506 err_out: 506 err_out:
507 mxs_chan->status = DMA_ERROR; 507 mxs_chan->status = DMA_ERROR;
508 return NULL; 508 return NULL;
509 } 509 }
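/*
 * Illustrative sketch (not part of this driver): how a hypothetical client
 * might build the two-command chain described as case [2] in the comment
 * above mxs_dma_prep_slave_sg(), using the generic dmaengine wrappers.
 * The channel and the (already dma-mapped) scatterlists are placeholders.
 */
static int example_mxs_two_command_chain(struct dma_chan *chan,
					 struct scatterlist *sg_cmd,
					 struct scatterlist *sg_data)
{
	struct dma_async_tx_descriptor *desc;

	/* first command: no flags, a further command will be appended */
	desc = dmaengine_prep_slave_sg(chan, sg_cmd, 1, DMA_DEV_TO_MEM, 0);
	if (!desc)
		return -EINVAL;

	/*
	 * last command: DMA_PREP_INTERRUPT appends it to the chain and
	 * DMA_CTRL_ACK additionally sets CCW_WAIT4END on the final CCW
	 */
	desc = dmaengine_prep_slave_sg(chan, sg_data, 1, DMA_DEV_TO_MEM,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -EINVAL;

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);

	return 0;
}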
510 510
511 static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic( 511 static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic(
512 struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len, 512 struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
513 size_t period_len, enum dma_transfer_direction direction, 513 size_t period_len, enum dma_transfer_direction direction,
514 unsigned long flags, void *context) 514 unsigned long flags, void *context)
515 { 515 {
516 struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); 516 struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
517 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; 517 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
518 u32 num_periods = buf_len / period_len; 518 u32 num_periods = buf_len / period_len;
519 u32 i = 0, buf = 0; 519 u32 i = 0, buf = 0;
520 520
521 if (mxs_chan->status == DMA_IN_PROGRESS) 521 if (mxs_chan->status == DMA_IN_PROGRESS)
522 return NULL; 522 return NULL;
523 523
524 mxs_chan->status = DMA_IN_PROGRESS; 524 mxs_chan->status = DMA_IN_PROGRESS;
525 mxs_chan->flags |= MXS_DMA_SG_LOOP; 525 mxs_chan->flags |= MXS_DMA_SG_LOOP;
526 526
527 if (num_periods > NUM_CCW) { 527 if (num_periods > NUM_CCW) {
528 dev_err(mxs_dma->dma_device.dev, 528 dev_err(mxs_dma->dma_device.dev,
529 "maximum number of sg exceeded: %d > %d\n", 529 "maximum number of sg exceeded: %d > %d\n",
530 num_periods, NUM_CCW); 530 num_periods, NUM_CCW);
531 goto err_out; 531 goto err_out;
532 } 532 }
533 533
534 if (period_len > MAX_XFER_BYTES) { 534 if (period_len > MAX_XFER_BYTES) {
535 dev_err(mxs_dma->dma_device.dev, 535 dev_err(mxs_dma->dma_device.dev,
536 "maximum period size exceeded: %d > %d\n", 536 "maximum period size exceeded: %d > %d\n",
537 period_len, MAX_XFER_BYTES); 537 period_len, MAX_XFER_BYTES);
538 goto err_out; 538 goto err_out;
539 } 539 }
540 540
541 while (buf < buf_len) { 541 while (buf < buf_len) {
542 struct mxs_dma_ccw *ccw = &mxs_chan->ccw[i]; 542 struct mxs_dma_ccw *ccw = &mxs_chan->ccw[i];
543 543
544 if (i + 1 == num_periods) 544 if (i + 1 == num_periods)
545 ccw->next = mxs_chan->ccw_phys; 545 ccw->next = mxs_chan->ccw_phys;
546 else 546 else
547 ccw->next = mxs_chan->ccw_phys + sizeof(*ccw) * (i + 1); 547 ccw->next = mxs_chan->ccw_phys + sizeof(*ccw) * (i + 1);
548 548
549 ccw->bufaddr = dma_addr; 549 ccw->bufaddr = dma_addr;
550 ccw->xfer_bytes = period_len; 550 ccw->xfer_bytes = period_len;
551 551
552 ccw->bits = 0; 552 ccw->bits = 0;
553 ccw->bits |= CCW_CHAIN; 553 ccw->bits |= CCW_CHAIN;
554 ccw->bits |= CCW_IRQ; 554 ccw->bits |= CCW_IRQ;
555 ccw->bits |= CCW_HALT_ON_TERM; 555 ccw->bits |= CCW_HALT_ON_TERM;
556 ccw->bits |= CCW_TERM_FLUSH; 556 ccw->bits |= CCW_TERM_FLUSH;
557 ccw->bits |= BF_CCW(direction == DMA_DEV_TO_MEM ? 557 ccw->bits |= BF_CCW(direction == DMA_DEV_TO_MEM ?
558 MXS_DMA_CMD_WRITE : MXS_DMA_CMD_READ, COMMAND); 558 MXS_DMA_CMD_WRITE : MXS_DMA_CMD_READ, COMMAND);
559 559
560 dma_addr += period_len; 560 dma_addr += period_len;
561 buf += period_len; 561 buf += period_len;
562 562
563 i++; 563 i++;
564 } 564 }
565 mxs_chan->desc_count = i; 565 mxs_chan->desc_count = i;
566 566
567 return &mxs_chan->desc; 567 return &mxs_chan->desc;
568 568
569 err_out: 569 err_out:
570 mxs_chan->status = DMA_ERROR; 570 mxs_chan->status = DMA_ERROR;
571 return NULL; 571 return NULL;
572 } 572 }
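/*
 * Illustrative worked example (not part of the driver) for the cyclic case
 * above: with an assumed 64 KiB audio ring (buf_len) split into 4 KiB
 * periods (period_len), num_periods = 16 CCWs are built. CCW i points to
 * CCW i + 1, the last CCW points back to ccw_phys, and every CCW carries
 * CCW_IRQ, so the controller loops over the ring and raises an interrupt
 * after each 4 KiB period until the channel is terminated.
 */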
573 573
574 static int mxs_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, 574 static int mxs_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
575 unsigned long arg) 575 unsigned long arg)
576 { 576 {
577 struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); 577 struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
578 int ret = 0; 578 int ret = 0;
579 579
580 switch (cmd) { 580 switch (cmd) {
581 case DMA_TERMINATE_ALL: 581 case DMA_TERMINATE_ALL:
582 mxs_dma_reset_chan(mxs_chan); 582 mxs_dma_reset_chan(mxs_chan);
583 mxs_dma_disable_chan(mxs_chan); 583 mxs_dma_disable_chan(mxs_chan);
584 break; 584 break;
585 case DMA_PAUSE: 585 case DMA_PAUSE:
586 mxs_dma_pause_chan(mxs_chan); 586 mxs_dma_pause_chan(mxs_chan);
587 break; 587 break;
588 case DMA_RESUME: 588 case DMA_RESUME:
589 mxs_dma_resume_chan(mxs_chan); 589 mxs_dma_resume_chan(mxs_chan);
590 break; 590 break;
591 default: 591 default:
592 ret = -ENOSYS; 592 ret = -ENOSYS;
593 } 593 }
594 594
595 return ret; 595 return ret;
596 } 596 }
597 597
598 static enum dma_status mxs_dma_tx_status(struct dma_chan *chan, 598 static enum dma_status mxs_dma_tx_status(struct dma_chan *chan,
599 dma_cookie_t cookie, struct dma_tx_state *txstate) 599 dma_cookie_t cookie, struct dma_tx_state *txstate)
600 { 600 {
601 struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); 601 struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
602 602
603 dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie, 0); 603 dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie, 0);
604 604
605 return mxs_chan->status; 605 return mxs_chan->status;
606 } 606 }
607 607
608 static void mxs_dma_issue_pending(struct dma_chan *chan) 608 static void mxs_dma_issue_pending(struct dma_chan *chan)
609 { 609 {
610 struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); 610 struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
611 611
612 mxs_dma_enable_chan(mxs_chan); 612 mxs_dma_enable_chan(mxs_chan);
613 } 613 }
614 614
615 static int __init mxs_dma_init(struct mxs_dma_engine *mxs_dma) 615 static int __init mxs_dma_init(struct mxs_dma_engine *mxs_dma)
616 { 616 {
617 int ret; 617 int ret;
618 618
619 ret = clk_prepare_enable(mxs_dma->clk); 619 ret = clk_prepare_enable(mxs_dma->clk);
620 if (ret) 620 if (ret)
621 return ret; 621 return ret;
622 622
623 ret = stmp_reset_block(mxs_dma->base); 623 ret = stmp_reset_block(mxs_dma->base);
624 if (ret) 624 if (ret)
625 goto err_out; 625 goto err_out;
626 626
627 /* enable apbh burst */ 627 /* enable apbh burst */
628 if (dma_is_apbh(mxs_dma)) { 628 if (dma_is_apbh(mxs_dma)) {
629 writel(BM_APBH_CTRL0_APB_BURST_EN, 629 writel(BM_APBH_CTRL0_APB_BURST_EN,
630 mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET); 630 mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET);
631 writel(BM_APBH_CTRL0_APB_BURST8_EN, 631 writel(BM_APBH_CTRL0_APB_BURST8_EN,
632 mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET); 632 mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET);
633 } 633 }
634 634
635 /* enable irq for all the channels */ 635 /* enable irq for all the channels */
636 writel(MXS_DMA_CHANNELS_MASK << MXS_DMA_CHANNELS, 636 writel(MXS_DMA_CHANNELS_MASK << MXS_DMA_CHANNELS,
637 mxs_dma->base + HW_APBHX_CTRL1 + STMP_OFFSET_REG_SET); 637 mxs_dma->base + HW_APBHX_CTRL1 + STMP_OFFSET_REG_SET);
638 638
639 err_out: 639 err_out:
640 clk_disable_unprepare(mxs_dma->clk); 640 clk_disable_unprepare(mxs_dma->clk);
641 return ret; 641 return ret;
642 } 642 }
643 643
644 struct mxs_dma_filter_param { 644 struct mxs_dma_filter_param {
645 struct device_node *of_node; 645 struct device_node *of_node;
646 unsigned int chan_id; 646 unsigned int chan_id;
647 }; 647 };
648 648
649 static bool mxs_dma_filter_fn(struct dma_chan *chan, void *fn_param) 649 static bool mxs_dma_filter_fn(struct dma_chan *chan, void *fn_param)
650 { 650 {
651 struct mxs_dma_filter_param *param = fn_param; 651 struct mxs_dma_filter_param *param = fn_param;
652 struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); 652 struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
653 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; 653 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
654 int chan_irq; 654 int chan_irq;
655 655
656 if (mxs_dma->dma_device.dev->of_node != param->of_node) 656 if (mxs_dma->dma_device.dev->of_node != param->of_node)
657 return false; 657 return false;
658 658
659 if (chan->chan_id != param->chan_id) 659 if (chan->chan_id != param->chan_id)
660 return false; 660 return false;
661 661
662 chan_irq = platform_get_irq(mxs_dma->pdev, param->chan_id); 662 chan_irq = platform_get_irq(mxs_dma->pdev, param->chan_id);
663 if (chan_irq < 0) 663 if (chan_irq < 0)
664 return false; 664 return false;
665 665
666 mxs_chan->chan_irq = chan_irq; 666 mxs_chan->chan_irq = chan_irq;
667 667
668 return true; 668 return true;
669 } 669 }
670 670
671 static struct dma_chan *mxs_dma_xlate(struct of_phandle_args *dma_spec, 671 static struct dma_chan *mxs_dma_xlate(struct of_phandle_args *dma_spec,
672 struct of_dma *ofdma) 672 struct of_dma *ofdma)
673 { 673 {
674 struct mxs_dma_engine *mxs_dma = ofdma->of_dma_data; 674 struct mxs_dma_engine *mxs_dma = ofdma->of_dma_data;
675 dma_cap_mask_t mask = mxs_dma->dma_device.cap_mask; 675 dma_cap_mask_t mask = mxs_dma->dma_device.cap_mask;
676 struct mxs_dma_filter_param param; 676 struct mxs_dma_filter_param param;
677 677
678 if (dma_spec->args_count != 1) 678 if (dma_spec->args_count != 1)
679 return NULL; 679 return NULL;
680 680
681 param.of_node = ofdma->of_node; 681 param.of_node = ofdma->of_node;
682 param.chan_id = dma_spec->args[0]; 682 param.chan_id = dma_spec->args[0];
683 683
684 if (param.chan_id >= mxs_dma->nr_channels) 684 if (param.chan_id >= mxs_dma->nr_channels)
685 return NULL; 685 return NULL;
686 686
687 return dma_request_channel(mask, mxs_dma_filter_fn, &param); 687 return dma_request_channel(mask, mxs_dma_filter_fn, &param);
688 } 688 }
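/*
 * Illustrative sketch (not part of the driver): with the one-cell DT
 * binding handled by mxs_dma_xlate() above, a client node might reference
 * a channel roughly like this (node names and the channel number are
 * placeholders, not taken from a real board file):
 *
 *	ssp0: ssp@80010000 {
 *		...
 *		dmas = <&dma_apbh 1>;
 *		dma-names = "rx-tx";
 *	};
 *
 * dma_spec->args[0] (here 1) becomes param.chan_id, which mxs_dma_filter_fn()
 * matches against chan->chan_id on the controller identified by
 * param.of_node.
 */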
689 689
690 static int __init mxs_dma_probe(struct platform_device *pdev) 690 static int __init mxs_dma_probe(struct platform_device *pdev)
691 { 691 {
692 struct device_node *np = pdev->dev.of_node; 692 struct device_node *np = pdev->dev.of_node;
693 const struct platform_device_id *id_entry; 693 const struct platform_device_id *id_entry;
694 const struct of_device_id *of_id; 694 const struct of_device_id *of_id;
695 const struct mxs_dma_type *dma_type; 695 const struct mxs_dma_type *dma_type;
696 struct mxs_dma_engine *mxs_dma; 696 struct mxs_dma_engine *mxs_dma;
697 struct resource *iores; 697 struct resource *iores;
698 int ret, i; 698 int ret, i;
699 699
700 mxs_dma = devm_kzalloc(&pdev->dev, sizeof(*mxs_dma), GFP_KERNEL); 700 mxs_dma = devm_kzalloc(&pdev->dev, sizeof(*mxs_dma), GFP_KERNEL);
701 if (!mxs_dma) 701 if (!mxs_dma)
702 return -ENOMEM; 702 return -ENOMEM;
703 703
704 ret = of_property_read_u32(np, "dma-channels", &mxs_dma->nr_channels); 704 ret = of_property_read_u32(np, "dma-channels", &mxs_dma->nr_channels);
705 if (ret) { 705 if (ret) {
706 dev_err(&pdev->dev, "failed to read dma-channels\n"); 706 dev_err(&pdev->dev, "failed to read dma-channels\n");
707 return ret; 707 return ret;
708 } 708 }
709 709
710 of_id = of_match_device(mxs_dma_dt_ids, &pdev->dev); 710 of_id = of_match_device(mxs_dma_dt_ids, &pdev->dev);
711 if (of_id) 711 if (of_id)
712 id_entry = of_id->data; 712 id_entry = of_id->data;
713 else 713 else
714 id_entry = platform_get_device_id(pdev); 714 id_entry = platform_get_device_id(pdev);
715 715
716 dma_type = (struct mxs_dma_type *)id_entry->driver_data; 716 dma_type = (struct mxs_dma_type *)id_entry->driver_data;
717 mxs_dma->type = dma_type->type; 717 mxs_dma->type = dma_type->type;
718 mxs_dma->dev_id = dma_type->id; 718 mxs_dma->dev_id = dma_type->id;
719 719
720 iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); 720 iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
721 mxs_dma->base = devm_ioremap_resource(&pdev->dev, iores); 721 mxs_dma->base = devm_ioremap_resource(&pdev->dev, iores);
722 if (IS_ERR(mxs_dma->base)) 722 if (IS_ERR(mxs_dma->base))
723 return PTR_ERR(mxs_dma->base); 723 return PTR_ERR(mxs_dma->base);
724 724
725 mxs_dma->clk = devm_clk_get(&pdev->dev, NULL); 725 mxs_dma->clk = devm_clk_get(&pdev->dev, NULL);
726 if (IS_ERR(mxs_dma->clk)) 726 if (IS_ERR(mxs_dma->clk))
727 return PTR_ERR(mxs_dma->clk); 727 return PTR_ERR(mxs_dma->clk);
728 728
729 dma_cap_set(DMA_SLAVE, mxs_dma->dma_device.cap_mask); 729 dma_cap_set(DMA_SLAVE, mxs_dma->dma_device.cap_mask);
730 dma_cap_set(DMA_CYCLIC, mxs_dma->dma_device.cap_mask); 730 dma_cap_set(DMA_CYCLIC, mxs_dma->dma_device.cap_mask);
731 731
732 INIT_LIST_HEAD(&mxs_dma->dma_device.channels); 732 INIT_LIST_HEAD(&mxs_dma->dma_device.channels);
733 733
734 /* Initialize channel parameters */ 734 /* Initialize channel parameters */
735 for (i = 0; i < MXS_DMA_CHANNELS; i++) { 735 for (i = 0; i < MXS_DMA_CHANNELS; i++) {
736 struct mxs_dma_chan *mxs_chan = &mxs_dma->mxs_chans[i]; 736 struct mxs_dma_chan *mxs_chan = &mxs_dma->mxs_chans[i];
737 737
738 mxs_chan->mxs_dma = mxs_dma; 738 mxs_chan->mxs_dma = mxs_dma;
739 mxs_chan->chan.device = &mxs_dma->dma_device; 739 mxs_chan->chan.device = &mxs_dma->dma_device;
740 dma_cookie_init(&mxs_chan->chan); 740 dma_cookie_init(&mxs_chan->chan);
741 741
742 tasklet_init(&mxs_chan->tasklet, mxs_dma_tasklet, 742 tasklet_init(&mxs_chan->tasklet, mxs_dma_tasklet,
743 (unsigned long) mxs_chan); 743 (unsigned long) mxs_chan);
744 744
745 745
746 /* Add the channel to mxs_chan list */ 746 /* Add the channel to mxs_chan list */
747 list_add_tail(&mxs_chan->chan.device_node, 747 list_add_tail(&mxs_chan->chan.device_node,
748 &mxs_dma->dma_device.channels); 748 &mxs_dma->dma_device.channels);
749 } 749 }
750 750
751 ret = mxs_dma_init(mxs_dma); 751 ret = mxs_dma_init(mxs_dma);
752 if (ret) 752 if (ret)
753 return ret; 753 return ret;
754 754
755 mxs_dma->pdev = pdev; 755 mxs_dma->pdev = pdev;
756 mxs_dma->dma_device.dev = &pdev->dev; 756 mxs_dma->dma_device.dev = &pdev->dev;
757 757
758 /* mxs_dma gets 65535 bytes maximum sg size */ 758 /* mxs_dma gets 65535 bytes maximum sg size */
759 mxs_dma->dma_device.dev->dma_parms = &mxs_dma->dma_parms; 759 mxs_dma->dma_device.dev->dma_parms = &mxs_dma->dma_parms;
760 dma_set_max_seg_size(mxs_dma->dma_device.dev, MAX_XFER_BYTES); 760 dma_set_max_seg_size(mxs_dma->dma_device.dev, MAX_XFER_BYTES);
761 761
762 mxs_dma->dma_device.device_alloc_chan_resources = mxs_dma_alloc_chan_resources; 762 mxs_dma->dma_device.device_alloc_chan_resources = mxs_dma_alloc_chan_resources;
763 mxs_dma->dma_device.device_free_chan_resources = mxs_dma_free_chan_resources; 763 mxs_dma->dma_device.device_free_chan_resources = mxs_dma_free_chan_resources;
764 mxs_dma->dma_device.device_tx_status = mxs_dma_tx_status; 764 mxs_dma->dma_device.device_tx_status = mxs_dma_tx_status;
765 mxs_dma->dma_device.device_prep_slave_sg = mxs_dma_prep_slave_sg; 765 mxs_dma->dma_device.device_prep_slave_sg = mxs_dma_prep_slave_sg;
766 mxs_dma->dma_device.device_prep_dma_cyclic = mxs_dma_prep_dma_cyclic; 766 mxs_dma->dma_device.device_prep_dma_cyclic = mxs_dma_prep_dma_cyclic;
767 mxs_dma->dma_device.device_control = mxs_dma_control; 767 mxs_dma->dma_device.device_control = mxs_dma_control;
768 mxs_dma->dma_device.device_issue_pending = mxs_dma_issue_pending; 768 mxs_dma->dma_device.device_issue_pending = mxs_dma_issue_pending;
769 769
770 ret = dma_async_device_register(&mxs_dma->dma_device); 770 ret = dma_async_device_register(&mxs_dma->dma_device);
771 if (ret) { 771 if (ret) {
772 dev_err(mxs_dma->dma_device.dev, "unable to register\n"); 772 dev_err(mxs_dma->dma_device.dev, "unable to register\n");
773 return ret; 773 return ret;
774 } 774 }
775 775
776 ret = of_dma_controller_register(np, mxs_dma_xlate, mxs_dma); 776 ret = of_dma_controller_register(np, mxs_dma_xlate, mxs_dma);
777 if (ret) { 777 if (ret) {
778 dev_err(mxs_dma->dma_device.dev, 778 dev_err(mxs_dma->dma_device.dev,
779 "failed to register controller\n"); 779 "failed to register controller\n");
780 dma_async_device_unregister(&mxs_dma->dma_device); 780 dma_async_device_unregister(&mxs_dma->dma_device);
781 } 781 }
782 782
783 dev_info(mxs_dma->dma_device.dev, "initialized\n"); 783 dev_info(mxs_dma->dma_device.dev, "initialized\n");
784 784
785 return 0; 785 return 0;
786 } 786 }
787 787
788 static struct platform_driver mxs_dma_driver = { 788 static struct platform_driver mxs_dma_driver = {
789 .driver = { 789 .driver = {
790 .name = "mxs-dma", 790 .name = "mxs-dma",
791 .of_match_table = mxs_dma_dt_ids, 791 .of_match_table = mxs_dma_dt_ids,
792 }, 792 },
793 .id_table = mxs_dma_ids, 793 .id_table = mxs_dma_ids,
794 }; 794 };
795 795
796 static int __init mxs_dma_module_init(void) 796 static int __init mxs_dma_module_init(void)
797 { 797 {
798 return platform_driver_probe(&mxs_dma_driver, mxs_dma_probe); 798 return platform_driver_probe(&mxs_dma_driver, mxs_dma_probe);
799 } 799 }
800 subsys_initcall(mxs_dma_module_init); 800 subsys_initcall(mxs_dma_module_init);
801 801
drivers/dma/omap-dma.c
1 /* 1 /*
2 * OMAP DMAengine support 2 * OMAP DMAengine support
3 * 3 *
4 * This program is free software; you can redistribute it and/or modify 4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as 5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation. 6 * published by the Free Software Foundation.
7 */ 7 */
8 #include <linux/dmaengine.h> 8 #include <linux/dmaengine.h>
9 #include <linux/dma-mapping.h> 9 #include <linux/dma-mapping.h>
10 #include <linux/err.h> 10 #include <linux/err.h>
11 #include <linux/init.h> 11 #include <linux/init.h>
12 #include <linux/interrupt.h> 12 #include <linux/interrupt.h>
13 #include <linux/list.h> 13 #include <linux/list.h>
14 #include <linux/module.h> 14 #include <linux/module.h>
15 #include <linux/omap-dma.h> 15 #include <linux/omap-dma.h>
16 #include <linux/platform_device.h> 16 #include <linux/platform_device.h>
17 #include <linux/slab.h> 17 #include <linux/slab.h>
18 #include <linux/spinlock.h> 18 #include <linux/spinlock.h>
19 #include <linux/of_dma.h> 19 #include <linux/of_dma.h>
20 #include <linux/of_device.h> 20 #include <linux/of_device.h>
21 21
22 #include "virt-dma.h" 22 #include "virt-dma.h"
23 23
24 struct omap_dmadev { 24 struct omap_dmadev {
25 struct dma_device ddev; 25 struct dma_device ddev;
26 spinlock_t lock; 26 spinlock_t lock;
27 struct tasklet_struct task; 27 struct tasklet_struct task;
28 struct list_head pending; 28 struct list_head pending;
29 }; 29 };
30 30
31 struct omap_chan { 31 struct omap_chan {
32 struct virt_dma_chan vc; 32 struct virt_dma_chan vc;
33 struct list_head node; 33 struct list_head node;
34 34
35 struct dma_slave_config cfg; 35 struct dma_slave_config cfg;
36 unsigned dma_sig; 36 unsigned dma_sig;
37 bool cyclic; 37 bool cyclic;
38 bool paused; 38 bool paused;
39 39
40 int dma_ch; 40 int dma_ch;
41 struct omap_desc *desc; 41 struct omap_desc *desc;
42 unsigned sgidx; 42 unsigned sgidx;
43 }; 43 };
44 44
45 struct omap_sg { 45 struct omap_sg {
46 dma_addr_t addr; 46 dma_addr_t addr;
47 uint32_t en; /* number of elements (24-bit) */ 47 uint32_t en; /* number of elements (24-bit) */
48 uint32_t fn; /* number of frames (16-bit) */ 48 uint32_t fn; /* number of frames (16-bit) */
49 }; 49 };
50 50
51 struct omap_desc { 51 struct omap_desc {
52 struct virt_dma_desc vd; 52 struct virt_dma_desc vd;
53 enum dma_transfer_direction dir; 53 enum dma_transfer_direction dir;
54 dma_addr_t dev_addr; 54 dma_addr_t dev_addr;
55 55
56 int16_t fi; /* for OMAP_DMA_SYNC_PACKET */ 56 int16_t fi; /* for OMAP_DMA_SYNC_PACKET */
57 uint8_t es; /* OMAP_DMA_DATA_TYPE_xxx */ 57 uint8_t es; /* OMAP_DMA_DATA_TYPE_xxx */
58 uint8_t sync_mode; /* OMAP_DMA_SYNC_xxx */ 58 uint8_t sync_mode; /* OMAP_DMA_SYNC_xxx */
59 uint8_t sync_type; /* OMAP_DMA_xxx_SYNC* */ 59 uint8_t sync_type; /* OMAP_DMA_xxx_SYNC* */
60 uint8_t periph_port; /* Peripheral port */ 60 uint8_t periph_port; /* Peripheral port */
61 61
62 unsigned sglen; 62 unsigned sglen;
63 struct omap_sg sg[0]; 63 struct omap_sg sg[0];
64 }; 64 };
65 65
66 static const unsigned es_bytes[] = { 66 static const unsigned es_bytes[] = {
67 [OMAP_DMA_DATA_TYPE_S8] = 1, 67 [OMAP_DMA_DATA_TYPE_S8] = 1,
68 [OMAP_DMA_DATA_TYPE_S16] = 2, 68 [OMAP_DMA_DATA_TYPE_S16] = 2,
69 [OMAP_DMA_DATA_TYPE_S32] = 4, 69 [OMAP_DMA_DATA_TYPE_S32] = 4,
70 }; 70 };
71 71
72 static struct of_dma_filter_info omap_dma_info = { 72 static struct of_dma_filter_info omap_dma_info = {
73 .filter_fn = omap_dma_filter_fn, 73 .filter_fn = omap_dma_filter_fn,
74 }; 74 };
75 75
76 static inline struct omap_dmadev *to_omap_dma_dev(struct dma_device *d) 76 static inline struct omap_dmadev *to_omap_dma_dev(struct dma_device *d)
77 { 77 {
78 return container_of(d, struct omap_dmadev, ddev); 78 return container_of(d, struct omap_dmadev, ddev);
79 } 79 }
80 80
81 static inline struct omap_chan *to_omap_dma_chan(struct dma_chan *c) 81 static inline struct omap_chan *to_omap_dma_chan(struct dma_chan *c)
82 { 82 {
83 return container_of(c, struct omap_chan, vc.chan); 83 return container_of(c, struct omap_chan, vc.chan);
84 } 84 }
85 85
86 static inline struct omap_desc *to_omap_dma_desc(struct dma_async_tx_descriptor *t) 86 static inline struct omap_desc *to_omap_dma_desc(struct dma_async_tx_descriptor *t)
87 { 87 {
88 return container_of(t, struct omap_desc, vd.tx); 88 return container_of(t, struct omap_desc, vd.tx);
89 } 89 }
90 90
91 static void omap_dma_desc_free(struct virt_dma_desc *vd) 91 static void omap_dma_desc_free(struct virt_dma_desc *vd)
92 { 92 {
93 kfree(container_of(vd, struct omap_desc, vd)); 93 kfree(container_of(vd, struct omap_desc, vd));
94 } 94 }
95 95
96 static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d, 96 static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d,
97 unsigned idx) 97 unsigned idx)
98 { 98 {
99 struct omap_sg *sg = d->sg + idx; 99 struct omap_sg *sg = d->sg + idx;
100 100
101 if (d->dir == DMA_DEV_TO_MEM) 101 if (d->dir == DMA_DEV_TO_MEM)
102 omap_set_dma_dest_params(c->dma_ch, OMAP_DMA_PORT_EMIFF, 102 omap_set_dma_dest_params(c->dma_ch, OMAP_DMA_PORT_EMIFF,
103 OMAP_DMA_AMODE_POST_INC, sg->addr, 0, 0); 103 OMAP_DMA_AMODE_POST_INC, sg->addr, 0, 0);
104 else 104 else
105 omap_set_dma_src_params(c->dma_ch, OMAP_DMA_PORT_EMIFF, 105 omap_set_dma_src_params(c->dma_ch, OMAP_DMA_PORT_EMIFF,
106 OMAP_DMA_AMODE_POST_INC, sg->addr, 0, 0); 106 OMAP_DMA_AMODE_POST_INC, sg->addr, 0, 0);
107 107
108 omap_set_dma_transfer_params(c->dma_ch, d->es, sg->en, sg->fn, 108 omap_set_dma_transfer_params(c->dma_ch, d->es, sg->en, sg->fn,
109 d->sync_mode, c->dma_sig, d->sync_type); 109 d->sync_mode, c->dma_sig, d->sync_type);
110 110
111 omap_start_dma(c->dma_ch); 111 omap_start_dma(c->dma_ch);
112 } 112 }
113 113
114 static void omap_dma_start_desc(struct omap_chan *c) 114 static void omap_dma_start_desc(struct omap_chan *c)
115 { 115 {
116 struct virt_dma_desc *vd = vchan_next_desc(&c->vc); 116 struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
117 struct omap_desc *d; 117 struct omap_desc *d;
118 118
119 if (!vd) { 119 if (!vd) {
120 c->desc = NULL; 120 c->desc = NULL;
121 return; 121 return;
122 } 122 }
123 123
124 list_del(&vd->node); 124 list_del(&vd->node);
125 125
126 c->desc = d = to_omap_dma_desc(&vd->tx); 126 c->desc = d = to_omap_dma_desc(&vd->tx);
127 c->sgidx = 0; 127 c->sgidx = 0;
128 128
129 if (d->dir == DMA_DEV_TO_MEM) 129 if (d->dir == DMA_DEV_TO_MEM)
130 omap_set_dma_src_params(c->dma_ch, d->periph_port, 130 omap_set_dma_src_params(c->dma_ch, d->periph_port,
131 OMAP_DMA_AMODE_CONSTANT, d->dev_addr, 0, d->fi); 131 OMAP_DMA_AMODE_CONSTANT, d->dev_addr, 0, d->fi);
132 else 132 else
133 omap_set_dma_dest_params(c->dma_ch, d->periph_port, 133 omap_set_dma_dest_params(c->dma_ch, d->periph_port,
134 OMAP_DMA_AMODE_CONSTANT, d->dev_addr, 0, d->fi); 134 OMAP_DMA_AMODE_CONSTANT, d->dev_addr, 0, d->fi);
135 135
136 omap_dma_start_sg(c, d, 0); 136 omap_dma_start_sg(c, d, 0);
137 } 137 }
138 138
139 static void omap_dma_callback(int ch, u16 status, void *data) 139 static void omap_dma_callback(int ch, u16 status, void *data)
140 { 140 {
141 struct omap_chan *c = data; 141 struct omap_chan *c = data;
142 struct omap_desc *d; 142 struct omap_desc *d;
143 unsigned long flags; 143 unsigned long flags;
144 144
145 spin_lock_irqsave(&c->vc.lock, flags); 145 spin_lock_irqsave(&c->vc.lock, flags);
146 d = c->desc; 146 d = c->desc;
147 if (d) { 147 if (d) {
148 if (!c->cyclic) { 148 if (!c->cyclic) {
149 if (++c->sgidx < d->sglen) { 149 if (++c->sgidx < d->sglen) {
150 omap_dma_start_sg(c, d, c->sgidx); 150 omap_dma_start_sg(c, d, c->sgidx);
151 } else { 151 } else {
152 omap_dma_start_desc(c); 152 omap_dma_start_desc(c);
153 vchan_cookie_complete(&d->vd); 153 vchan_cookie_complete(&d->vd);
154 } 154 }
155 } else { 155 } else {
156 vchan_cyclic_callback(&d->vd); 156 vchan_cyclic_callback(&d->vd);
157 } 157 }
158 } 158 }
159 spin_unlock_irqrestore(&c->vc.lock, flags); 159 spin_unlock_irqrestore(&c->vc.lock, flags);
160 } 160 }
161 161
162 /* 162 /*
163 * This callback schedules all pending channels. We could be more 163 * This callback schedules all pending channels. We could be more
164 * clever here by postponing allocation of the real DMA channels to 164 * clever here by postponing allocation of the real DMA channels to
165 * this point, and freeing them when our virtual channel becomes idle. 165 * this point, and freeing them when our virtual channel becomes idle.
166 * 166 *
167 * We would then need to deal with the 'all channels in use' case. 167 * We would then need to deal with the 'all channels in use' case.
168 */ 168 */
169 static void omap_dma_sched(unsigned long data) 169 static void omap_dma_sched(unsigned long data)
170 { 170 {
171 struct omap_dmadev *d = (struct omap_dmadev *)data; 171 struct omap_dmadev *d = (struct omap_dmadev *)data;
172 LIST_HEAD(head); 172 LIST_HEAD(head);
173 173
174 spin_lock_irq(&d->lock); 174 spin_lock_irq(&d->lock);
175 list_splice_tail_init(&d->pending, &head); 175 list_splice_tail_init(&d->pending, &head);
176 spin_unlock_irq(&d->lock); 176 spin_unlock_irq(&d->lock);
177 177
178 while (!list_empty(&head)) { 178 while (!list_empty(&head)) {
179 struct omap_chan *c = list_first_entry(&head, 179 struct omap_chan *c = list_first_entry(&head,
180 struct omap_chan, node); 180 struct omap_chan, node);
181 181
182 spin_lock_irq(&c->vc.lock); 182 spin_lock_irq(&c->vc.lock);
183 list_del_init(&c->node); 183 list_del_init(&c->node);
184 omap_dma_start_desc(c); 184 omap_dma_start_desc(c);
185 spin_unlock_irq(&c->vc.lock); 185 spin_unlock_irq(&c->vc.lock);
186 } 186 }
187 } 187 }
188 188
189 static int omap_dma_alloc_chan_resources(struct dma_chan *chan) 189 static int omap_dma_alloc_chan_resources(struct dma_chan *chan)
190 { 190 {
191 struct omap_chan *c = to_omap_dma_chan(chan); 191 struct omap_chan *c = to_omap_dma_chan(chan);
192 192
193 dev_info(c->vc.chan.device->dev, "allocating channel for %u\n", c->dma_sig); 193 dev_info(c->vc.chan.device->dev, "allocating channel for %u\n", c->dma_sig);
194 194
195 return omap_request_dma(c->dma_sig, "DMA engine", 195 return omap_request_dma(c->dma_sig, "DMA engine",
196 omap_dma_callback, c, &c->dma_ch); 196 omap_dma_callback, c, &c->dma_ch);
197 } 197 }
198 198
199 static void omap_dma_free_chan_resources(struct dma_chan *chan) 199 static void omap_dma_free_chan_resources(struct dma_chan *chan)
200 { 200 {
201 struct omap_chan *c = to_omap_dma_chan(chan); 201 struct omap_chan *c = to_omap_dma_chan(chan);
202 202
203 vchan_free_chan_resources(&c->vc); 203 vchan_free_chan_resources(&c->vc);
204 omap_free_dma(c->dma_ch); 204 omap_free_dma(c->dma_ch);
205 205
206 dev_info(c->vc.chan.device->dev, "freeing channel for %u\n", c->dma_sig); 206 dev_info(c->vc.chan.device->dev, "freeing channel for %u\n", c->dma_sig);
207 } 207 }
208 208
209 static size_t omap_dma_sg_size(struct omap_sg *sg) 209 static size_t omap_dma_sg_size(struct omap_sg *sg)
210 { 210 {
211 return sg->en * sg->fn; 211 return sg->en * sg->fn;
212 } 212 }
213 213
214 static size_t omap_dma_desc_size(struct omap_desc *d) 214 static size_t omap_dma_desc_size(struct omap_desc *d)
215 { 215 {
216 unsigned i; 216 unsigned i;
217 size_t size; 217 size_t size;
218 218
219 for (size = i = 0; i < d->sglen; i++) 219 for (size = i = 0; i < d->sglen; i++)
220 size += omap_dma_sg_size(&d->sg[i]); 220 size += omap_dma_sg_size(&d->sg[i]);
221 221
222 return size * es_bytes[d->es]; 222 return size * es_bytes[d->es];
223 } 223 }
224 224
225 static size_t omap_dma_desc_size_pos(struct omap_desc *d, dma_addr_t addr) 225 static size_t omap_dma_desc_size_pos(struct omap_desc *d, dma_addr_t addr)
226 { 226 {
227 unsigned i; 227 unsigned i;
228 size_t size, es_size = es_bytes[d->es]; 228 size_t size, es_size = es_bytes[d->es];
229 229
230 for (size = i = 0; i < d->sglen; i++) { 230 for (size = i = 0; i < d->sglen; i++) {
231 size_t this_size = omap_dma_sg_size(&d->sg[i]) * es_size; 231 size_t this_size = omap_dma_sg_size(&d->sg[i]) * es_size;
232 232
233 if (size) 233 if (size)
234 size += this_size; 234 size += this_size;
235 else if (addr >= d->sg[i].addr && 235 else if (addr >= d->sg[i].addr &&
236 addr < d->sg[i].addr + this_size) 236 addr < d->sg[i].addr + this_size)
237 size += d->sg[i].addr + this_size - addr; 237 size += d->sg[i].addr + this_size - addr;
238 } 238 }
239 return size; 239 return size;
240 } 240 }
241 241
242 static enum dma_status omap_dma_tx_status(struct dma_chan *chan, 242 static enum dma_status omap_dma_tx_status(struct dma_chan *chan,
243 dma_cookie_t cookie, struct dma_tx_state *txstate) 243 dma_cookie_t cookie, struct dma_tx_state *txstate)
244 { 244 {
245 struct omap_chan *c = to_omap_dma_chan(chan); 245 struct omap_chan *c = to_omap_dma_chan(chan);
246 struct virt_dma_desc *vd; 246 struct virt_dma_desc *vd;
247 enum dma_status ret; 247 enum dma_status ret;
248 unsigned long flags; 248 unsigned long flags;
249 249
250 ret = dma_cookie_status(chan, cookie, txstate); 250 ret = dma_cookie_status(chan, cookie, txstate);
251 if (ret == DMA_SUCCESS || !txstate) 251 if (ret == DMA_COMPLETE || !txstate)
252 return ret; 252 return ret;
253 253
254 spin_lock_irqsave(&c->vc.lock, flags); 254 spin_lock_irqsave(&c->vc.lock, flags);
255 vd = vchan_find_desc(&c->vc, cookie); 255 vd = vchan_find_desc(&c->vc, cookie);
256 if (vd) { 256 if (vd) {
257 txstate->residue = omap_dma_desc_size(to_omap_dma_desc(&vd->tx)); 257 txstate->residue = omap_dma_desc_size(to_omap_dma_desc(&vd->tx));
258 } else if (c->desc && c->desc->vd.tx.cookie == cookie) { 258 } else if (c->desc && c->desc->vd.tx.cookie == cookie) {
259 struct omap_desc *d = c->desc; 259 struct omap_desc *d = c->desc;
260 dma_addr_t pos; 260 dma_addr_t pos;
261 261
262 if (d->dir == DMA_MEM_TO_DEV) 262 if (d->dir == DMA_MEM_TO_DEV)
263 pos = omap_get_dma_src_pos(c->dma_ch); 263 pos = omap_get_dma_src_pos(c->dma_ch);
264 else if (d->dir == DMA_DEV_TO_MEM) 264 else if (d->dir == DMA_DEV_TO_MEM)
265 pos = omap_get_dma_dst_pos(c->dma_ch); 265 pos = omap_get_dma_dst_pos(c->dma_ch);
266 else 266 else
267 pos = 0; 267 pos = 0;
268 268
269 txstate->residue = omap_dma_desc_size_pos(d, pos); 269 txstate->residue = omap_dma_desc_size_pos(d, pos);
270 } else { 270 } else {
271 txstate->residue = 0; 271 txstate->residue = 0;
272 } 272 }
273 spin_unlock_irqrestore(&c->vc.lock, flags); 273 spin_unlock_irqrestore(&c->vc.lock, flags);
274 274
275 return ret; 275 return ret;
276 } 276 }
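/*
 * Illustrative worked example (not part of the driver) of the residue
 * reported above. Assume the in-flight descriptor uses es = S32 (4 bytes)
 * and has two sg entries with en = 8, fn = 4 each, i.e. 8 * 4 * 4 = 128
 * bytes per entry, the first starting at 0x80000000:
 *
 *	pos = 0x80000040 (64 bytes into sg[0])
 *	sg[0] contributes 0x80000000 + 128 - pos = 64 bytes still to go
 *	sg[1] contributes its full 128 bytes
 *
 * so omap_dma_desc_size_pos() reports a residue of 192 bytes.
 */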
277 277
278 static void omap_dma_issue_pending(struct dma_chan *chan) 278 static void omap_dma_issue_pending(struct dma_chan *chan)
279 { 279 {
280 struct omap_chan *c = to_omap_dma_chan(chan); 280 struct omap_chan *c = to_omap_dma_chan(chan);
281 unsigned long flags; 281 unsigned long flags;
282 282
283 spin_lock_irqsave(&c->vc.lock, flags); 283 spin_lock_irqsave(&c->vc.lock, flags);
284 if (vchan_issue_pending(&c->vc) && !c->desc) { 284 if (vchan_issue_pending(&c->vc) && !c->desc) {
285 /* 285 /*
286 * c->cyclic is used only by audio and in this case the DMA needs 286 * c->cyclic is used only by audio and in this case the DMA needs
287 * to be started without delay. 287 * to be started without delay.
288 */ 288 */
289 if (!c->cyclic) { 289 if (!c->cyclic) {
290 struct omap_dmadev *d = to_omap_dma_dev(chan->device); 290 struct omap_dmadev *d = to_omap_dma_dev(chan->device);
291 spin_lock(&d->lock); 291 spin_lock(&d->lock);
292 if (list_empty(&c->node)) 292 if (list_empty(&c->node))
293 list_add_tail(&c->node, &d->pending); 293 list_add_tail(&c->node, &d->pending);
294 spin_unlock(&d->lock); 294 spin_unlock(&d->lock);
295 tasklet_schedule(&d->task); 295 tasklet_schedule(&d->task);
296 } else { 296 } else {
297 omap_dma_start_desc(c); 297 omap_dma_start_desc(c);
298 } 298 }
299 } 299 }
300 spin_unlock_irqrestore(&c->vc.lock, flags); 300 spin_unlock_irqrestore(&c->vc.lock, flags);
301 } 301 }
302 302
303 static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg( 303 static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
304 struct dma_chan *chan, struct scatterlist *sgl, unsigned sglen, 304 struct dma_chan *chan, struct scatterlist *sgl, unsigned sglen,
305 enum dma_transfer_direction dir, unsigned long tx_flags, void *context) 305 enum dma_transfer_direction dir, unsigned long tx_flags, void *context)
306 { 306 {
307 struct omap_chan *c = to_omap_dma_chan(chan); 307 struct omap_chan *c = to_omap_dma_chan(chan);
308 enum dma_slave_buswidth dev_width; 308 enum dma_slave_buswidth dev_width;
309 struct scatterlist *sgent; 309 struct scatterlist *sgent;
310 struct omap_desc *d; 310 struct omap_desc *d;
311 dma_addr_t dev_addr; 311 dma_addr_t dev_addr;
312 unsigned i, j = 0, es, en, frame_bytes, sync_type; 312 unsigned i, j = 0, es, en, frame_bytes, sync_type;
313 u32 burst; 313 u32 burst;
314 314
315 if (dir == DMA_DEV_TO_MEM) { 315 if (dir == DMA_DEV_TO_MEM) {
316 dev_addr = c->cfg.src_addr; 316 dev_addr = c->cfg.src_addr;
317 dev_width = c->cfg.src_addr_width; 317 dev_width = c->cfg.src_addr_width;
318 burst = c->cfg.src_maxburst; 318 burst = c->cfg.src_maxburst;
319 sync_type = OMAP_DMA_SRC_SYNC; 319 sync_type = OMAP_DMA_SRC_SYNC;
320 } else if (dir == DMA_MEM_TO_DEV) { 320 } else if (dir == DMA_MEM_TO_DEV) {
321 dev_addr = c->cfg.dst_addr; 321 dev_addr = c->cfg.dst_addr;
322 dev_width = c->cfg.dst_addr_width; 322 dev_width = c->cfg.dst_addr_width;
323 burst = c->cfg.dst_maxburst; 323 burst = c->cfg.dst_maxburst;
324 sync_type = OMAP_DMA_DST_SYNC; 324 sync_type = OMAP_DMA_DST_SYNC;
325 } else { 325 } else {
326 dev_err(chan->device->dev, "%s: bad direction?\n", __func__); 326 dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
327 return NULL; 327 return NULL;
328 } 328 }
329 329
330 /* Bus width translates to the element size (ES) */ 330 /* Bus width translates to the element size (ES) */
331 switch (dev_width) { 331 switch (dev_width) {
332 case DMA_SLAVE_BUSWIDTH_1_BYTE: 332 case DMA_SLAVE_BUSWIDTH_1_BYTE:
333 es = OMAP_DMA_DATA_TYPE_S8; 333 es = OMAP_DMA_DATA_TYPE_S8;
334 break; 334 break;
335 case DMA_SLAVE_BUSWIDTH_2_BYTES: 335 case DMA_SLAVE_BUSWIDTH_2_BYTES:
336 es = OMAP_DMA_DATA_TYPE_S16; 336 es = OMAP_DMA_DATA_TYPE_S16;
337 break; 337 break;
338 case DMA_SLAVE_BUSWIDTH_4_BYTES: 338 case DMA_SLAVE_BUSWIDTH_4_BYTES:
339 es = OMAP_DMA_DATA_TYPE_S32; 339 es = OMAP_DMA_DATA_TYPE_S32;
340 break; 340 break;
341 default: /* not reached */ 341 default: /* not reached */
342 return NULL; 342 return NULL;
343 } 343 }
344 344
345 /* Now allocate and setup the descriptor. */ 345 /* Now allocate and setup the descriptor. */
346 d = kzalloc(sizeof(*d) + sglen * sizeof(d->sg[0]), GFP_ATOMIC); 346 d = kzalloc(sizeof(*d) + sglen * sizeof(d->sg[0]), GFP_ATOMIC);
347 if (!d) 347 if (!d)
348 return NULL; 348 return NULL;
349 349
350 d->dir = dir; 350 d->dir = dir;
351 d->dev_addr = dev_addr; 351 d->dev_addr = dev_addr;
352 d->es = es; 352 d->es = es;
353 d->sync_mode = OMAP_DMA_SYNC_FRAME; 353 d->sync_mode = OMAP_DMA_SYNC_FRAME;
354 d->sync_type = sync_type; 354 d->sync_type = sync_type;
355 d->periph_port = OMAP_DMA_PORT_TIPB; 355 d->periph_port = OMAP_DMA_PORT_TIPB;
356 356
357 /* 357 /*
358 * Build our scatterlist entries: each contains the address, 358 * Build our scatterlist entries: each contains the address,
359 * the number of elements (EN) in each frame, and the number of 359 * the number of elements (EN) in each frame, and the number of
360 * frames (FN). Number of bytes for this entry = ES * EN * FN. 360 * frames (FN). Number of bytes for this entry = ES * EN * FN.
361 * 361 *
362 * Burst size translates to number of elements with frame sync. 362 * Burst size translates to number of elements with frame sync.
363 * Note: DMA engine defines burst to be the number of dev-width 363 * Note: DMA engine defines burst to be the number of dev-width
364 * transfers. 364 * transfers.
365 */ 365 */
366 en = burst; 366 en = burst;
367 frame_bytes = es_bytes[es] * en; 367 frame_bytes = es_bytes[es] * en;
368 for_each_sg(sgl, sgent, sglen, i) { 368 for_each_sg(sgl, sgent, sglen, i) {
369 d->sg[j].addr = sg_dma_address(sgent); 369 d->sg[j].addr = sg_dma_address(sgent);
370 d->sg[j].en = en; 370 d->sg[j].en = en;
371 d->sg[j].fn = sg_dma_len(sgent) / frame_bytes; 371 d->sg[j].fn = sg_dma_len(sgent) / frame_bytes;
372 j++; 372 j++;
373 } 373 }
374 374
375 d->sglen = j; 375 d->sglen = j;
376 376
377 return vchan_tx_prep(&c->vc, &d->vd, tx_flags); 377 return vchan_tx_prep(&c->vc, &d->vd, tx_flags);
378 } 378 }
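/*
 * Illustrative worked example (not part of the driver) of the EN/FN
 * translation above. Assume a 16-bit peripheral (es = S16, 2 bytes per
 * element) configured with a maxburst of 32:
 *
 *	en          = burst             = 32 elements per frame
 *	frame_bytes = es_bytes[es] * en = 64 bytes
 *
 * A 4096-byte sg entry then becomes one omap_sg with en = 32 and
 * fn = 4096 / 64 = 64 frames, transferred with OMAP_DMA_SYNC_FRAME.
 */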
379 379
380 static struct dma_async_tx_descriptor *omap_dma_prep_dma_cyclic( 380 static struct dma_async_tx_descriptor *omap_dma_prep_dma_cyclic(
381 struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, 381 struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
382 size_t period_len, enum dma_transfer_direction dir, unsigned long flags, 382 size_t period_len, enum dma_transfer_direction dir, unsigned long flags,
383 void *context) 383 void *context)
384 { 384 {
385 struct omap_chan *c = to_omap_dma_chan(chan); 385 struct omap_chan *c = to_omap_dma_chan(chan);
386 enum dma_slave_buswidth dev_width; 386 enum dma_slave_buswidth dev_width;
387 struct omap_desc *d; 387 struct omap_desc *d;
388 dma_addr_t dev_addr; 388 dma_addr_t dev_addr;
389 unsigned es, sync_type; 389 unsigned es, sync_type;
390 u32 burst; 390 u32 burst;
391 391
392 if (dir == DMA_DEV_TO_MEM) { 392 if (dir == DMA_DEV_TO_MEM) {
393 dev_addr = c->cfg.src_addr; 393 dev_addr = c->cfg.src_addr;
394 dev_width = c->cfg.src_addr_width; 394 dev_width = c->cfg.src_addr_width;
395 burst = c->cfg.src_maxburst; 395 burst = c->cfg.src_maxburst;
396 sync_type = OMAP_DMA_SRC_SYNC; 396 sync_type = OMAP_DMA_SRC_SYNC;
397 } else if (dir == DMA_MEM_TO_DEV) { 397 } else if (dir == DMA_MEM_TO_DEV) {
398 dev_addr = c->cfg.dst_addr; 398 dev_addr = c->cfg.dst_addr;
399 dev_width = c->cfg.dst_addr_width; 399 dev_width = c->cfg.dst_addr_width;
400 burst = c->cfg.dst_maxburst; 400 burst = c->cfg.dst_maxburst;
401 sync_type = OMAP_DMA_DST_SYNC; 401 sync_type = OMAP_DMA_DST_SYNC;
402 } else { 402 } else {
403 dev_err(chan->device->dev, "%s: bad direction?\n", __func__); 403 dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
404 return NULL; 404 return NULL;
405 } 405 }
406 406
407 /* Bus width translates to the element size (ES) */ 407 /* Bus width translates to the element size (ES) */
408 switch (dev_width) { 408 switch (dev_width) {
409 case DMA_SLAVE_BUSWIDTH_1_BYTE: 409 case DMA_SLAVE_BUSWIDTH_1_BYTE:
410 es = OMAP_DMA_DATA_TYPE_S8; 410 es = OMAP_DMA_DATA_TYPE_S8;
411 break; 411 break;
412 case DMA_SLAVE_BUSWIDTH_2_BYTES: 412 case DMA_SLAVE_BUSWIDTH_2_BYTES:
413 es = OMAP_DMA_DATA_TYPE_S16; 413 es = OMAP_DMA_DATA_TYPE_S16;
414 break; 414 break;
415 case DMA_SLAVE_BUSWIDTH_4_BYTES: 415 case DMA_SLAVE_BUSWIDTH_4_BYTES:
416 es = OMAP_DMA_DATA_TYPE_S32; 416 es = OMAP_DMA_DATA_TYPE_S32;
417 break; 417 break;
418 default: /* not reached */ 418 default: /* not reached */
419 return NULL; 419 return NULL;
420 } 420 }
421 421
422 /* Now allocate and setup the descriptor. */ 422 /* Now allocate and setup the descriptor. */
423 d = kzalloc(sizeof(*d) + sizeof(d->sg[0]), GFP_ATOMIC); 423 d = kzalloc(sizeof(*d) + sizeof(d->sg[0]), GFP_ATOMIC);
424 if (!d) 424 if (!d)
425 return NULL; 425 return NULL;
426 426
427 d->dir = dir; 427 d->dir = dir;
428 d->dev_addr = dev_addr; 428 d->dev_addr = dev_addr;
429 d->fi = burst; 429 d->fi = burst;
430 d->es = es; 430 d->es = es;
431 if (burst) 431 if (burst)
432 d->sync_mode = OMAP_DMA_SYNC_PACKET; 432 d->sync_mode = OMAP_DMA_SYNC_PACKET;
433 else 433 else
434 d->sync_mode = OMAP_DMA_SYNC_ELEMENT; 434 d->sync_mode = OMAP_DMA_SYNC_ELEMENT;
435 d->sync_type = sync_type; 435 d->sync_type = sync_type;
436 d->periph_port = OMAP_DMA_PORT_MPUI; 436 d->periph_port = OMAP_DMA_PORT_MPUI;
437 d->sg[0].addr = buf_addr; 437 d->sg[0].addr = buf_addr;
438 d->sg[0].en = period_len / es_bytes[es]; 438 d->sg[0].en = period_len / es_bytes[es];
439 d->sg[0].fn = buf_len / period_len; 439 d->sg[0].fn = buf_len / period_len;
440 d->sglen = 1; 440 d->sglen = 1;
441 441
442 if (!c->cyclic) { 442 if (!c->cyclic) {
443 c->cyclic = true; 443 c->cyclic = true;
444 omap_dma_link_lch(c->dma_ch, c->dma_ch); 444 omap_dma_link_lch(c->dma_ch, c->dma_ch);
445 445
446 if (flags & DMA_PREP_INTERRUPT) 446 if (flags & DMA_PREP_INTERRUPT)
447 omap_enable_dma_irq(c->dma_ch, OMAP_DMA_FRAME_IRQ); 447 omap_enable_dma_irq(c->dma_ch, OMAP_DMA_FRAME_IRQ);
448 448
449 omap_disable_dma_irq(c->dma_ch, OMAP_DMA_BLOCK_IRQ); 449 omap_disable_dma_irq(c->dma_ch, OMAP_DMA_BLOCK_IRQ);
450 } 450 }
451 451
452 if (dma_omap2plus()) { 452 if (dma_omap2plus()) {
453 omap_set_dma_src_burst_mode(c->dma_ch, OMAP_DMA_DATA_BURST_16); 453 omap_set_dma_src_burst_mode(c->dma_ch, OMAP_DMA_DATA_BURST_16);
454 omap_set_dma_dest_burst_mode(c->dma_ch, OMAP_DMA_DATA_BURST_16); 454 omap_set_dma_dest_burst_mode(c->dma_ch, OMAP_DMA_DATA_BURST_16);
455 } 455 }
456 456
457 return vchan_tx_prep(&c->vc, &d->vd, flags); 457 return vchan_tx_prep(&c->vc, &d->vd, flags);
458 } 458 }
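/*
 * Illustrative worked example (not part of the driver) for the cyclic
 * setup above. Assume a 64 KiB audio buffer with 4 KiB periods on a
 * 32-bit (S32) channel and a non-zero maxburst:
 *
 *	sg[0].en = period_len / es_bytes[es] = 4096 / 4     = 1024 elements
 *	sg[0].fn = buf_len / period_len      = 65536 / 4096 = 16 frames
 *
 * and because burst != 0 the driver programs OMAP_DMA_SYNC_PACKET rather
 * than OMAP_DMA_SYNC_ELEMENT for the self-linked channel.
 */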
459 459
460 static int omap_dma_slave_config(struct omap_chan *c, struct dma_slave_config *cfg) 460 static int omap_dma_slave_config(struct omap_chan *c, struct dma_slave_config *cfg)
461 { 461 {
462 if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES || 462 if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
463 cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES) 463 cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
464 return -EINVAL; 464 return -EINVAL;
465 465
466 memcpy(&c->cfg, cfg, sizeof(c->cfg)); 466 memcpy(&c->cfg, cfg, sizeof(c->cfg));
467 467
468 return 0; 468 return 0;
469 } 469 }
470 470
471 static int omap_dma_terminate_all(struct omap_chan *c) 471 static int omap_dma_terminate_all(struct omap_chan *c)
472 { 472 {
473 struct omap_dmadev *d = to_omap_dma_dev(c->vc.chan.device); 473 struct omap_dmadev *d = to_omap_dma_dev(c->vc.chan.device);
474 unsigned long flags; 474 unsigned long flags;
475 LIST_HEAD(head); 475 LIST_HEAD(head);
476 476
477 spin_lock_irqsave(&c->vc.lock, flags); 477 spin_lock_irqsave(&c->vc.lock, flags);
478 478
479 /* Prevent this channel being scheduled */ 479 /* Prevent this channel being scheduled */
480 spin_lock(&d->lock); 480 spin_lock(&d->lock);
481 list_del_init(&c->node); 481 list_del_init(&c->node);
482 spin_unlock(&d->lock); 482 spin_unlock(&d->lock);
483 483
484 /* 484 /*
485 * Stop DMA activity: we assume the callback will not be called 485 * Stop DMA activity: we assume the callback will not be called
486 * after omap_stop_dma() returns (even if it does, it will see 486 * after omap_stop_dma() returns (even if it does, it will see
487 * c->desc is NULL and exit.) 487 * c->desc is NULL and exit.)
488 */ 488 */
489 if (c->desc) { 489 if (c->desc) {
490 c->desc = NULL; 490 c->desc = NULL;
491 /* Avoid stopping the dma twice */ 491 /* Avoid stopping the dma twice */
492 if (!c->paused) 492 if (!c->paused)
493 omap_stop_dma(c->dma_ch); 493 omap_stop_dma(c->dma_ch);
494 } 494 }
495 495
496 if (c->cyclic) { 496 if (c->cyclic) {
497 c->cyclic = false; 497 c->cyclic = false;
498 c->paused = false; 498 c->paused = false;
499 omap_dma_unlink_lch(c->dma_ch, c->dma_ch); 499 omap_dma_unlink_lch(c->dma_ch, c->dma_ch);
500 } 500 }
501 501
502 vchan_get_all_descriptors(&c->vc, &head); 502 vchan_get_all_descriptors(&c->vc, &head);
503 spin_unlock_irqrestore(&c->vc.lock, flags); 503 spin_unlock_irqrestore(&c->vc.lock, flags);
504 vchan_dma_desc_free_list(&c->vc, &head); 504 vchan_dma_desc_free_list(&c->vc, &head);
505 505
506 return 0; 506 return 0;
507 } 507 }
508 508
509 static int omap_dma_pause(struct omap_chan *c) 509 static int omap_dma_pause(struct omap_chan *c)
510 { 510 {
511 /* Pause/Resume only allowed with cyclic mode */ 511 /* Pause/Resume only allowed with cyclic mode */
512 if (!c->cyclic) 512 if (!c->cyclic)
513 return -EINVAL; 513 return -EINVAL;
514 514
515 if (!c->paused) { 515 if (!c->paused) {
516 omap_stop_dma(c->dma_ch); 516 omap_stop_dma(c->dma_ch);
517 c->paused = true; 517 c->paused = true;
518 } 518 }
519 519
520 return 0; 520 return 0;
521 } 521 }
522 522
523 static int omap_dma_resume(struct omap_chan *c) 523 static int omap_dma_resume(struct omap_chan *c)
524 { 524 {
525 /* Pause/Resume only allowed with cyclic mode */ 525 /* Pause/Resume only allowed with cyclic mode */
526 if (!c->cyclic) 526 if (!c->cyclic)
527 return -EINVAL; 527 return -EINVAL;
528 528
529 if (c->paused) { 529 if (c->paused) {
530 omap_start_dma(c->dma_ch); 530 omap_start_dma(c->dma_ch);
531 c->paused = false; 531 c->paused = false;
532 } 532 }
533 533
534 return 0; 534 return 0;
535 } 535 }
536 536
537 static int omap_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, 537 static int omap_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
538 unsigned long arg) 538 unsigned long arg)
539 { 539 {
540 struct omap_chan *c = to_omap_dma_chan(chan); 540 struct omap_chan *c = to_omap_dma_chan(chan);
541 int ret; 541 int ret;
542 542
543 switch (cmd) { 543 switch (cmd) {
544 case DMA_SLAVE_CONFIG: 544 case DMA_SLAVE_CONFIG:
545 ret = omap_dma_slave_config(c, (struct dma_slave_config *)arg); 545 ret = omap_dma_slave_config(c, (struct dma_slave_config *)arg);
546 break; 546 break;
547 547
548 case DMA_TERMINATE_ALL: 548 case DMA_TERMINATE_ALL:
549 ret = omap_dma_terminate_all(c); 549 ret = omap_dma_terminate_all(c);
550 break; 550 break;
551 551
552 case DMA_PAUSE: 552 case DMA_PAUSE:
553 ret = omap_dma_pause(c); 553 ret = omap_dma_pause(c);
554 break; 554 break;
555 555
556 case DMA_RESUME: 556 case DMA_RESUME:
557 ret = omap_dma_resume(c); 557 ret = omap_dma_resume(c);
558 break; 558 break;
559 559
560 default: 560 default:
561 ret = -ENXIO; 561 ret = -ENXIO;
562 break; 562 break;
563 } 563 }
564 564
565 return ret; 565 return ret;
566 } 566 }
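The switch above is normally reached through the generic control wrappers rather than called directly; a hedged sketch of that mapping, assuming the static inlines provided by <linux/dmaengine.h> in this kernel generation:

	/* Each wrapper resolves to chan->device->device_control(), i.e. omap_dma_control() here. */
	dmaengine_slave_config(chan, &cfg);	/* DMA_SLAVE_CONFIG  -> omap_dma_slave_config()  */
	dmaengine_pause(chan);			/* DMA_PAUSE         -> omap_dma_pause()         */
	dmaengine_resume(chan);			/* DMA_RESUME        -> omap_dma_resume()        */
	dmaengine_terminate_all(chan);		/* DMA_TERMINATE_ALL -> omap_dma_terminate_all() */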
567 567
568 static int omap_dma_chan_init(struct omap_dmadev *od, int dma_sig) 568 static int omap_dma_chan_init(struct omap_dmadev *od, int dma_sig)
569 { 569 {
570 struct omap_chan *c; 570 struct omap_chan *c;
571 571
572 c = kzalloc(sizeof(*c), GFP_KERNEL); 572 c = kzalloc(sizeof(*c), GFP_KERNEL);
573 if (!c) 573 if (!c)
574 return -ENOMEM; 574 return -ENOMEM;
575 575
576 c->dma_sig = dma_sig; 576 c->dma_sig = dma_sig;
577 c->vc.desc_free = omap_dma_desc_free; 577 c->vc.desc_free = omap_dma_desc_free;
578 vchan_init(&c->vc, &od->ddev); 578 vchan_init(&c->vc, &od->ddev);
579 INIT_LIST_HEAD(&c->node); 579 INIT_LIST_HEAD(&c->node);
580 580
581 od->ddev.chancnt++; 581 od->ddev.chancnt++;
582 582
583 return 0; 583 return 0;
584 } 584 }
585 585
586 static void omap_dma_free(struct omap_dmadev *od) 586 static void omap_dma_free(struct omap_dmadev *od)
587 { 587 {
588 tasklet_kill(&od->task); 588 tasklet_kill(&od->task);
589 while (!list_empty(&od->ddev.channels)) { 589 while (!list_empty(&od->ddev.channels)) {
590 struct omap_chan *c = list_first_entry(&od->ddev.channels, 590 struct omap_chan *c = list_first_entry(&od->ddev.channels,
591 struct omap_chan, vc.chan.device_node); 591 struct omap_chan, vc.chan.device_node);
592 592
593 list_del(&c->vc.chan.device_node); 593 list_del(&c->vc.chan.device_node);
594 tasklet_kill(&c->vc.task); 594 tasklet_kill(&c->vc.task);
595 kfree(c); 595 kfree(c);
596 } 596 }
597 kfree(od); 597 kfree(od);
598 } 598 }
599 599
600 static int omap_dma_probe(struct platform_device *pdev) 600 static int omap_dma_probe(struct platform_device *pdev)
601 { 601 {
602 struct omap_dmadev *od; 602 struct omap_dmadev *od;
603 int rc, i; 603 int rc, i;
604 604
605 od = kzalloc(sizeof(*od), GFP_KERNEL); 605 od = kzalloc(sizeof(*od), GFP_KERNEL);
606 if (!od) 606 if (!od)
607 return -ENOMEM; 607 return -ENOMEM;
608 608
609 dma_cap_set(DMA_SLAVE, od->ddev.cap_mask); 609 dma_cap_set(DMA_SLAVE, od->ddev.cap_mask);
610 dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask); 610 dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask);
611 od->ddev.device_alloc_chan_resources = omap_dma_alloc_chan_resources; 611 od->ddev.device_alloc_chan_resources = omap_dma_alloc_chan_resources;
612 od->ddev.device_free_chan_resources = omap_dma_free_chan_resources; 612 od->ddev.device_free_chan_resources = omap_dma_free_chan_resources;
613 od->ddev.device_tx_status = omap_dma_tx_status; 613 od->ddev.device_tx_status = omap_dma_tx_status;
614 od->ddev.device_issue_pending = omap_dma_issue_pending; 614 od->ddev.device_issue_pending = omap_dma_issue_pending;
615 od->ddev.device_prep_slave_sg = omap_dma_prep_slave_sg; 615 od->ddev.device_prep_slave_sg = omap_dma_prep_slave_sg;
616 od->ddev.device_prep_dma_cyclic = omap_dma_prep_dma_cyclic; 616 od->ddev.device_prep_dma_cyclic = omap_dma_prep_dma_cyclic;
617 od->ddev.device_control = omap_dma_control; 617 od->ddev.device_control = omap_dma_control;
618 od->ddev.dev = &pdev->dev; 618 od->ddev.dev = &pdev->dev;
619 INIT_LIST_HEAD(&od->ddev.channels); 619 INIT_LIST_HEAD(&od->ddev.channels);
620 INIT_LIST_HEAD(&od->pending); 620 INIT_LIST_HEAD(&od->pending);
621 spin_lock_init(&od->lock); 621 spin_lock_init(&od->lock);
622 622
623 tasklet_init(&od->task, omap_dma_sched, (unsigned long)od); 623 tasklet_init(&od->task, omap_dma_sched, (unsigned long)od);
624 624
625 for (i = 0; i < 127; i++) { 625 for (i = 0; i < 127; i++) {
626 rc = omap_dma_chan_init(od, i); 626 rc = omap_dma_chan_init(od, i);
627 if (rc) { 627 if (rc) {
628 omap_dma_free(od); 628 omap_dma_free(od);
629 return rc; 629 return rc;
630 } 630 }
631 } 631 }
632 632
633 rc = dma_async_device_register(&od->ddev); 633 rc = dma_async_device_register(&od->ddev);
634 if (rc) { 634 if (rc) {
635 pr_warn("OMAP-DMA: failed to register slave DMA engine device: %d\n", 635 pr_warn("OMAP-DMA: failed to register slave DMA engine device: %d\n",
636 rc); 636 rc);
637 omap_dma_free(od); 637 omap_dma_free(od);
638 return rc; 638 return rc;
639 } 639 }
640 640
641 platform_set_drvdata(pdev, od); 641 platform_set_drvdata(pdev, od);
642 642
643 if (pdev->dev.of_node) { 643 if (pdev->dev.of_node) {
644 omap_dma_info.dma_cap = od->ddev.cap_mask; 644 omap_dma_info.dma_cap = od->ddev.cap_mask;
645 645
646 /* Device-tree DMA controller registration */ 646 /* Device-tree DMA controller registration */
647 rc = of_dma_controller_register(pdev->dev.of_node, 647 rc = of_dma_controller_register(pdev->dev.of_node,
648 of_dma_simple_xlate, &omap_dma_info); 648 of_dma_simple_xlate, &omap_dma_info);
649 if (rc) { 649 if (rc) {
650 pr_warn("OMAP-DMA: failed to register DMA controller\n"); 650 pr_warn("OMAP-DMA: failed to register DMA controller\n");
651 dma_async_device_unregister(&od->ddev); 651 dma_async_device_unregister(&od->ddev);
652 omap_dma_free(od); 652 omap_dma_free(od);
653 } 653 }
654 } 654 }
655 655
656 dev_info(&pdev->dev, "OMAP DMA engine driver\n"); 656 dev_info(&pdev->dev, "OMAP DMA engine driver\n");
657 657
658 return rc; 658 return rc;
659 } 659 }
660 660
661 static int omap_dma_remove(struct platform_device *pdev) 661 static int omap_dma_remove(struct platform_device *pdev)
662 { 662 {
663 struct omap_dmadev *od = platform_get_drvdata(pdev); 663 struct omap_dmadev *od = platform_get_drvdata(pdev);
664 664
665 if (pdev->dev.of_node) 665 if (pdev->dev.of_node)
666 of_dma_controller_free(pdev->dev.of_node); 666 of_dma_controller_free(pdev->dev.of_node);
667 667
668 dma_async_device_unregister(&od->ddev); 668 dma_async_device_unregister(&od->ddev);
669 omap_dma_free(od); 669 omap_dma_free(od);
670 670
671 return 0; 671 return 0;
672 } 672 }
673 673
674 static const struct of_device_id omap_dma_match[] = { 674 static const struct of_device_id omap_dma_match[] = {
675 { .compatible = "ti,omap2420-sdma", }, 675 { .compatible = "ti,omap2420-sdma", },
676 { .compatible = "ti,omap2430-sdma", }, 676 { .compatible = "ti,omap2430-sdma", },
677 { .compatible = "ti,omap3430-sdma", }, 677 { .compatible = "ti,omap3430-sdma", },
678 { .compatible = "ti,omap3630-sdma", }, 678 { .compatible = "ti,omap3630-sdma", },
679 { .compatible = "ti,omap4430-sdma", }, 679 { .compatible = "ti,omap4430-sdma", },
680 {}, 680 {},
681 }; 681 };
682 MODULE_DEVICE_TABLE(of, omap_dma_match); 682 MODULE_DEVICE_TABLE(of, omap_dma_match);
683 683
684 static struct platform_driver omap_dma_driver = { 684 static struct platform_driver omap_dma_driver = {
685 .probe = omap_dma_probe, 685 .probe = omap_dma_probe,
686 .remove = omap_dma_remove, 686 .remove = omap_dma_remove,
687 .driver = { 687 .driver = {
688 .name = "omap-dma-engine", 688 .name = "omap-dma-engine",
689 .owner = THIS_MODULE, 689 .owner = THIS_MODULE,
690 .of_match_table = of_match_ptr(omap_dma_match), 690 .of_match_table = of_match_ptr(omap_dma_match),
691 }, 691 },
692 }; 692 };
693 693
694 bool omap_dma_filter_fn(struct dma_chan *chan, void *param) 694 bool omap_dma_filter_fn(struct dma_chan *chan, void *param)
695 { 695 {
696 if (chan->device->dev->driver == &omap_dma_driver.driver) { 696 if (chan->device->dev->driver == &omap_dma_driver.driver) {
697 struct omap_chan *c = to_omap_dma_chan(chan); 697 struct omap_chan *c = to_omap_dma_chan(chan);
698 unsigned req = *(unsigned *)param; 698 unsigned req = *(unsigned *)param;
699 699
700 return req == c->dma_sig; 700 return req == c->dma_sig;
701 } 701 }
702 return false; 702 return false;
703 } 703 }
704 EXPORT_SYMBOL_GPL(omap_dma_filter_fn); 704 EXPORT_SYMBOL_GPL(omap_dma_filter_fn);
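A brief usage sketch: a peripheral driver would typically hand this filter to dma_request_channel() together with its sDMA request line number (the value compared against c->dma_sig); the request number used here is a made-up placeholder:

	dma_cap_mask_t mask;
	struct dma_chan *chan;
	unsigned int req = 53;		/* assumed sDMA request line of the peripheral */

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	chan = dma_request_channel(mask, omap_dma_filter_fn, &req);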
705 705
706 static int omap_dma_init(void) 706 static int omap_dma_init(void)
707 { 707 {
708 return platform_driver_register(&omap_dma_driver); 708 return platform_driver_register(&omap_dma_driver);
709 } 709 }
710 subsys_initcall(omap_dma_init); 710 subsys_initcall(omap_dma_init);
711 711
712 static void __exit omap_dma_exit(void) 712 static void __exit omap_dma_exit(void)
713 { 713 {
714 platform_driver_unregister(&omap_dma_driver); 714 platform_driver_unregister(&omap_dma_driver);
715 } 715 }
716 module_exit(omap_dma_exit); 716 module_exit(omap_dma_exit);
717 717
718 MODULE_AUTHOR("Russell King"); 718 MODULE_AUTHOR("Russell King");
719 MODULE_LICENSE("GPL"); 719 MODULE_LICENSE("GPL");
720 720
drivers/dma/ppc4xx/adma.c
1 /* 1 /*
2 * Copyright (C) 2006-2009 DENX Software Engineering. 2 * Copyright (C) 2006-2009 DENX Software Engineering.
3 * 3 *
4 * Author: Yuri Tikhonov <yur@emcraft.com> 4 * Author: Yuri Tikhonov <yur@emcraft.com>
5 * 5 *
6 * Further porting to arch/powerpc by 6 * Further porting to arch/powerpc by
7 * Anatolij Gustschin <agust@denx.de> 7 * Anatolij Gustschin <agust@denx.de>
8 * 8 *
9 * This program is free software; you can redistribute it and/or modify it 9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the Free 10 * under the terms of the GNU General Public License as published by the Free
11 * Software Foundation; either version 2 of the License, or (at your option) 11 * Software Foundation; either version 2 of the License, or (at your option)
12 * any later version. 12 * any later version.
13 * 13 *
14 * This program is distributed in the hope that it will be useful, but WITHOUT 14 * This program is distributed in the hope that it will be useful, but WITHOUT
15 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 15 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 16 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
17 * more details. 17 * more details.
18 * 18 *
19 * You should have received a copy of the GNU General Public License along with 19 * You should have received a copy of the GNU General Public License along with
20 * this program; if not, write to the Free Software Foundation, Inc., 59 20 * this program; if not, write to the Free Software Foundation, Inc., 59
21 * Temple Place - Suite 330, Boston, MA 02111-1307, USA. 21 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
22 * 22 *
23 * The full GNU General Public License is included in this distribution in the 23 * The full GNU General Public License is included in this distribution in the
24 * file called COPYING. 24 * file called COPYING.
25 */ 25 */
26 26
27 /* 27 /*
28 * This driver supports the asynchronous DMA copy and RAID engines available 28 * This driver supports the asynchronous DMA copy and RAID engines available
29 * on the AMCC PPC440SPe Processors. 29 * on the AMCC PPC440SPe Processors.
30 * Based on the Intel Xscale(R) family of I/O Processors (IOP 32x, 33x, 134x) 30 * Based on the Intel Xscale(R) family of I/O Processors (IOP 32x, 33x, 134x)
31 * ADMA driver written by D.Williams. 31 * ADMA driver written by D.Williams.
32 */ 32 */
33 33
34 #include <linux/init.h> 34 #include <linux/init.h>
35 #include <linux/module.h> 35 #include <linux/module.h>
36 #include <linux/async_tx.h> 36 #include <linux/async_tx.h>
37 #include <linux/delay.h> 37 #include <linux/delay.h>
38 #include <linux/dma-mapping.h> 38 #include <linux/dma-mapping.h>
39 #include <linux/spinlock.h> 39 #include <linux/spinlock.h>
40 #include <linux/interrupt.h> 40 #include <linux/interrupt.h>
41 #include <linux/slab.h> 41 #include <linux/slab.h>
42 #include <linux/uaccess.h> 42 #include <linux/uaccess.h>
43 #include <linux/proc_fs.h> 43 #include <linux/proc_fs.h>
44 #include <linux/of.h> 44 #include <linux/of.h>
45 #include <linux/of_platform.h> 45 #include <linux/of_platform.h>
46 #include <asm/dcr.h> 46 #include <asm/dcr.h>
47 #include <asm/dcr-regs.h> 47 #include <asm/dcr-regs.h>
48 #include "adma.h" 48 #include "adma.h"
49 #include "../dmaengine.h" 49 #include "../dmaengine.h"
50 50
51 enum ppc_adma_init_code { 51 enum ppc_adma_init_code {
52 PPC_ADMA_INIT_OK = 0, 52 PPC_ADMA_INIT_OK = 0,
53 PPC_ADMA_INIT_MEMRES, 53 PPC_ADMA_INIT_MEMRES,
54 PPC_ADMA_INIT_MEMREG, 54 PPC_ADMA_INIT_MEMREG,
55 PPC_ADMA_INIT_ALLOC, 55 PPC_ADMA_INIT_ALLOC,
56 PPC_ADMA_INIT_COHERENT, 56 PPC_ADMA_INIT_COHERENT,
57 PPC_ADMA_INIT_CHANNEL, 57 PPC_ADMA_INIT_CHANNEL,
58 PPC_ADMA_INIT_IRQ1, 58 PPC_ADMA_INIT_IRQ1,
59 PPC_ADMA_INIT_IRQ2, 59 PPC_ADMA_INIT_IRQ2,
60 PPC_ADMA_INIT_REGISTER 60 PPC_ADMA_INIT_REGISTER
61 }; 61 };
62 62
63 static char *ppc_adma_errors[] = { 63 static char *ppc_adma_errors[] = {
64 [PPC_ADMA_INIT_OK] = "ok", 64 [PPC_ADMA_INIT_OK] = "ok",
65 [PPC_ADMA_INIT_MEMRES] = "failed to get memory resource", 65 [PPC_ADMA_INIT_MEMRES] = "failed to get memory resource",
66 [PPC_ADMA_INIT_MEMREG] = "failed to request memory region", 66 [PPC_ADMA_INIT_MEMREG] = "failed to request memory region",
67 [PPC_ADMA_INIT_ALLOC] = "failed to allocate memory for adev " 67 [PPC_ADMA_INIT_ALLOC] = "failed to allocate memory for adev "
68 "structure", 68 "structure",
69 [PPC_ADMA_INIT_COHERENT] = "failed to allocate coherent memory for " 69 [PPC_ADMA_INIT_COHERENT] = "failed to allocate coherent memory for "
70 "hardware descriptors", 70 "hardware descriptors",
71 [PPC_ADMA_INIT_CHANNEL] = "failed to allocate memory for channel", 71 [PPC_ADMA_INIT_CHANNEL] = "failed to allocate memory for channel",
72 [PPC_ADMA_INIT_IRQ1] = "failed to request first irq", 72 [PPC_ADMA_INIT_IRQ1] = "failed to request first irq",
73 [PPC_ADMA_INIT_IRQ2] = "failed to request second irq", 73 [PPC_ADMA_INIT_IRQ2] = "failed to request second irq",
74 [PPC_ADMA_INIT_REGISTER] = "failed to register dma async device", 74 [PPC_ADMA_INIT_REGISTER] = "failed to register dma async device",
75 }; 75 };
76 76
77 static enum ppc_adma_init_code 77 static enum ppc_adma_init_code
78 ppc440spe_adma_devices[PPC440SPE_ADMA_ENGINES_NUM]; 78 ppc440spe_adma_devices[PPC440SPE_ADMA_ENGINES_NUM];
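For context, a minimal sketch of how the error strings above pair with the per-engine init codes kept in ppc440spe_adma_devices[]; the reporting loop is illustrative only, not lifted from this file:

	int i;

	for (i = 0; i < PPC440SPE_ADMA_ENGINES_NUM; i++)
		pr_info("PPC440SP(E)-ADMA.%d: %s\n", i,
			ppc_adma_errors[ppc440spe_adma_devices[i]]);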
79 79
80 struct ppc_dma_chan_ref { 80 struct ppc_dma_chan_ref {
81 struct dma_chan *chan; 81 struct dma_chan *chan;
82 struct list_head node; 82 struct list_head node;
83 }; 83 };
84 84
85 /* The list of channels exported by ppc440spe ADMA */ 85 /* The list of channels exported by ppc440spe ADMA */
86 struct list_head 86 struct list_head
87 ppc440spe_adma_chan_list = LIST_HEAD_INIT(ppc440spe_adma_chan_list); 87 ppc440spe_adma_chan_list = LIST_HEAD_INIT(ppc440spe_adma_chan_list);
88 88
89 /* This flag is set when we want to refetch the xor chain in the interrupt 89 /* This flag is set when we want to refetch the xor chain in the interrupt
90 * handler 90 * handler
91 */ 91 */
92 static u32 do_xor_refetch; 92 static u32 do_xor_refetch;
93 93
94 /* Pointer to DMA0, DMA1 CP/CS FIFO */ 94 /* Pointer to DMA0, DMA1 CP/CS FIFO */
95 static void *ppc440spe_dma_fifo_buf; 95 static void *ppc440spe_dma_fifo_buf;
96 96
97 /* Pointers to the CDBs last submitted to DMA0, DMA1 */ 97 /* Pointers to the CDBs last submitted to DMA0, DMA1 */
98 static struct ppc440spe_adma_desc_slot *chan_last_sub[3]; 98 static struct ppc440spe_adma_desc_slot *chan_last_sub[3];
99 static struct ppc440spe_adma_desc_slot *chan_first_cdb[3]; 99 static struct ppc440spe_adma_desc_slot *chan_first_cdb[3];
100 100
101 /* Pointer to last linked and submitted xor CB */ 101 /* Pointer to last linked and submitted xor CB */
102 static struct ppc440spe_adma_desc_slot *xor_last_linked; 102 static struct ppc440spe_adma_desc_slot *xor_last_linked;
103 static struct ppc440spe_adma_desc_slot *xor_last_submit; 103 static struct ppc440spe_adma_desc_slot *xor_last_submit;
104 104
105 /* This array is used in data-check operations for storing a pattern */ 105 /* This array is used in data-check operations for storing a pattern */
106 static char ppc440spe_qword[16]; 106 static char ppc440spe_qword[16];
107 107
108 static atomic_t ppc440spe_adma_err_irq_ref; 108 static atomic_t ppc440spe_adma_err_irq_ref;
109 static dcr_host_t ppc440spe_mq_dcr_host; 109 static dcr_host_t ppc440spe_mq_dcr_host;
110 static unsigned int ppc440spe_mq_dcr_len; 110 static unsigned int ppc440spe_mq_dcr_len;
111 111
112 /* Since RXOR operations use a common register (MQ0_CF2H) to set up 112 /* Since RXOR operations use a common register (MQ0_CF2H) to set up
113 * the block size of transactions, we do not allow more than one RXOR 113 * the block size of transactions, we do not allow more than one RXOR
114 * transaction to be active at a time. So use this var to store 114 * transaction to be active at a time. So use this var to store
115 * whether RXOR is currently active (PPC440SPE_RXOR_RUN bit is 115 * whether RXOR is currently active (PPC440SPE_RXOR_RUN bit is
116 * set) or not (PPC440SPE_RXOR_RUN is clear). 116 * set) or not (PPC440SPE_RXOR_RUN is clear).
117 */ 117 */
118 static unsigned long ppc440spe_rxor_state; 118 static unsigned long ppc440spe_rxor_state;
119 119
120 /* These are used in enable & check routines 120 /* These are used in enable & check routines
121 */ 121 */
122 static u32 ppc440spe_r6_enabled; 122 static u32 ppc440spe_r6_enabled;
123 static struct ppc440spe_adma_chan *ppc440spe_r6_tchan; 123 static struct ppc440spe_adma_chan *ppc440spe_r6_tchan;
124 static struct completion ppc440spe_r6_test_comp; 124 static struct completion ppc440spe_r6_test_comp;
125 125
126 static int ppc440spe_adma_dma2rxor_prep_src( 126 static int ppc440spe_adma_dma2rxor_prep_src(
127 struct ppc440spe_adma_desc_slot *desc, 127 struct ppc440spe_adma_desc_slot *desc,
128 struct ppc440spe_rxor *cursor, int index, 128 struct ppc440spe_rxor *cursor, int index,
129 int src_cnt, u32 addr); 129 int src_cnt, u32 addr);
130 static void ppc440spe_adma_dma2rxor_set_src( 130 static void ppc440spe_adma_dma2rxor_set_src(
131 struct ppc440spe_adma_desc_slot *desc, 131 struct ppc440spe_adma_desc_slot *desc,
132 int index, dma_addr_t addr); 132 int index, dma_addr_t addr);
133 static void ppc440spe_adma_dma2rxor_set_mult( 133 static void ppc440spe_adma_dma2rxor_set_mult(
134 struct ppc440spe_adma_desc_slot *desc, 134 struct ppc440spe_adma_desc_slot *desc,
135 int index, u8 mult); 135 int index, u8 mult);
136 136
137 #ifdef ADMA_LL_DEBUG 137 #ifdef ADMA_LL_DEBUG
138 #define ADMA_LL_DBG(x) ({ if (1) x; 0; }) 138 #define ADMA_LL_DBG(x) ({ if (1) x; 0; })
139 #else 139 #else
140 #define ADMA_LL_DBG(x) ({ if (0) x; 0; }) 140 #define ADMA_LL_DBG(x) ({ if (0) x; 0; })
141 #endif 141 #endif
142 142
143 static void print_cb(struct ppc440spe_adma_chan *chan, void *block) 143 static void print_cb(struct ppc440spe_adma_chan *chan, void *block)
144 { 144 {
145 struct dma_cdb *cdb; 145 struct dma_cdb *cdb;
146 struct xor_cb *cb; 146 struct xor_cb *cb;
147 int i; 147 int i;
148 148
149 switch (chan->device->id) { 149 switch (chan->device->id) {
150 case 0: 150 case 0:
151 case 1: 151 case 1:
152 cdb = block; 152 cdb = block;
153 153
154 pr_debug("CDB at %p [%d]:\n" 154 pr_debug("CDB at %p [%d]:\n"
155 "\t attr 0x%02x opc 0x%02x cnt 0x%08x\n" 155 "\t attr 0x%02x opc 0x%02x cnt 0x%08x\n"
156 "\t sg1u 0x%08x sg1l 0x%08x\n" 156 "\t sg1u 0x%08x sg1l 0x%08x\n"
157 "\t sg2u 0x%08x sg2l 0x%08x\n" 157 "\t sg2u 0x%08x sg2l 0x%08x\n"
158 "\t sg3u 0x%08x sg3l 0x%08x\n", 158 "\t sg3u 0x%08x sg3l 0x%08x\n",
159 cdb, chan->device->id, 159 cdb, chan->device->id,
160 cdb->attr, cdb->opc, le32_to_cpu(cdb->cnt), 160 cdb->attr, cdb->opc, le32_to_cpu(cdb->cnt),
161 le32_to_cpu(cdb->sg1u), le32_to_cpu(cdb->sg1l), 161 le32_to_cpu(cdb->sg1u), le32_to_cpu(cdb->sg1l),
162 le32_to_cpu(cdb->sg2u), le32_to_cpu(cdb->sg2l), 162 le32_to_cpu(cdb->sg2u), le32_to_cpu(cdb->sg2l),
163 le32_to_cpu(cdb->sg3u), le32_to_cpu(cdb->sg3l) 163 le32_to_cpu(cdb->sg3u), le32_to_cpu(cdb->sg3l)
164 ); 164 );
165 break; 165 break;
166 case 2: 166 case 2:
167 cb = block; 167 cb = block;
168 168
169 pr_debug("CB at %p [%d]:\n" 169 pr_debug("CB at %p [%d]:\n"
170 "\t cbc 0x%08x cbbc 0x%08x cbs 0x%08x\n" 170 "\t cbc 0x%08x cbbc 0x%08x cbs 0x%08x\n"
171 "\t cbtah 0x%08x cbtal 0x%08x\n" 171 "\t cbtah 0x%08x cbtal 0x%08x\n"
172 "\t cblah 0x%08x cblal 0x%08x\n", 172 "\t cblah 0x%08x cblal 0x%08x\n",
173 cb, chan->device->id, 173 cb, chan->device->id,
174 cb->cbc, cb->cbbc, cb->cbs, 174 cb->cbc, cb->cbbc, cb->cbs,
175 cb->cbtah, cb->cbtal, 175 cb->cbtah, cb->cbtal,
176 cb->cblah, cb->cblal); 176 cb->cblah, cb->cblal);
177 for (i = 0; i < 16; i++) { 177 for (i = 0; i < 16; i++) {
178 if (i && !cb->ops[i].h && !cb->ops[i].l) 178 if (i && !cb->ops[i].h && !cb->ops[i].l)
179 continue; 179 continue;
180 pr_debug("\t ops[%2d]: h 0x%08x l 0x%08x\n", 180 pr_debug("\t ops[%2d]: h 0x%08x l 0x%08x\n",
181 i, cb->ops[i].h, cb->ops[i].l); 181 i, cb->ops[i].h, cb->ops[i].l);
182 } 182 }
183 break; 183 break;
184 } 184 }
185 } 185 }
186 186
187 static void print_cb_list(struct ppc440spe_adma_chan *chan, 187 static void print_cb_list(struct ppc440spe_adma_chan *chan,
188 struct ppc440spe_adma_desc_slot *iter) 188 struct ppc440spe_adma_desc_slot *iter)
189 { 189 {
190 for (; iter; iter = iter->hw_next) 190 for (; iter; iter = iter->hw_next)
191 print_cb(chan, iter->hw_desc); 191 print_cb(chan, iter->hw_desc);
192 } 192 }
193 193
194 static void prep_dma_xor_dbg(int id, dma_addr_t dst, dma_addr_t *src, 194 static void prep_dma_xor_dbg(int id, dma_addr_t dst, dma_addr_t *src,
195 unsigned int src_cnt) 195 unsigned int src_cnt)
196 { 196 {
197 int i; 197 int i;
198 198
199 pr_debug("\n%s(%d):\nsrc: ", __func__, id); 199 pr_debug("\n%s(%d):\nsrc: ", __func__, id);
200 for (i = 0; i < src_cnt; i++) 200 for (i = 0; i < src_cnt; i++)
201 pr_debug("\t0x%016llx ", src[i]); 201 pr_debug("\t0x%016llx ", src[i]);
202 pr_debug("dst:\n\t0x%016llx\n", dst); 202 pr_debug("dst:\n\t0x%016llx\n", dst);
203 } 203 }
204 204
205 static void prep_dma_pq_dbg(int id, dma_addr_t *dst, dma_addr_t *src, 205 static void prep_dma_pq_dbg(int id, dma_addr_t *dst, dma_addr_t *src,
206 unsigned int src_cnt) 206 unsigned int src_cnt)
207 { 207 {
208 int i; 208 int i;
209 209
210 pr_debug("\n%s(%d):\nsrc: ", __func__, id); 210 pr_debug("\n%s(%d):\nsrc: ", __func__, id);
211 for (i = 0; i < src_cnt; i++) 211 for (i = 0; i < src_cnt; i++)
212 pr_debug("\t0x%016llx ", src[i]); 212 pr_debug("\t0x%016llx ", src[i]);
213 pr_debug("dst: "); 213 pr_debug("dst: ");
214 for (i = 0; i < 2; i++) 214 for (i = 0; i < 2; i++)
215 pr_debug("\t0x%016llx ", dst[i]); 215 pr_debug("\t0x%016llx ", dst[i]);
216 } 216 }
217 217
218 static void prep_dma_pqzero_sum_dbg(int id, dma_addr_t *src, 218 static void prep_dma_pqzero_sum_dbg(int id, dma_addr_t *src,
219 unsigned int src_cnt, 219 unsigned int src_cnt,
220 const unsigned char *scf) 220 const unsigned char *scf)
221 { 221 {
222 int i; 222 int i;
223 223
224 pr_debug("\n%s(%d):\nsrc(coef): ", __func__, id); 224 pr_debug("\n%s(%d):\nsrc(coef): ", __func__, id);
225 if (scf) { 225 if (scf) {
226 for (i = 0; i < src_cnt; i++) 226 for (i = 0; i < src_cnt; i++)
227 pr_debug("\t0x%016llx(0x%02x) ", src[i], scf[i]); 227 pr_debug("\t0x%016llx(0x%02x) ", src[i], scf[i]);
228 } else { 228 } else {
229 for (i = 0; i < src_cnt; i++) 229 for (i = 0; i < src_cnt; i++)
230 pr_debug("\t0x%016llx(no) ", src[i]); 230 pr_debug("\t0x%016llx(no) ", src[i]);
231 } 231 }
232 232
233 pr_debug("dst: "); 233 pr_debug("dst: ");
234 for (i = 0; i < 2; i++) 234 for (i = 0; i < 2; i++)
235 pr_debug("\t0x%016llx ", src[src_cnt + i]); 235 pr_debug("\t0x%016llx ", src[src_cnt + i]);
236 } 236 }
237 237
238 /****************************************************************************** 238 /******************************************************************************
239 * Command (Descriptor) Blocks low-level routines 239 * Command (Descriptor) Blocks low-level routines
240 ******************************************************************************/ 240 ******************************************************************************/
241 /** 241 /**
242 * ppc440spe_desc_init_interrupt - initialize the descriptor for INTERRUPT 242 * ppc440spe_desc_init_interrupt - initialize the descriptor for INTERRUPT
243 * pseudo operation 243 * pseudo operation
244 */ 244 */
245 static void ppc440spe_desc_init_interrupt(struct ppc440spe_adma_desc_slot *desc, 245 static void ppc440spe_desc_init_interrupt(struct ppc440spe_adma_desc_slot *desc,
246 struct ppc440spe_adma_chan *chan) 246 struct ppc440spe_adma_chan *chan)
247 { 247 {
248 struct xor_cb *p; 248 struct xor_cb *p;
249 249
250 switch (chan->device->id) { 250 switch (chan->device->id) {
251 case PPC440SPE_XOR_ID: 251 case PPC440SPE_XOR_ID:
252 p = desc->hw_desc; 252 p = desc->hw_desc;
253 memset(desc->hw_desc, 0, sizeof(struct xor_cb)); 253 memset(desc->hw_desc, 0, sizeof(struct xor_cb));
254 /* NOP with Command Block Complete Enable */ 254 /* NOP with Command Block Complete Enable */
255 p->cbc = XOR_CBCR_CBCE_BIT; 255 p->cbc = XOR_CBCR_CBCE_BIT;
256 break; 256 break;
257 case PPC440SPE_DMA0_ID: 257 case PPC440SPE_DMA0_ID:
258 case PPC440SPE_DMA1_ID: 258 case PPC440SPE_DMA1_ID:
259 memset(desc->hw_desc, 0, sizeof(struct dma_cdb)); 259 memset(desc->hw_desc, 0, sizeof(struct dma_cdb));
260 /* NOP with interrupt */ 260 /* NOP with interrupt */
261 set_bit(PPC440SPE_DESC_INT, &desc->flags); 261 set_bit(PPC440SPE_DESC_INT, &desc->flags);
262 break; 262 break;
263 default: 263 default:
264 printk(KERN_ERR "Unsupported id %d in %s\n", chan->device->id, 264 printk(KERN_ERR "Unsupported id %d in %s\n", chan->device->id,
265 __func__); 265 __func__);
266 break; 266 break;
267 } 267 }
268 } 268 }
269 269
270 /** 270 /**
271 * ppc440spe_desc_init_null_xor - initialize the descriptor for NULL XOR 271 * ppc440spe_desc_init_null_xor - initialize the descriptor for NULL XOR
272 * pseudo operation 272 * pseudo operation
273 */ 273 */
274 static void ppc440spe_desc_init_null_xor(struct ppc440spe_adma_desc_slot *desc) 274 static void ppc440spe_desc_init_null_xor(struct ppc440spe_adma_desc_slot *desc)
275 { 275 {
276 memset(desc->hw_desc, 0, sizeof(struct xor_cb)); 276 memset(desc->hw_desc, 0, sizeof(struct xor_cb));
277 desc->hw_next = NULL; 277 desc->hw_next = NULL;
278 desc->src_cnt = 0; 278 desc->src_cnt = 0;
279 desc->dst_cnt = 1; 279 desc->dst_cnt = 1;
280 } 280 }
281 281
282 /** 282 /**
283 * ppc440spe_desc_init_xor - initialize the descriptor for XOR operation 283 * ppc440spe_desc_init_xor - initialize the descriptor for XOR operation
284 */ 284 */
285 static void ppc440spe_desc_init_xor(struct ppc440spe_adma_desc_slot *desc, 285 static void ppc440spe_desc_init_xor(struct ppc440spe_adma_desc_slot *desc,
286 int src_cnt, unsigned long flags) 286 int src_cnt, unsigned long flags)
287 { 287 {
288 struct xor_cb *hw_desc = desc->hw_desc; 288 struct xor_cb *hw_desc = desc->hw_desc;
289 289
290 memset(desc->hw_desc, 0, sizeof(struct xor_cb)); 290 memset(desc->hw_desc, 0, sizeof(struct xor_cb));
291 desc->hw_next = NULL; 291 desc->hw_next = NULL;
292 desc->src_cnt = src_cnt; 292 desc->src_cnt = src_cnt;
293 desc->dst_cnt = 1; 293 desc->dst_cnt = 1;
294 294
295 hw_desc->cbc = XOR_CBCR_TGT_BIT | src_cnt; 295 hw_desc->cbc = XOR_CBCR_TGT_BIT | src_cnt;
296 if (flags & DMA_PREP_INTERRUPT) 296 if (flags & DMA_PREP_INTERRUPT)
297 /* Enable interrupt on completion */ 297 /* Enable interrupt on completion */
298 hw_desc->cbc |= XOR_CBCR_CBCE_BIT; 298 hw_desc->cbc |= XOR_CBCR_CBCE_BIT;
299 } 299 }
300 300
301 /** 301 /**
302 * ppc440spe_desc_init_dma2pq - initialize the descriptor for PQ 302 * ppc440spe_desc_init_dma2pq - initialize the descriptor for PQ
303 * operation in DMA2 controller 303 * operation in DMA2 controller
304 */ 304 */
305 static void ppc440spe_desc_init_dma2pq(struct ppc440spe_adma_desc_slot *desc, 305 static void ppc440spe_desc_init_dma2pq(struct ppc440spe_adma_desc_slot *desc,
306 int dst_cnt, int src_cnt, unsigned long flags) 306 int dst_cnt, int src_cnt, unsigned long flags)
307 { 307 {
308 struct xor_cb *hw_desc = desc->hw_desc; 308 struct xor_cb *hw_desc = desc->hw_desc;
309 309
310 memset(desc->hw_desc, 0, sizeof(struct xor_cb)); 310 memset(desc->hw_desc, 0, sizeof(struct xor_cb));
311 desc->hw_next = NULL; 311 desc->hw_next = NULL;
312 desc->src_cnt = src_cnt; 312 desc->src_cnt = src_cnt;
313 desc->dst_cnt = dst_cnt; 313 desc->dst_cnt = dst_cnt;
314 memset(desc->reverse_flags, 0, sizeof(desc->reverse_flags)); 314 memset(desc->reverse_flags, 0, sizeof(desc->reverse_flags));
315 desc->descs_per_op = 0; 315 desc->descs_per_op = 0;
316 316
317 hw_desc->cbc = XOR_CBCR_TGT_BIT; 317 hw_desc->cbc = XOR_CBCR_TGT_BIT;
318 if (flags & DMA_PREP_INTERRUPT) 318 if (flags & DMA_PREP_INTERRUPT)
319 /* Enable interrupt on completion */ 319 /* Enable interrupt on completion */
320 hw_desc->cbc |= XOR_CBCR_CBCE_BIT; 320 hw_desc->cbc |= XOR_CBCR_CBCE_BIT;
321 } 321 }
322 322
323 #define DMA_CTRL_FLAGS_LAST DMA_PREP_FENCE 323 #define DMA_CTRL_FLAGS_LAST DMA_PREP_FENCE
324 #define DMA_PREP_ZERO_P (DMA_CTRL_FLAGS_LAST << 1) 324 #define DMA_PREP_ZERO_P (DMA_CTRL_FLAGS_LAST << 1)
325 #define DMA_PREP_ZERO_Q (DMA_PREP_ZERO_P << 1) 325 #define DMA_PREP_ZERO_Q (DMA_PREP_ZERO_P << 1)
326 326
327 /** 327 /**
328 * ppc440spe_desc_init_dma01pq - initialize the descriptors for PQ operation 328 * ppc440spe_desc_init_dma01pq - initialize the descriptors for PQ operation
329 * with DMA0/1 329 * with DMA0/1
330 */ 330 */
331 static void ppc440spe_desc_init_dma01pq(struct ppc440spe_adma_desc_slot *desc, 331 static void ppc440spe_desc_init_dma01pq(struct ppc440spe_adma_desc_slot *desc,
332 int dst_cnt, int src_cnt, unsigned long flags, 332 int dst_cnt, int src_cnt, unsigned long flags,
333 unsigned long op) 333 unsigned long op)
334 { 334 {
335 struct dma_cdb *hw_desc; 335 struct dma_cdb *hw_desc;
336 struct ppc440spe_adma_desc_slot *iter; 336 struct ppc440spe_adma_desc_slot *iter;
337 u8 dopc; 337 u8 dopc;
338 338
339 /* Common initialization of a PQ descriptor chain */ 339 /* Common initialization of a PQ descriptor chain */
340 set_bits(op, &desc->flags); 340 set_bits(op, &desc->flags);
341 desc->src_cnt = src_cnt; 341 desc->src_cnt = src_cnt;
342 desc->dst_cnt = dst_cnt; 342 desc->dst_cnt = dst_cnt;
343 343
344 /* WXOR MULTICAST if both P and Q are being computed 344 /* WXOR MULTICAST if both P and Q are being computed
345 * MV_SG1_SG2 if Q only 345 * MV_SG1_SG2 if Q only
346 */ 346 */
347 dopc = (desc->dst_cnt == DMA_DEST_MAX_NUM) ? 347 dopc = (desc->dst_cnt == DMA_DEST_MAX_NUM) ?
348 DMA_CDB_OPC_MULTICAST : DMA_CDB_OPC_MV_SG1_SG2; 348 DMA_CDB_OPC_MULTICAST : DMA_CDB_OPC_MV_SG1_SG2;
349 349
350 list_for_each_entry(iter, &desc->group_list, chain_node) { 350 list_for_each_entry(iter, &desc->group_list, chain_node) {
351 hw_desc = iter->hw_desc; 351 hw_desc = iter->hw_desc;
352 memset(iter->hw_desc, 0, sizeof(struct dma_cdb)); 352 memset(iter->hw_desc, 0, sizeof(struct dma_cdb));
353 353
354 if (likely(!list_is_last(&iter->chain_node, 354 if (likely(!list_is_last(&iter->chain_node,
355 &desc->group_list))) { 355 &desc->group_list))) {
356 /* set 'next' pointer */ 356 /* set 'next' pointer */
357 iter->hw_next = list_entry(iter->chain_node.next, 357 iter->hw_next = list_entry(iter->chain_node.next,
358 struct ppc440spe_adma_desc_slot, chain_node); 358 struct ppc440spe_adma_desc_slot, chain_node);
359 clear_bit(PPC440SPE_DESC_INT, &iter->flags); 359 clear_bit(PPC440SPE_DESC_INT, &iter->flags);
360 } else { 360 } else {
361 /* this is the last descriptor. 361 /* this is the last descriptor.
362 * this slot will be pasted from ADMA level 362 * this slot will be pasted from ADMA level
363 * each time it wants to configure parameters 363 * each time it wants to configure parameters
364 * of the transaction (src, dst, ...) 364 * of the transaction (src, dst, ...)
365 */ 365 */
366 iter->hw_next = NULL; 366 iter->hw_next = NULL;
367 if (flags & DMA_PREP_INTERRUPT) 367 if (flags & DMA_PREP_INTERRUPT)
368 set_bit(PPC440SPE_DESC_INT, &iter->flags); 368 set_bit(PPC440SPE_DESC_INT, &iter->flags);
369 else 369 else
370 clear_bit(PPC440SPE_DESC_INT, &iter->flags); 370 clear_bit(PPC440SPE_DESC_INT, &iter->flags);
371 } 371 }
372 } 372 }
373 373
374 /* Set OPS depending on WXOR/RXOR type of operation */ 374 /* Set OPS depending on WXOR/RXOR type of operation */
375 if (!test_bit(PPC440SPE_DESC_RXOR, &desc->flags)) { 375 if (!test_bit(PPC440SPE_DESC_RXOR, &desc->flags)) {
376 /* This is a WXOR only chain: 376 /* This is a WXOR only chain:
377 * - first descriptors are for zeroing destinations 377 * - first descriptors are for zeroing destinations
378 * if PPC440SPE_ZERO_P/Q set; 378 * if PPC440SPE_ZERO_P/Q set;
379 * - the remaining descriptors are for GF-XOR operations. 379 * - the remaining descriptors are for GF-XOR operations.
380 */ 380 */
381 iter = list_first_entry(&desc->group_list, 381 iter = list_first_entry(&desc->group_list,
382 struct ppc440spe_adma_desc_slot, 382 struct ppc440spe_adma_desc_slot,
383 chain_node); 383 chain_node);
384 384
385 if (test_bit(PPC440SPE_ZERO_P, &desc->flags)) { 385 if (test_bit(PPC440SPE_ZERO_P, &desc->flags)) {
386 hw_desc = iter->hw_desc; 386 hw_desc = iter->hw_desc;
387 hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2; 387 hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;
388 iter = list_first_entry(&iter->chain_node, 388 iter = list_first_entry(&iter->chain_node,
389 struct ppc440spe_adma_desc_slot, 389 struct ppc440spe_adma_desc_slot,
390 chain_node); 390 chain_node);
391 } 391 }
392 392
393 if (test_bit(PPC440SPE_ZERO_Q, &desc->flags)) { 393 if (test_bit(PPC440SPE_ZERO_Q, &desc->flags)) {
394 hw_desc = iter->hw_desc; 394 hw_desc = iter->hw_desc;
395 hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2; 395 hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;
396 iter = list_first_entry(&iter->chain_node, 396 iter = list_first_entry(&iter->chain_node,
397 struct ppc440spe_adma_desc_slot, 397 struct ppc440spe_adma_desc_slot,
398 chain_node); 398 chain_node);
399 } 399 }
400 400
401 list_for_each_entry_from(iter, &desc->group_list, chain_node) { 401 list_for_each_entry_from(iter, &desc->group_list, chain_node) {
402 hw_desc = iter->hw_desc; 402 hw_desc = iter->hw_desc;
403 hw_desc->opc = dopc; 403 hw_desc->opc = dopc;
404 } 404 }
405 } else { 405 } else {
406 /* This is either RXOR-only or mixed RXOR/WXOR */ 406 /* This is either RXOR-only or mixed RXOR/WXOR */
407 407
408 /* The first 1 or 2 slots in chain are always RXOR, 408 /* The first 1 or 2 slots in chain are always RXOR,
409 * if we need to calculate P & Q, then there are two 409 * if we need to calculate P & Q, then there are two
410 * RXOR slots; if only P or only Q, then there is one 410 * RXOR slots; if only P or only Q, then there is one
411 */ 411 */
412 iter = list_first_entry(&desc->group_list, 412 iter = list_first_entry(&desc->group_list,
413 struct ppc440spe_adma_desc_slot, 413 struct ppc440spe_adma_desc_slot,
414 chain_node); 414 chain_node);
415 hw_desc = iter->hw_desc; 415 hw_desc = iter->hw_desc;
416 hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2; 416 hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;
417 417
418 if (desc->dst_cnt == DMA_DEST_MAX_NUM) { 418 if (desc->dst_cnt == DMA_DEST_MAX_NUM) {
419 iter = list_first_entry(&iter->chain_node, 419 iter = list_first_entry(&iter->chain_node,
420 struct ppc440spe_adma_desc_slot, 420 struct ppc440spe_adma_desc_slot,
421 chain_node); 421 chain_node);
422 hw_desc = iter->hw_desc; 422 hw_desc = iter->hw_desc;
423 hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2; 423 hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;
424 } 424 }
425 425
426 /* The remaining descs (if any) are WXORs */ 426 /* The remaining descs (if any) are WXORs */
427 if (test_bit(PPC440SPE_DESC_WXOR, &desc->flags)) { 427 if (test_bit(PPC440SPE_DESC_WXOR, &desc->flags)) {
428 iter = list_first_entry(&iter->chain_node, 428 iter = list_first_entry(&iter->chain_node,
429 struct ppc440spe_adma_desc_slot, 429 struct ppc440spe_adma_desc_slot,
430 chain_node); 430 chain_node);
431 list_for_each_entry_from(iter, &desc->group_list, 431 list_for_each_entry_from(iter, &desc->group_list,
432 chain_node) { 432 chain_node) {
433 hw_desc = iter->hw_desc; 433 hw_desc = iter->hw_desc;
434 hw_desc->opc = dopc; 434 hw_desc->opc = dopc;
435 } 435 }
436 } 436 }
437 } 437 }
438 } 438 }
439 439
440 /** 440 /**
441 * ppc440spe_desc_init_dma01pqzero_sum - initialize the descriptor 441 * ppc440spe_desc_init_dma01pqzero_sum - initialize the descriptor
442 * for PQ_ZERO_SUM operation 442 * for PQ_ZERO_SUM operation
443 */ 443 */
444 static void ppc440spe_desc_init_dma01pqzero_sum( 444 static void ppc440spe_desc_init_dma01pqzero_sum(
445 struct ppc440spe_adma_desc_slot *desc, 445 struct ppc440spe_adma_desc_slot *desc,
446 int dst_cnt, int src_cnt) 446 int dst_cnt, int src_cnt)
447 { 447 {
448 struct dma_cdb *hw_desc; 448 struct dma_cdb *hw_desc;
449 struct ppc440spe_adma_desc_slot *iter; 449 struct ppc440spe_adma_desc_slot *iter;
450 int i = 0; 450 int i = 0;
451 u8 dopc = (dst_cnt == 2) ? DMA_CDB_OPC_MULTICAST : 451 u8 dopc = (dst_cnt == 2) ? DMA_CDB_OPC_MULTICAST :
452 DMA_CDB_OPC_MV_SG1_SG2; 452 DMA_CDB_OPC_MV_SG1_SG2;
453 /* 453 /*
454 * Initialize starting from the 2nd or 3rd descriptor, depending 454 * Initialize starting from the 2nd or 3rd descriptor, depending
455 * on dst_cnt. First one or two slots are for cloning P 455 * on dst_cnt. First one or two slots are for cloning P
456 * and/or Q to chan->pdest and/or chan->qdest as we have 456 * and/or Q to chan->pdest and/or chan->qdest as we have
457 * to preserve original P/Q. 457 * to preserve original P/Q.
458 */ 458 */
459 iter = list_first_entry(&desc->group_list, 459 iter = list_first_entry(&desc->group_list,
460 struct ppc440spe_adma_desc_slot, chain_node); 460 struct ppc440spe_adma_desc_slot, chain_node);
461 iter = list_entry(iter->chain_node.next, 461 iter = list_entry(iter->chain_node.next,
462 struct ppc440spe_adma_desc_slot, chain_node); 462 struct ppc440spe_adma_desc_slot, chain_node);
463 463
464 if (dst_cnt > 1) { 464 if (dst_cnt > 1) {
465 iter = list_entry(iter->chain_node.next, 465 iter = list_entry(iter->chain_node.next,
466 struct ppc440spe_adma_desc_slot, chain_node); 466 struct ppc440spe_adma_desc_slot, chain_node);
467 } 467 }
468 /* initialize each source descriptor in chain */ 468 /* initialize each source descriptor in chain */
469 list_for_each_entry_from(iter, &desc->group_list, chain_node) { 469 list_for_each_entry_from(iter, &desc->group_list, chain_node) {
470 hw_desc = iter->hw_desc; 470 hw_desc = iter->hw_desc;
471 memset(iter->hw_desc, 0, sizeof(struct dma_cdb)); 471 memset(iter->hw_desc, 0, sizeof(struct dma_cdb));
472 iter->src_cnt = 0; 472 iter->src_cnt = 0;
473 iter->dst_cnt = 0; 473 iter->dst_cnt = 0;
474 474
475 /* This is a ZERO_SUM operation: 475 /* This is a ZERO_SUM operation:
476 * - <src_cnt> descriptors starting from 2nd or 3rd 476 * - <src_cnt> descriptors starting from 2nd or 3rd
477 * descriptor are for GF-XOR operations; 477 * descriptor are for GF-XOR operations;
478 * - remaining <dst_cnt> descriptors are for checking the result 478 * - remaining <dst_cnt> descriptors are for checking the result
479 */ 479 */
480 if (i++ < src_cnt) 480 if (i++ < src_cnt)
481 /* MV_SG1_SG2 if only Q is being verified 481 /* MV_SG1_SG2 if only Q is being verified
482 * MULTICAST if both P and Q are being verified 482 * MULTICAST if both P and Q are being verified
483 */ 483 */
484 hw_desc->opc = dopc; 484 hw_desc->opc = dopc;
485 else 485 else
486 /* DMA_CDB_OPC_DCHECK128 operation */ 486 /* DMA_CDB_OPC_DCHECK128 operation */
487 hw_desc->opc = DMA_CDB_OPC_DCHECK128; 487 hw_desc->opc = DMA_CDB_OPC_DCHECK128;
488 488
489 if (likely(!list_is_last(&iter->chain_node, 489 if (likely(!list_is_last(&iter->chain_node,
490 &desc->group_list))) { 490 &desc->group_list))) {
491 /* set 'next' pointer */ 491 /* set 'next' pointer */
492 iter->hw_next = list_entry(iter->chain_node.next, 492 iter->hw_next = list_entry(iter->chain_node.next,
493 struct ppc440spe_adma_desc_slot, 493 struct ppc440spe_adma_desc_slot,
494 chain_node); 494 chain_node);
495 } else { 495 } else {
496 /* this is the last descriptor. 496 /* this is the last descriptor.
497 * this slot will be pasted from ADMA level 497 * this slot will be pasted from ADMA level
498 * each time it wants to configure parameters 498 * each time it wants to configure parameters
499 * of the transaction (src, dst, ...) 499 * of the transaction (src, dst, ...)
500 */ 500 */
501 iter->hw_next = NULL; 501 iter->hw_next = NULL;
502 /* always enable interrupt generation since we get 502 /* always enable interrupt generation since we get
503 * the status of pqzero from the handler 503 * the status of pqzero from the handler
504 */ 504 */
505 set_bit(PPC440SPE_DESC_INT, &iter->flags); 505 set_bit(PPC440SPE_DESC_INT, &iter->flags);
506 } 506 }
507 } 507 }
508 desc->src_cnt = src_cnt; 508 desc->src_cnt = src_cnt;
509 desc->dst_cnt = dst_cnt; 509 desc->dst_cnt = dst_cnt;
510 } 510 }
511 511
512 /** 512 /**
513 * ppc440spe_desc_init_memcpy - initialize the descriptor for MEMCPY operation 513 * ppc440spe_desc_init_memcpy - initialize the descriptor for MEMCPY operation
514 */ 514 */
515 static void ppc440spe_desc_init_memcpy(struct ppc440spe_adma_desc_slot *desc, 515 static void ppc440spe_desc_init_memcpy(struct ppc440spe_adma_desc_slot *desc,
516 unsigned long flags) 516 unsigned long flags)
517 { 517 {
518 struct dma_cdb *hw_desc = desc->hw_desc; 518 struct dma_cdb *hw_desc = desc->hw_desc;
519 519
520 memset(desc->hw_desc, 0, sizeof(struct dma_cdb)); 520 memset(desc->hw_desc, 0, sizeof(struct dma_cdb));
521 desc->hw_next = NULL; 521 desc->hw_next = NULL;
522 desc->src_cnt = 1; 522 desc->src_cnt = 1;
523 desc->dst_cnt = 1; 523 desc->dst_cnt = 1;
524 524
525 if (flags & DMA_PREP_INTERRUPT) 525 if (flags & DMA_PREP_INTERRUPT)
526 set_bit(PPC440SPE_DESC_INT, &desc->flags); 526 set_bit(PPC440SPE_DESC_INT, &desc->flags);
527 else 527 else
528 clear_bit(PPC440SPE_DESC_INT, &desc->flags); 528 clear_bit(PPC440SPE_DESC_INT, &desc->flags);
529 529
530 hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2; 530 hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;
531 } 531 }
532 532
533 /** 533 /**
534 * ppc440spe_desc_init_memset - initialize the descriptor for MEMSET operation 534 * ppc440spe_desc_init_memset - initialize the descriptor for MEMSET operation
535 */ 535 */
536 static void ppc440spe_desc_init_memset(struct ppc440spe_adma_desc_slot *desc, 536 static void ppc440spe_desc_init_memset(struct ppc440spe_adma_desc_slot *desc,
537 int value, unsigned long flags) 537 int value, unsigned long flags)
538 { 538 {
539 struct dma_cdb *hw_desc = desc->hw_desc; 539 struct dma_cdb *hw_desc = desc->hw_desc;
540 540
541 memset(desc->hw_desc, 0, sizeof(struct dma_cdb)); 541 memset(desc->hw_desc, 0, sizeof(struct dma_cdb));
542 desc->hw_next = NULL; 542 desc->hw_next = NULL;
543 desc->src_cnt = 1; 543 desc->src_cnt = 1;
544 desc->dst_cnt = 1; 544 desc->dst_cnt = 1;
545 545
546 if (flags & DMA_PREP_INTERRUPT) 546 if (flags & DMA_PREP_INTERRUPT)
547 set_bit(PPC440SPE_DESC_INT, &desc->flags); 547 set_bit(PPC440SPE_DESC_INT, &desc->flags);
548 else 548 else
549 clear_bit(PPC440SPE_DESC_INT, &desc->flags); 549 clear_bit(PPC440SPE_DESC_INT, &desc->flags);
550 550
551 hw_desc->sg1u = hw_desc->sg1l = cpu_to_le32((u32)value); 551 hw_desc->sg1u = hw_desc->sg1l = cpu_to_le32((u32)value);
552 hw_desc->sg3u = hw_desc->sg3l = cpu_to_le32((u32)value); 552 hw_desc->sg3u = hw_desc->sg3l = cpu_to_le32((u32)value);
553 hw_desc->opc = DMA_CDB_OPC_DFILL128; 553 hw_desc->opc = DMA_CDB_OPC_DFILL128;
554 } 554 }
555 555
556 /** 556 /**
557 * ppc440spe_desc_set_src_addr - set source address into the descriptor 557 * ppc440spe_desc_set_src_addr - set source address into the descriptor
558 */ 558 */
559 static void ppc440spe_desc_set_src_addr(struct ppc440spe_adma_desc_slot *desc, 559 static void ppc440spe_desc_set_src_addr(struct ppc440spe_adma_desc_slot *desc,
560 struct ppc440spe_adma_chan *chan, 560 struct ppc440spe_adma_chan *chan,
561 int src_idx, dma_addr_t addrh, 561 int src_idx, dma_addr_t addrh,
562 dma_addr_t addrl) 562 dma_addr_t addrl)
563 { 563 {
564 struct dma_cdb *dma_hw_desc; 564 struct dma_cdb *dma_hw_desc;
565 struct xor_cb *xor_hw_desc; 565 struct xor_cb *xor_hw_desc;
566 phys_addr_t addr64, tmplow, tmphi; 566 phys_addr_t addr64, tmplow, tmphi;
567 567
568 switch (chan->device->id) { 568 switch (chan->device->id) {
569 case PPC440SPE_DMA0_ID: 569 case PPC440SPE_DMA0_ID:
570 case PPC440SPE_DMA1_ID: 570 case PPC440SPE_DMA1_ID:
571 if (!addrh) { 571 if (!addrh) {
572 addr64 = addrl; 572 addr64 = addrl;
573 tmphi = (addr64 >> 32); 573 tmphi = (addr64 >> 32);
574 tmplow = (addr64 & 0xFFFFFFFF); 574 tmplow = (addr64 & 0xFFFFFFFF);
575 } else { 575 } else {
576 tmphi = addrh; 576 tmphi = addrh;
577 tmplow = addrl; 577 tmplow = addrl;
578 } 578 }
579 dma_hw_desc = desc->hw_desc; 579 dma_hw_desc = desc->hw_desc;
580 dma_hw_desc->sg1l = cpu_to_le32((u32)tmplow); 580 dma_hw_desc->sg1l = cpu_to_le32((u32)tmplow);
581 dma_hw_desc->sg1u |= cpu_to_le32((u32)tmphi); 581 dma_hw_desc->sg1u |= cpu_to_le32((u32)tmphi);
582 break; 582 break;
583 case PPC440SPE_XOR_ID: 583 case PPC440SPE_XOR_ID:
584 xor_hw_desc = desc->hw_desc; 584 xor_hw_desc = desc->hw_desc;
585 xor_hw_desc->ops[src_idx].l = addrl; 585 xor_hw_desc->ops[src_idx].l = addrl;
586 xor_hw_desc->ops[src_idx].h |= addrh; 586 xor_hw_desc->ops[src_idx].h |= addrh;
587 break; 587 break;
588 } 588 }
589 } 589 }
590 590
591 /** 591 /**
592 * ppc440spe_desc_set_src_mult - set source address mult into the descriptor 592 * ppc440spe_desc_set_src_mult - set source address mult into the descriptor
593 */ 593 */
594 static void ppc440spe_desc_set_src_mult(struct ppc440spe_adma_desc_slot *desc, 594 static void ppc440spe_desc_set_src_mult(struct ppc440spe_adma_desc_slot *desc,
595 struct ppc440spe_adma_chan *chan, u32 mult_index, 595 struct ppc440spe_adma_chan *chan, u32 mult_index,
596 int sg_index, unsigned char mult_value) 596 int sg_index, unsigned char mult_value)
597 { 597 {
598 struct dma_cdb *dma_hw_desc; 598 struct dma_cdb *dma_hw_desc;
599 struct xor_cb *xor_hw_desc; 599 struct xor_cb *xor_hw_desc;
600 u32 *psgu; 600 u32 *psgu;
601 601
602 switch (chan->device->id) { 602 switch (chan->device->id) {
603 case PPC440SPE_DMA0_ID: 603 case PPC440SPE_DMA0_ID:
604 case PPC440SPE_DMA1_ID: 604 case PPC440SPE_DMA1_ID:
605 dma_hw_desc = desc->hw_desc; 605 dma_hw_desc = desc->hw_desc;
606 606
607 switch (sg_index) { 607 switch (sg_index) {
608 /* for RXOR operations set multiplier 608 /* for RXOR operations set multiplier
609 * into source cued address 609 * into source cued address
610 */ 610 */
611 case DMA_CDB_SG_SRC: 611 case DMA_CDB_SG_SRC:
612 psgu = &dma_hw_desc->sg1u; 612 psgu = &dma_hw_desc->sg1u;
613 break; 613 break;
614 /* for WXOR operations set multiplier 614 /* for WXOR operations set multiplier
615 * into destination cued address(es) 615 * into destination cued address(es)
616 */ 616 */
617 case DMA_CDB_SG_DST1: 617 case DMA_CDB_SG_DST1:
618 psgu = &dma_hw_desc->sg2u; 618 psgu = &dma_hw_desc->sg2u;
619 break; 619 break;
620 case DMA_CDB_SG_DST2: 620 case DMA_CDB_SG_DST2:
621 psgu = &dma_hw_desc->sg3u; 621 psgu = &dma_hw_desc->sg3u;
622 break; 622 break;
623 default: 623 default:
624 BUG(); 624 BUG();
625 } 625 }
626 626
627 *psgu |= cpu_to_le32(mult_value << mult_index); 627 *psgu |= cpu_to_le32(mult_value << mult_index);
628 break; 628 break;
629 case PPC440SPE_XOR_ID: 629 case PPC440SPE_XOR_ID:
630 xor_hw_desc = desc->hw_desc; 630 xor_hw_desc = desc->hw_desc;
631 break; 631 break;
632 default: 632 default:
633 BUG(); 633 BUG();
634 } 634 }
635 } 635 }
636 636
637 /** 637 /**
638 * ppc440spe_desc_set_dest_addr - set destination address into the descriptor 638 * ppc440spe_desc_set_dest_addr - set destination address into the descriptor
639 */ 639 */
640 static void ppc440spe_desc_set_dest_addr(struct ppc440spe_adma_desc_slot *desc, 640 static void ppc440spe_desc_set_dest_addr(struct ppc440spe_adma_desc_slot *desc,
641 struct ppc440spe_adma_chan *chan, 641 struct ppc440spe_adma_chan *chan,
642 dma_addr_t addrh, dma_addr_t addrl, 642 dma_addr_t addrh, dma_addr_t addrl,
643 u32 dst_idx) 643 u32 dst_idx)
644 { 644 {
645 struct dma_cdb *dma_hw_desc; 645 struct dma_cdb *dma_hw_desc;
646 struct xor_cb *xor_hw_desc; 646 struct xor_cb *xor_hw_desc;
647 phys_addr_t addr64, tmphi, tmplow; 647 phys_addr_t addr64, tmphi, tmplow;
648 u32 *psgu, *psgl; 648 u32 *psgu, *psgl;
649 649
650 switch (chan->device->id) { 650 switch (chan->device->id) {
651 case PPC440SPE_DMA0_ID: 651 case PPC440SPE_DMA0_ID:
652 case PPC440SPE_DMA1_ID: 652 case PPC440SPE_DMA1_ID:
653 if (!addrh) { 653 if (!addrh) {
654 addr64 = addrl; 654 addr64 = addrl;
655 tmphi = (addr64 >> 32); 655 tmphi = (addr64 >> 32);
656 tmplow = (addr64 & 0xFFFFFFFF); 656 tmplow = (addr64 & 0xFFFFFFFF);
657 } else { 657 } else {
658 tmphi = addrh; 658 tmphi = addrh;
659 tmplow = addrl; 659 tmplow = addrl;
660 } 660 }
661 dma_hw_desc = desc->hw_desc; 661 dma_hw_desc = desc->hw_desc;
662 662
663 psgu = dst_idx ? &dma_hw_desc->sg3u : &dma_hw_desc->sg2u; 663 psgu = dst_idx ? &dma_hw_desc->sg3u : &dma_hw_desc->sg2u;
664 psgl = dst_idx ? &dma_hw_desc->sg3l : &dma_hw_desc->sg2l; 664 psgl = dst_idx ? &dma_hw_desc->sg3l : &dma_hw_desc->sg2l;
665 665
666 *psgl = cpu_to_le32((u32)tmplow); 666 *psgl = cpu_to_le32((u32)tmplow);
667 *psgu |= cpu_to_le32((u32)tmphi); 667 *psgu |= cpu_to_le32((u32)tmphi);
668 break; 668 break;
669 case PPC440SPE_XOR_ID: 669 case PPC440SPE_XOR_ID:
670 xor_hw_desc = desc->hw_desc; 670 xor_hw_desc = desc->hw_desc;
671 xor_hw_desc->cbtal = addrl; 671 xor_hw_desc->cbtal = addrl;
672 xor_hw_desc->cbtah |= addrh; 672 xor_hw_desc->cbtah |= addrh;
673 break; 673 break;
674 } 674 }
675 } 675 }
676 676
677 /** 677 /**
678 * ppc440spe_desc_set_byte_count - set number of data bytes involved 678 * ppc440spe_desc_set_byte_count - set number of data bytes involved
679 * into the operation 679 * into the operation
680 */ 680 */
681 static void ppc440spe_desc_set_byte_count(struct ppc440spe_adma_desc_slot *desc, 681 static void ppc440spe_desc_set_byte_count(struct ppc440spe_adma_desc_slot *desc,
682 struct ppc440spe_adma_chan *chan, 682 struct ppc440spe_adma_chan *chan,
683 u32 byte_count) 683 u32 byte_count)
684 { 684 {
685 struct dma_cdb *dma_hw_desc; 685 struct dma_cdb *dma_hw_desc;
686 struct xor_cb *xor_hw_desc; 686 struct xor_cb *xor_hw_desc;
687 687
688 switch (chan->device->id) { 688 switch (chan->device->id) {
689 case PPC440SPE_DMA0_ID: 689 case PPC440SPE_DMA0_ID:
690 case PPC440SPE_DMA1_ID: 690 case PPC440SPE_DMA1_ID:
691 dma_hw_desc = desc->hw_desc; 691 dma_hw_desc = desc->hw_desc;
692 dma_hw_desc->cnt = cpu_to_le32(byte_count); 692 dma_hw_desc->cnt = cpu_to_le32(byte_count);
693 break; 693 break;
694 case PPC440SPE_XOR_ID: 694 case PPC440SPE_XOR_ID:
695 xor_hw_desc = desc->hw_desc; 695 xor_hw_desc = desc->hw_desc;
696 xor_hw_desc->cbbc = byte_count; 696 xor_hw_desc->cbbc = byte_count;
697 break; 697 break;
698 } 698 }
699 } 699 }
700 700
701 /** 701 /**
702 * ppc440spe_desc_set_rxor_block_size - set RXOR block size 702 * ppc440spe_desc_set_rxor_block_size - set RXOR block size
703 */ 703 */
704 static inline void ppc440spe_desc_set_rxor_block_size(u32 byte_count) 704 static inline void ppc440spe_desc_set_rxor_block_size(u32 byte_count)
705 { 705 {
706 /* assume that byte_count is aligned on a 512-byte boundary; 706 /* assume that byte_count is aligned on a 512-byte boundary;
707 * thus write it directly to the register (bits 23:31 are 707 * thus write it directly to the register (bits 23:31 are
708 * reserved there). 708 * reserved there).
709 */ 709 */
710 dcr_write(ppc440spe_mq_dcr_host, DCRN_MQ0_CF2H, byte_count); 710 dcr_write(ppc440spe_mq_dcr_host, DCRN_MQ0_CF2H, byte_count);
711 } 711 }
712 712
713 /** 713 /**
714 * ppc440spe_desc_set_dcheck - set CHECK pattern 714 * ppc440spe_desc_set_dcheck - set CHECK pattern
715 */ 715 */
716 static void ppc440spe_desc_set_dcheck(struct ppc440spe_adma_desc_slot *desc, 716 static void ppc440spe_desc_set_dcheck(struct ppc440spe_adma_desc_slot *desc,
717 struct ppc440spe_adma_chan *chan, u8 *qword) 717 struct ppc440spe_adma_chan *chan, u8 *qword)
718 { 718 {
719 struct dma_cdb *dma_hw_desc; 719 struct dma_cdb *dma_hw_desc;
720 720
721 switch (chan->device->id) { 721 switch (chan->device->id) {
722 case PPC440SPE_DMA0_ID: 722 case PPC440SPE_DMA0_ID:
723 case PPC440SPE_DMA1_ID: 723 case PPC440SPE_DMA1_ID:
724 dma_hw_desc = desc->hw_desc; 724 dma_hw_desc = desc->hw_desc;
725 iowrite32(qword[0], &dma_hw_desc->sg3l); 725 iowrite32(qword[0], &dma_hw_desc->sg3l);
726 iowrite32(qword[4], &dma_hw_desc->sg3u); 726 iowrite32(qword[4], &dma_hw_desc->sg3u);
727 iowrite32(qword[8], &dma_hw_desc->sg2l); 727 iowrite32(qword[8], &dma_hw_desc->sg2l);
728 iowrite32(qword[12], &dma_hw_desc->sg2u); 728 iowrite32(qword[12], &dma_hw_desc->sg2u);
729 break; 729 break;
730 default: 730 default:
731 BUG(); 731 BUG();
732 } 732 }
733 } 733 }
734 734
735 /** 735 /**
736 * ppc440spe_xor_set_link - set link address in xor CB 736 * ppc440spe_xor_set_link - set link address in xor CB
737 */ 737 */
738 static void ppc440spe_xor_set_link(struct ppc440spe_adma_desc_slot *prev_desc, 738 static void ppc440spe_xor_set_link(struct ppc440spe_adma_desc_slot *prev_desc,
739 struct ppc440spe_adma_desc_slot *next_desc) 739 struct ppc440spe_adma_desc_slot *next_desc)
740 { 740 {
741 struct xor_cb *xor_hw_desc = prev_desc->hw_desc; 741 struct xor_cb *xor_hw_desc = prev_desc->hw_desc;
742 742
743 if (unlikely(!next_desc || !(next_desc->phys))) { 743 if (unlikely(!next_desc || !(next_desc->phys))) {
744 printk(KERN_ERR "%s: next_desc=0x%p; next_desc->phys=0x%llx\n", 744 printk(KERN_ERR "%s: next_desc=0x%p; next_desc->phys=0x%llx\n",
745 __func__, next_desc, 745 __func__, next_desc,
746 next_desc ? next_desc->phys : 0); 746 next_desc ? next_desc->phys : 0);
747 BUG(); 747 BUG();
748 } 748 }
749 749
750 xor_hw_desc->cbs = 0; 750 xor_hw_desc->cbs = 0;
751 xor_hw_desc->cblal = next_desc->phys; 751 xor_hw_desc->cblal = next_desc->phys;
752 xor_hw_desc->cblah = 0; 752 xor_hw_desc->cblah = 0;
753 xor_hw_desc->cbc |= XOR_CBCR_LNK_BIT; 753 xor_hw_desc->cbc |= XOR_CBCR_LNK_BIT;
754 } 754 }
755 755
756 /** 756 /**
757 * ppc440spe_desc_set_link - set the address of descriptor following this 757 * ppc440spe_desc_set_link - set the address of descriptor following this
758 * descriptor in chain 758 * descriptor in chain
759 */ 759 */
760 static void ppc440spe_desc_set_link(struct ppc440spe_adma_chan *chan, 760 static void ppc440spe_desc_set_link(struct ppc440spe_adma_chan *chan,
761 struct ppc440spe_adma_desc_slot *prev_desc, 761 struct ppc440spe_adma_desc_slot *prev_desc,
762 struct ppc440spe_adma_desc_slot *next_desc) 762 struct ppc440spe_adma_desc_slot *next_desc)
763 { 763 {
764 unsigned long flags; 764 unsigned long flags;
765 struct ppc440spe_adma_desc_slot *tail = next_desc; 765 struct ppc440spe_adma_desc_slot *tail = next_desc;
766 766
767 if (unlikely(!prev_desc || !next_desc || 767 if (unlikely(!prev_desc || !next_desc ||
768 (prev_desc->hw_next && prev_desc->hw_next != next_desc))) { 768 (prev_desc->hw_next && prev_desc->hw_next != next_desc))) {
769 /* If the previous descriptor's next pointer is being 769 /* If the previous descriptor's next pointer is being
770 * overwritten, something is wrong. However, we may refetch 770 * overwritten, something is wrong. However, we may refetch
771 * from append to initiate list processing; in that case it's ok. 771 * from append to initiate list processing; in that case it's ok.
772 */ 772 */
773 printk(KERN_ERR "%s: prev_desc=0x%p; next_desc=0x%p; " 773 printk(KERN_ERR "%s: prev_desc=0x%p; next_desc=0x%p; "
774 "prev->hw_next=0x%p\n", __func__, prev_desc, 774 "prev->hw_next=0x%p\n", __func__, prev_desc,
775 next_desc, prev_desc ? prev_desc->hw_next : 0); 775 next_desc, prev_desc ? prev_desc->hw_next : 0);
776 BUG(); 776 BUG();
777 } 777 }
778 778
779 local_irq_save(flags); 779 local_irq_save(flags);
780 780
781 /* do s/w chaining both for DMA and XOR descriptors */ 781 /* do s/w chaining both for DMA and XOR descriptors */
782 prev_desc->hw_next = next_desc; 782 prev_desc->hw_next = next_desc;
783 783
784 switch (chan->device->id) { 784 switch (chan->device->id) {
785 case PPC440SPE_DMA0_ID: 785 case PPC440SPE_DMA0_ID:
786 case PPC440SPE_DMA1_ID: 786 case PPC440SPE_DMA1_ID:
787 break; 787 break;
788 case PPC440SPE_XOR_ID: 788 case PPC440SPE_XOR_ID:
789 /* bind descriptor to the chain */ 789 /* bind descriptor to the chain */
790 while (tail->hw_next) 790 while (tail->hw_next)
791 tail = tail->hw_next; 791 tail = tail->hw_next;
792 xor_last_linked = tail; 792 xor_last_linked = tail;
793 793
794 if (prev_desc == xor_last_submit) 794 if (prev_desc == xor_last_submit)
795 /* do not link to the last submitted CB */ 795 /* do not link to the last submitted CB */
796 break; 796 break;
797 ppc440spe_xor_set_link(prev_desc, next_desc); 797 ppc440spe_xor_set_link(prev_desc, next_desc);
798 break; 798 break;
799 } 799 }
800 800
801 local_irq_restore(flags); 801 local_irq_restore(flags);
802 } 802 }
803 803
804 /** 804 /**
805 * ppc440spe_desc_get_src_addr - extract the source address from the descriptor 805 * ppc440spe_desc_get_src_addr - extract the source address from the descriptor
806 */ 806 */
807 static u32 ppc440spe_desc_get_src_addr(struct ppc440spe_adma_desc_slot *desc, 807 static u32 ppc440spe_desc_get_src_addr(struct ppc440spe_adma_desc_slot *desc,
808 struct ppc440spe_adma_chan *chan, int src_idx) 808 struct ppc440spe_adma_chan *chan, int src_idx)
809 { 809 {
810 struct dma_cdb *dma_hw_desc; 810 struct dma_cdb *dma_hw_desc;
811 struct xor_cb *xor_hw_desc; 811 struct xor_cb *xor_hw_desc;
812 812
813 switch (chan->device->id) { 813 switch (chan->device->id) {
814 case PPC440SPE_DMA0_ID: 814 case PPC440SPE_DMA0_ID:
815 case PPC440SPE_DMA1_ID: 815 case PPC440SPE_DMA1_ID:
816 dma_hw_desc = desc->hw_desc; 816 dma_hw_desc = desc->hw_desc;
817 /* May have 0, 1, 2, or 3 sources */ 817 /* May have 0, 1, 2, or 3 sources */
818 switch (dma_hw_desc->opc) { 818 switch (dma_hw_desc->opc) {
819 case DMA_CDB_OPC_NO_OP: 819 case DMA_CDB_OPC_NO_OP:
820 case DMA_CDB_OPC_DFILL128: 820 case DMA_CDB_OPC_DFILL128:
821 return 0; 821 return 0;
822 case DMA_CDB_OPC_DCHECK128: 822 case DMA_CDB_OPC_DCHECK128:
823 if (unlikely(src_idx)) { 823 if (unlikely(src_idx)) {
824 printk(KERN_ERR "%s: try to get %d source for" 824 printk(KERN_ERR "%s: try to get %d source for"
825 " DCHECK128\n", __func__, src_idx); 825 " DCHECK128\n", __func__, src_idx);
826 BUG(); 826 BUG();
827 } 827 }
828 return le32_to_cpu(dma_hw_desc->sg1l); 828 return le32_to_cpu(dma_hw_desc->sg1l);
829 case DMA_CDB_OPC_MULTICAST: 829 case DMA_CDB_OPC_MULTICAST:
830 case DMA_CDB_OPC_MV_SG1_SG2: 830 case DMA_CDB_OPC_MV_SG1_SG2:
831 if (unlikely(src_idx > 2)) { 831 if (unlikely(src_idx > 2)) {
832 printk(KERN_ERR "%s: try to get %d source from" 832 printk(KERN_ERR "%s: try to get %d source from"
833 " DMA descr\n", __func__, src_idx); 833 " DMA descr\n", __func__, src_idx);
834 BUG(); 834 BUG();
835 } 835 }
836 if (src_idx) { 836 if (src_idx) {
837 if (le32_to_cpu(dma_hw_desc->sg1u) & 837 if (le32_to_cpu(dma_hw_desc->sg1u) &
838 DMA_CUED_XOR_WIN_MSK) { 838 DMA_CUED_XOR_WIN_MSK) {
839 u8 region; 839 u8 region;
840 840
841 if (src_idx == 1) 841 if (src_idx == 1)
842 return le32_to_cpu( 842 return le32_to_cpu(
843 dma_hw_desc->sg1l) + 843 dma_hw_desc->sg1l) +
844 desc->unmap_len; 844 desc->unmap_len;
845 845
846 region = (le32_to_cpu( 846 region = (le32_to_cpu(
847 dma_hw_desc->sg1u)) >> 847 dma_hw_desc->sg1u)) >>
848 DMA_CUED_REGION_OFF; 848 DMA_CUED_REGION_OFF;
849 849
850 region &= DMA_CUED_REGION_MSK; 850 region &= DMA_CUED_REGION_MSK;
851 switch (region) { 851 switch (region) {
852 case DMA_RXOR123: 852 case DMA_RXOR123:
853 return le32_to_cpu( 853 return le32_to_cpu(
854 dma_hw_desc->sg1l) + 854 dma_hw_desc->sg1l) +
855 (desc->unmap_len << 1); 855 (desc->unmap_len << 1);
856 case DMA_RXOR124: 856 case DMA_RXOR124:
857 return le32_to_cpu( 857 return le32_to_cpu(
858 dma_hw_desc->sg1l) + 858 dma_hw_desc->sg1l) +
859 (desc->unmap_len * 3); 859 (desc->unmap_len * 3);
860 case DMA_RXOR125: 860 case DMA_RXOR125:
861 return le32_to_cpu( 861 return le32_to_cpu(
862 dma_hw_desc->sg1l) + 862 dma_hw_desc->sg1l) +
863 (desc->unmap_len << 2); 863 (desc->unmap_len << 2);
864 default: 864 default:
865 printk(KERN_ERR 865 printk(KERN_ERR
866 "%s: try to" 866 "%s: try to"
867 " get src3 for region %02x" 867 " get src3 for region %02x"
868 "PPC440SPE_DESC_RXOR12?\n", 868 "PPC440SPE_DESC_RXOR12?\n",
869 __func__, region); 869 __func__, region);
870 BUG(); 870 BUG();
871 } 871 }
872 } else { 872 } else {
873 printk(KERN_ERR 873 printk(KERN_ERR
874 "%s: try to get %d" 874 "%s: try to get %d"
875 " source for non-cued descr\n", 875 " source for non-cued descr\n",
876 __func__, src_idx); 876 __func__, src_idx);
877 BUG(); 877 BUG();
878 } 878 }
879 } 879 }
880 return le32_to_cpu(dma_hw_desc->sg1l); 880 return le32_to_cpu(dma_hw_desc->sg1l);
881 default: 881 default:
882 printk(KERN_ERR "%s: unknown OPC 0x%02x\n", 882 printk(KERN_ERR "%s: unknown OPC 0x%02x\n",
883 __func__, dma_hw_desc->opc); 883 __func__, dma_hw_desc->opc);
884 BUG(); 884 BUG();
885 } 885 }
886 return le32_to_cpu(dma_hw_desc->sg1l); 886 return le32_to_cpu(dma_hw_desc->sg1l);
887 case PPC440SPE_XOR_ID: 887 case PPC440SPE_XOR_ID:
888 /* May have up to 16 sources */ 888 /* May have up to 16 sources */
889 xor_hw_desc = desc->hw_desc; 889 xor_hw_desc = desc->hw_desc;
890 return xor_hw_desc->ops[src_idx].l; 890 return xor_hw_desc->ops[src_idx].l;
891 } 891 }
892 return 0; 892 return 0;
893 } 893 }
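The RXOR branch above derives every source address from a single base (sg1l) plus multiples of the transfer length: the second operand is at sg1l + unmap_len, and the third at sg1l + 2*len, 3*len or 4*len for the RXOR123/124/125 windows respectively. A minimal standalone sketch of that arithmetic (the enum values are illustrative, not the driver's DMA_RXOR* codes):

    #include <stdint.h>
    #include <stdio.h>

    enum rxor_region { RXOR123, RXOR124, RXOR125 };

    /* Mirror of the offset rules in the switch above. */
    static uint32_t rxor_src_addr(uint32_t sg1l, uint32_t len,
                                  int src_idx, enum rxor_region region)
    {
            if (src_idx == 1)               /* second operand follows the base */
                    return sg1l + len;
            switch (region) {               /* third operand */
            case RXOR123: return sg1l + 2 * len;    /* len << 1 */
            case RXOR124: return sg1l + 3 * len;    /* len * 3  */
            case RXOR125: return sg1l + 4 * len;    /* len << 2 */
            }
            return sg1l;
    }

    int main(void)
    {
            /* base 0x1000, len 0x200, third source in a 1-2-4 window */
            printf("0x%x\n", (unsigned)rxor_src_addr(0x1000, 0x200, 2, RXOR124)); /* 0x1600 */
            return 0;
    }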
894 894
895 /** 895 /**
896 * ppc440spe_desc_get_dest_addr - extract the destination address from the 896 * ppc440spe_desc_get_dest_addr - extract the destination address from the
897 * descriptor 897 * descriptor
898 */ 898 */
899 static u32 ppc440spe_desc_get_dest_addr(struct ppc440spe_adma_desc_slot *desc, 899 static u32 ppc440spe_desc_get_dest_addr(struct ppc440spe_adma_desc_slot *desc,
900 struct ppc440spe_adma_chan *chan, int idx) 900 struct ppc440spe_adma_chan *chan, int idx)
901 { 901 {
902 struct dma_cdb *dma_hw_desc; 902 struct dma_cdb *dma_hw_desc;
903 struct xor_cb *xor_hw_desc; 903 struct xor_cb *xor_hw_desc;
904 904
905 switch (chan->device->id) { 905 switch (chan->device->id) {
906 case PPC440SPE_DMA0_ID: 906 case PPC440SPE_DMA0_ID:
907 case PPC440SPE_DMA1_ID: 907 case PPC440SPE_DMA1_ID:
908 dma_hw_desc = desc->hw_desc; 908 dma_hw_desc = desc->hw_desc;
909 909
910 if (likely(!idx)) 910 if (likely(!idx))
911 return le32_to_cpu(dma_hw_desc->sg2l); 911 return le32_to_cpu(dma_hw_desc->sg2l);
912 return le32_to_cpu(dma_hw_desc->sg3l); 912 return le32_to_cpu(dma_hw_desc->sg3l);
913 case PPC440SPE_XOR_ID: 913 case PPC440SPE_XOR_ID:
914 xor_hw_desc = desc->hw_desc; 914 xor_hw_desc = desc->hw_desc;
915 return xor_hw_desc->cbtal; 915 return xor_hw_desc->cbtal;
916 } 916 }
917 return 0; 917 return 0;
918 } 918 }
919 919
920 /** 920 /**
921 * ppc440spe_desc_get_src_num - extract the number of source addresses from 921 * ppc440spe_desc_get_src_num - extract the number of source addresses from
922 * the descriptor 922 * the descriptor
923 */ 923 */
924 static u32 ppc440spe_desc_get_src_num(struct ppc440spe_adma_desc_slot *desc, 924 static u32 ppc440spe_desc_get_src_num(struct ppc440spe_adma_desc_slot *desc,
925 struct ppc440spe_adma_chan *chan) 925 struct ppc440spe_adma_chan *chan)
926 { 926 {
927 struct dma_cdb *dma_hw_desc; 927 struct dma_cdb *dma_hw_desc;
928 struct xor_cb *xor_hw_desc; 928 struct xor_cb *xor_hw_desc;
929 929
930 switch (chan->device->id) { 930 switch (chan->device->id) {
931 case PPC440SPE_DMA0_ID: 931 case PPC440SPE_DMA0_ID:
932 case PPC440SPE_DMA1_ID: 932 case PPC440SPE_DMA1_ID:
933 dma_hw_desc = desc->hw_desc; 933 dma_hw_desc = desc->hw_desc;
934 934
935 switch (dma_hw_desc->opc) { 935 switch (dma_hw_desc->opc) {
936 case DMA_CDB_OPC_NO_OP: 936 case DMA_CDB_OPC_NO_OP:
937 case DMA_CDB_OPC_DFILL128: 937 case DMA_CDB_OPC_DFILL128:
938 return 0; 938 return 0;
939 case DMA_CDB_OPC_DCHECK128: 939 case DMA_CDB_OPC_DCHECK128:
940 return 1; 940 return 1;
941 case DMA_CDB_OPC_MV_SG1_SG2: 941 case DMA_CDB_OPC_MV_SG1_SG2:
942 case DMA_CDB_OPC_MULTICAST: 942 case DMA_CDB_OPC_MULTICAST:
943 /* 943 /*
944 * Only RXOR operations have more than 944 * Only RXOR operations have more than
945 * one source 945 * one source
946 */ 946 */
947 if (le32_to_cpu(dma_hw_desc->sg1u) & 947 if (le32_to_cpu(dma_hw_desc->sg1u) &
948 DMA_CUED_XOR_WIN_MSK) { 948 DMA_CUED_XOR_WIN_MSK) {
949 /* RXOR op, there are 2 or 3 sources */ 949 /* RXOR op, there are 2 or 3 sources */
950 if (((le32_to_cpu(dma_hw_desc->sg1u) >> 950 if (((le32_to_cpu(dma_hw_desc->sg1u) >>
951 DMA_CUED_REGION_OFF) & 951 DMA_CUED_REGION_OFF) &
952 DMA_CUED_REGION_MSK) == DMA_RXOR12) { 952 DMA_CUED_REGION_MSK) == DMA_RXOR12) {
953 /* RXOR 1-2 */ 953 /* RXOR 1-2 */
954 return 2; 954 return 2;
955 } else { 955 } else {
956 /* RXOR 1-2-3/1-2-4/1-2-5 */ 956 /* RXOR 1-2-3/1-2-4/1-2-5 */
957 return 3; 957 return 3;
958 } 958 }
959 } 959 }
960 return 1; 960 return 1;
961 default: 961 default:
962 printk(KERN_ERR "%s: unknown OPC 0x%02x\n", 962 printk(KERN_ERR "%s: unknown OPC 0x%02x\n",
963 __func__, dma_hw_desc->opc); 963 __func__, dma_hw_desc->opc);
964 BUG(); 964 BUG();
965 } 965 }
966 case PPC440SPE_XOR_ID: 966 case PPC440SPE_XOR_ID:
967 /* up to 16 sources */ 967 /* up to 16 sources */
968 xor_hw_desc = desc->hw_desc; 968 xor_hw_desc = desc->hw_desc;
969 return xor_hw_desc->cbc & XOR_CDCR_OAC_MSK; 969 return xor_hw_desc->cbc & XOR_CDCR_OAC_MSK;
970 default: 970 default:
971 BUG(); 971 BUG();
972 } 972 }
973 return 0; 973 return 0;
974 } 974 }
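For MV_SG1_SG2 and MULTICAST CDBs the count above is decoded from the CUED bits: a non-cued descriptor has one source, an RXOR12 window two, and any of the RXOR123/124/125 windows three. A toy restatement under those assumptions (the region constants are placeholders, not the driver's values):

    #include <stdio.h>

    enum { RXOR12, RXOR123, RXOR124, RXOR125 };

    /* One source unless the descriptor is cued; then 2 or 3 depending
     * on the RXOR window encoded in the region field. */
    static int dma_src_num(int cued, int region)
    {
            if (!cued)
                    return 1;
            return (region == RXOR12) ? 2 : 3;
    }

    int main(void)
    {
            printf("%d %d %d\n", dma_src_num(0, 0),
                   dma_src_num(1, RXOR12), dma_src_num(1, RXOR125)); /* 1 2 3 */
            return 0;
    }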
975 975
976 /** 976 /**
977 * ppc440spe_desc_get_dst_num - get the number of destination addresses in 977 * ppc440spe_desc_get_dst_num - get the number of destination addresses in
978 * this descriptor 978 * this descriptor
979 */ 979 */
980 static u32 ppc440spe_desc_get_dst_num(struct ppc440spe_adma_desc_slot *desc, 980 static u32 ppc440spe_desc_get_dst_num(struct ppc440spe_adma_desc_slot *desc,
981 struct ppc440spe_adma_chan *chan) 981 struct ppc440spe_adma_chan *chan)
982 { 982 {
983 struct dma_cdb *dma_hw_desc; 983 struct dma_cdb *dma_hw_desc;
984 984
985 switch (chan->device->id) { 985 switch (chan->device->id) {
986 case PPC440SPE_DMA0_ID: 986 case PPC440SPE_DMA0_ID:
987 case PPC440SPE_DMA1_ID: 987 case PPC440SPE_DMA1_ID:
988 /* May be 1 or 2 destinations */ 988 /* May be 1 or 2 destinations */
989 dma_hw_desc = desc->hw_desc; 989 dma_hw_desc = desc->hw_desc;
990 switch (dma_hw_desc->opc) { 990 switch (dma_hw_desc->opc) {
991 case DMA_CDB_OPC_NO_OP: 991 case DMA_CDB_OPC_NO_OP:
992 case DMA_CDB_OPC_DCHECK128: 992 case DMA_CDB_OPC_DCHECK128:
993 return 0; 993 return 0;
994 case DMA_CDB_OPC_MV_SG1_SG2: 994 case DMA_CDB_OPC_MV_SG1_SG2:
995 case DMA_CDB_OPC_DFILL128: 995 case DMA_CDB_OPC_DFILL128:
996 return 1; 996 return 1;
997 case DMA_CDB_OPC_MULTICAST: 997 case DMA_CDB_OPC_MULTICAST:
998 if (desc->dst_cnt == 2) 998 if (desc->dst_cnt == 2)
999 return 2; 999 return 2;
1000 else 1000 else
1001 return 1; 1001 return 1;
1002 default: 1002 default:
1003 printk(KERN_ERR "%s: unknown OPC 0x%02x\n", 1003 printk(KERN_ERR "%s: unknown OPC 0x%02x\n",
1004 __func__, dma_hw_desc->opc); 1004 __func__, dma_hw_desc->opc);
1005 BUG(); 1005 BUG();
1006 } 1006 }
1007 case PPC440SPE_XOR_ID: 1007 case PPC440SPE_XOR_ID:
1008 /* Always only 1 destination */ 1008 /* Always only 1 destination */
1009 return 1; 1009 return 1;
1010 default: 1010 default:
1011 BUG(); 1011 BUG();
1012 } 1012 }
1013 return 0; 1013 return 0;
1014 } 1014 }
1015 1015
1016 /** 1016 /**
1017 * ppc440spe_desc_get_link - get the address of the descriptor that 1017 * ppc440spe_desc_get_link - get the address of the descriptor that
1018 * follows this one 1018 * follows this one
1019 */ 1019 */
1020 static inline u32 ppc440spe_desc_get_link(struct ppc440spe_adma_desc_slot *desc, 1020 static inline u32 ppc440spe_desc_get_link(struct ppc440spe_adma_desc_slot *desc,
1021 struct ppc440spe_adma_chan *chan) 1021 struct ppc440spe_adma_chan *chan)
1022 { 1022 {
1023 if (!desc->hw_next) 1023 if (!desc->hw_next)
1024 return 0; 1024 return 0;
1025 1025
1026 return desc->hw_next->phys; 1026 return desc->hw_next->phys;
1027 } 1027 }
1028 1028
1029 /** 1029 /**
1030 * ppc440spe_desc_is_aligned - check alignment 1030 * ppc440spe_desc_is_aligned - check alignment
1031 */ 1031 */
1032 static inline int ppc440spe_desc_is_aligned( 1032 static inline int ppc440spe_desc_is_aligned(
1033 struct ppc440spe_adma_desc_slot *desc, int num_slots) 1033 struct ppc440spe_adma_desc_slot *desc, int num_slots)
1034 { 1034 {
1035 return (desc->idx & (num_slots - 1)) ? 0 : 1; 1035 return (desc->idx & (num_slots - 1)) ? 0 : 1;
1036 } 1036 }
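The test above works because num_slots is a power of two, so masking with (num_slots - 1) is equivalent to taking idx modulo num_slots. A short standalone demonstration:

    #include <stdio.h>

    /* (idx & (num_slots - 1)) == 0  is the same as  idx % num_slots == 0
     * whenever num_slots is a power of two. */
    static int is_aligned(int idx, int num_slots)
    {
            return (idx & (num_slots - 1)) ? 0 : 1;
    }

    int main(void)
    {
            printf("%d %d\n", is_aligned(8, 4), is_aligned(9, 4)); /* 1 0 */
            return 0;
    }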
1037 1037
1038 /** 1038 /**
1039 * ppc440spe_chan_xor_slot_count - get the number of slots necessary for 1039 * ppc440spe_chan_xor_slot_count - get the number of slots necessary for
1040 * XOR operation 1040 * XOR operation
1041 */ 1041 */
1042 static int ppc440spe_chan_xor_slot_count(size_t len, int src_cnt, 1042 static int ppc440spe_chan_xor_slot_count(size_t len, int src_cnt,
1043 int *slots_per_op) 1043 int *slots_per_op)
1044 { 1044 {
1045 int slot_cnt; 1045 int slot_cnt;
1046 1046
1047 /* each XOR descriptor provides up to 16 source operands */ 1047 /* each XOR descriptor provides up to 16 source operands */
1048 slot_cnt = *slots_per_op = (src_cnt + XOR_MAX_OPS - 1)/XOR_MAX_OPS; 1048 slot_cnt = *slots_per_op = (src_cnt + XOR_MAX_OPS - 1)/XOR_MAX_OPS;
1049 1049
1050 if (likely(len <= PPC440SPE_ADMA_XOR_MAX_BYTE_COUNT)) 1050 if (likely(len <= PPC440SPE_ADMA_XOR_MAX_BYTE_COUNT))
1051 return slot_cnt; 1051 return slot_cnt;
1052 1052
1053 printk(KERN_ERR "%s: len %zu > max %d !!\n", 1053 printk(KERN_ERR "%s: len %zu > max %d !!\n",
1054 __func__, len, PPC440SPE_ADMA_XOR_MAX_BYTE_COUNT); 1054 __func__, len, PPC440SPE_ADMA_XOR_MAX_BYTE_COUNT);
1055 BUG(); 1055 BUG();
1056 return slot_cnt; 1056 return slot_cnt;
1057 } 1057 }
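The slot count above is a plain ceiling division: with up to XOR_MAX_OPS operands per XOR CB (16, going by the "up to 16 sources" comments in this file), src_cnt operands need ceil(src_cnt / 16) slots. A standalone sketch, assuming that value of XOR_MAX_OPS:

    #include <stdio.h>

    #define XOR_MAX_OPS 16  /* assumed from the "up to 16 sources" comments */

    static int xor_slot_count(int src_cnt)
    {
            return (src_cnt + XOR_MAX_OPS - 1) / XOR_MAX_OPS;
    }

    int main(void)
    {
            printf("%d %d %d\n", xor_slot_count(3),
                   xor_slot_count(16), xor_slot_count(17)); /* 1 1 2 */
            return 0;
    }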
1058 1058
1059 /** 1059 /**
1060 * ppc440spe_dma2_pq_slot_count - get the number of slots necessary for 1060 * ppc440spe_dma2_pq_slot_count - get the number of slots necessary for
1061 * DMA2 PQ operation 1061 * DMA2 PQ operation
1062 */ 1062 */
1063 static int ppc440spe_dma2_pq_slot_count(dma_addr_t *srcs, 1063 static int ppc440spe_dma2_pq_slot_count(dma_addr_t *srcs,
1064 int src_cnt, size_t len) 1064 int src_cnt, size_t len)
1065 { 1065 {
1066 signed long long order = 0; 1066 signed long long order = 0;
1067 int state = 0; 1067 int state = 0;
1068 int addr_count = 0; 1068 int addr_count = 0;
1069 int i; 1069 int i;
1070 for (i = 1; i < src_cnt; i++) { 1070 for (i = 1; i < src_cnt; i++) {
1071 dma_addr_t cur_addr = srcs[i]; 1071 dma_addr_t cur_addr = srcs[i];
1072 dma_addr_t old_addr = srcs[i-1]; 1072 dma_addr_t old_addr = srcs[i-1];
1073 switch (state) { 1073 switch (state) {
1074 case 0: 1074 case 0:
1075 if (cur_addr == old_addr + len) { 1075 if (cur_addr == old_addr + len) {
1076 /* direct RXOR */ 1076 /* direct RXOR */
1077 order = 1; 1077 order = 1;
1078 state = 1; 1078 state = 1;
1079 if (i == src_cnt-1) 1079 if (i == src_cnt-1)
1080 addr_count++; 1080 addr_count++;
1081 } else if (old_addr == cur_addr + len) { 1081 } else if (old_addr == cur_addr + len) {
1082 /* reverse RXOR */ 1082 /* reverse RXOR */
1083 order = -1; 1083 order = -1;
1084 state = 1; 1084 state = 1;
1085 if (i == src_cnt-1) 1085 if (i == src_cnt-1)
1086 addr_count++; 1086 addr_count++;
1087 } else { 1087 } else {
1088 state = 3; 1088 state = 3;
1089 } 1089 }
1090 break; 1090 break;
1091 case 1: 1091 case 1:
1092 if (i == src_cnt-2 || (order == -1 1092 if (i == src_cnt-2 || (order == -1
1093 && cur_addr != old_addr - len)) { 1093 && cur_addr != old_addr - len)) {
1094 order = 0; 1094 order = 0;
1095 state = 0; 1095 state = 0;
1096 addr_count++; 1096 addr_count++;
1097 } else if (cur_addr == old_addr + len*order) { 1097 } else if (cur_addr == old_addr + len*order) {
1098 state = 2; 1098 state = 2;
1099 if (i == src_cnt-1) 1099 if (i == src_cnt-1)
1100 addr_count++; 1100 addr_count++;
1101 } else if (cur_addr == old_addr + 2*len) { 1101 } else if (cur_addr == old_addr + 2*len) {
1102 state = 2; 1102 state = 2;
1103 if (i == src_cnt-1) 1103 if (i == src_cnt-1)
1104 addr_count++; 1104 addr_count++;
1105 } else if (cur_addr == old_addr + 3*len) { 1105 } else if (cur_addr == old_addr + 3*len) {
1106 state = 2; 1106 state = 2;
1107 if (i == src_cnt-1) 1107 if (i == src_cnt-1)
1108 addr_count++; 1108 addr_count++;
1109 } else { 1109 } else {
1110 order = 0; 1110 order = 0;
1111 state = 0; 1111 state = 0;
1112 addr_count++; 1112 addr_count++;
1113 } 1113 }
1114 break; 1114 break;
1115 case 2: 1115 case 2:
1116 order = 0; 1116 order = 0;
1117 state = 0; 1117 state = 0;
1118 addr_count++; 1118 addr_count++;
1119 break; 1119 break;
1120 } 1120 }
1121 if (state == 3) 1121 if (state == 3)
1122 break; 1122 break;
1123 } 1123 }
1124 if (src_cnt <= 1 || (state != 1 && state != 2)) { 1124 if (src_cnt <= 1 || (state != 1 && state != 2)) {
1125 pr_err("%s: src_cnt=%d, state=%d, addr_count=%d, order=%lld\n", 1125 pr_err("%s: src_cnt=%d, state=%d, addr_count=%d, order=%lld\n",
1126 __func__, src_cnt, state, addr_count, order); 1126 __func__, src_cnt, state, addr_count, order);
1127 for (i = 0; i < src_cnt; i++) 1127 for (i = 0; i < src_cnt; i++)
1128 pr_err("\t[%d] 0x%llx \n", i, srcs[i]); 1128 pr_err("\t[%d] 0x%llx \n", i, srcs[i]);
1129 BUG(); 1129 BUG();
1130 } 1130 }
1131 1131
1132 return (addr_count + XOR_MAX_OPS - 1) / XOR_MAX_OPS; 1132 return (addr_count + XOR_MAX_OPS - 1) / XOR_MAX_OPS;
1133 } 1133 }
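The state machine above scans the source list for runs of addresses spaced exactly len apart (forwards for direct RXOR, backwards for reverse RXOR), counts the resulting RXOR groups and divides by XOR_MAX_OPS. The simplified sketch below classifies only the base pattern, direct versus reverse contiguity, and deliberately ignores the 1-2-3/1-2-4/1-2-5 windows handled in state 1:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* 1 = direct RXOR candidate, -1 = reverse, 0 = no RXOR pattern. */
    static int rxor_order(const uint64_t *srcs, int cnt, size_t len)
    {
            int i;

            for (i = 1; i < cnt; i++) {
                    if (srcs[i] != srcs[i - 1] + len)
                            break;
            }
            if (i == cnt)
                    return 1;
            for (i = 1; i < cnt; i++) {
                    if (srcs[i] + len != srcs[i - 1])
                            break;
            }
            return (i == cnt) ? -1 : 0;
    }

    int main(void)
    {
            uint64_t a[] = { 0x1000, 0x1200, 0x1400 };  /* spaced by len = 0x200 */

            printf("%d\n", rxor_order(a, 3, 0x200));    /* prints 1 */
            return 0;
    }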
1134 1134
1135 1135
1136 /****************************************************************************** 1136 /******************************************************************************
1137 * ADMA channel low-level routines 1137 * ADMA channel low-level routines
1138 ******************************************************************************/ 1138 ******************************************************************************/
1139 1139
1140 static u32 1140 static u32
1141 ppc440spe_chan_get_current_descriptor(struct ppc440spe_adma_chan *chan); 1141 ppc440spe_chan_get_current_descriptor(struct ppc440spe_adma_chan *chan);
1142 static void ppc440spe_chan_append(struct ppc440spe_adma_chan *chan); 1142 static void ppc440spe_chan_append(struct ppc440spe_adma_chan *chan);
1143 1143
1144 /** 1144 /**
1145 * ppc440spe_adma_device_clear_eot_status - interrupt ack to XOR or DMA engine 1145 * ppc440spe_adma_device_clear_eot_status - interrupt ack to XOR or DMA engine
1146 */ 1146 */
1147 static void ppc440spe_adma_device_clear_eot_status( 1147 static void ppc440spe_adma_device_clear_eot_status(
1148 struct ppc440spe_adma_chan *chan) 1148 struct ppc440spe_adma_chan *chan)
1149 { 1149 {
1150 struct dma_regs *dma_reg; 1150 struct dma_regs *dma_reg;
1151 struct xor_regs *xor_reg; 1151 struct xor_regs *xor_reg;
1152 u8 *p = chan->device->dma_desc_pool_virt; 1152 u8 *p = chan->device->dma_desc_pool_virt;
1153 struct dma_cdb *cdb; 1153 struct dma_cdb *cdb;
1154 u32 rv, i; 1154 u32 rv, i;
1155 1155
1156 switch (chan->device->id) { 1156 switch (chan->device->id) {
1157 case PPC440SPE_DMA0_ID: 1157 case PPC440SPE_DMA0_ID:
1158 case PPC440SPE_DMA1_ID: 1158 case PPC440SPE_DMA1_ID:
1159 /* read FIFO to ack */ 1159 /* read FIFO to ack */
1160 dma_reg = chan->device->dma_reg; 1160 dma_reg = chan->device->dma_reg;
1161 while ((rv = ioread32(&dma_reg->csfpl))) { 1161 while ((rv = ioread32(&dma_reg->csfpl))) {
1162 i = rv & DMA_CDB_ADDR_MSK; 1162 i = rv & DMA_CDB_ADDR_MSK;
1163 cdb = (struct dma_cdb *)&p[i - 1163 cdb = (struct dma_cdb *)&p[i -
1164 (u32)chan->device->dma_desc_pool]; 1164 (u32)chan->device->dma_desc_pool];
1165 1165
1166 /* Clear opcode to ack. This is necessary for 1166 /* Clear opcode to ack. This is necessary for
1167 * ZeroSum operations only 1167 * ZeroSum operations only
1168 */ 1168 */
1169 cdb->opc = 0; 1169 cdb->opc = 0;
1170 1170
1171 if (test_bit(PPC440SPE_RXOR_RUN, 1171 if (test_bit(PPC440SPE_RXOR_RUN,
1172 &ppc440spe_rxor_state)) { 1172 &ppc440spe_rxor_state)) {
1173 /* probably this is a completed RXOR op, 1173 /* probably this is a completed RXOR op,
1174 * get a pointer to the CDB using the fact that 1174 * get a pointer to the CDB using the fact that
1175 * the physical and virtual addresses of a CDB 1175 * the physical and virtual addresses of a CDB
1176 * in the pool have the same offset 1176 * in the pool have the same offset
1177 */ 1177 */
1178 if (le32_to_cpu(cdb->sg1u) & 1178 if (le32_to_cpu(cdb->sg1u) &
1179 DMA_CUED_XOR_BASE) { 1179 DMA_CUED_XOR_BASE) {
1180 /* this is a RXOR */ 1180 /* this is a RXOR */
1181 clear_bit(PPC440SPE_RXOR_RUN, 1181 clear_bit(PPC440SPE_RXOR_RUN,
1182 &ppc440spe_rxor_state); 1182 &ppc440spe_rxor_state);
1183 } 1183 }
1184 } 1184 }
1185 1185
1186 if (rv & DMA_CDB_STATUS_MSK) { 1186 if (rv & DMA_CDB_STATUS_MSK) {
1187 /* ZeroSum check failed 1187 /* ZeroSum check failed
1188 */ 1188 */
1189 struct ppc440spe_adma_desc_slot *iter; 1189 struct ppc440spe_adma_desc_slot *iter;
1190 dma_addr_t phys = rv & ~DMA_CDB_MSK; 1190 dma_addr_t phys = rv & ~DMA_CDB_MSK;
1191 1191
1192 /* 1192 /*
1193 * Update the status of corresponding 1193 * Update the status of corresponding
1194 * descriptor. 1194 * descriptor.
1195 */ 1195 */
1196 list_for_each_entry(iter, &chan->chain, 1196 list_for_each_entry(iter, &chan->chain,
1197 chain_node) { 1197 chain_node) {
1198 if (iter->phys == phys) 1198 if (iter->phys == phys)
1199 break; 1199 break;
1200 } 1200 }
1201 /* 1201 /*
1202 * if we cannot find the corresponding 1202 * if we cannot find the corresponding
1203 * slot, it's a bug 1203 * slot, it's a bug
1204 */ 1204 */
1205 BUG_ON(&iter->chain_node == &chan->chain); 1205 BUG_ON(&iter->chain_node == &chan->chain);
1206 1206
1207 if (iter->xor_check_result) { 1207 if (iter->xor_check_result) {
1208 if (test_bit(PPC440SPE_DESC_PCHECK, 1208 if (test_bit(PPC440SPE_DESC_PCHECK,
1209 &iter->flags)) { 1209 &iter->flags)) {
1210 *iter->xor_check_result |= 1210 *iter->xor_check_result |=
1211 SUM_CHECK_P_RESULT; 1211 SUM_CHECK_P_RESULT;
1212 } else 1212 } else
1213 if (test_bit(PPC440SPE_DESC_QCHECK, 1213 if (test_bit(PPC440SPE_DESC_QCHECK,
1214 &iter->flags)) { 1214 &iter->flags)) {
1215 *iter->xor_check_result |= 1215 *iter->xor_check_result |=
1216 SUM_CHECK_Q_RESULT; 1216 SUM_CHECK_Q_RESULT;
1217 } else 1217 } else
1218 BUG(); 1218 BUG();
1219 } 1219 }
1220 } 1220 }
1221 } 1221 }
1222 1222
1223 rv = ioread32(&dma_reg->dsts); 1223 rv = ioread32(&dma_reg->dsts);
1224 if (rv) { 1224 if (rv) {
1225 pr_err("DMA%d err status: 0x%x\n", 1225 pr_err("DMA%d err status: 0x%x\n",
1226 chan->device->id, rv); 1226 chan->device->id, rv);
1227 /* write back to clear */ 1227 /* write back to clear */
1228 iowrite32(rv, &dma_reg->dsts); 1228 iowrite32(rv, &dma_reg->dsts);
1229 } 1229 }
1230 break; 1230 break;
1231 case PPC440SPE_XOR_ID: 1231 case PPC440SPE_XOR_ID:
1232 /* reset status bits to ack */ 1232 /* reset status bits to ack */
1233 xor_reg = chan->device->xor_reg; 1233 xor_reg = chan->device->xor_reg;
1234 rv = ioread32be(&xor_reg->sr); 1234 rv = ioread32be(&xor_reg->sr);
1235 iowrite32be(rv, &xor_reg->sr); 1235 iowrite32be(rv, &xor_reg->sr);
1236 1236
1237 if (rv & (XOR_IE_ICBIE_BIT|XOR_IE_ICIE_BIT|XOR_IE_RPTIE_BIT)) { 1237 if (rv & (XOR_IE_ICBIE_BIT|XOR_IE_ICIE_BIT|XOR_IE_RPTIE_BIT)) {
1238 if (rv & XOR_IE_RPTIE_BIT) { 1238 if (rv & XOR_IE_RPTIE_BIT) {
1239 /* Read PLB Timeout Error. 1239 /* Read PLB Timeout Error.
1240 * Try to resubmit the CB 1240 * Try to resubmit the CB
1241 */ 1241 */
1242 u32 val = ioread32be(&xor_reg->ccbalr); 1242 u32 val = ioread32be(&xor_reg->ccbalr);
1243 1243
1244 iowrite32be(val, &xor_reg->cblalr); 1244 iowrite32be(val, &xor_reg->cblalr);
1245 1245
1246 val = ioread32be(&xor_reg->crsr); 1246 val = ioread32be(&xor_reg->crsr);
1247 iowrite32be(val | XOR_CRSR_XAE_BIT, 1247 iowrite32be(val | XOR_CRSR_XAE_BIT,
1248 &xor_reg->crsr); 1248 &xor_reg->crsr);
1249 } else 1249 } else
1250 pr_err("XOR ERR 0x%x status\n", rv); 1250 pr_err("XOR ERR 0x%x status\n", rv);
1251 break; 1251 break;
1252 } 1252 }
1253 1253
1254 /* if the XORcore is idle, but there are unprocessed CBs 1254 /* if the XORcore is idle, but there are unprocessed CBs
1255 * then refetch the s/w chain here 1255 * then refetch the s/w chain here
1256 */ 1256 */
1257 if (!(ioread32be(&xor_reg->sr) & XOR_SR_XCP_BIT) && 1257 if (!(ioread32be(&xor_reg->sr) & XOR_SR_XCP_BIT) &&
1258 do_xor_refetch) 1258 do_xor_refetch)
1259 ppc440spe_chan_append(chan); 1259 ppc440spe_chan_append(chan);
1260 break; 1260 break;
1261 } 1261 }
1262 } 1262 }
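The FIFO-ack loop above converts a physical CDB address back into a kernel pointer by using the fact that a CDB sits at the same offset in the virtual and DMA views of the descriptor pool, i.e. virt = pool_virt + (phys - pool_phys). A standalone illustration of that arithmetic (the structure and names are made up, not driver symbols):

    #include <stdint.h>
    #include <stdio.h>

    struct cdb_pool {
            uint8_t  *virt_base;    /* kernel-virtual base of the pool */
            uint64_t  phys_base;    /* DMA (physical) base of the pool */
    };

    /* Same offset in both views, so a subtraction recovers the pointer. */
    static void *cdb_phys_to_virt(const struct cdb_pool *pool, uint64_t phys)
    {
            return pool->virt_base + (phys - pool->phys_base);
    }

    int main(void)
    {
            static uint8_t pool_mem[4096];
            struct cdb_pool pool = { pool_mem, 0x80000000ull };

            /* a CDB reported at physical 0x80000040 lives 0x40 bytes in */
            printf("%p\n", cdb_phys_to_virt(&pool, 0x80000040ull));
            return 0;
    }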
1263 1263
1264 /** 1264 /**
1265 * ppc440spe_chan_is_busy - get the channel status 1265 * ppc440spe_chan_is_busy - get the channel status
1266 */ 1266 */
1267 static int ppc440spe_chan_is_busy(struct ppc440spe_adma_chan *chan) 1267 static int ppc440spe_chan_is_busy(struct ppc440spe_adma_chan *chan)
1268 { 1268 {
1269 struct dma_regs *dma_reg; 1269 struct dma_regs *dma_reg;
1270 struct xor_regs *xor_reg; 1270 struct xor_regs *xor_reg;
1271 int busy = 0; 1271 int busy = 0;
1272 1272
1273 switch (chan->device->id) { 1273 switch (chan->device->id) {
1274 case PPC440SPE_DMA0_ID: 1274 case PPC440SPE_DMA0_ID:
1275 case PPC440SPE_DMA1_ID: 1275 case PPC440SPE_DMA1_ID:
1276 dma_reg = chan->device->dma_reg; 1276 dma_reg = chan->device->dma_reg;
1277 /* if command FIFO's head and tail pointers are equal and 1277 /* if command FIFO's head and tail pointers are equal and
1278 * status tail is the same as command, then channel is free 1278 * status tail is the same as command, then channel is free
1279 */ 1279 */
1280 if (ioread16(&dma_reg->cpfhp) != ioread16(&dma_reg->cpftp) || 1280 if (ioread16(&dma_reg->cpfhp) != ioread16(&dma_reg->cpftp) ||
1281 ioread16(&dma_reg->cpftp) != ioread16(&dma_reg->csftp)) 1281 ioread16(&dma_reg->cpftp) != ioread16(&dma_reg->csftp))
1282 busy = 1; 1282 busy = 1;
1283 break; 1283 break;
1284 case PPC440SPE_XOR_ID: 1284 case PPC440SPE_XOR_ID:
1285 /* use the special status bit for the XORcore 1285 /* use the special status bit for the XORcore
1286 */ 1286 */
1287 xor_reg = chan->device->xor_reg; 1287 xor_reg = chan->device->xor_reg;
1288 busy = (ioread32be(&xor_reg->sr) & XOR_SR_XCP_BIT) ? 1 : 0; 1288 busy = (ioread32be(&xor_reg->sr) & XOR_SR_XCP_BIT) ? 1 : 0;
1289 break; 1289 break;
1290 } 1290 }
1291 1291
1292 return busy; 1292 return busy;
1293 } 1293 }
1294 1294
1295 /** 1295 /**
1296 * ppc440spe_chan_set_first_xor_descriptor - init XORcore chain 1296 * ppc440spe_chan_set_first_xor_descriptor - init XORcore chain
1297 */ 1297 */
1298 static void ppc440spe_chan_set_first_xor_descriptor( 1298 static void ppc440spe_chan_set_first_xor_descriptor(
1299 struct ppc440spe_adma_chan *chan, 1299 struct ppc440spe_adma_chan *chan,
1300 struct ppc440spe_adma_desc_slot *next_desc) 1300 struct ppc440spe_adma_desc_slot *next_desc)
1301 { 1301 {
1302 struct xor_regs *xor_reg = chan->device->xor_reg; 1302 struct xor_regs *xor_reg = chan->device->xor_reg;
1303 1303
1304 if (ioread32be(&xor_reg->sr) & XOR_SR_XCP_BIT) 1304 if (ioread32be(&xor_reg->sr) & XOR_SR_XCP_BIT)
1305 printk(KERN_INFO "%s: Warn: XORcore is running " 1305 printk(KERN_INFO "%s: Warn: XORcore is running "
1306 "when try to set the first CDB!\n", 1306 "when try to set the first CDB!\n",
1307 __func__); 1307 __func__);
1308 1308
1309 xor_last_submit = xor_last_linked = next_desc; 1309 xor_last_submit = xor_last_linked = next_desc;
1310 1310
1311 iowrite32be(XOR_CRSR_64BA_BIT, &xor_reg->crsr); 1311 iowrite32be(XOR_CRSR_64BA_BIT, &xor_reg->crsr);
1312 1312
1313 iowrite32be(next_desc->phys, &xor_reg->cblalr); 1313 iowrite32be(next_desc->phys, &xor_reg->cblalr);
1314 iowrite32be(0, &xor_reg->cblahr); 1314 iowrite32be(0, &xor_reg->cblahr);
1315 iowrite32be(ioread32be(&xor_reg->cbcr) | XOR_CBCR_LNK_BIT, 1315 iowrite32be(ioread32be(&xor_reg->cbcr) | XOR_CBCR_LNK_BIT,
1316 &xor_reg->cbcr); 1316 &xor_reg->cbcr);
1317 1317
1318 chan->hw_chain_inited = 1; 1318 chan->hw_chain_inited = 1;
1319 } 1319 }
1320 1320
1321 /** 1321 /**
1322 * ppc440spe_dma_put_desc - put DMA0,1 descriptor to FIFO. 1322 * ppc440spe_dma_put_desc - put DMA0,1 descriptor to FIFO.
1323 * called with irqs disabled 1323 * called with irqs disabled
1324 */ 1324 */
1325 static void ppc440spe_dma_put_desc(struct ppc440spe_adma_chan *chan, 1325 static void ppc440spe_dma_put_desc(struct ppc440spe_adma_chan *chan,
1326 struct ppc440spe_adma_desc_slot *desc) 1326 struct ppc440spe_adma_desc_slot *desc)
1327 { 1327 {
1328 u32 pcdb; 1328 u32 pcdb;
1329 struct dma_regs *dma_reg = chan->device->dma_reg; 1329 struct dma_regs *dma_reg = chan->device->dma_reg;
1330 1330
1331 pcdb = desc->phys; 1331 pcdb = desc->phys;
1332 if (!test_bit(PPC440SPE_DESC_INT, &desc->flags)) 1332 if (!test_bit(PPC440SPE_DESC_INT, &desc->flags))
1333 pcdb |= DMA_CDB_NO_INT; 1333 pcdb |= DMA_CDB_NO_INT;
1334 1334
1335 chan_last_sub[chan->device->id] = desc; 1335 chan_last_sub[chan->device->id] = desc;
1336 1336
1337 ADMA_LL_DBG(print_cb(chan, desc->hw_desc)); 1337 ADMA_LL_DBG(print_cb(chan, desc->hw_desc));
1338 1338
1339 iowrite32(pcdb, &dma_reg->cpfpl); 1339 iowrite32(pcdb, &dma_reg->cpfpl);
1340 } 1340 }
1341 1341
1342 /** 1342 /**
1343 * ppc440spe_chan_append - update the h/w chain in the channel 1343 * ppc440spe_chan_append - update the h/w chain in the channel
1344 */ 1344 */
1345 static void ppc440spe_chan_append(struct ppc440spe_adma_chan *chan) 1345 static void ppc440spe_chan_append(struct ppc440spe_adma_chan *chan)
1346 { 1346 {
1347 struct xor_regs *xor_reg; 1347 struct xor_regs *xor_reg;
1348 struct ppc440spe_adma_desc_slot *iter; 1348 struct ppc440spe_adma_desc_slot *iter;
1349 struct xor_cb *xcb; 1349 struct xor_cb *xcb;
1350 u32 cur_desc; 1350 u32 cur_desc;
1351 unsigned long flags; 1351 unsigned long flags;
1352 1352
1353 local_irq_save(flags); 1353 local_irq_save(flags);
1354 1354
1355 switch (chan->device->id) { 1355 switch (chan->device->id) {
1356 case PPC440SPE_DMA0_ID: 1356 case PPC440SPE_DMA0_ID:
1357 case PPC440SPE_DMA1_ID: 1357 case PPC440SPE_DMA1_ID:
1358 cur_desc = ppc440spe_chan_get_current_descriptor(chan); 1358 cur_desc = ppc440spe_chan_get_current_descriptor(chan);
1359 1359
1360 if (likely(cur_desc)) { 1360 if (likely(cur_desc)) {
1361 iter = chan_last_sub[chan->device->id]; 1361 iter = chan_last_sub[chan->device->id];
1362 BUG_ON(!iter); 1362 BUG_ON(!iter);
1363 } else { 1363 } else {
1364 /* first peer */ 1364 /* first peer */
1365 iter = chan_first_cdb[chan->device->id]; 1365 iter = chan_first_cdb[chan->device->id];
1366 BUG_ON(!iter); 1366 BUG_ON(!iter);
1367 ppc440spe_dma_put_desc(chan, iter); 1367 ppc440spe_dma_put_desc(chan, iter);
1368 chan->hw_chain_inited = 1; 1368 chan->hw_chain_inited = 1;
1369 } 1369 }
1370 1370
1371 /* is there something new to append */ 1371 /* is there something new to append */
1372 if (!iter->hw_next) 1372 if (!iter->hw_next)
1373 break; 1373 break;
1374 1374
1375 /* flush descriptors from the s/w queue to fifo */ 1375 /* flush descriptors from the s/w queue to fifo */
1376 list_for_each_entry_continue(iter, &chan->chain, chain_node) { 1376 list_for_each_entry_continue(iter, &chan->chain, chain_node) {
1377 ppc440spe_dma_put_desc(chan, iter); 1377 ppc440spe_dma_put_desc(chan, iter);
1378 if (!iter->hw_next) 1378 if (!iter->hw_next)
1379 break; 1379 break;
1380 } 1380 }
1381 break; 1381 break;
1382 case PPC440SPE_XOR_ID: 1382 case PPC440SPE_XOR_ID:
1383 /* update h/w links and refetch */ 1383 /* update h/w links and refetch */
1384 if (!xor_last_submit->hw_next) 1384 if (!xor_last_submit->hw_next)
1385 break; 1385 break;
1386 1386
1387 xor_reg = chan->device->xor_reg; 1387 xor_reg = chan->device->xor_reg;
1388 /* the last linked CDB has to generate an interrupt so 1388 /* the last linked CDB has to generate an interrupt so
1389 * that we are able to append the next lists to h/w 1389 * that we are able to append the next lists to h/w
1390 * regardless of the XOR engine state at the moment 1390 * regardless of the XOR engine state at the moment
1391 * these next lists are appended 1391 * these next lists are appended
1392 */ 1392 */
1393 xcb = xor_last_linked->hw_desc; 1393 xcb = xor_last_linked->hw_desc;
1394 xcb->cbc |= XOR_CBCR_CBCE_BIT; 1394 xcb->cbc |= XOR_CBCR_CBCE_BIT;
1395 1395
1396 if (!(ioread32be(&xor_reg->sr) & XOR_SR_XCP_BIT)) { 1396 if (!(ioread32be(&xor_reg->sr) & XOR_SR_XCP_BIT)) {
1397 /* XORcore is idle. Refetch now */ 1397 /* XORcore is idle. Refetch now */
1398 do_xor_refetch = 0; 1398 do_xor_refetch = 0;
1399 ppc440spe_xor_set_link(xor_last_submit, 1399 ppc440spe_xor_set_link(xor_last_submit,
1400 xor_last_submit->hw_next); 1400 xor_last_submit->hw_next);
1401 1401
1402 ADMA_LL_DBG(print_cb_list(chan, 1402 ADMA_LL_DBG(print_cb_list(chan,
1403 xor_last_submit->hw_next)); 1403 xor_last_submit->hw_next));
1404 1404
1405 xor_last_submit = xor_last_linked; 1405 xor_last_submit = xor_last_linked;
1406 iowrite32be(ioread32be(&xor_reg->crsr) | 1406 iowrite32be(ioread32be(&xor_reg->crsr) |
1407 XOR_CRSR_RCBE_BIT | XOR_CRSR_64BA_BIT, 1407 XOR_CRSR_RCBE_BIT | XOR_CRSR_64BA_BIT,
1408 &xor_reg->crsr); 1408 &xor_reg->crsr);
1409 } else { 1409 } else {
1410 /* XORcore is running. Refetch later in the handler */ 1410 /* XORcore is running. Refetch later in the handler */
1411 do_xor_refetch = 1; 1411 do_xor_refetch = 1;
1412 } 1412 }
1413 1413
1414 break; 1414 break;
1415 } 1415 }
1416 1416
1417 local_irq_restore(flags); 1417 local_irq_restore(flags);
1418 } 1418 }
1419 1419
1420 /** 1420 /**
1421 * ppc440spe_chan_get_current_descriptor - get the currently executed descriptor 1421 * ppc440spe_chan_get_current_descriptor - get the currently executed descriptor
1422 */ 1422 */
1423 static u32 1423 static u32
1424 ppc440spe_chan_get_current_descriptor(struct ppc440spe_adma_chan *chan) 1424 ppc440spe_chan_get_current_descriptor(struct ppc440spe_adma_chan *chan)
1425 { 1425 {
1426 struct dma_regs *dma_reg; 1426 struct dma_regs *dma_reg;
1427 struct xor_regs *xor_reg; 1427 struct xor_regs *xor_reg;
1428 1428
1429 if (unlikely(!chan->hw_chain_inited)) 1429 if (unlikely(!chan->hw_chain_inited))
1430 /* h/w descriptor chain is not initialized yet */ 1430 /* h/w descriptor chain is not initialized yet */
1431 return 0; 1431 return 0;
1432 1432
1433 switch (chan->device->id) { 1433 switch (chan->device->id) {
1434 case PPC440SPE_DMA0_ID: 1434 case PPC440SPE_DMA0_ID:
1435 case PPC440SPE_DMA1_ID: 1435 case PPC440SPE_DMA1_ID:
1436 dma_reg = chan->device->dma_reg; 1436 dma_reg = chan->device->dma_reg;
1437 return ioread32(&dma_reg->acpl) & (~DMA_CDB_MSK); 1437 return ioread32(&dma_reg->acpl) & (~DMA_CDB_MSK);
1438 case PPC440SPE_XOR_ID: 1438 case PPC440SPE_XOR_ID:
1439 xor_reg = chan->device->xor_reg; 1439 xor_reg = chan->device->xor_reg;
1440 return ioread32be(&xor_reg->ccbalr); 1440 return ioread32be(&xor_reg->ccbalr);
1441 } 1441 }
1442 return 0; 1442 return 0;
1443 } 1443 }
1444 1444
1445 /** 1445 /**
1446 * ppc440spe_chan_run - enable the channel 1446 * ppc440spe_chan_run - enable the channel
1447 */ 1447 */
1448 static void ppc440spe_chan_run(struct ppc440spe_adma_chan *chan) 1448 static void ppc440spe_chan_run(struct ppc440spe_adma_chan *chan)
1449 { 1449 {
1450 struct xor_regs *xor_reg; 1450 struct xor_regs *xor_reg;
1451 1451
1452 switch (chan->device->id) { 1452 switch (chan->device->id) {
1453 case PPC440SPE_DMA0_ID: 1453 case PPC440SPE_DMA0_ID:
1454 case PPC440SPE_DMA1_ID: 1454 case PPC440SPE_DMA1_ID:
1455 /* DMAs are always enabled, do nothing */ 1455 /* DMAs are always enabled, do nothing */
1456 break; 1456 break;
1457 case PPC440SPE_XOR_ID: 1457 case PPC440SPE_XOR_ID:
1458 /* drain write buffer */ 1458 /* drain write buffer */
1459 xor_reg = chan->device->xor_reg; 1459 xor_reg = chan->device->xor_reg;
1460 1460
1461 /* fetch descriptor pointed to in <link> */ 1461 /* fetch descriptor pointed to in <link> */
1462 iowrite32be(XOR_CRSR_64BA_BIT | XOR_CRSR_XAE_BIT, 1462 iowrite32be(XOR_CRSR_64BA_BIT | XOR_CRSR_XAE_BIT,
1463 &xor_reg->crsr); 1463 &xor_reg->crsr);
1464 break; 1464 break;
1465 } 1465 }
1466 } 1466 }
1467 1467
1468 /****************************************************************************** 1468 /******************************************************************************
1469 * ADMA device level 1469 * ADMA device level
1470 ******************************************************************************/ 1470 ******************************************************************************/
1471 1471
1472 static void ppc440spe_chan_start_null_xor(struct ppc440spe_adma_chan *chan); 1472 static void ppc440spe_chan_start_null_xor(struct ppc440spe_adma_chan *chan);
1473 static int ppc440spe_adma_alloc_chan_resources(struct dma_chan *chan); 1473 static int ppc440spe_adma_alloc_chan_resources(struct dma_chan *chan);
1474 1474
1475 static dma_cookie_t 1475 static dma_cookie_t
1476 ppc440spe_adma_tx_submit(struct dma_async_tx_descriptor *tx); 1476 ppc440spe_adma_tx_submit(struct dma_async_tx_descriptor *tx);
1477 1477
1478 static void ppc440spe_adma_set_dest(struct ppc440spe_adma_desc_slot *tx, 1478 static void ppc440spe_adma_set_dest(struct ppc440spe_adma_desc_slot *tx,
1479 dma_addr_t addr, int index); 1479 dma_addr_t addr, int index);
1480 static void 1480 static void
1481 ppc440spe_adma_memcpy_xor_set_src(struct ppc440spe_adma_desc_slot *tx, 1481 ppc440spe_adma_memcpy_xor_set_src(struct ppc440spe_adma_desc_slot *tx,
1482 dma_addr_t addr, int index); 1482 dma_addr_t addr, int index);
1483 1483
1484 static void 1484 static void
1485 ppc440spe_adma_pq_set_dest(struct ppc440spe_adma_desc_slot *tx, 1485 ppc440spe_adma_pq_set_dest(struct ppc440spe_adma_desc_slot *tx,
1486 dma_addr_t *paddr, unsigned long flags); 1486 dma_addr_t *paddr, unsigned long flags);
1487 static void 1487 static void
1488 ppc440spe_adma_pq_set_src(struct ppc440spe_adma_desc_slot *tx, 1488 ppc440spe_adma_pq_set_src(struct ppc440spe_adma_desc_slot *tx,
1489 dma_addr_t addr, int index); 1489 dma_addr_t addr, int index);
1490 static void 1490 static void
1491 ppc440spe_adma_pq_set_src_mult(struct ppc440spe_adma_desc_slot *tx, 1491 ppc440spe_adma_pq_set_src_mult(struct ppc440spe_adma_desc_slot *tx,
1492 unsigned char mult, int index, int dst_pos); 1492 unsigned char mult, int index, int dst_pos);
1493 static void 1493 static void
1494 ppc440spe_adma_pqzero_sum_set_dest(struct ppc440spe_adma_desc_slot *tx, 1494 ppc440spe_adma_pqzero_sum_set_dest(struct ppc440spe_adma_desc_slot *tx,
1495 dma_addr_t paddr, dma_addr_t qaddr); 1495 dma_addr_t paddr, dma_addr_t qaddr);
1496 1496
1497 static struct page *ppc440spe_rxor_srcs[32]; 1497 static struct page *ppc440spe_rxor_srcs[32];
1498 1498
1499 /** 1499 /**
1500 * ppc440spe_can_rxor - check if the operands may be processed with RXOR 1500 * ppc440spe_can_rxor - check if the operands may be processed with RXOR
1501 */ 1501 */
1502 static int ppc440spe_can_rxor(struct page **srcs, int src_cnt, size_t len) 1502 static int ppc440spe_can_rxor(struct page **srcs, int src_cnt, size_t len)
1503 { 1503 {
1504 int i, order = 0, state = 0; 1504 int i, order = 0, state = 0;
1505 int idx = 0; 1505 int idx = 0;
1506 1506
1507 if (unlikely(!(src_cnt > 1))) 1507 if (unlikely(!(src_cnt > 1)))
1508 return 0; 1508 return 0;
1509 1509
1510 BUG_ON(src_cnt > ARRAY_SIZE(ppc440spe_rxor_srcs)); 1510 BUG_ON(src_cnt > ARRAY_SIZE(ppc440spe_rxor_srcs));
1511 1511
1512 /* Skip holes in the source list before checking */ 1512 /* Skip holes in the source list before checking */
1513 for (i = 0; i < src_cnt; i++) { 1513 for (i = 0; i < src_cnt; i++) {
1514 if (!srcs[i]) 1514 if (!srcs[i])
1515 continue; 1515 continue;
1516 ppc440spe_rxor_srcs[idx++] = srcs[i]; 1516 ppc440spe_rxor_srcs[idx++] = srcs[i];
1517 } 1517 }
1518 src_cnt = idx; 1518 src_cnt = idx;
1519 1519
1520 for (i = 1; i < src_cnt; i++) { 1520 for (i = 1; i < src_cnt; i++) {
1521 char *cur_addr = page_address(ppc440spe_rxor_srcs[i]); 1521 char *cur_addr = page_address(ppc440spe_rxor_srcs[i]);
1522 char *old_addr = page_address(ppc440spe_rxor_srcs[i - 1]); 1522 char *old_addr = page_address(ppc440spe_rxor_srcs[i - 1]);
1523 1523
1524 switch (state) { 1524 switch (state) {
1525 case 0: 1525 case 0:
1526 if (cur_addr == old_addr + len) { 1526 if (cur_addr == old_addr + len) {
1527 /* direct RXOR */ 1527 /* direct RXOR */
1528 order = 1; 1528 order = 1;
1529 state = 1; 1529 state = 1;
1530 } else if (old_addr == cur_addr + len) { 1530 } else if (old_addr == cur_addr + len) {
1531 /* reverse RXOR */ 1531 /* reverse RXOR */
1532 order = -1; 1532 order = -1;
1533 state = 1; 1533 state = 1;
1534 } else 1534 } else
1535 goto out; 1535 goto out;
1536 break; 1536 break;
1537 case 1: 1537 case 1:
1538 if ((i == src_cnt - 2) || 1538 if ((i == src_cnt - 2) ||
1539 (order == -1 && cur_addr != old_addr - len)) { 1539 (order == -1 && cur_addr != old_addr - len)) {
1540 order = 0; 1540 order = 0;
1541 state = 0; 1541 state = 0;
1542 } else if ((cur_addr == old_addr + len * order) || 1542 } else if ((cur_addr == old_addr + len * order) ||
1543 (cur_addr == old_addr + 2 * len) || 1543 (cur_addr == old_addr + 2 * len) ||
1544 (cur_addr == old_addr + 3 * len)) { 1544 (cur_addr == old_addr + 3 * len)) {
1545 state = 2; 1545 state = 2;
1546 } else { 1546 } else {
1547 order = 0; 1547 order = 0;
1548 state = 0; 1548 state = 0;
1549 } 1549 }
1550 break; 1550 break;
1551 case 2: 1551 case 2:
1552 order = 0; 1552 order = 0;
1553 state = 0; 1553 state = 0;
1554 break; 1554 break;
1555 } 1555 }
1556 } 1556 }
1557 1557
1558 out: 1558 out:
1559 if (state == 1 || state == 2) 1559 if (state == 1 || state == 2)
1560 return 1; 1560 return 1;
1561 1561
1562 return 0; 1562 return 0;
1563 } 1563 }
1564 1564
1565 /** 1565 /**
1566 * ppc440spe_adma_device_estimate - estimate the efficiency of processing 1566 * ppc440spe_adma_device_estimate - estimate the efficiency of processing
1567 * the operation given on this channel. It's assumed that 'chan' is 1567 * the operation given on this channel. It's assumed that 'chan' is
1568 * capable of processing the 'cap' type of operation. 1568 * capable of processing the 'cap' type of operation.
1569 * @chan: channel to use 1569 * @chan: channel to use
1570 * @cap: type of transaction 1570 * @cap: type of transaction
1571 * @dst_lst: array of destination pointers 1571 * @dst_lst: array of destination pointers
1572 * @dst_cnt: number of destination operands 1572 * @dst_cnt: number of destination operands
1573 * @src_lst: array of source pointers 1573 * @src_lst: array of source pointers
1574 * @src_cnt: number of source operands 1574 * @src_cnt: number of source operands
1575 * @src_sz: size of each source operand 1575 * @src_sz: size of each source operand
1576 */ 1576 */
1577 static int ppc440spe_adma_estimate(struct dma_chan *chan, 1577 static int ppc440spe_adma_estimate(struct dma_chan *chan,
1578 enum dma_transaction_type cap, struct page **dst_lst, int dst_cnt, 1578 enum dma_transaction_type cap, struct page **dst_lst, int dst_cnt,
1579 struct page **src_lst, int src_cnt, size_t src_sz) 1579 struct page **src_lst, int src_cnt, size_t src_sz)
1580 { 1580 {
1581 int ef = 1; 1581 int ef = 1;
1582 1582
1583 if (cap == DMA_PQ || cap == DMA_PQ_VAL) { 1583 if (cap == DMA_PQ || cap == DMA_PQ_VAL) {
1584 /* If RAID-6 capabilities were not activated don't try 1584 /* If RAID-6 capabilities were not activated don't try
1585 * to use them 1585 * to use them
1586 */ 1586 */
1587 if (unlikely(!ppc440spe_r6_enabled)) 1587 if (unlikely(!ppc440spe_r6_enabled))
1588 return -1; 1588 return -1;
1589 } 1589 }
1590 /* In the current implementation of the ppc440spe ADMA driver it 1590 /* In the current implementation of the ppc440spe ADMA driver it
1591 * makes sense to single out only the PQ case, because it may be 1591 * makes sense to single out only the PQ case, because it may be
1592 * processed: 1592 * processed:
1593 * (1) either using the Biskup method on DMA2; 1593 * (1) either using the Biskup method on DMA2;
1594 * (2) or on DMA0/1. 1594 * (2) or on DMA0/1.
1595 * Thus we favour (1) if the sources are suitable; 1595 * Thus we favour (1) if the sources are suitable;
1596 * otherwise let it be processed on one of the DMA0/1 engines. 1596 * otherwise let it be processed on one of the DMA0/1 engines.
1597 * In the sum_product case, where the destination is also a 1597 * In the sum_product case, where the destination is also a
1598 * source, process it on DMA0/1 only. 1598 * source, process it on DMA0/1 only.
1599 */ 1599 */
1600 if (cap == DMA_PQ && chan->chan_id == PPC440SPE_XOR_ID) { 1600 if (cap == DMA_PQ && chan->chan_id == PPC440SPE_XOR_ID) {
1601 1601
1602 if (dst_cnt == 1 && src_cnt == 2 && dst_lst[0] == src_lst[1]) 1602 if (dst_cnt == 1 && src_cnt == 2 && dst_lst[0] == src_lst[1])
1603 ef = 0; /* sum_product case, process on DMA0/1 */ 1603 ef = 0; /* sum_product case, process on DMA0/1 */
1604 else if (ppc440spe_can_rxor(src_lst, src_cnt, src_sz)) 1604 else if (ppc440spe_can_rxor(src_lst, src_cnt, src_sz))
1605 ef = 3; /* override (DMA0/1 + idle) */ 1605 ef = 3; /* override (DMA0/1 + idle) */
1606 else 1606 else
1607 ef = 0; /* can't process on DMA2 if !rxor */ 1607 ef = 0; /* can't process on DMA2 if !rxor */
1608 } 1608 }
1609 1609
1610 /* channel idleness increases the priority */ 1610 /* channel idleness increases the priority */
1611 if (likely(ef) && 1611 if (likely(ef) &&
1612 !ppc440spe_chan_is_busy(to_ppc440spe_adma_chan(chan))) 1612 !ppc440spe_chan_is_busy(to_ppc440spe_adma_chan(chan)))
1613 ef++; 1613 ef++;
1614 1614
1615 return ef; 1615 return ef;
1616 } 1616 }
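For the DMA_PQ case the ranking above reduces to a few integers: -1 when RAID-6 support is not enabled, 0 when DMA2 cannot help (non-RXOR sources or the sum_product case), 3 when DMA2 can use RXOR, 1 otherwise, plus a bonus of one for an idle channel. A toy restatement of those rules (parameter names are illustrative, not driver fields):

    #include <stdio.h>

    static int rank_pq_channel(int r6_enabled, int is_dma2, int can_rxor,
                               int is_sum_product, int chan_busy)
    {
            int ef = 1;

            if (!r6_enabled)
                    return -1;              /* RAID-6 path not activated */
            if (is_dma2)
                    ef = (!is_sum_product && can_rxor) ? 3 : 0;
            if (ef && !chan_busy)
                    ef++;                   /* idleness raises the priority */
            return ef;
    }

    int main(void)
    {
            /* idle DMA2 with RXOR-friendly sources outranks a busy DMA0/1 */
            printf("%d vs %d\n",
                   rank_pq_channel(1, 1, 1, 0, 0),      /* 4 */
                   rank_pq_channel(1, 0, 0, 0, 1));     /* 1 */
            return 0;
    }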
1617 1617
1618 struct dma_chan * 1618 struct dma_chan *
1619 ppc440spe_async_tx_find_best_channel(enum dma_transaction_type cap, 1619 ppc440spe_async_tx_find_best_channel(enum dma_transaction_type cap,
1620 struct page **dst_lst, int dst_cnt, struct page **src_lst, 1620 struct page **dst_lst, int dst_cnt, struct page **src_lst,
1621 int src_cnt, size_t src_sz) 1621 int src_cnt, size_t src_sz)
1622 { 1622 {
1623 struct dma_chan *best_chan = NULL; 1623 struct dma_chan *best_chan = NULL;
1624 struct ppc_dma_chan_ref *ref; 1624 struct ppc_dma_chan_ref *ref;
1625 int best_rank = -1; 1625 int best_rank = -1;
1626 1626
1627 if (unlikely(!src_sz)) 1627 if (unlikely(!src_sz))
1628 return NULL; 1628 return NULL;
1629 if (src_sz > PAGE_SIZE) { 1629 if (src_sz > PAGE_SIZE) {
1630 /* 1630 /*
1631 * should a user of the API ever pass > PAGE_SIZE requests, 1631 * should a user of the API ever pass > PAGE_SIZE requests,
1632 * sort out the cases where temporary page-sized buffers 1632 * sort out the cases where temporary page-sized buffers
1633 * are used. 1633 * are used.
1634 */ 1634 */
1635 switch (cap) { 1635 switch (cap) {
1636 case DMA_PQ: 1636 case DMA_PQ:
1637 if (src_cnt == 1 && dst_lst[1] == src_lst[0]) 1637 if (src_cnt == 1 && dst_lst[1] == src_lst[0])
1638 return NULL; 1638 return NULL;
1639 if (src_cnt == 2 && dst_lst[1] == src_lst[1]) 1639 if (src_cnt == 2 && dst_lst[1] == src_lst[1])
1640 return NULL; 1640 return NULL;
1641 break; 1641 break;
1642 case DMA_PQ_VAL: 1642 case DMA_PQ_VAL:
1643 case DMA_XOR_VAL: 1643 case DMA_XOR_VAL:
1644 return NULL; 1644 return NULL;
1645 default: 1645 default:
1646 break; 1646 break;
1647 } 1647 }
1648 } 1648 }
1649 1649
1650 list_for_each_entry(ref, &ppc440spe_adma_chan_list, node) { 1650 list_for_each_entry(ref, &ppc440spe_adma_chan_list, node) {
1651 if (dma_has_cap(cap, ref->chan->device->cap_mask)) { 1651 if (dma_has_cap(cap, ref->chan->device->cap_mask)) {
1652 int rank; 1652 int rank;
1653 1653
1654 rank = ppc440spe_adma_estimate(ref->chan, cap, dst_lst, 1654 rank = ppc440spe_adma_estimate(ref->chan, cap, dst_lst,
1655 dst_cnt, src_lst, src_cnt, src_sz); 1655 dst_cnt, src_lst, src_cnt, src_sz);
1656 if (rank > best_rank) { 1656 if (rank > best_rank) {
1657 best_rank = rank; 1657 best_rank = rank;
1658 best_chan = ref->chan; 1658 best_chan = ref->chan;
1659 } 1659 }
1660 } 1660 }
1661 } 1661 }
1662 1662
1663 return best_chan; 1663 return best_chan;
1664 } 1664 }
1665 EXPORT_SYMBOL_GPL(ppc440spe_async_tx_find_best_channel); 1665 EXPORT_SYMBOL_GPL(ppc440spe_async_tx_find_best_channel);
1666 1666
1667 /** 1667 /**
1668 * ppc440spe_get_group_entry - get group entry with index idx 1668 * ppc440spe_get_group_entry - get group entry with index idx
1669 * @tdesc: the last allocated slot in the group 1669 * @tdesc: the last allocated slot in the group
1670 */ 1670 */
1671 static struct ppc440spe_adma_desc_slot * 1671 static struct ppc440spe_adma_desc_slot *
1672 ppc440spe_get_group_entry(struct ppc440spe_adma_desc_slot *tdesc, u32 entry_idx) 1672 ppc440spe_get_group_entry(struct ppc440spe_adma_desc_slot *tdesc, u32 entry_idx)
1673 { 1673 {
1674 struct ppc440spe_adma_desc_slot *iter = tdesc->group_head; 1674 struct ppc440spe_adma_desc_slot *iter = tdesc->group_head;
1675 int i = 0; 1675 int i = 0;
1676 1676
1677 if (entry_idx < 0 || entry_idx >= (tdesc->src_cnt + tdesc->dst_cnt)) { 1677 if (entry_idx < 0 || entry_idx >= (tdesc->src_cnt + tdesc->dst_cnt)) {
1678 printk("%s: entry_idx %d, src_cnt %d, dst_cnt %d\n", 1678 printk("%s: entry_idx %d, src_cnt %d, dst_cnt %d\n",
1679 __func__, entry_idx, tdesc->src_cnt, tdesc->dst_cnt); 1679 __func__, entry_idx, tdesc->src_cnt, tdesc->dst_cnt);
1680 BUG(); 1680 BUG();
1681 } 1681 }
1682 1682
1683 list_for_each_entry(iter, &tdesc->group_list, chain_node) { 1683 list_for_each_entry(iter, &tdesc->group_list, chain_node) {
1684 if (i++ == entry_idx) 1684 if (i++ == entry_idx)
1685 break; 1685 break;
1686 } 1686 }
1687 return iter; 1687 return iter;
1688 } 1688 }
1689 1689
1690 /** 1690 /**
1691 * ppc440spe_adma_free_slots - flags descriptor slots for reuse 1691 * ppc440spe_adma_free_slots - flags descriptor slots for reuse
1692 * @slot: Slot to free 1692 * @slot: Slot to free
1693 * Caller must hold &ppc440spe_chan->lock while calling this function 1693 * Caller must hold &ppc440spe_chan->lock while calling this function
1694 */ 1694 */
1695 static void ppc440spe_adma_free_slots(struct ppc440spe_adma_desc_slot *slot, 1695 static void ppc440spe_adma_free_slots(struct ppc440spe_adma_desc_slot *slot,
1696 struct ppc440spe_adma_chan *chan) 1696 struct ppc440spe_adma_chan *chan)
1697 { 1697 {
1698 int stride = slot->slots_per_op; 1698 int stride = slot->slots_per_op;
1699 1699
1700 while (stride--) { 1700 while (stride--) {
1701 slot->slots_per_op = 0; 1701 slot->slots_per_op = 0;
1702 slot = list_entry(slot->slot_node.next, 1702 slot = list_entry(slot->slot_node.next,
1703 struct ppc440spe_adma_desc_slot, 1703 struct ppc440spe_adma_desc_slot,
1704 slot_node); 1704 slot_node);
1705 } 1705 }
1706 } 1706 }
1707 1707
1708 static void ppc440spe_adma_unmap(struct ppc440spe_adma_chan *chan, 1708 static void ppc440spe_adma_unmap(struct ppc440spe_adma_chan *chan,
1709 struct ppc440spe_adma_desc_slot *desc) 1709 struct ppc440spe_adma_desc_slot *desc)
1710 { 1710 {
1711 u32 src_cnt, dst_cnt; 1711 u32 src_cnt, dst_cnt;
1712 dma_addr_t addr; 1712 dma_addr_t addr;
1713 1713
1714 /* 1714 /*
1715 * get the number of sources & destinations 1715 * get the number of sources & destinations
1716 * included in this descriptor and unmap 1716 * included in this descriptor and unmap
1717 * them all 1717 * them all
1718 */ 1718 */
1719 src_cnt = ppc440spe_desc_get_src_num(desc, chan); 1719 src_cnt = ppc440spe_desc_get_src_num(desc, chan);
1720 dst_cnt = ppc440spe_desc_get_dst_num(desc, chan); 1720 dst_cnt = ppc440spe_desc_get_dst_num(desc, chan);
1721 1721
1722 /* unmap destinations */ 1722 /* unmap destinations */
1723 if (!(desc->async_tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) { 1723 if (!(desc->async_tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
1724 while (dst_cnt--) { 1724 while (dst_cnt--) {
1725 addr = ppc440spe_desc_get_dest_addr( 1725 addr = ppc440spe_desc_get_dest_addr(
1726 desc, chan, dst_cnt); 1726 desc, chan, dst_cnt);
1727 dma_unmap_page(chan->device->dev, 1727 dma_unmap_page(chan->device->dev,
1728 addr, desc->unmap_len, 1728 addr, desc->unmap_len,
1729 DMA_FROM_DEVICE); 1729 DMA_FROM_DEVICE);
1730 } 1730 }
1731 } 1731 }
1732 1732
1733 /* unmap sources */ 1733 /* unmap sources */
1734 if (!(desc->async_tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) { 1734 if (!(desc->async_tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
1735 while (src_cnt--) { 1735 while (src_cnt--) {
1736 addr = ppc440spe_desc_get_src_addr( 1736 addr = ppc440spe_desc_get_src_addr(
1737 desc, chan, src_cnt); 1737 desc, chan, src_cnt);
1738 dma_unmap_page(chan->device->dev, 1738 dma_unmap_page(chan->device->dev,
1739 addr, desc->unmap_len, 1739 addr, desc->unmap_len,
1740 DMA_TO_DEVICE); 1740 DMA_TO_DEVICE);
1741 } 1741 }
1742 } 1742 }
1743 } 1743 }
1744 1744
1745 /** 1745 /**
1746 * ppc440spe_adma_run_tx_complete_actions - call functions to be called 1746 * ppc440spe_adma_run_tx_complete_actions - call functions to be called
1747 * upon completion 1747 * upon completion
1748 */ 1748 */
1749 static dma_cookie_t ppc440spe_adma_run_tx_complete_actions( 1749 static dma_cookie_t ppc440spe_adma_run_tx_complete_actions(
1750 struct ppc440spe_adma_desc_slot *desc, 1750 struct ppc440spe_adma_desc_slot *desc,
1751 struct ppc440spe_adma_chan *chan, 1751 struct ppc440spe_adma_chan *chan,
1752 dma_cookie_t cookie) 1752 dma_cookie_t cookie)
1753 { 1753 {
1754 int i; 1754 int i;
1755 1755
1756 BUG_ON(desc->async_tx.cookie < 0); 1756 BUG_ON(desc->async_tx.cookie < 0);
1757 if (desc->async_tx.cookie > 0) { 1757 if (desc->async_tx.cookie > 0) {
1758 cookie = desc->async_tx.cookie; 1758 cookie = desc->async_tx.cookie;
1759 desc->async_tx.cookie = 0; 1759 desc->async_tx.cookie = 0;
1760 1760
1761 /* call the callback (must not sleep or submit new 1761 /* call the callback (must not sleep or submit new
1762 * operations to this channel) 1762 * operations to this channel)
1763 */ 1763 */
1764 if (desc->async_tx.callback) 1764 if (desc->async_tx.callback)
1765 desc->async_tx.callback( 1765 desc->async_tx.callback(
1766 desc->async_tx.callback_param); 1766 desc->async_tx.callback_param);
1767 1767
1768 /* unmap dma addresses 1768 /* unmap dma addresses
1769 * (unmap_single vs unmap_page?) 1769 * (unmap_single vs unmap_page?)
1770 * 1770 *
1771 * actually, ppc's dma_unmap_page() functions are empty, so 1771 * actually, ppc's dma_unmap_page() functions are empty, so
1772 * the following code is just for the sake of completeness 1772 * the following code is just for the sake of completeness
1773 */ 1773 */
1774 if (chan && chan->needs_unmap && desc->group_head && 1774 if (chan && chan->needs_unmap && desc->group_head &&
1775 desc->unmap_len) { 1775 desc->unmap_len) {
1776 struct ppc440spe_adma_desc_slot *unmap = 1776 struct ppc440spe_adma_desc_slot *unmap =
1777 desc->group_head; 1777 desc->group_head;
1778 /* assume 1 slot per op always */ 1778 /* assume 1 slot per op always */
1779 u32 slot_count = unmap->slot_cnt; 1779 u32 slot_count = unmap->slot_cnt;
1780 1780
1781 /* Run through the group list and unmap addresses */ 1781 /* Run through the group list and unmap addresses */
1782 for (i = 0; i < slot_count; i++) { 1782 for (i = 0; i < slot_count; i++) {
1783 BUG_ON(!unmap); 1783 BUG_ON(!unmap);
1784 ppc440spe_adma_unmap(chan, unmap); 1784 ppc440spe_adma_unmap(chan, unmap);
1785 unmap = unmap->hw_next; 1785 unmap = unmap->hw_next;
1786 } 1786 }
1787 } 1787 }
1788 } 1788 }
1789 1789
1790 /* run dependent operations */ 1790 /* run dependent operations */
1791 dma_run_dependencies(&desc->async_tx); 1791 dma_run_dependencies(&desc->async_tx);
1792 1792
1793 return cookie; 1793 return cookie;
1794 } 1794 }
1795 1795
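Editorial note (not part of the diff): the callback invoked above runs from the channel tasklet, so a dmaengine client must keep it non-blocking and must not submit new work to this channel from inside it. A minimal client-side sketch under those assumptions; my_copy_done, done, tx and chan are hypothetical client names, while dmaengine_submit() and dma_async_issue_pending() are the generic dmaengine helpers:

#include <linux/completion.h>
#include <linux/dmaengine.h>

/* hypothetical client callback: only signal a waiter, never sleep here */
static void my_copy_done(void *param)
{
	struct completion *done = param;

	complete(done);
}

/* ... tx obtained from one of the prep hooks below, done initialized ... */
	tx->callback = my_copy_done;
	tx->callback_param = &done;
	dmaengine_submit(tx);
	dma_async_issue_pending(chan);
	wait_for_completion(&done);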
1796 /** 1796 /**
1797 * ppc440spe_adma_clean_slot - clean up CDB slot (if ack is set) 1797 * ppc440spe_adma_clean_slot - clean up CDB slot (if ack is set)
1798 */ 1798 */
1799 static int ppc440spe_adma_clean_slot(struct ppc440spe_adma_desc_slot *desc, 1799 static int ppc440spe_adma_clean_slot(struct ppc440spe_adma_desc_slot *desc,
1800 struct ppc440spe_adma_chan *chan) 1800 struct ppc440spe_adma_chan *chan)
1801 { 1801 {
1802 /* the client is allowed to attach dependent operations 1802 /* the client is allowed to attach dependent operations
1803 * until 'ack' is set 1803 * until 'ack' is set
1804 */ 1804 */
1805 if (!async_tx_test_ack(&desc->async_tx)) 1805 if (!async_tx_test_ack(&desc->async_tx))
1806 return 0; 1806 return 0;
1807 1807
1808 /* leave the last descriptor in the chain 1808 /* leave the last descriptor in the chain
1809 * so we can append to it 1809 * so we can append to it
1810 */ 1810 */
1811 if (list_is_last(&desc->chain_node, &chan->chain) || 1811 if (list_is_last(&desc->chain_node, &chan->chain) ||
1812 desc->phys == ppc440spe_chan_get_current_descriptor(chan)) 1812 desc->phys == ppc440spe_chan_get_current_descriptor(chan))
1813 return 1; 1813 return 1;
1814 1814
1815 if (chan->device->id != PPC440SPE_XOR_ID) { 1815 if (chan->device->id != PPC440SPE_XOR_ID) {
1816 /* our DMA interrupt handler clears opc field of 1816 /* our DMA interrupt handler clears opc field of
1817 * each processed descriptor. For all types of 1817 * each processed descriptor. For all types of
1818 * operations except for ZeroSum we do not actually 1818 * operations except for ZeroSum we do not actually
1819 * need ack from the interrupt handler. ZeroSum is a 1819 * need ack from the interrupt handler. ZeroSum is a
1820 * special case since the result of this operation 1820 * special case since the result of this operation
1821 * is available from the handler only, so if we see 1821 * is available from the handler only, so if we see
1822 * such a descriptor that has not been processed yet, 1822 * such a descriptor that has not been processed yet,
1823 * leave it in the chain. 1823 * leave it in the chain.
1824 */ 1824 */
1825 struct dma_cdb *cdb = desc->hw_desc; 1825 struct dma_cdb *cdb = desc->hw_desc;
1826 if (cdb->opc == DMA_CDB_OPC_DCHECK128) 1826 if (cdb->opc == DMA_CDB_OPC_DCHECK128)
1827 return 1; 1827 return 1;
1828 } 1828 }
1829 1829
1830 dev_dbg(chan->device->common.dev, "\tfree slot %llx: %d stride: %d\n", 1830 dev_dbg(chan->device->common.dev, "\tfree slot %llx: %d stride: %d\n",
1831 desc->phys, desc->idx, desc->slots_per_op); 1831 desc->phys, desc->idx, desc->slots_per_op);
1832 1832
1833 list_del(&desc->chain_node); 1833 list_del(&desc->chain_node);
1834 ppc440spe_adma_free_slots(desc, chan); 1834 ppc440spe_adma_free_slots(desc, chan);
1835 return 0; 1835 return 0;
1836 } 1836 }
1837 1837
1838 /** 1838 /**
1839 * __ppc440spe_adma_slot_cleanup - this is the common clean-up routine 1839 * __ppc440spe_adma_slot_cleanup - this is the common clean-up routine
1840 * which runs through the channel CDBs list until it reaches the 1840 * which runs through the channel CDBs list until it reaches the
1841 * descriptor currently being processed. When the routine determines that 1841 * descriptor currently being processed. When the routine determines that
1842 * all CDBs of a group are completed, the corresponding callbacks (if any) 1842 * all CDBs of a group are completed, the corresponding callbacks (if any)
1843 * are called and the slots are freed. 1843 * are called and the slots are freed.
1844 */ 1844 */
1845 static void __ppc440spe_adma_slot_cleanup(struct ppc440spe_adma_chan *chan) 1845 static void __ppc440spe_adma_slot_cleanup(struct ppc440spe_adma_chan *chan)
1846 { 1846 {
1847 struct ppc440spe_adma_desc_slot *iter, *_iter, *group_start = NULL; 1847 struct ppc440spe_adma_desc_slot *iter, *_iter, *group_start = NULL;
1848 dma_cookie_t cookie = 0; 1848 dma_cookie_t cookie = 0;
1849 u32 current_desc = ppc440spe_chan_get_current_descriptor(chan); 1849 u32 current_desc = ppc440spe_chan_get_current_descriptor(chan);
1850 int busy = ppc440spe_chan_is_busy(chan); 1850 int busy = ppc440spe_chan_is_busy(chan);
1851 int seen_current = 0, slot_cnt = 0, slots_per_op = 0; 1851 int seen_current = 0, slot_cnt = 0, slots_per_op = 0;
1852 1852
1853 dev_dbg(chan->device->common.dev, "ppc440spe adma%d: %s\n", 1853 dev_dbg(chan->device->common.dev, "ppc440spe adma%d: %s\n",
1854 chan->device->id, __func__); 1854 chan->device->id, __func__);
1855 1855
1856 if (!current_desc) { 1856 if (!current_desc) {
1857 /* There were no transactions yet, so 1857 /* There were no transactions yet, so
1858 * nothing to clean 1858 * nothing to clean
1859 */ 1859 */
1860 return; 1860 return;
1861 } 1861 }
1862 1862
1863 /* free completed slots from the chain starting with 1863 /* free completed slots from the chain starting with
1864 * the oldest descriptor 1864 * the oldest descriptor
1865 */ 1865 */
1866 list_for_each_entry_safe(iter, _iter, &chan->chain, 1866 list_for_each_entry_safe(iter, _iter, &chan->chain,
1867 chain_node) { 1867 chain_node) {
1868 dev_dbg(chan->device->common.dev, "\tcookie: %d slot: %d " 1868 dev_dbg(chan->device->common.dev, "\tcookie: %d slot: %d "
1869 "busy: %d this_desc: %#llx next_desc: %#x " 1869 "busy: %d this_desc: %#llx next_desc: %#x "
1870 "cur: %#x ack: %d\n", 1870 "cur: %#x ack: %d\n",
1871 iter->async_tx.cookie, iter->idx, busy, iter->phys, 1871 iter->async_tx.cookie, iter->idx, busy, iter->phys,
1872 ppc440spe_desc_get_link(iter, chan), current_desc, 1872 ppc440spe_desc_get_link(iter, chan), current_desc,
1873 async_tx_test_ack(&iter->async_tx)); 1873 async_tx_test_ack(&iter->async_tx));
1874 prefetch(_iter); 1874 prefetch(_iter);
1875 prefetch(&_iter->async_tx); 1875 prefetch(&_iter->async_tx);
1876 1876
1877 /* do not advance past the current descriptor loaded into the 1877 /* do not advance past the current descriptor loaded into the
1878 * hardware channel; subsequent descriptors are either in progress 1878 * hardware channel; subsequent descriptors are either in progress
1879 * or have not been submitted 1879 * or have not been submitted
1880 */ 1880 */
1881 if (seen_current) 1881 if (seen_current)
1882 break; 1882 break;
1883 1883
1884 /* stop the search if we reach the current descriptor and the 1884 /* stop the search if we reach the current descriptor and the
1885 * channel is busy, or if it appears that the current descriptor 1885 * channel is busy, or if it appears that the current descriptor
1886 * needs to be re-read (i.e. has been appended to) 1886 * needs to be re-read (i.e. has been appended to)
1887 */ 1887 */
1888 if (iter->phys == current_desc) { 1888 if (iter->phys == current_desc) {
1889 BUG_ON(seen_current++); 1889 BUG_ON(seen_current++);
1890 if (busy || ppc440spe_desc_get_link(iter, chan)) { 1890 if (busy || ppc440spe_desc_get_link(iter, chan)) {
1891 /* not all descriptors of the group have 1891 /* not all descriptors of the group have
1892 * been completed; exit. 1892 * been completed; exit.
1893 */ 1893 */
1894 break; 1894 break;
1895 } 1895 }
1896 } 1896 }
1897 1897
1898 /* detect the start of a group transaction */ 1898 /* detect the start of a group transaction */
1899 if (!slot_cnt && !slots_per_op) { 1899 if (!slot_cnt && !slots_per_op) {
1900 slot_cnt = iter->slot_cnt; 1900 slot_cnt = iter->slot_cnt;
1901 slots_per_op = iter->slots_per_op; 1901 slots_per_op = iter->slots_per_op;
1902 if (slot_cnt <= slots_per_op) { 1902 if (slot_cnt <= slots_per_op) {
1903 slot_cnt = 0; 1903 slot_cnt = 0;
1904 slots_per_op = 0; 1904 slots_per_op = 0;
1905 } 1905 }
1906 } 1906 }
1907 1907
1908 if (slot_cnt) { 1908 if (slot_cnt) {
1909 if (!group_start) 1909 if (!group_start)
1910 group_start = iter; 1910 group_start = iter;
1911 slot_cnt -= slots_per_op; 1911 slot_cnt -= slots_per_op;
1912 } 1912 }
1913 1913
1914 /* all the members of a group are complete */ 1914 /* all the members of a group are complete */
1915 if (slots_per_op != 0 && slot_cnt == 0) { 1915 if (slots_per_op != 0 && slot_cnt == 0) {
1916 struct ppc440spe_adma_desc_slot *grp_iter, *_grp_iter; 1916 struct ppc440spe_adma_desc_slot *grp_iter, *_grp_iter;
1917 int end_of_chain = 0; 1917 int end_of_chain = 0;
1918 1918
1919 /* clean up the group */ 1919 /* clean up the group */
1920 slot_cnt = group_start->slot_cnt; 1920 slot_cnt = group_start->slot_cnt;
1921 grp_iter = group_start; 1921 grp_iter = group_start;
1922 list_for_each_entry_safe_from(grp_iter, _grp_iter, 1922 list_for_each_entry_safe_from(grp_iter, _grp_iter,
1923 &chan->chain, chain_node) { 1923 &chan->chain, chain_node) {
1924 1924
1925 cookie = ppc440spe_adma_run_tx_complete_actions( 1925 cookie = ppc440spe_adma_run_tx_complete_actions(
1926 grp_iter, chan, cookie); 1926 grp_iter, chan, cookie);
1927 1927
1928 slot_cnt -= slots_per_op; 1928 slot_cnt -= slots_per_op;
1929 end_of_chain = ppc440spe_adma_clean_slot( 1929 end_of_chain = ppc440spe_adma_clean_slot(
1930 grp_iter, chan); 1930 grp_iter, chan);
1931 if (end_of_chain && slot_cnt) { 1931 if (end_of_chain && slot_cnt) {
1932 /* Should wait for ZeroSum completion */ 1932 /* Should wait for ZeroSum completion */
1933 if (cookie > 0) 1933 if (cookie > 0)
1934 chan->common.completed_cookie = cookie; 1934 chan->common.completed_cookie = cookie;
1935 return; 1935 return;
1936 } 1936 }
1937 1937
1938 if (slot_cnt == 0 || end_of_chain) 1938 if (slot_cnt == 0 || end_of_chain)
1939 break; 1939 break;
1940 } 1940 }
1941 1941
1942 /* the group should be complete at this point */ 1942 /* the group should be complete at this point */
1943 BUG_ON(slot_cnt); 1943 BUG_ON(slot_cnt);
1944 1944
1945 slots_per_op = 0; 1945 slots_per_op = 0;
1946 group_start = NULL; 1946 group_start = NULL;
1947 if (end_of_chain) 1947 if (end_of_chain)
1948 break; 1948 break;
1949 else 1949 else
1950 continue; 1950 continue;
1951 } else if (slots_per_op) /* wait for group completion */ 1951 } else if (slots_per_op) /* wait for group completion */
1952 continue; 1952 continue;
1953 1953
1954 cookie = ppc440spe_adma_run_tx_complete_actions(iter, chan, 1954 cookie = ppc440spe_adma_run_tx_complete_actions(iter, chan,
1955 cookie); 1955 cookie);
1956 1956
1957 if (ppc440spe_adma_clean_slot(iter, chan)) 1957 if (ppc440spe_adma_clean_slot(iter, chan))
1958 break; 1958 break;
1959 } 1959 }
1960 1960
1961 BUG_ON(!seen_current); 1961 BUG_ON(!seen_current);
1962 1962
1963 if (cookie > 0) { 1963 if (cookie > 0) {
1964 chan->common.completed_cookie = cookie; 1964 chan->common.completed_cookie = cookie;
1965 pr_debug("\tcompleted cookie %d\n", cookie); 1965 pr_debug("\tcompleted cookie %d\n", cookie);
1966 } 1966 }
1967 1967
1968 } 1968 }
1969 1969
1970 /** 1970 /**
1971 * ppc440spe_adma_tasklet - clean up watch-dog initiator 1971 * ppc440spe_adma_tasklet - clean up watch-dog initiator
1972 */ 1972 */
1973 static void ppc440spe_adma_tasklet(unsigned long data) 1973 static void ppc440spe_adma_tasklet(unsigned long data)
1974 { 1974 {
1975 struct ppc440spe_adma_chan *chan = (struct ppc440spe_adma_chan *) data; 1975 struct ppc440spe_adma_chan *chan = (struct ppc440spe_adma_chan *) data;
1976 1976
1977 spin_lock_nested(&chan->lock, SINGLE_DEPTH_NESTING); 1977 spin_lock_nested(&chan->lock, SINGLE_DEPTH_NESTING);
1978 __ppc440spe_adma_slot_cleanup(chan); 1978 __ppc440spe_adma_slot_cleanup(chan);
1979 spin_unlock(&chan->lock); 1979 spin_unlock(&chan->lock);
1980 } 1980 }
1981 1981
1982 /** 1982 /**
1983 * ppc440spe_adma_slot_cleanup - clean up scheduled initiator 1983 * ppc440spe_adma_slot_cleanup - clean up scheduled initiator
1984 */ 1984 */
1985 static void ppc440spe_adma_slot_cleanup(struct ppc440spe_adma_chan *chan) 1985 static void ppc440spe_adma_slot_cleanup(struct ppc440spe_adma_chan *chan)
1986 { 1986 {
1987 spin_lock_bh(&chan->lock); 1987 spin_lock_bh(&chan->lock);
1988 __ppc440spe_adma_slot_cleanup(chan); 1988 __ppc440spe_adma_slot_cleanup(chan);
1989 spin_unlock_bh(&chan->lock); 1989 spin_unlock_bh(&chan->lock);
1990 } 1990 }
1991 1991
1992 /** 1992 /**
1993 * ppc440spe_adma_alloc_slots - allocate free slots (if any) 1993 * ppc440spe_adma_alloc_slots - allocate free slots (if any)
1994 */ 1994 */
1995 static struct ppc440spe_adma_desc_slot *ppc440spe_adma_alloc_slots( 1995 static struct ppc440spe_adma_desc_slot *ppc440spe_adma_alloc_slots(
1996 struct ppc440spe_adma_chan *chan, int num_slots, 1996 struct ppc440spe_adma_chan *chan, int num_slots,
1997 int slots_per_op) 1997 int slots_per_op)
1998 { 1998 {
1999 struct ppc440spe_adma_desc_slot *iter = NULL, *_iter; 1999 struct ppc440spe_adma_desc_slot *iter = NULL, *_iter;
2000 struct ppc440spe_adma_desc_slot *alloc_start = NULL; 2000 struct ppc440spe_adma_desc_slot *alloc_start = NULL;
2001 struct list_head chain = LIST_HEAD_INIT(chain); 2001 struct list_head chain = LIST_HEAD_INIT(chain);
2002 int slots_found, retry = 0; 2002 int slots_found, retry = 0;
2003 2003
2004 2004
2005 BUG_ON(!num_slots || !slots_per_op); 2005 BUG_ON(!num_slots || !slots_per_op);
2006 /* start the search from the last allocated descriptor; 2006 /* start the search from the last allocated descriptor;
2007 * if a contiguous allocation cannot be found, start searching 2007 * if a contiguous allocation cannot be found, start searching
2008 * from the beginning of the list 2008 * from the beginning of the list
2009 */ 2009 */
2010 retry: 2010 retry:
2011 slots_found = 0; 2011 slots_found = 0;
2012 if (retry == 0) 2012 if (retry == 0)
2013 iter = chan->last_used; 2013 iter = chan->last_used;
2014 else 2014 else
2015 iter = list_entry(&chan->all_slots, 2015 iter = list_entry(&chan->all_slots,
2016 struct ppc440spe_adma_desc_slot, 2016 struct ppc440spe_adma_desc_slot,
2017 slot_node); 2017 slot_node);
2018 list_for_each_entry_safe_continue(iter, _iter, &chan->all_slots, 2018 list_for_each_entry_safe_continue(iter, _iter, &chan->all_slots,
2019 slot_node) { 2019 slot_node) {
2020 prefetch(_iter); 2020 prefetch(_iter);
2021 prefetch(&_iter->async_tx); 2021 prefetch(&_iter->async_tx);
2022 if (iter->slots_per_op) { 2022 if (iter->slots_per_op) {
2023 slots_found = 0; 2023 slots_found = 0;
2024 continue; 2024 continue;
2025 } 2025 }
2026 2026
2027 /* start the allocation if the slot is correctly aligned */ 2027 /* start the allocation if the slot is correctly aligned */
2028 if (!slots_found++) 2028 if (!slots_found++)
2029 alloc_start = iter; 2029 alloc_start = iter;
2030 2030
2031 if (slots_found == num_slots) { 2031 if (slots_found == num_slots) {
2032 struct ppc440spe_adma_desc_slot *alloc_tail = NULL; 2032 struct ppc440spe_adma_desc_slot *alloc_tail = NULL;
2033 struct ppc440spe_adma_desc_slot *last_used = NULL; 2033 struct ppc440spe_adma_desc_slot *last_used = NULL;
2034 2034
2035 iter = alloc_start; 2035 iter = alloc_start;
2036 while (num_slots) { 2036 while (num_slots) {
2037 int i; 2037 int i;
2038 /* pre-ack all but the last descriptor */ 2038 /* pre-ack all but the last descriptor */
2039 if (num_slots != slots_per_op) 2039 if (num_slots != slots_per_op)
2040 async_tx_ack(&iter->async_tx); 2040 async_tx_ack(&iter->async_tx);
2041 2041
2042 list_add_tail(&iter->chain_node, &chain); 2042 list_add_tail(&iter->chain_node, &chain);
2043 alloc_tail = iter; 2043 alloc_tail = iter;
2044 iter->async_tx.cookie = 0; 2044 iter->async_tx.cookie = 0;
2045 iter->hw_next = NULL; 2045 iter->hw_next = NULL;
2046 iter->flags = 0; 2046 iter->flags = 0;
2047 iter->slot_cnt = num_slots; 2047 iter->slot_cnt = num_slots;
2048 iter->xor_check_result = NULL; 2048 iter->xor_check_result = NULL;
2049 for (i = 0; i < slots_per_op; i++) { 2049 for (i = 0; i < slots_per_op; i++) {
2050 iter->slots_per_op = slots_per_op - i; 2050 iter->slots_per_op = slots_per_op - i;
2051 last_used = iter; 2051 last_used = iter;
2052 iter = list_entry(iter->slot_node.next, 2052 iter = list_entry(iter->slot_node.next,
2053 struct ppc440spe_adma_desc_slot, 2053 struct ppc440spe_adma_desc_slot,
2054 slot_node); 2054 slot_node);
2055 } 2055 }
2056 num_slots -= slots_per_op; 2056 num_slots -= slots_per_op;
2057 } 2057 }
2058 alloc_tail->group_head = alloc_start; 2058 alloc_tail->group_head = alloc_start;
2059 alloc_tail->async_tx.cookie = -EBUSY; 2059 alloc_tail->async_tx.cookie = -EBUSY;
2060 list_splice(&chain, &alloc_tail->group_list); 2060 list_splice(&chain, &alloc_tail->group_list);
2061 chan->last_used = last_used; 2061 chan->last_used = last_used;
2062 return alloc_tail; 2062 return alloc_tail;
2063 } 2063 }
2064 } 2064 }
2065 if (!retry++) 2065 if (!retry++)
2066 goto retry; 2066 goto retry;
2067 2067
2068 /* try to free some slots if the allocation fails */ 2068 /* try to free some slots if the allocation fails */
2069 tasklet_schedule(&chan->irq_tasklet); 2069 tasklet_schedule(&chan->irq_tasklet);
2070 return NULL; 2070 return NULL;
2071 } 2071 }
2072 2072
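Editorial note (not part of the diff): the allocator above scans for num_slots contiguous free slots starting right after chan->last_used and, if that pass fails, retries once from the head of all_slots before scheduling the cleanup tasklet and returning NULL. A simplified stand-alone model of that two-pass scan; find_contiguous() and the slot_busy array are illustrative assumptions, not driver symbols:

#include <stdbool.h>

/* return the index of the first slot of a run of 'need' free slots,
 * or -1 after one retry from the start of the pool */
static int find_contiguous(const bool *slot_busy, int total, int start, int need)
{
	int pass, i, run;

	for (pass = 0; pass < 2; pass++) {
		run = 0;
		for (i = pass ? 0 : start; i < total; i++) {
			run = slot_busy[i] ? 0 : run + 1;
			if (run == need)
				return i - need + 1;
		}
	}
	return -1;	/* caller kicks the tasklet and retries later */
}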
2073 /** 2073 /**
2074 * ppc440spe_adma_alloc_chan_resources - allocate pools for CDB slots 2074 * ppc440spe_adma_alloc_chan_resources - allocate pools for CDB slots
2075 */ 2075 */
2076 static int ppc440spe_adma_alloc_chan_resources(struct dma_chan *chan) 2076 static int ppc440spe_adma_alloc_chan_resources(struct dma_chan *chan)
2077 { 2077 {
2078 struct ppc440spe_adma_chan *ppc440spe_chan; 2078 struct ppc440spe_adma_chan *ppc440spe_chan;
2079 struct ppc440spe_adma_desc_slot *slot = NULL; 2079 struct ppc440spe_adma_desc_slot *slot = NULL;
2080 char *hw_desc; 2080 char *hw_desc;
2081 int i, db_sz; 2081 int i, db_sz;
2082 int init; 2082 int init;
2083 2083
2084 ppc440spe_chan = to_ppc440spe_adma_chan(chan); 2084 ppc440spe_chan = to_ppc440spe_adma_chan(chan);
2085 init = ppc440spe_chan->slots_allocated ? 0 : 1; 2085 init = ppc440spe_chan->slots_allocated ? 0 : 1;
2086 chan->chan_id = ppc440spe_chan->device->id; 2086 chan->chan_id = ppc440spe_chan->device->id;
2087 2087
2088 /* Allocate descriptor slots */ 2088 /* Allocate descriptor slots */
2089 i = ppc440spe_chan->slots_allocated; 2089 i = ppc440spe_chan->slots_allocated;
2090 if (ppc440spe_chan->device->id != PPC440SPE_XOR_ID) 2090 if (ppc440spe_chan->device->id != PPC440SPE_XOR_ID)
2091 db_sz = sizeof(struct dma_cdb); 2091 db_sz = sizeof(struct dma_cdb);
2092 else 2092 else
2093 db_sz = sizeof(struct xor_cb); 2093 db_sz = sizeof(struct xor_cb);
2094 2094
2095 for (; i < (ppc440spe_chan->device->pool_size / db_sz); i++) { 2095 for (; i < (ppc440spe_chan->device->pool_size / db_sz); i++) {
2096 slot = kzalloc(sizeof(struct ppc440spe_adma_desc_slot), 2096 slot = kzalloc(sizeof(struct ppc440spe_adma_desc_slot),
2097 GFP_KERNEL); 2097 GFP_KERNEL);
2098 if (!slot) { 2098 if (!slot) {
2099 printk(KERN_INFO "SPE ADMA Channel only initialized" 2099 printk(KERN_INFO "SPE ADMA Channel only initialized"
2100 " %d descriptor slots", i--); 2100 " %d descriptor slots", i--);
2101 break; 2101 break;
2102 } 2102 }
2103 2103
2104 hw_desc = (char *) ppc440spe_chan->device->dma_desc_pool_virt; 2104 hw_desc = (char *) ppc440spe_chan->device->dma_desc_pool_virt;
2105 slot->hw_desc = (void *) &hw_desc[i * db_sz]; 2105 slot->hw_desc = (void *) &hw_desc[i * db_sz];
2106 dma_async_tx_descriptor_init(&slot->async_tx, chan); 2106 dma_async_tx_descriptor_init(&slot->async_tx, chan);
2107 slot->async_tx.tx_submit = ppc440spe_adma_tx_submit; 2107 slot->async_tx.tx_submit = ppc440spe_adma_tx_submit;
2108 INIT_LIST_HEAD(&slot->chain_node); 2108 INIT_LIST_HEAD(&slot->chain_node);
2109 INIT_LIST_HEAD(&slot->slot_node); 2109 INIT_LIST_HEAD(&slot->slot_node);
2110 INIT_LIST_HEAD(&slot->group_list); 2110 INIT_LIST_HEAD(&slot->group_list);
2111 slot->phys = ppc440spe_chan->device->dma_desc_pool + i * db_sz; 2111 slot->phys = ppc440spe_chan->device->dma_desc_pool + i * db_sz;
2112 slot->idx = i; 2112 slot->idx = i;
2113 2113
2114 spin_lock_bh(&ppc440spe_chan->lock); 2114 spin_lock_bh(&ppc440spe_chan->lock);
2115 ppc440spe_chan->slots_allocated++; 2115 ppc440spe_chan->slots_allocated++;
2116 list_add_tail(&slot->slot_node, &ppc440spe_chan->all_slots); 2116 list_add_tail(&slot->slot_node, &ppc440spe_chan->all_slots);
2117 spin_unlock_bh(&ppc440spe_chan->lock); 2117 spin_unlock_bh(&ppc440spe_chan->lock);
2118 } 2118 }
2119 2119
2120 if (i && !ppc440spe_chan->last_used) { 2120 if (i && !ppc440spe_chan->last_used) {
2121 ppc440spe_chan->last_used = 2121 ppc440spe_chan->last_used =
2122 list_entry(ppc440spe_chan->all_slots.next, 2122 list_entry(ppc440spe_chan->all_slots.next,
2123 struct ppc440spe_adma_desc_slot, 2123 struct ppc440spe_adma_desc_slot,
2124 slot_node); 2124 slot_node);
2125 } 2125 }
2126 2126
2127 dev_dbg(ppc440spe_chan->device->common.dev, 2127 dev_dbg(ppc440spe_chan->device->common.dev,
2128 "ppc440spe adma%d: allocated %d descriptor slots\n", 2128 "ppc440spe adma%d: allocated %d descriptor slots\n",
2129 ppc440spe_chan->device->id, i); 2129 ppc440spe_chan->device->id, i);
2130 2130
2131 /* initialize the channel and the chain with a null operation */ 2131 /* initialize the channel and the chain with a null operation */
2132 if (init) { 2132 if (init) {
2133 switch (ppc440spe_chan->device->id) { 2133 switch (ppc440spe_chan->device->id) {
2134 case PPC440SPE_DMA0_ID: 2134 case PPC440SPE_DMA0_ID:
2135 case PPC440SPE_DMA1_ID: 2135 case PPC440SPE_DMA1_ID:
2136 ppc440spe_chan->hw_chain_inited = 0; 2136 ppc440spe_chan->hw_chain_inited = 0;
2137 /* Use WXOR for self-testing */ 2137 /* Use WXOR for self-testing */
2138 if (!ppc440spe_r6_tchan) 2138 if (!ppc440spe_r6_tchan)
2139 ppc440spe_r6_tchan = ppc440spe_chan; 2139 ppc440spe_r6_tchan = ppc440spe_chan;
2140 break; 2140 break;
2141 case PPC440SPE_XOR_ID: 2141 case PPC440SPE_XOR_ID:
2142 ppc440spe_chan_start_null_xor(ppc440spe_chan); 2142 ppc440spe_chan_start_null_xor(ppc440spe_chan);
2143 break; 2143 break;
2144 default: 2144 default:
2145 BUG(); 2145 BUG();
2146 } 2146 }
2147 ppc440spe_chan->needs_unmap = 1; 2147 ppc440spe_chan->needs_unmap = 1;
2148 } 2148 }
2149 2149
2150 return (i > 0) ? i : -ENOMEM; 2150 return (i > 0) ? i : -ENOMEM;
2151 } 2151 }
2152 2152
2153 /** 2153 /**
2154 * ppc440spe_rxor_set_region - set the RXOR region mask in the XOR CB 2154 * ppc440spe_rxor_set_region - set the RXOR region mask in the XOR CB
2155 */ 2155 */
2156 static void ppc440spe_rxor_set_region(struct ppc440spe_adma_desc_slot *desc, 2156 static void ppc440spe_rxor_set_region(struct ppc440spe_adma_desc_slot *desc,
2157 u8 xor_arg_no, u32 mask) 2157 u8 xor_arg_no, u32 mask)
2158 { 2158 {
2159 struct xor_cb *xcb = desc->hw_desc; 2159 struct xor_cb *xcb = desc->hw_desc;
2160 2160
2161 xcb->ops[xor_arg_no].h |= mask; 2161 xcb->ops[xor_arg_no].h |= mask;
2162 } 2162 }
2163 2163
2164 /** 2164 /**
2165 * ppc440spe_rxor_set_src - set an RXOR source address in the XOR CB 2165 * ppc440spe_rxor_set_src - set an RXOR source address in the XOR CB
2166 */ 2166 */
2167 static void ppc440spe_rxor_set_src(struct ppc440spe_adma_desc_slot *desc, 2167 static void ppc440spe_rxor_set_src(struct ppc440spe_adma_desc_slot *desc,
2168 u8 xor_arg_no, dma_addr_t addr) 2168 u8 xor_arg_no, dma_addr_t addr)
2169 { 2169 {
2170 struct xor_cb *xcb = desc->hw_desc; 2170 struct xor_cb *xcb = desc->hw_desc;
2171 2171
2172 xcb->ops[xor_arg_no].h |= DMA_CUED_XOR_BASE; 2172 xcb->ops[xor_arg_no].h |= DMA_CUED_XOR_BASE;
2173 xcb->ops[xor_arg_no].l = addr; 2173 xcb->ops[xor_arg_no].l = addr;
2174 } 2174 }
2175 2175
2176 /** 2176 /**
2177 * ppc440spe_rxor_set_mult - set an RXOR multiplier in the XOR CB 2177 * ppc440spe_rxor_set_mult - set an RXOR multiplier in the XOR CB
2178 */ 2178 */
2179 static void ppc440spe_rxor_set_mult(struct ppc440spe_adma_desc_slot *desc, 2179 static void ppc440spe_rxor_set_mult(struct ppc440spe_adma_desc_slot *desc,
2180 u8 xor_arg_no, u8 idx, u8 mult) 2180 u8 xor_arg_no, u8 idx, u8 mult)
2181 { 2181 {
2182 struct xor_cb *xcb = desc->hw_desc; 2182 struct xor_cb *xcb = desc->hw_desc;
2183 2183
2184 xcb->ops[xor_arg_no].h |= mult << (DMA_CUED_MULT1_OFF + idx * 8); 2184 xcb->ops[xor_arg_no].h |= mult << (DMA_CUED_MULT1_OFF + idx * 8);
2185 } 2185 }
2186 2186
2187 /** 2187 /**
2188 * ppc440spe_adma_check_threshold - append CDBs to the h/w chain once the 2188 * ppc440spe_adma_check_threshold - append CDBs to the h/w chain once the
2189 * threshold has been reached 2189 * threshold has been reached
2190 */ 2190 */
2191 static void ppc440spe_adma_check_threshold(struct ppc440spe_adma_chan *chan) 2191 static void ppc440spe_adma_check_threshold(struct ppc440spe_adma_chan *chan)
2192 { 2192 {
2193 dev_dbg(chan->device->common.dev, "ppc440spe adma%d: pending: %d\n", 2193 dev_dbg(chan->device->common.dev, "ppc440spe adma%d: pending: %d\n",
2194 chan->device->id, chan->pending); 2194 chan->device->id, chan->pending);
2195 2195
2196 if (chan->pending >= PPC440SPE_ADMA_THRESHOLD) { 2196 if (chan->pending >= PPC440SPE_ADMA_THRESHOLD) {
2197 chan->pending = 0; 2197 chan->pending = 0;
2198 ppc440spe_chan_append(chan); 2198 ppc440spe_chan_append(chan);
2199 } 2199 }
2200 } 2200 }
2201 2201
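Editorial note (not part of the diff): because of this batching, work queued through tx_submit() may stay in software until chan->pending crosses PPC440SPE_ADMA_THRESHOLD. A hedged client-side fragment; it assumes the driver's issue_pending hook (not shown in this hunk) appends the chain without waiting for the threshold:

	cookie = dmaengine_submit(tx);	/* ends up in ppc440spe_adma_tx_submit() */
	dma_async_issue_pending(chan);	/* start the h/w now, don't wait for the threshold */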
2202 /** 2202 /**
2203 * ppc440spe_adma_tx_submit - submit new descriptor group to the channel 2203 * ppc440spe_adma_tx_submit - submit new descriptor group to the channel
2204 * (the descriptors are not necessarily submitted to the h/w chains 2204 * (the descriptors are not necessarily submitted to the h/w chains
2205 * right away) 2205 * right away)
2206 */ 2206 */
2207 static dma_cookie_t ppc440spe_adma_tx_submit(struct dma_async_tx_descriptor *tx) 2207 static dma_cookie_t ppc440spe_adma_tx_submit(struct dma_async_tx_descriptor *tx)
2208 { 2208 {
2209 struct ppc440spe_adma_desc_slot *sw_desc; 2209 struct ppc440spe_adma_desc_slot *sw_desc;
2210 struct ppc440spe_adma_chan *chan = to_ppc440spe_adma_chan(tx->chan); 2210 struct ppc440spe_adma_chan *chan = to_ppc440spe_adma_chan(tx->chan);
2211 struct ppc440spe_adma_desc_slot *group_start, *old_chain_tail; 2211 struct ppc440spe_adma_desc_slot *group_start, *old_chain_tail;
2212 int slot_cnt; 2212 int slot_cnt;
2213 int slots_per_op; 2213 int slots_per_op;
2214 dma_cookie_t cookie; 2214 dma_cookie_t cookie;
2215 2215
2216 sw_desc = tx_to_ppc440spe_adma_slot(tx); 2216 sw_desc = tx_to_ppc440spe_adma_slot(tx);
2217 2217
2218 group_start = sw_desc->group_head; 2218 group_start = sw_desc->group_head;
2219 slot_cnt = group_start->slot_cnt; 2219 slot_cnt = group_start->slot_cnt;
2220 slots_per_op = group_start->slots_per_op; 2220 slots_per_op = group_start->slots_per_op;
2221 2221
2222 spin_lock_bh(&chan->lock); 2222 spin_lock_bh(&chan->lock);
2223 cookie = dma_cookie_assign(tx); 2223 cookie = dma_cookie_assign(tx);
2224 2224
2225 if (unlikely(list_empty(&chan->chain))) { 2225 if (unlikely(list_empty(&chan->chain))) {
2226 /* first peer */ 2226 /* first peer */
2227 list_splice_init(&sw_desc->group_list, &chan->chain); 2227 list_splice_init(&sw_desc->group_list, &chan->chain);
2228 chan_first_cdb[chan->device->id] = group_start; 2228 chan_first_cdb[chan->device->id] = group_start;
2229 } else { 2229 } else {
2230 /* not the first peer; bind the CDBs to the chain */ 2230 /* not the first peer; bind the CDBs to the chain */
2231 old_chain_tail = list_entry(chan->chain.prev, 2231 old_chain_tail = list_entry(chan->chain.prev,
2232 struct ppc440spe_adma_desc_slot, 2232 struct ppc440spe_adma_desc_slot,
2233 chain_node); 2233 chain_node);
2234 list_splice_init(&sw_desc->group_list, 2234 list_splice_init(&sw_desc->group_list,
2235 &old_chain_tail->chain_node); 2235 &old_chain_tail->chain_node);
2236 /* fix up the hardware chain */ 2236 /* fix up the hardware chain */
2237 ppc440spe_desc_set_link(chan, old_chain_tail, group_start); 2237 ppc440spe_desc_set_link(chan, old_chain_tail, group_start);
2238 } 2238 }
2239 2239
2240 /* increment the pending count by the number of operations */ 2240 /* increment the pending count by the number of operations */
2241 chan->pending += slot_cnt / slots_per_op; 2241 chan->pending += slot_cnt / slots_per_op;
2242 ppc440spe_adma_check_threshold(chan); 2242 ppc440spe_adma_check_threshold(chan);
2243 spin_unlock_bh(&chan->lock); 2243 spin_unlock_bh(&chan->lock);
2244 2244
2245 dev_dbg(chan->device->common.dev, 2245 dev_dbg(chan->device->common.dev,
2246 "ppc440spe adma%d: %s cookie: %d slot: %d tx %p\n", 2246 "ppc440spe adma%d: %s cookie: %d slot: %d tx %p\n",
2247 chan->device->id, __func__, 2247 chan->device->id, __func__,
2248 sw_desc->async_tx.cookie, sw_desc->idx, sw_desc); 2248 sw_desc->async_tx.cookie, sw_desc->idx, sw_desc);
2249 2249
2250 return cookie; 2250 return cookie;
2251 } 2251 }
2252 2252
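Editorial note (not part of the diff): the cookie returned above is what the generic completion helpers track. A hedged client-side fragment for synchronously waiting on it; dma_sync_wait() and DMA_COMPLETE are generic dmaengine symbols, not part of this driver:

	dma_cookie_t cookie = dmaengine_submit(tx);

	if (dma_sync_wait(chan, cookie) != DMA_COMPLETE)
		pr_err("ppc440spe adma: cookie %d did not complete\n", cookie);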
2253 /** 2253 /**
2254 * ppc440spe_adma_prep_dma_interrupt - prepare CDB for a pseudo DMA operation 2254 * ppc440spe_adma_prep_dma_interrupt - prepare CDB for a pseudo DMA operation
2255 */ 2255 */
2256 static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_interrupt( 2256 static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_interrupt(
2257 struct dma_chan *chan, unsigned long flags) 2257 struct dma_chan *chan, unsigned long flags)
2258 { 2258 {
2259 struct ppc440spe_adma_chan *ppc440spe_chan; 2259 struct ppc440spe_adma_chan *ppc440spe_chan;
2260 struct ppc440spe_adma_desc_slot *sw_desc, *group_start; 2260 struct ppc440spe_adma_desc_slot *sw_desc, *group_start;
2261 int slot_cnt, slots_per_op; 2261 int slot_cnt, slots_per_op;
2262 2262
2263 ppc440spe_chan = to_ppc440spe_adma_chan(chan); 2263 ppc440spe_chan = to_ppc440spe_adma_chan(chan);
2264 2264
2265 dev_dbg(ppc440spe_chan->device->common.dev, 2265 dev_dbg(ppc440spe_chan->device->common.dev,
2266 "ppc440spe adma%d: %s\n", ppc440spe_chan->device->id, 2266 "ppc440spe adma%d: %s\n", ppc440spe_chan->device->id,
2267 __func__); 2267 __func__);
2268 2268
2269 spin_lock_bh(&ppc440spe_chan->lock); 2269 spin_lock_bh(&ppc440spe_chan->lock);
2270 slot_cnt = slots_per_op = 1; 2270 slot_cnt = slots_per_op = 1;
2271 sw_desc = ppc440spe_adma_alloc_slots(ppc440spe_chan, slot_cnt, 2271 sw_desc = ppc440spe_adma_alloc_slots(ppc440spe_chan, slot_cnt,
2272 slots_per_op); 2272 slots_per_op);
2273 if (sw_desc) { 2273 if (sw_desc) {
2274 group_start = sw_desc->group_head; 2274 group_start = sw_desc->group_head;
2275 ppc440spe_desc_init_interrupt(group_start, ppc440spe_chan); 2275 ppc440spe_desc_init_interrupt(group_start, ppc440spe_chan);
2276 group_start->unmap_len = 0; 2276 group_start->unmap_len = 0;
2277 sw_desc->async_tx.flags = flags; 2277 sw_desc->async_tx.flags = flags;
2278 } 2278 }
2279 spin_unlock_bh(&ppc440spe_chan->lock); 2279 spin_unlock_bh(&ppc440spe_chan->lock);
2280 2280
2281 return sw_desc ? &sw_desc->async_tx : NULL; 2281 return sw_desc ? &sw_desc->async_tx : NULL;
2282 } 2282 }
2283 2283
2284 /** 2284 /**
2285 * ppc440spe_adma_prep_dma_memcpy - prepare CDB for a MEMCPY operation 2285 * ppc440spe_adma_prep_dma_memcpy - prepare CDB for a MEMCPY operation
2286 */ 2286 */
2287 static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_memcpy( 2287 static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_memcpy(
2288 struct dma_chan *chan, dma_addr_t dma_dest, 2288 struct dma_chan *chan, dma_addr_t dma_dest,
2289 dma_addr_t dma_src, size_t len, unsigned long flags) 2289 dma_addr_t dma_src, size_t len, unsigned long flags)
2290 { 2290 {
2291 struct ppc440spe_adma_chan *ppc440spe_chan; 2291 struct ppc440spe_adma_chan *ppc440spe_chan;
2292 struct ppc440spe_adma_desc_slot *sw_desc, *group_start; 2292 struct ppc440spe_adma_desc_slot *sw_desc, *group_start;
2293 int slot_cnt, slots_per_op; 2293 int slot_cnt, slots_per_op;
2294 2294
2295 ppc440spe_chan = to_ppc440spe_adma_chan(chan); 2295 ppc440spe_chan = to_ppc440spe_adma_chan(chan);
2296 2296
2297 if (unlikely(!len)) 2297 if (unlikely(!len))
2298 return NULL; 2298 return NULL;
2299 2299
2300 BUG_ON(len > PPC440SPE_ADMA_DMA_MAX_BYTE_COUNT); 2300 BUG_ON(len > PPC440SPE_ADMA_DMA_MAX_BYTE_COUNT);
2301 2301
2302 spin_lock_bh(&ppc440spe_chan->lock); 2302 spin_lock_bh(&ppc440spe_chan->lock);
2303 2303
2304 dev_dbg(ppc440spe_chan->device->common.dev, 2304 dev_dbg(ppc440spe_chan->device->common.dev,
2305 "ppc440spe adma%d: %s len: %u int_en %d\n", 2305 "ppc440spe adma%d: %s len: %u int_en %d\n",
2306 ppc440spe_chan->device->id, __func__, len, 2306 ppc440spe_chan->device->id, __func__, len,
2307 flags & DMA_PREP_INTERRUPT ? 1 : 0); 2307 flags & DMA_PREP_INTERRUPT ? 1 : 0);
2308 slot_cnt = slots_per_op = 1; 2308 slot_cnt = slots_per_op = 1;
2309 sw_desc = ppc440spe_adma_alloc_slots(ppc440spe_chan, slot_cnt, 2309 sw_desc = ppc440spe_adma_alloc_slots(ppc440spe_chan, slot_cnt,
2310 slots_per_op); 2310 slots_per_op);
2311 if (sw_desc) { 2311 if (sw_desc) {
2312 group_start = sw_desc->group_head; 2312 group_start = sw_desc->group_head;
2313 ppc440spe_desc_init_memcpy(group_start, flags); 2313 ppc440spe_desc_init_memcpy(group_start, flags);
2314 ppc440spe_adma_set_dest(group_start, dma_dest, 0); 2314 ppc440spe_adma_set_dest(group_start, dma_dest, 0);
2315 ppc440spe_adma_memcpy_xor_set_src(group_start, dma_src, 0); 2315 ppc440spe_adma_memcpy_xor_set_src(group_start, dma_src, 0);
2316 ppc440spe_desc_set_byte_count(group_start, ppc440spe_chan, len); 2316 ppc440spe_desc_set_byte_count(group_start, ppc440spe_chan, len);
2317 sw_desc->unmap_len = len; 2317 sw_desc->unmap_len = len;
2318 sw_desc->async_tx.flags = flags; 2318 sw_desc->async_tx.flags = flags;
2319 } 2319 }
2320 spin_unlock_bh(&ppc440spe_chan->lock); 2320 spin_unlock_bh(&ppc440spe_chan->lock);
2321 2321
2322 return sw_desc ? &sw_desc->async_tx : NULL; 2322 return sw_desc ? &sw_desc->async_tx : NULL;
2323 } 2323 }
2324 2324
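Editorial usage sketch (not part of the diff) for the hook above, from the client side: map the buffers, prepare, submit. It assumes src_buf, dst_buf and len belong to the caller, skips mapping-error checks for brevity, and relies on the generic struct dma_device ops; the driver itself returns NULL for len == 0 and BUGs on len > PPC440SPE_ADMA_DMA_MAX_BYTE_COUNT:

	struct dma_device *dev = chan->device;
	dma_addr_t src = dma_map_single(dev->dev, src_buf, len, DMA_TO_DEVICE);
	dma_addr_t dst = dma_map_single(dev->dev, dst_buf, len, DMA_FROM_DEVICE);
	struct dma_async_tx_descriptor *tx;

	tx = dev->device_prep_dma_memcpy(chan, dst, src, len, DMA_PREP_INTERRUPT);
	if (tx)
		dmaengine_submit(tx);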
2325 /** 2325 /**
2326 * ppc440spe_adma_prep_dma_xor - prepare CDB for a XOR operation 2326 * ppc440spe_adma_prep_dma_xor - prepare CDB for a XOR operation
2327 */ 2327 */
2328 static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_xor( 2328 static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_xor(
2329 struct dma_chan *chan, dma_addr_t dma_dest, 2329 struct dma_chan *chan, dma_addr_t dma_dest,
2330 dma_addr_t *dma_src, u32 src_cnt, size_t len, 2330 dma_addr_t *dma_src, u32 src_cnt, size_t len,
2331 unsigned long flags) 2331 unsigned long flags)
2332 { 2332 {
2333 struct ppc440spe_adma_chan *ppc440spe_chan; 2333 struct ppc440spe_adma_chan *ppc440spe_chan;
2334 struct ppc440spe_adma_desc_slot *sw_desc, *group_start; 2334 struct ppc440spe_adma_desc_slot *sw_desc, *group_start;
2335 int slot_cnt, slots_per_op; 2335 int slot_cnt, slots_per_op;
2336 2336
2337 ppc440spe_chan = to_ppc440spe_adma_chan(chan); 2337 ppc440spe_chan = to_ppc440spe_adma_chan(chan);
2338 2338
2339 ADMA_LL_DBG(prep_dma_xor_dbg(ppc440spe_chan->device->id, 2339 ADMA_LL_DBG(prep_dma_xor_dbg(ppc440spe_chan->device->id,
2340 dma_dest, dma_src, src_cnt)); 2340 dma_dest, dma_src, src_cnt));
2341 if (unlikely(!len)) 2341 if (unlikely(!len))
2342 return NULL; 2342 return NULL;
2343 BUG_ON(len > PPC440SPE_ADMA_XOR_MAX_BYTE_COUNT); 2343 BUG_ON(len > PPC440SPE_ADMA_XOR_MAX_BYTE_COUNT);
2344 2344
2345 dev_dbg(ppc440spe_chan->device->common.dev, 2345 dev_dbg(ppc440spe_chan->device->common.dev,
2346 "ppc440spe adma%d: %s src_cnt: %d len: %u int_en: %d\n", 2346 "ppc440spe adma%d: %s src_cnt: %d len: %u int_en: %d\n",
2347 ppc440spe_chan->device->id, __func__, src_cnt, len, 2347 ppc440spe_chan->device->id, __func__, src_cnt, len,
2348 flags & DMA_PREP_INTERRUPT ? 1 : 0); 2348 flags & DMA_PREP_INTERRUPT ? 1 : 0);
2349 2349
2350 spin_lock_bh(&ppc440spe_chan->lock); 2350 spin_lock_bh(&ppc440spe_chan->lock);
2351 slot_cnt = ppc440spe_chan_xor_slot_count(len, src_cnt, &slots_per_op); 2351 slot_cnt = ppc440spe_chan_xor_slot_count(len, src_cnt, &slots_per_op);
2352 sw_desc = ppc440spe_adma_alloc_slots(ppc440spe_chan, slot_cnt, 2352 sw_desc = ppc440spe_adma_alloc_slots(ppc440spe_chan, slot_cnt,
2353 slots_per_op); 2353 slots_per_op);
2354 if (sw_desc) { 2354 if (sw_desc) {
2355 group_start = sw_desc->group_head; 2355 group_start = sw_desc->group_head;
2356 ppc440spe_desc_init_xor(group_start, src_cnt, flags); 2356 ppc440spe_desc_init_xor(group_start, src_cnt, flags);
2357 ppc440spe_adma_set_dest(group_start, dma_dest, 0); 2357 ppc440spe_adma_set_dest(group_start, dma_dest, 0);
2358 while (src_cnt--) 2358 while (src_cnt--)
2359 ppc440spe_adma_memcpy_xor_set_src(group_start, 2359 ppc440spe_adma_memcpy_xor_set_src(group_start,
2360 dma_src[src_cnt], src_cnt); 2360 dma_src[src_cnt], src_cnt);
2361 ppc440spe_desc_set_byte_count(group_start, ppc440spe_chan, len); 2361 ppc440spe_desc_set_byte_count(group_start, ppc440spe_chan, len);
2362 sw_desc->unmap_len = len; 2362 sw_desc->unmap_len = len;
2363 sw_desc->async_tx.flags = flags; 2363 sw_desc->async_tx.flags = flags;
2364 } 2364 }
2365 spin_unlock_bh(&ppc440spe_chan->lock); 2365 spin_unlock_bh(&ppc440spe_chan->lock);
2366 2366
2367 return sw_desc ? &sw_desc->async_tx : NULL; 2367 return sw_desc ? &sw_desc->async_tx : NULL;
2368 } 2368 }
2369 2369
2370 static inline void 2370 static inline void
2371 ppc440spe_desc_set_xor_src_cnt(struct ppc440spe_adma_desc_slot *desc, 2371 ppc440spe_desc_set_xor_src_cnt(struct ppc440spe_adma_desc_slot *desc,
2372 int src_cnt); 2372 int src_cnt);
2373 static void ppc440spe_init_rxor_cursor(struct ppc440spe_rxor *cursor); 2373 static void ppc440spe_init_rxor_cursor(struct ppc440spe_rxor *cursor);
2374 2374
2375 /** 2375 /**
2376 * ppc440spe_adma_init_dma2rxor_slot - 2376 * ppc440spe_adma_init_dma2rxor_slot -
2377 */ 2377 */
2378 static void ppc440spe_adma_init_dma2rxor_slot( 2378 static void ppc440spe_adma_init_dma2rxor_slot(
2379 struct ppc440spe_adma_desc_slot *desc, 2379 struct ppc440spe_adma_desc_slot *desc,
2380 dma_addr_t *src, int src_cnt) 2380 dma_addr_t *src, int src_cnt)
2381 { 2381 {
2382 int i; 2382 int i;
2383 2383
2384 /* initialize CDB */ 2384 /* initialize CDB */
2385 for (i = 0; i < src_cnt; i++) { 2385 for (i = 0; i < src_cnt; i++) {
2386 ppc440spe_adma_dma2rxor_prep_src(desc, &desc->rxor_cursor, i, 2386 ppc440spe_adma_dma2rxor_prep_src(desc, &desc->rxor_cursor, i,
2387 desc->src_cnt, (u32)src[i]); 2387 desc->src_cnt, (u32)src[i]);
2388 } 2388 }
2389 } 2389 }
2390 2390
2391 /** 2391 /**
2392 * ppc440spe_dma01_prep_mult - 2392 * ppc440spe_dma01_prep_mult -
2393 * for Q operation where destination is also the source 2393 * for Q operation where destination is also the source
2394 */ 2394 */
2395 static struct ppc440spe_adma_desc_slot *ppc440spe_dma01_prep_mult( 2395 static struct ppc440spe_adma_desc_slot *ppc440spe_dma01_prep_mult(
2396 struct ppc440spe_adma_chan *ppc440spe_chan, 2396 struct ppc440spe_adma_chan *ppc440spe_chan,
2397 dma_addr_t *dst, int dst_cnt, dma_addr_t *src, int src_cnt, 2397 dma_addr_t *dst, int dst_cnt, dma_addr_t *src, int src_cnt,
2398 const unsigned char *scf, size_t len, unsigned long flags) 2398 const unsigned char *scf, size_t len, unsigned long flags)
2399 { 2399 {
2400 struct ppc440spe_adma_desc_slot *sw_desc = NULL; 2400 struct ppc440spe_adma_desc_slot *sw_desc = NULL;
2401 unsigned long op = 0; 2401 unsigned long op = 0;
2402 int slot_cnt; 2402 int slot_cnt;
2403 2403
2404 set_bit(PPC440SPE_DESC_WXOR, &op); 2404 set_bit(PPC440SPE_DESC_WXOR, &op);
2405 slot_cnt = 2; 2405 slot_cnt = 2;
2406 2406
2407 spin_lock_bh(&ppc440spe_chan->lock); 2407 spin_lock_bh(&ppc440spe_chan->lock);
2408 2408
2409 /* use WXOR, each descriptor occupies one slot */ 2409 /* use WXOR, each descriptor occupies one slot */
2410 sw_desc = ppc440spe_adma_alloc_slots(ppc440spe_chan, slot_cnt, 1); 2410 sw_desc = ppc440spe_adma_alloc_slots(ppc440spe_chan, slot_cnt, 1);
2411 if (sw_desc) { 2411 if (sw_desc) {
2412 struct ppc440spe_adma_chan *chan; 2412 struct ppc440spe_adma_chan *chan;
2413 struct ppc440spe_adma_desc_slot *iter; 2413 struct ppc440spe_adma_desc_slot *iter;
2414 struct dma_cdb *hw_desc; 2414 struct dma_cdb *hw_desc;
2415 2415
2416 chan = to_ppc440spe_adma_chan(sw_desc->async_tx.chan); 2416 chan = to_ppc440spe_adma_chan(sw_desc->async_tx.chan);
2417 set_bits(op, &sw_desc->flags); 2417 set_bits(op, &sw_desc->flags);
2418 sw_desc->src_cnt = src_cnt; 2418 sw_desc->src_cnt = src_cnt;
2419 sw_desc->dst_cnt = dst_cnt; 2419 sw_desc->dst_cnt = dst_cnt;
2420 /* First descriptor, zero data in the destination and copy it 2420 /* First descriptor, zero data in the destination and copy it
2421 * to q page using MULTICAST transfer. 2421 * to q page using MULTICAST transfer.
2422 */ 2422 */
2423 iter = list_first_entry(&sw_desc->group_list, 2423 iter = list_first_entry(&sw_desc->group_list,
2424 struct ppc440spe_adma_desc_slot, 2424 struct ppc440spe_adma_desc_slot,
2425 chain_node); 2425 chain_node);
2426 memset(iter->hw_desc, 0, sizeof(struct dma_cdb)); 2426 memset(iter->hw_desc, 0, sizeof(struct dma_cdb));
2427 /* set 'next' pointer */ 2427 /* set 'next' pointer */
2428 iter->hw_next = list_entry(iter->chain_node.next, 2428 iter->hw_next = list_entry(iter->chain_node.next,
2429 struct ppc440spe_adma_desc_slot, 2429 struct ppc440spe_adma_desc_slot,
2430 chain_node); 2430 chain_node);
2431 clear_bit(PPC440SPE_DESC_INT, &iter->flags); 2431 clear_bit(PPC440SPE_DESC_INT, &iter->flags);
2432 hw_desc = iter->hw_desc; 2432 hw_desc = iter->hw_desc;
2433 hw_desc->opc = DMA_CDB_OPC_MULTICAST; 2433 hw_desc->opc = DMA_CDB_OPC_MULTICAST;
2434 2434
2435 ppc440spe_desc_set_dest_addr(iter, chan, 2435 ppc440spe_desc_set_dest_addr(iter, chan,
2436 DMA_CUED_XOR_BASE, dst[0], 0); 2436 DMA_CUED_XOR_BASE, dst[0], 0);
2437 ppc440spe_desc_set_dest_addr(iter, chan, 0, dst[1], 1); 2437 ppc440spe_desc_set_dest_addr(iter, chan, 0, dst[1], 1);
2438 ppc440spe_desc_set_src_addr(iter, chan, 0, DMA_CUED_XOR_HB, 2438 ppc440spe_desc_set_src_addr(iter, chan, 0, DMA_CUED_XOR_HB,
2439 src[0]); 2439 src[0]);
2440 ppc440spe_desc_set_byte_count(iter, ppc440spe_chan, len); 2440 ppc440spe_desc_set_byte_count(iter, ppc440spe_chan, len);
2441 iter->unmap_len = len; 2441 iter->unmap_len = len;
2442 2442
2443 /* 2443 /*
2444 * Second descriptor, multiply data from the q page 2444 * Second descriptor, multiply data from the q page
2445 * and store the result in real destination. 2445 * and store the result in real destination.
2446 */ 2446 */
2447 iter = list_first_entry(&iter->chain_node, 2447 iter = list_first_entry(&iter->chain_node,
2448 struct ppc440spe_adma_desc_slot, 2448 struct ppc440spe_adma_desc_slot,
2449 chain_node); 2449 chain_node);
2450 memset(iter->hw_desc, 0, sizeof(struct dma_cdb)); 2450 memset(iter->hw_desc, 0, sizeof(struct dma_cdb));
2451 iter->hw_next = NULL; 2451 iter->hw_next = NULL;
2452 if (flags & DMA_PREP_INTERRUPT) 2452 if (flags & DMA_PREP_INTERRUPT)
2453 set_bit(PPC440SPE_DESC_INT, &iter->flags); 2453 set_bit(PPC440SPE_DESC_INT, &iter->flags);
2454 else 2454 else
2455 clear_bit(PPC440SPE_DESC_INT, &iter->flags); 2455 clear_bit(PPC440SPE_DESC_INT, &iter->flags);
2456 2456
2457 hw_desc = iter->hw_desc; 2457 hw_desc = iter->hw_desc;
2458 hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2; 2458 hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;
2459 ppc440spe_desc_set_src_addr(iter, chan, 0, 2459 ppc440spe_desc_set_src_addr(iter, chan, 0,
2460 DMA_CUED_XOR_HB, dst[1]); 2460 DMA_CUED_XOR_HB, dst[1]);
2461 ppc440spe_desc_set_dest_addr(iter, chan, 2461 ppc440spe_desc_set_dest_addr(iter, chan,
2462 DMA_CUED_XOR_BASE, dst[0], 0); 2462 DMA_CUED_XOR_BASE, dst[0], 0);
2463 2463
2464 ppc440spe_desc_set_src_mult(iter, chan, DMA_CUED_MULT1_OFF, 2464 ppc440spe_desc_set_src_mult(iter, chan, DMA_CUED_MULT1_OFF,
2465 DMA_CDB_SG_DST1, scf[0]); 2465 DMA_CDB_SG_DST1, scf[0]);
2466 ppc440spe_desc_set_byte_count(iter, ppc440spe_chan, len); 2466 ppc440spe_desc_set_byte_count(iter, ppc440spe_chan, len);
2467 iter->unmap_len = len; 2467 iter->unmap_len = len;
2468 sw_desc->async_tx.flags = flags; 2468 sw_desc->async_tx.flags = flags;
2469 } 2469 }
2470 2470
2471 spin_unlock_bh(&ppc440spe_chan->lock); 2471 spin_unlock_bh(&ppc440spe_chan->lock);
2472 2472
2473 return sw_desc; 2473 return sw_desc;
2474 } 2474 }
2475 2475
2476 /** 2476 /**
2477 * ppc440spe_dma01_prep_sum_product - 2477 * ppc440spe_dma01_prep_sum_product -
2478 * Dx = A*(P+Pxy) + B*(Q+Qxy) operation where destination is also 2478 * Dx = A*(P+Pxy) + B*(Q+Qxy) operation where destination is also
2479 * the source. 2479 * the source.
2480 */ 2480 */
2481 static struct ppc440spe_adma_desc_slot *ppc440spe_dma01_prep_sum_product( 2481 static struct ppc440spe_adma_desc_slot *ppc440spe_dma01_prep_sum_product(
2482 struct ppc440spe_adma_chan *ppc440spe_chan, 2482 struct ppc440spe_adma_chan *ppc440spe_chan,
2483 dma_addr_t *dst, dma_addr_t *src, int src_cnt, 2483 dma_addr_t *dst, dma_addr_t *src, int src_cnt,
2484 const unsigned char *scf, size_t len, unsigned long flags) 2484 const unsigned char *scf, size_t len, unsigned long flags)
2485 { 2485 {
2486 struct ppc440spe_adma_desc_slot *sw_desc = NULL; 2486 struct ppc440spe_adma_desc_slot *sw_desc = NULL;
2487 unsigned long op = 0; 2487 unsigned long op = 0;
2488 int slot_cnt; 2488 int slot_cnt;
2489 2489
2490 set_bit(PPC440SPE_DESC_WXOR, &op); 2490 set_bit(PPC440SPE_DESC_WXOR, &op);
2491 slot_cnt = 3; 2491 slot_cnt = 3;
2492 2492
2493 spin_lock_bh(&ppc440spe_chan->lock); 2493 spin_lock_bh(&ppc440spe_chan->lock);
2494 2494
2495 /* WXOR, each descriptor occupies one slot */ 2495 /* WXOR, each descriptor occupies one slot */
2496 sw_desc = ppc440spe_adma_alloc_slots(ppc440spe_chan, slot_cnt, 1); 2496 sw_desc = ppc440spe_adma_alloc_slots(ppc440spe_chan, slot_cnt, 1);
2497 if (sw_desc) { 2497 if (sw_desc) {
2498 struct ppc440spe_adma_chan *chan; 2498 struct ppc440spe_adma_chan *chan;
2499 struct ppc440spe_adma_desc_slot *iter; 2499 struct ppc440spe_adma_desc_slot *iter;
2500 struct dma_cdb *hw_desc; 2500 struct dma_cdb *hw_desc;
2501 2501
2502 chan = to_ppc440spe_adma_chan(sw_desc->async_tx.chan); 2502 chan = to_ppc440spe_adma_chan(sw_desc->async_tx.chan);
2503 set_bits(op, &sw_desc->flags); 2503 set_bits(op, &sw_desc->flags);
2504 sw_desc->src_cnt = src_cnt; 2504 sw_desc->src_cnt = src_cnt;
2505 sw_desc->dst_cnt = 1; 2505 sw_desc->dst_cnt = 1;
2506 /* 1st descriptor, src[1] data to q page and zero destination */ 2506 /* 1st descriptor, src[1] data to q page and zero destination */
2507 iter = list_first_entry(&sw_desc->group_list, 2507 iter = list_first_entry(&sw_desc->group_list,
2508 struct ppc440spe_adma_desc_slot, 2508 struct ppc440spe_adma_desc_slot,
2509 chain_node); 2509 chain_node);
2510 memset(iter->hw_desc, 0, sizeof(struct dma_cdb)); 2510 memset(iter->hw_desc, 0, sizeof(struct dma_cdb));
2511 iter->hw_next = list_entry(iter->chain_node.next, 2511 iter->hw_next = list_entry(iter->chain_node.next,
2512 struct ppc440spe_adma_desc_slot, 2512 struct ppc440spe_adma_desc_slot,
2513 chain_node); 2513 chain_node);
2514 clear_bit(PPC440SPE_DESC_INT, &iter->flags); 2514 clear_bit(PPC440SPE_DESC_INT, &iter->flags);
2515 hw_desc = iter->hw_desc; 2515 hw_desc = iter->hw_desc;
2516 hw_desc->opc = DMA_CDB_OPC_MULTICAST; 2516 hw_desc->opc = DMA_CDB_OPC_MULTICAST;
2517 2517
2518 ppc440spe_desc_set_dest_addr(iter, chan, DMA_CUED_XOR_BASE, 2518 ppc440spe_desc_set_dest_addr(iter, chan, DMA_CUED_XOR_BASE,
2519 *dst, 0); 2519 *dst, 0);
2520 ppc440spe_desc_set_dest_addr(iter, chan, 0, 2520 ppc440spe_desc_set_dest_addr(iter, chan, 0,
2521 ppc440spe_chan->qdest, 1); 2521 ppc440spe_chan->qdest, 1);
2522 ppc440spe_desc_set_src_addr(iter, chan, 0, DMA_CUED_XOR_HB, 2522 ppc440spe_desc_set_src_addr(iter, chan, 0, DMA_CUED_XOR_HB,
2523 src[1]); 2523 src[1]);
2524 ppc440spe_desc_set_byte_count(iter, ppc440spe_chan, len); 2524 ppc440spe_desc_set_byte_count(iter, ppc440spe_chan, len);
2525 iter->unmap_len = len; 2525 iter->unmap_len = len;
2526 2526
2527 /* 2nd descriptor, multiply src[1] data and store the 2527 /* 2nd descriptor, multiply src[1] data and store the
2528 * result in destination */ 2528 * result in destination */
2529 iter = list_first_entry(&iter->chain_node, 2529 iter = list_first_entry(&iter->chain_node,
2530 struct ppc440spe_adma_desc_slot, 2530 struct ppc440spe_adma_desc_slot,
2531 chain_node); 2531 chain_node);
2532 memset(iter->hw_desc, 0, sizeof(struct dma_cdb)); 2532 memset(iter->hw_desc, 0, sizeof(struct dma_cdb));
2533 /* set 'next' pointer */ 2533 /* set 'next' pointer */
2534 iter->hw_next = list_entry(iter->chain_node.next, 2534 iter->hw_next = list_entry(iter->chain_node.next,
2535 struct ppc440spe_adma_desc_slot, 2535 struct ppc440spe_adma_desc_slot,
2536 chain_node); 2536 chain_node);
2537 if (flags & DMA_PREP_INTERRUPT) 2537 if (flags & DMA_PREP_INTERRUPT)
2538 set_bit(PPC440SPE_DESC_INT, &iter->flags); 2538 set_bit(PPC440SPE_DESC_INT, &iter->flags);
2539 else 2539 else
2540 clear_bit(PPC440SPE_DESC_INT, &iter->flags); 2540 clear_bit(PPC440SPE_DESC_INT, &iter->flags);
2541 2541
2542 hw_desc = iter->hw_desc; 2542 hw_desc = iter->hw_desc;
2543 hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2; 2543 hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;
2544 ppc440spe_desc_set_src_addr(iter, chan, 0, DMA_CUED_XOR_HB, 2544 ppc440spe_desc_set_src_addr(iter, chan, 0, DMA_CUED_XOR_HB,
2545 ppc440spe_chan->qdest); 2545 ppc440spe_chan->qdest);
2546 ppc440spe_desc_set_dest_addr(iter, chan, DMA_CUED_XOR_BASE, 2546 ppc440spe_desc_set_dest_addr(iter, chan, DMA_CUED_XOR_BASE,
2547 *dst, 0); 2547 *dst, 0);
2548 ppc440spe_desc_set_src_mult(iter, chan, DMA_CUED_MULT1_OFF, 2548 ppc440spe_desc_set_src_mult(iter, chan, DMA_CUED_MULT1_OFF,
2549 DMA_CDB_SG_DST1, scf[1]); 2549 DMA_CDB_SG_DST1, scf[1]);
2550 ppc440spe_desc_set_byte_count(iter, ppc440spe_chan, len); 2550 ppc440spe_desc_set_byte_count(iter, ppc440spe_chan, len);
2551 iter->unmap_len = len; 2551 iter->unmap_len = len;
2552 2552
2553 /* 2553 /*
2554 * 3rd descriptor, multiply src[0] data and xor it 2554 * 3rd descriptor, multiply src[0] data and xor it
2555 * with destination 2555 * with destination
2556 */ 2556 */
2557 iter = list_first_entry(&iter->chain_node, 2557 iter = list_first_entry(&iter->chain_node,
2558 struct ppc440spe_adma_desc_slot, 2558 struct ppc440spe_adma_desc_slot,
2559 chain_node); 2559 chain_node);
2560 memset(iter->hw_desc, 0, sizeof(struct dma_cdb)); 2560 memset(iter->hw_desc, 0, sizeof(struct dma_cdb));
2561 iter->hw_next = NULL; 2561 iter->hw_next = NULL;
2562 if (flags & DMA_PREP_INTERRUPT) 2562 if (flags & DMA_PREP_INTERRUPT)
2563 set_bit(PPC440SPE_DESC_INT, &iter->flags); 2563 set_bit(PPC440SPE_DESC_INT, &iter->flags);
2564 else 2564 else
2565 clear_bit(PPC440SPE_DESC_INT, &iter->flags); 2565 clear_bit(PPC440SPE_DESC_INT, &iter->flags);
2566 2566
2567 hw_desc = iter->hw_desc; 2567 hw_desc = iter->hw_desc;
2568 hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2; 2568 hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;
2569 ppc440spe_desc_set_src_addr(iter, chan, 0, DMA_CUED_XOR_HB, 2569 ppc440spe_desc_set_src_addr(iter, chan, 0, DMA_CUED_XOR_HB,
2570 src[0]); 2570 src[0]);
2571 ppc440spe_desc_set_dest_addr(iter, chan, DMA_CUED_XOR_BASE, 2571 ppc440spe_desc_set_dest_addr(iter, chan, DMA_CUED_XOR_BASE,
2572 *dst, 0); 2572 *dst, 0);
2573 ppc440spe_desc_set_src_mult(iter, chan, DMA_CUED_MULT1_OFF, 2573 ppc440spe_desc_set_src_mult(iter, chan, DMA_CUED_MULT1_OFF,
2574 DMA_CDB_SG_DST1, scf[0]); 2574 DMA_CDB_SG_DST1, scf[0]);
2575 ppc440spe_desc_set_byte_count(iter, ppc440spe_chan, len); 2575 ppc440spe_desc_set_byte_count(iter, ppc440spe_chan, len);
2576 iter->unmap_len = len; 2576 iter->unmap_len = len;
2577 sw_desc->async_tx.flags = flags; 2577 sw_desc->async_tx.flags = flags;
2578 } 2578 }
2579 2579
2580 spin_unlock_bh(&ppc440spe_chan->lock); 2580 spin_unlock_bh(&ppc440spe_chan->lock);
2581 2581
2582 return sw_desc; 2582 return sw_desc;
2583 } 2583 }
2584 2584
2585 static struct ppc440spe_adma_desc_slot *ppc440spe_dma01_prep_pq( 2585 static struct ppc440spe_adma_desc_slot *ppc440spe_dma01_prep_pq(
2586 struct ppc440spe_adma_chan *ppc440spe_chan, 2586 struct ppc440spe_adma_chan *ppc440spe_chan,
2587 dma_addr_t *dst, int dst_cnt, dma_addr_t *src, int src_cnt, 2587 dma_addr_t *dst, int dst_cnt, dma_addr_t *src, int src_cnt,
2588 const unsigned char *scf, size_t len, unsigned long flags) 2588 const unsigned char *scf, size_t len, unsigned long flags)
2589 { 2589 {
2590 int slot_cnt; 2590 int slot_cnt;
2591 struct ppc440spe_adma_desc_slot *sw_desc = NULL, *iter; 2591 struct ppc440spe_adma_desc_slot *sw_desc = NULL, *iter;
2592 unsigned long op = 0; 2592 unsigned long op = 0;
2593 unsigned char mult = 1; 2593 unsigned char mult = 1;
2594 2594
2595 pr_debug("%s: dst_cnt %d, src_cnt %d, len %zu\n", 2595 pr_debug("%s: dst_cnt %d, src_cnt %d, len %zu\n",
2596 __func__, dst_cnt, src_cnt, len); 2596 __func__, dst_cnt, src_cnt, len);
2597 /* select operations WXOR/RXOR depending on the 2597 /* select operations WXOR/RXOR depending on the
2598 * source addresses of operators and the number 2598 * source addresses of operators and the number
2599 * of destinations (RXOR supports only Q-parity calculations) 2599 * of destinations (RXOR supports only Q-parity calculations)
2600 */ 2600 */
2601 set_bit(PPC440SPE_DESC_WXOR, &op); 2601 set_bit(PPC440SPE_DESC_WXOR, &op);
2602 if (!test_and_set_bit(PPC440SPE_RXOR_RUN, &ppc440spe_rxor_state)) { 2602 if (!test_and_set_bit(PPC440SPE_RXOR_RUN, &ppc440spe_rxor_state)) {
2603 /* no active RXOR; 2603 /* no active RXOR;
2604 * do RXOR if: 2604 * do RXOR if:
2605 * - there is more than one source, 2605 * - there is more than one source,
2606 * - len is aligned on a 512-byte boundary, 2606 * - len is aligned on a 512-byte boundary,
2607 * - the source addresses fit into one of 4 possible regions. 2607 * - the source addresses fit into one of 4 possible regions.
2608 */ 2608 */
2609 if (src_cnt > 1 && 2609 if (src_cnt > 1 &&
2610 !(len & MQ0_CF2H_RXOR_BS_MASK) && 2610 !(len & MQ0_CF2H_RXOR_BS_MASK) &&
2611 (src[0] + len) == src[1]) { 2611 (src[0] + len) == src[1]) {
2612 /* may do RXOR R1 R2 */ 2612 /* may do RXOR R1 R2 */
2613 set_bit(PPC440SPE_DESC_RXOR, &op); 2613 set_bit(PPC440SPE_DESC_RXOR, &op);
2614 if (src_cnt != 2) { 2614 if (src_cnt != 2) {
2615 /* may try to enhance region of RXOR */ 2615 /* may try to enhance region of RXOR */
2616 if ((src[1] + len) == src[2]) { 2616 if ((src[1] + len) == src[2]) {
2617 /* do RXOR R1 R2 R3 */ 2617 /* do RXOR R1 R2 R3 */
2618 set_bit(PPC440SPE_DESC_RXOR123, 2618 set_bit(PPC440SPE_DESC_RXOR123,
2619 &op); 2619 &op);
2620 } else if ((src[1] + len * 2) == src[2]) { 2620 } else if ((src[1] + len * 2) == src[2]) {
2621 /* do RXOR R1 R2 R4 */ 2621 /* do RXOR R1 R2 R4 */
2622 set_bit(PPC440SPE_DESC_RXOR124, &op); 2622 set_bit(PPC440SPE_DESC_RXOR124, &op);
2623 } else if ((src[1] + len * 3) == src[2]) { 2623 } else if ((src[1] + len * 3) == src[2]) {
2624 /* do RXOR R1 R2 R5 */ 2624 /* do RXOR R1 R2 R5 */
2625 set_bit(PPC440SPE_DESC_RXOR125, 2625 set_bit(PPC440SPE_DESC_RXOR125,
2626 &op); 2626 &op);
2627 } else { 2627 } else {
2628 /* do RXOR R1 R2 */ 2628 /* do RXOR R1 R2 */
2629 set_bit(PPC440SPE_DESC_RXOR12, 2629 set_bit(PPC440SPE_DESC_RXOR12,
2630 &op); 2630 &op);
2631 } 2631 }
2632 } else { 2632 } else {
2633 /* do RXOR R1 R2 */ 2633 /* do RXOR R1 R2 */
2634 set_bit(PPC440SPE_DESC_RXOR12, &op); 2634 set_bit(PPC440SPE_DESC_RXOR12, &op);
2635 } 2635 }
2636 } 2636 }
2637 2637
2638 if (!test_bit(PPC440SPE_DESC_RXOR, &op)) { 2638 if (!test_bit(PPC440SPE_DESC_RXOR, &op)) {
2639 /* cannot do this operation with RXOR */ 2639 /* cannot do this operation with RXOR */
2640 clear_bit(PPC440SPE_RXOR_RUN, 2640 clear_bit(PPC440SPE_RXOR_RUN,
2641 &ppc440spe_rxor_state); 2641 &ppc440spe_rxor_state);
2642 } else { 2642 } else {
2643 /* can do; set block size right now */ 2643 /* can do; set block size right now */
2644 ppc440spe_desc_set_rxor_block_size(len); 2644 ppc440spe_desc_set_rxor_block_size(len);
2645 } 2645 }
2646 } 2646 }
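		/* Editorial worked example (not driver code): with len = 4096 (so
		 * the 512-byte alignment test passes) and src[1] == src[0] + 4096,
		 * the code above selects RXOR R1 R2.  If src[2] == src[1] + 4096
		 * the region widens to R1 R2 R3; src[2] == src[1] + 2*4096 gives
		 * R1 R2 R4 and src[2] == src[1] + 3*4096 gives R1 R2 R5.  Any other
		 * spacing keeps RXOR R1 R2, and sources outside the region are
		 * handled by the WXOR part of the chain.
		 */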
2647 2647
2648 /* Number of necessary slots depends on operation type selected */ 2648 /* Number of necessary slots depends on operation type selected */
2649 if (!test_bit(PPC440SPE_DESC_RXOR, &op)) { 2649 if (!test_bit(PPC440SPE_DESC_RXOR, &op)) {
2650 /* This is a WXOR only chain. Need descriptors for each 2650 /* This is a WXOR only chain. Need descriptors for each
2651 * source to GF-XOR them with WXOR, and need descriptors 2651 * source to GF-XOR them with WXOR, and need descriptors
2652 * for each destination to zero them with WXOR 2652 * for each destination to zero them with WXOR
2653 */ 2653 */
2654 slot_cnt = src_cnt; 2654 slot_cnt = src_cnt;
2655 2655
2656 if (flags & DMA_PREP_ZERO_P) { 2656 if (flags & DMA_PREP_ZERO_P) {
2657 slot_cnt++; 2657 slot_cnt++;
2658 set_bit(PPC440SPE_ZERO_P, &op); 2658 set_bit(PPC440SPE_ZERO_P, &op);
2659 } 2659 }
2660 if (flags & DMA_PREP_ZERO_Q) { 2660 if (flags & DMA_PREP_ZERO_Q) {
2661 slot_cnt++; 2661 slot_cnt++;
2662 set_bit(PPC440SPE_ZERO_Q, &op); 2662 set_bit(PPC440SPE_ZERO_Q, &op);
2663 } 2663 }
2664 } else { 2664 } else {
2665 /* Need 1 or 2 descriptors for the RXOR operation, and 2665 /* Need 1 or 2 descriptors for the RXOR operation, and
2666 * (src_cnt - (2 or 3)) descriptors for WXOR of the 2666 * (src_cnt - (2 or 3)) descriptors for WXOR of the
2667 * remaining sources (if any) 2667 * remaining sources (if any)
2668 */ 2668 */
2669 slot_cnt = dst_cnt; 2669 slot_cnt = dst_cnt;
2670 2670
2671 if (flags & DMA_PREP_ZERO_P) 2671 if (flags & DMA_PREP_ZERO_P)
2672 set_bit(PPC440SPE_ZERO_P, &op); 2672 set_bit(PPC440SPE_ZERO_P, &op);
2673 if (flags & DMA_PREP_ZERO_Q) 2673 if (flags & DMA_PREP_ZERO_Q)
2674 set_bit(PPC440SPE_ZERO_Q, &op); 2674 set_bit(PPC440SPE_ZERO_Q, &op);
2675 2675
2676 if (test_bit(PPC440SPE_DESC_RXOR12, &op)) 2676 if (test_bit(PPC440SPE_DESC_RXOR12, &op))
2677 slot_cnt += src_cnt - 2; 2677 slot_cnt += src_cnt - 2;
2678 else 2678 else
2679 slot_cnt += src_cnt - 3; 2679 slot_cnt += src_cnt - 3;
2680 2680
2681 /* Thus we have either an RXOR-only chain or 2681 /* Thus we have either an RXOR-only chain or
2682 * a mixed RXOR/WXOR one 2682 * a mixed RXOR/WXOR one
2683 */ 2683 */
2684 if (slot_cnt == dst_cnt) 2684 if (slot_cnt == dst_cnt)
2685 /* RXOR only chain */ 2685 /* RXOR only chain */
2686 clear_bit(PPC440SPE_DESC_WXOR, &op); 2686 clear_bit(PPC440SPE_DESC_WXOR, &op);
2687 } 2687 }
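	/*
	 * Worked example of the slot arithmetic above (hypothetical
	 * numbers): src_cnt = 6 with both P and Q zeroed gives
	 *   WXOR-only chain:        slot_cnt = 6 + 1 + 1 = 8;
	 *   RXOR12 + WXOR:          slot_cnt = 2 + (6 - 2) = 6;
	 *   RXOR123/124/125 + WXOR: slot_cnt = 2 + (6 - 3) = 5.
	 * When slot_cnt ends up equal to dst_cnt, no WXOR descriptors
	 * remain and the chain is RXOR-only.
	 */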
2688 2688
2689 spin_lock_bh(&ppc440spe_chan->lock); 2689 spin_lock_bh(&ppc440spe_chan->lock);
2690 /* for both RXOR/WXOR each descriptor occupies one slot */ 2690 /* for both RXOR/WXOR each descriptor occupies one slot */
2691 sw_desc = ppc440spe_adma_alloc_slots(ppc440spe_chan, slot_cnt, 1); 2691 sw_desc = ppc440spe_adma_alloc_slots(ppc440spe_chan, slot_cnt, 1);
2692 if (sw_desc) { 2692 if (sw_desc) {
2693 ppc440spe_desc_init_dma01pq(sw_desc, dst_cnt, src_cnt, 2693 ppc440spe_desc_init_dma01pq(sw_desc, dst_cnt, src_cnt,
2694 flags, op); 2694 flags, op);
2695 2695
2696 /* setup dst/src/mult */ 2696 /* setup dst/src/mult */
2697 pr_debug("%s: set dst descriptor 0, 1: 0x%016llx, 0x%016llx\n", 2697 pr_debug("%s: set dst descriptor 0, 1: 0x%016llx, 0x%016llx\n",
2698 __func__, dst[0], dst[1]); 2698 __func__, dst[0], dst[1]);
2699 ppc440spe_adma_pq_set_dest(sw_desc, dst, flags); 2699 ppc440spe_adma_pq_set_dest(sw_desc, dst, flags);
2700 while (src_cnt--) { 2700 while (src_cnt--) {
2701 ppc440spe_adma_pq_set_src(sw_desc, src[src_cnt], 2701 ppc440spe_adma_pq_set_src(sw_desc, src[src_cnt],
2702 src_cnt); 2702 src_cnt);
2703 2703
2704 /* NOTE: "Multi = 0 is equivalent to = 1", as 2704 /* NOTE: "Multi = 0 is equivalent to = 1", as
2705 * stated in 440SPSPe_RAID6_Addendum_UM_1_17.pdf, 2705 * stated in 440SPSPe_RAID6_Addendum_UM_1_17.pdf,
2706 * doesn't work for RXOR with DMA0/1! Instead, multi=0 2706 * doesn't work for RXOR with DMA0/1! Instead, multi=0
2707 * leads to zeroing the source data after RXOR. 2707 * leads to zeroing the source data after RXOR.
2708 * So, for the P case, set mult=1 explicitly. 2708 * So, for the P case, set mult=1 explicitly.
2709 */ 2709 */
2710 if (!(flags & DMA_PREP_PQ_DISABLE_Q)) 2710 if (!(flags & DMA_PREP_PQ_DISABLE_Q))
2711 mult = scf[src_cnt]; 2711 mult = scf[src_cnt];
2712 ppc440spe_adma_pq_set_src_mult(sw_desc, 2712 ppc440spe_adma_pq_set_src_mult(sw_desc,
2713 mult, src_cnt, dst_cnt - 1); 2713 mult, src_cnt, dst_cnt - 1);
2714 } 2714 }
2715 2715
2716 /* Setup byte count for each slot just allocated */ 2716 /* Setup byte count for each slot just allocated */
2717 sw_desc->async_tx.flags = flags; 2717 sw_desc->async_tx.flags = flags;
2718 list_for_each_entry(iter, &sw_desc->group_list, 2718 list_for_each_entry(iter, &sw_desc->group_list,
2719 chain_node) { 2719 chain_node) {
2720 ppc440spe_desc_set_byte_count(iter, 2720 ppc440spe_desc_set_byte_count(iter,
2721 ppc440spe_chan, len); 2721 ppc440spe_chan, len);
2722 iter->unmap_len = len; 2722 iter->unmap_len = len;
2723 } 2723 }
2724 } 2724 }
2725 spin_unlock_bh(&ppc440spe_chan->lock); 2725 spin_unlock_bh(&ppc440spe_chan->lock);
2726 2726
2727 return sw_desc; 2727 return sw_desc;
2728 } 2728 }
2729 2729
2730 static struct ppc440spe_adma_desc_slot *ppc440spe_dma2_prep_pq( 2730 static struct ppc440spe_adma_desc_slot *ppc440spe_dma2_prep_pq(
2731 struct ppc440spe_adma_chan *ppc440spe_chan, 2731 struct ppc440spe_adma_chan *ppc440spe_chan,
2732 dma_addr_t *dst, int dst_cnt, dma_addr_t *src, int src_cnt, 2732 dma_addr_t *dst, int dst_cnt, dma_addr_t *src, int src_cnt,
2733 const unsigned char *scf, size_t len, unsigned long flags) 2733 const unsigned char *scf, size_t len, unsigned long flags)
2734 { 2734 {
2735 int slot_cnt, descs_per_op; 2735 int slot_cnt, descs_per_op;
2736 struct ppc440spe_adma_desc_slot *sw_desc = NULL, *iter; 2736 struct ppc440spe_adma_desc_slot *sw_desc = NULL, *iter;
2737 unsigned long op = 0; 2737 unsigned long op = 0;
2738 unsigned char mult = 1; 2738 unsigned char mult = 1;
2739 2739
2740 BUG_ON(!dst_cnt); 2740 BUG_ON(!dst_cnt);
2741 /*pr_debug("%s: dst_cnt %d, src_cnt %d, len %d\n", 2741 /*pr_debug("%s: dst_cnt %d, src_cnt %d, len %d\n",
2742 __func__, dst_cnt, src_cnt, len);*/ 2742 __func__, dst_cnt, src_cnt, len);*/
2743 2743
2744 spin_lock_bh(&ppc440spe_chan->lock); 2744 spin_lock_bh(&ppc440spe_chan->lock);
2745 descs_per_op = ppc440spe_dma2_pq_slot_count(src, src_cnt, len); 2745 descs_per_op = ppc440spe_dma2_pq_slot_count(src, src_cnt, len);
2746 if (descs_per_op < 0) { 2746 if (descs_per_op < 0) {
2747 spin_unlock_bh(&ppc440spe_chan->lock); 2747 spin_unlock_bh(&ppc440spe_chan->lock);
2748 return NULL; 2748 return NULL;
2749 } 2749 }
2750 2750
2751 /* depending on number of sources we have 1 or 2 RXOR chains */ 2751 /* depending on number of sources we have 1 or 2 RXOR chains */
2752 slot_cnt = descs_per_op * dst_cnt; 2752 slot_cnt = descs_per_op * dst_cnt;
2753 2753
2754 sw_desc = ppc440spe_adma_alloc_slots(ppc440spe_chan, slot_cnt, 1); 2754 sw_desc = ppc440spe_adma_alloc_slots(ppc440spe_chan, slot_cnt, 1);
2755 if (sw_desc) { 2755 if (sw_desc) {
2756 op = slot_cnt; 2756 op = slot_cnt;
2757 sw_desc->async_tx.flags = flags; 2757 sw_desc->async_tx.flags = flags;
2758 list_for_each_entry(iter, &sw_desc->group_list, chain_node) { 2758 list_for_each_entry(iter, &sw_desc->group_list, chain_node) {
2759 ppc440spe_desc_init_dma2pq(iter, dst_cnt, src_cnt, 2759 ppc440spe_desc_init_dma2pq(iter, dst_cnt, src_cnt,
2760 --op ? 0 : flags); 2760 --op ? 0 : flags);
2761 ppc440spe_desc_set_byte_count(iter, ppc440spe_chan, 2761 ppc440spe_desc_set_byte_count(iter, ppc440spe_chan,
2762 len); 2762 len);
2763 iter->unmap_len = len; 2763 iter->unmap_len = len;
2764 2764
2765 ppc440spe_init_rxor_cursor(&(iter->rxor_cursor)); 2765 ppc440spe_init_rxor_cursor(&(iter->rxor_cursor));
2766 iter->rxor_cursor.len = len; 2766 iter->rxor_cursor.len = len;
2767 iter->descs_per_op = descs_per_op; 2767 iter->descs_per_op = descs_per_op;
2768 } 2768 }
2769 op = 0; 2769 op = 0;
2770 list_for_each_entry(iter, &sw_desc->group_list, chain_node) { 2770 list_for_each_entry(iter, &sw_desc->group_list, chain_node) {
2771 op++; 2771 op++;
2772 if (op % descs_per_op == 0) 2772 if (op % descs_per_op == 0)
2773 ppc440spe_adma_init_dma2rxor_slot(iter, src, 2773 ppc440spe_adma_init_dma2rxor_slot(iter, src,
2774 src_cnt); 2774 src_cnt);
2775 if (likely(!list_is_last(&iter->chain_node, 2775 if (likely(!list_is_last(&iter->chain_node,
2776 &sw_desc->group_list))) { 2776 &sw_desc->group_list))) {
2777 /* set 'next' pointer */ 2777 /* set 'next' pointer */
2778 iter->hw_next = 2778 iter->hw_next =
2779 list_entry(iter->chain_node.next, 2779 list_entry(iter->chain_node.next,
2780 struct ppc440spe_adma_desc_slot, 2780 struct ppc440spe_adma_desc_slot,
2781 chain_node); 2781 chain_node);
2782 ppc440spe_xor_set_link(iter, iter->hw_next); 2782 ppc440spe_xor_set_link(iter, iter->hw_next);
2783 } else { 2783 } else {
2784 /* this is the last descriptor. */ 2784 /* this is the last descriptor. */
2785 iter->hw_next = NULL; 2785 iter->hw_next = NULL;
2786 } 2786 }
2787 } 2787 }
2788 2788
2789 /* fixup head descriptor */ 2789 /* fixup head descriptor */
2790 sw_desc->dst_cnt = dst_cnt; 2790 sw_desc->dst_cnt = dst_cnt;
2791 if (flags & DMA_PREP_ZERO_P) 2791 if (flags & DMA_PREP_ZERO_P)
2792 set_bit(PPC440SPE_ZERO_P, &sw_desc->flags); 2792 set_bit(PPC440SPE_ZERO_P, &sw_desc->flags);
2793 if (flags & DMA_PREP_ZERO_Q) 2793 if (flags & DMA_PREP_ZERO_Q)
2794 set_bit(PPC440SPE_ZERO_Q, &sw_desc->flags); 2794 set_bit(PPC440SPE_ZERO_Q, &sw_desc->flags);
2795 2795
2796 /* setup dst/src/mult */ 2796 /* setup dst/src/mult */
2797 ppc440spe_adma_pq_set_dest(sw_desc, dst, flags); 2797 ppc440spe_adma_pq_set_dest(sw_desc, dst, flags);
2798 2798
2799 while (src_cnt--) { 2799 while (src_cnt--) {
2800 /* handle descriptors (if dst_cnt == 2) inside 2800 /* handle descriptors (if dst_cnt == 2) inside
2801 * the ppc440spe_adma_pq_set_srcxxx() functions 2801 * the ppc440spe_adma_pq_set_srcxxx() functions
2802 */ 2802 */
2803 ppc440spe_adma_pq_set_src(sw_desc, src[src_cnt], 2803 ppc440spe_adma_pq_set_src(sw_desc, src[src_cnt],
2804 src_cnt); 2804 src_cnt);
2805 if (!(flags & DMA_PREP_PQ_DISABLE_Q)) 2805 if (!(flags & DMA_PREP_PQ_DISABLE_Q))
2806 mult = scf[src_cnt]; 2806 mult = scf[src_cnt];
2807 ppc440spe_adma_pq_set_src_mult(sw_desc, 2807 ppc440spe_adma_pq_set_src_mult(sw_desc,
2808 mult, src_cnt, dst_cnt - 1); 2808 mult, src_cnt, dst_cnt - 1);
2809 } 2809 }
2810 } 2810 }
2811 spin_unlock_bh(&ppc440spe_chan->lock); 2811 spin_unlock_bh(&ppc440spe_chan->lock);
2812 ppc440spe_desc_set_rxor_block_size(len); 2812 ppc440spe_desc_set_rxor_block_size(len);
2813 return sw_desc; 2813 return sw_desc;
2814 } 2814 }
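/*
 * Sketch of the DMA2 (XOR core) layout built above, for hypothetical
 * counts descs_per_op = 2 and dst_cnt = 2: four CDBs are allocated,
 * the first two chained CDBs compute P and the next two compute Q;
 * the submit flags are applied only to the last CDB of the group
 * (the "--op ? 0 : flags" initialization).
 */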
2815 2815
2816 /** 2816 /**
2817 * ppc440spe_adma_prep_dma_pq - prepare CDB (group) for a GF-XOR operation 2817 * ppc440spe_adma_prep_dma_pq - prepare CDB (group) for a GF-XOR operation
2818 */ 2818 */
2819 static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_pq( 2819 static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_pq(
2820 struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src, 2820 struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
2821 unsigned int src_cnt, const unsigned char *scf, 2821 unsigned int src_cnt, const unsigned char *scf,
2822 size_t len, unsigned long flags) 2822 size_t len, unsigned long flags)
2823 { 2823 {
2824 struct ppc440spe_adma_chan *ppc440spe_chan; 2824 struct ppc440spe_adma_chan *ppc440spe_chan;
2825 struct ppc440spe_adma_desc_slot *sw_desc = NULL; 2825 struct ppc440spe_adma_desc_slot *sw_desc = NULL;
2826 int dst_cnt = 0; 2826 int dst_cnt = 0;
2827 2827
2828 ppc440spe_chan = to_ppc440spe_adma_chan(chan); 2828 ppc440spe_chan = to_ppc440spe_adma_chan(chan);
2829 2829
2830 ADMA_LL_DBG(prep_dma_pq_dbg(ppc440spe_chan->device->id, 2830 ADMA_LL_DBG(prep_dma_pq_dbg(ppc440spe_chan->device->id,
2831 dst, src, src_cnt)); 2831 dst, src, src_cnt));
2832 BUG_ON(!len); 2832 BUG_ON(!len);
2833 BUG_ON(len > PPC440SPE_ADMA_XOR_MAX_BYTE_COUNT); 2833 BUG_ON(len > PPC440SPE_ADMA_XOR_MAX_BYTE_COUNT);
2834 BUG_ON(!src_cnt); 2834 BUG_ON(!src_cnt);
2835 2835
2836 if (src_cnt == 1 && dst[1] == src[0]) { 2836 if (src_cnt == 1 && dst[1] == src[0]) {
2837 dma_addr_t dest[2]; 2837 dma_addr_t dest[2];
2838 2838
2839 /* dst[1] is real destination (Q) */ 2839 /* dst[1] is real destination (Q) */
2840 dest[0] = dst[1]; 2840 dest[0] = dst[1];
2841 /* this is the page to multicast source data to */ 2841 /* this is the page to multicast source data to */
2842 dest[1] = ppc440spe_chan->qdest; 2842 dest[1] = ppc440spe_chan->qdest;
2843 sw_desc = ppc440spe_dma01_prep_mult(ppc440spe_chan, 2843 sw_desc = ppc440spe_dma01_prep_mult(ppc440spe_chan,
2844 dest, 2, src, src_cnt, scf, len, flags); 2844 dest, 2, src, src_cnt, scf, len, flags);
2845 return sw_desc ? &sw_desc->async_tx : NULL; 2845 return sw_desc ? &sw_desc->async_tx : NULL;
2846 } 2846 }
2847 2847
2848 if (src_cnt == 2 && dst[1] == src[1]) { 2848 if (src_cnt == 2 && dst[1] == src[1]) {
2849 sw_desc = ppc440spe_dma01_prep_sum_product(ppc440spe_chan, 2849 sw_desc = ppc440spe_dma01_prep_sum_product(ppc440spe_chan,
2850 &dst[1], src, 2, scf, len, flags); 2850 &dst[1], src, 2, scf, len, flags);
2851 return sw_desc ? &sw_desc->async_tx : NULL; 2851 return sw_desc ? &sw_desc->async_tx : NULL;
2852 } 2852 }
2853 2853
2854 if (!(flags & DMA_PREP_PQ_DISABLE_P)) { 2854 if (!(flags & DMA_PREP_PQ_DISABLE_P)) {
2855 BUG_ON(!dst[0]); 2855 BUG_ON(!dst[0]);
2856 dst_cnt++; 2856 dst_cnt++;
2857 flags |= DMA_PREP_ZERO_P; 2857 flags |= DMA_PREP_ZERO_P;
2858 } 2858 }
2859 2859
2860 if (!(flags & DMA_PREP_PQ_DISABLE_Q)) { 2860 if (!(flags & DMA_PREP_PQ_DISABLE_Q)) {
2861 BUG_ON(!dst[1]); 2861 BUG_ON(!dst[1]);
2862 dst_cnt++; 2862 dst_cnt++;
2863 flags |= DMA_PREP_ZERO_Q; 2863 flags |= DMA_PREP_ZERO_Q;
2864 } 2864 }
2865 2865
2866 BUG_ON(!dst_cnt); 2866 BUG_ON(!dst_cnt);
2867 2867
2868 dev_dbg(ppc440spe_chan->device->common.dev, 2868 dev_dbg(ppc440spe_chan->device->common.dev,
2869 "ppc440spe adma%d: %s src_cnt: %d len: %u int_en: %d\n", 2869 "ppc440spe adma%d: %s src_cnt: %d len: %u int_en: %d\n",
2870 ppc440spe_chan->device->id, __func__, src_cnt, len, 2870 ppc440spe_chan->device->id, __func__, src_cnt, len,
2871 flags & DMA_PREP_INTERRUPT ? 1 : 0); 2871 flags & DMA_PREP_INTERRUPT ? 1 : 0);
2872 2872
2873 switch (ppc440spe_chan->device->id) { 2873 switch (ppc440spe_chan->device->id) {
2874 case PPC440SPE_DMA0_ID: 2874 case PPC440SPE_DMA0_ID:
2875 case PPC440SPE_DMA1_ID: 2875 case PPC440SPE_DMA1_ID:
2876 sw_desc = ppc440spe_dma01_prep_pq(ppc440spe_chan, 2876 sw_desc = ppc440spe_dma01_prep_pq(ppc440spe_chan,
2877 dst, dst_cnt, src, src_cnt, scf, 2877 dst, dst_cnt, src, src_cnt, scf,
2878 len, flags); 2878 len, flags);
2879 break; 2879 break;
2880 2880
2881 case PPC440SPE_XOR_ID: 2881 case PPC440SPE_XOR_ID:
2882 sw_desc = ppc440spe_dma2_prep_pq(ppc440spe_chan, 2882 sw_desc = ppc440spe_dma2_prep_pq(ppc440spe_chan,
2883 dst, dst_cnt, src, src_cnt, scf, 2883 dst, dst_cnt, src, src_cnt, scf,
2884 len, flags); 2884 len, flags);
2885 break; 2885 break;
2886 } 2886 }
2887 2887
2888 return sw_desc ? &sw_desc->async_tx : NULL; 2888 return sw_desc ? &sw_desc->async_tx : NULL;
2889 } 2889 }
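/*
 * Illustrative only: a minimal dmaengine client sketch (hypothetical
 * function name, error handling trimmed) showing how the prep hook
 * above is typically reached. It assumes "chan" belongs to a device
 * advertising DMA_PQ capability and that dst/src hold DMA-mapped
 * addresses.
 */
static dma_cookie_t ppc440spe_pq_client_sketch(struct dma_chan *chan,
					       dma_addr_t *dst,
					       dma_addr_t *src,
					       unsigned int src_cnt,
					       const unsigned char *scf,
					       size_t len)
{
	struct dma_async_tx_descriptor *tx;

	/* resolves to ppc440spe_adma_prep_dma_pq() for this driver */
	tx = chan->device->device_prep_dma_pq(chan, dst, src, src_cnt,
					      scf, len, DMA_PREP_INTERRUPT);
	if (!tx)
		return -EBUSY;

	/* queue it; the caller still issues dma_async_issue_pending(chan) */
	return dmaengine_submit(tx);
}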
2890 2890
2891 /** 2891 /**
2892 * ppc440spe_adma_prep_dma_pqzero_sum - prepare CDB group for 2892 * ppc440spe_adma_prep_dma_pqzero_sum - prepare CDB group for
2893 * a PQ_ZERO_SUM operation 2893 * a PQ_ZERO_SUM operation
2894 */ 2894 */
2895 static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_pqzero_sum( 2895 static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_pqzero_sum(
2896 struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src, 2896 struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
2897 unsigned int src_cnt, const unsigned char *scf, size_t len, 2897 unsigned int src_cnt, const unsigned char *scf, size_t len,
2898 enum sum_check_flags *pqres, unsigned long flags) 2898 enum sum_check_flags *pqres, unsigned long flags)
2899 { 2899 {
2900 struct ppc440spe_adma_chan *ppc440spe_chan; 2900 struct ppc440spe_adma_chan *ppc440spe_chan;
2901 struct ppc440spe_adma_desc_slot *sw_desc, *iter; 2901 struct ppc440spe_adma_desc_slot *sw_desc, *iter;
2902 dma_addr_t pdest, qdest; 2902 dma_addr_t pdest, qdest;
2903 int slot_cnt, slots_per_op, idst, dst_cnt; 2903 int slot_cnt, slots_per_op, idst, dst_cnt;
2904 2904
2905 ppc440spe_chan = to_ppc440spe_adma_chan(chan); 2905 ppc440spe_chan = to_ppc440spe_adma_chan(chan);
2906 2906
2907 if (flags & DMA_PREP_PQ_DISABLE_P) 2907 if (flags & DMA_PREP_PQ_DISABLE_P)
2908 pdest = 0; 2908 pdest = 0;
2909 else 2909 else
2910 pdest = pq[0]; 2910 pdest = pq[0];
2911 2911
2912 if (flags & DMA_PREP_PQ_DISABLE_Q) 2912 if (flags & DMA_PREP_PQ_DISABLE_Q)
2913 qdest = 0; 2913 qdest = 0;
2914 else 2914 else
2915 qdest = pq[1]; 2915 qdest = pq[1];
2916 2916
2917 ADMA_LL_DBG(prep_dma_pqzero_sum_dbg(ppc440spe_chan->device->id, 2917 ADMA_LL_DBG(prep_dma_pqzero_sum_dbg(ppc440spe_chan->device->id,
2918 src, src_cnt, scf)); 2918 src, src_cnt, scf));
2919 2919
2920 /* Always use WXOR for P/Q calculations (two destinations). 2920 /* Always use WXOR for P/Q calculations (two destinations).
2921 * Need 1 or 2 extra slots to verify results are zero. 2921 * Need 1 or 2 extra slots to verify results are zero.
2922 */ 2922 */
2923 idst = dst_cnt = (pdest && qdest) ? 2 : 1; 2923 idst = dst_cnt = (pdest && qdest) ? 2 : 1;
2924 2924
2925 /* One additional slot per destination to clone P/Q 2925 /* One additional slot per destination to clone P/Q
2926 * before calculation (we have to preserve destinations). 2926 * before calculation (we have to preserve destinations).
2927 */ 2927 */
2928 slot_cnt = src_cnt + dst_cnt * 2; 2928 slot_cnt = src_cnt + dst_cnt * 2;
2929 slots_per_op = 1; 2929 slots_per_op = 1;
2930 2930
2931 spin_lock_bh(&ppc440spe_chan->lock); 2931 spin_lock_bh(&ppc440spe_chan->lock);
2932 sw_desc = ppc440spe_adma_alloc_slots(ppc440spe_chan, slot_cnt, 2932 sw_desc = ppc440spe_adma_alloc_slots(ppc440spe_chan, slot_cnt,
2933 slots_per_op); 2933 slots_per_op);
2934 if (sw_desc) { 2934 if (sw_desc) {
2935 ppc440spe_desc_init_dma01pqzero_sum(sw_desc, dst_cnt, src_cnt); 2935 ppc440spe_desc_init_dma01pqzero_sum(sw_desc, dst_cnt, src_cnt);
2936 2936
2937 /* Setup byte count for each slot just allocated */ 2937 /* Setup byte count for each slot just allocated */
2938 sw_desc->async_tx.flags = flags; 2938 sw_desc->async_tx.flags = flags;
2939 list_for_each_entry(iter, &sw_desc->group_list, chain_node) { 2939 list_for_each_entry(iter, &sw_desc->group_list, chain_node) {
2940 ppc440spe_desc_set_byte_count(iter, ppc440spe_chan, 2940 ppc440spe_desc_set_byte_count(iter, ppc440spe_chan,
2941 len); 2941 len);
2942 iter->unmap_len = len; 2942 iter->unmap_len = len;
2943 } 2943 }
2944 2944
2945 if (pdest) { 2945 if (pdest) {
2946 struct dma_cdb *hw_desc; 2946 struct dma_cdb *hw_desc;
2947 struct ppc440spe_adma_chan *chan; 2947 struct ppc440spe_adma_chan *chan;
2948 2948
2949 iter = sw_desc->group_head; 2949 iter = sw_desc->group_head;
2950 chan = to_ppc440spe_adma_chan(iter->async_tx.chan); 2950 chan = to_ppc440spe_adma_chan(iter->async_tx.chan);
2951 memset(iter->hw_desc, 0, sizeof(struct dma_cdb)); 2951 memset(iter->hw_desc, 0, sizeof(struct dma_cdb));
2952 iter->hw_next = list_entry(iter->chain_node.next, 2952 iter->hw_next = list_entry(iter->chain_node.next,
2953 struct ppc440spe_adma_desc_slot, 2953 struct ppc440spe_adma_desc_slot,
2954 chain_node); 2954 chain_node);
2955 hw_desc = iter->hw_desc; 2955 hw_desc = iter->hw_desc;
2956 hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2; 2956 hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;
2957 iter->src_cnt = 0; 2957 iter->src_cnt = 0;
2958 iter->dst_cnt = 0; 2958 iter->dst_cnt = 0;
2959 ppc440spe_desc_set_dest_addr(iter, chan, 0, 2959 ppc440spe_desc_set_dest_addr(iter, chan, 0,
2960 ppc440spe_chan->pdest, 0); 2960 ppc440spe_chan->pdest, 0);
2961 ppc440spe_desc_set_src_addr(iter, chan, 0, 0, pdest); 2961 ppc440spe_desc_set_src_addr(iter, chan, 0, 0, pdest);
2962 ppc440spe_desc_set_byte_count(iter, ppc440spe_chan, 2962 ppc440spe_desc_set_byte_count(iter, ppc440spe_chan,
2963 len); 2963 len);
2964 iter->unmap_len = 0; 2964 iter->unmap_len = 0;
2965 /* override pdest to preserve original P */ 2965 /* override pdest to preserve original P */
2966 pdest = ppc440spe_chan->pdest; 2966 pdest = ppc440spe_chan->pdest;
2967 } 2967 }
2968 if (qdest) { 2968 if (qdest) {
2969 struct dma_cdb *hw_desc; 2969 struct dma_cdb *hw_desc;
2970 struct ppc440spe_adma_chan *chan; 2970 struct ppc440spe_adma_chan *chan;
2971 2971
2972 iter = list_first_entry(&sw_desc->group_list, 2972 iter = list_first_entry(&sw_desc->group_list,
2973 struct ppc440spe_adma_desc_slot, 2973 struct ppc440spe_adma_desc_slot,
2974 chain_node); 2974 chain_node);
2975 chan = to_ppc440spe_adma_chan(iter->async_tx.chan); 2975 chan = to_ppc440spe_adma_chan(iter->async_tx.chan);
2976 2976
2977 if (pdest) { 2977 if (pdest) {
2978 iter = list_entry(iter->chain_node.next, 2978 iter = list_entry(iter->chain_node.next,
2979 struct ppc440spe_adma_desc_slot, 2979 struct ppc440spe_adma_desc_slot,
2980 chain_node); 2980 chain_node);
2981 } 2981 }
2982 2982
2983 memset(iter->hw_desc, 0, sizeof(struct dma_cdb)); 2983 memset(iter->hw_desc, 0, sizeof(struct dma_cdb));
2984 iter->hw_next = list_entry(iter->chain_node.next, 2984 iter->hw_next = list_entry(iter->chain_node.next,
2985 struct ppc440spe_adma_desc_slot, 2985 struct ppc440spe_adma_desc_slot,
2986 chain_node); 2986 chain_node);
2987 hw_desc = iter->hw_desc; 2987 hw_desc = iter->hw_desc;
2988 hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2; 2988 hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;
2989 iter->src_cnt = 0; 2989 iter->src_cnt = 0;
2990 iter->dst_cnt = 0; 2990 iter->dst_cnt = 0;
2991 ppc440spe_desc_set_dest_addr(iter, chan, 0, 2991 ppc440spe_desc_set_dest_addr(iter, chan, 0,
2992 ppc440spe_chan->qdest, 0); 2992 ppc440spe_chan->qdest, 0);
2993 ppc440spe_desc_set_src_addr(iter, chan, 0, 0, qdest); 2993 ppc440spe_desc_set_src_addr(iter, chan, 0, 0, qdest);
2994 ppc440spe_desc_set_byte_count(iter, ppc440spe_chan, 2994 ppc440spe_desc_set_byte_count(iter, ppc440spe_chan,
2995 len); 2995 len);
2996 iter->unmap_len = 0; 2996 iter->unmap_len = 0;
2997 /* override qdest to preserve original Q */ 2997 /* override qdest to preserve original Q */
2998 qdest = ppc440spe_chan->qdest; 2998 qdest = ppc440spe_chan->qdest;
2999 } 2999 }
3000 3000
3001 /* Setup destinations for P/Q ops */ 3001 /* Setup destinations for P/Q ops */
3002 ppc440spe_adma_pqzero_sum_set_dest(sw_desc, pdest, qdest); 3002 ppc440spe_adma_pqzero_sum_set_dest(sw_desc, pdest, qdest);
3003 3003
3004 /* Setup zero QWORDs into DCHECK CDBs */ 3004 /* Setup zero QWORDs into DCHECK CDBs */
3005 idst = dst_cnt; 3005 idst = dst_cnt;
3006 list_for_each_entry_reverse(iter, &sw_desc->group_list, 3006 list_for_each_entry_reverse(iter, &sw_desc->group_list,
3007 chain_node) { 3007 chain_node) {
3008 /* 3008 /*
3009 * The last CDB corresponds to the Q-parity check, 3009 * The last CDB corresponds to the Q-parity check,
3010 * the one before it corresponds to 3010 * the one before it corresponds to
3011 * the P-parity check 3011 * the P-parity check
3012 */ 3012 */
3013 if (idst == DMA_DEST_MAX_NUM) { 3013 if (idst == DMA_DEST_MAX_NUM) {
3014 if (idst == dst_cnt) { 3014 if (idst == dst_cnt) {
3015 set_bit(PPC440SPE_DESC_QCHECK, 3015 set_bit(PPC440SPE_DESC_QCHECK,
3016 &iter->flags); 3016 &iter->flags);
3017 } else { 3017 } else {
3018 set_bit(PPC440SPE_DESC_PCHECK, 3018 set_bit(PPC440SPE_DESC_PCHECK,
3019 &iter->flags); 3019 &iter->flags);
3020 } 3020 }
3021 } else { 3021 } else {
3022 if (qdest) { 3022 if (qdest) {
3023 set_bit(PPC440SPE_DESC_QCHECK, 3023 set_bit(PPC440SPE_DESC_QCHECK,
3024 &iter->flags); 3024 &iter->flags);
3025 } else { 3025 } else {
3026 set_bit(PPC440SPE_DESC_PCHECK, 3026 set_bit(PPC440SPE_DESC_PCHECK,
3027 &iter->flags); 3027 &iter->flags);
3028 } 3028 }
3029 } 3029 }
3030 iter->xor_check_result = pqres; 3030 iter->xor_check_result = pqres;
3031 3031
3032 /* 3032 /*
3033 * Set it to zero; if the check fails, the result 3033 * Set it to zero; if the check fails, the result
3034 * will be updated 3034 * will be updated
3035 */ 3035 */
3036 *iter->xor_check_result = 0; 3036 *iter->xor_check_result = 0;
3037 ppc440spe_desc_set_dcheck(iter, ppc440spe_chan, 3037 ppc440spe_desc_set_dcheck(iter, ppc440spe_chan,
3038 ppc440spe_qword); 3038 ppc440spe_qword);
3039 3039
3040 if (!(--dst_cnt)) 3040 if (!(--dst_cnt))
3041 break; 3041 break;
3042 } 3042 }
3043 3043
3044 /* Setup sources and mults for P/Q ops */ 3044 /* Setup sources and mults for P/Q ops */
3045 list_for_each_entry_continue_reverse(iter, &sw_desc->group_list, 3045 list_for_each_entry_continue_reverse(iter, &sw_desc->group_list,
3046 chain_node) { 3046 chain_node) {
3047 struct ppc440spe_adma_chan *chan; 3047 struct ppc440spe_adma_chan *chan;
3048 u32 mult_dst; 3048 u32 mult_dst;
3049 3049
3050 chan = to_ppc440spe_adma_chan(iter->async_tx.chan); 3050 chan = to_ppc440spe_adma_chan(iter->async_tx.chan);
3051 ppc440spe_desc_set_src_addr(iter, chan, 0, 3051 ppc440spe_desc_set_src_addr(iter, chan, 0,
3052 DMA_CUED_XOR_HB, 3052 DMA_CUED_XOR_HB,
3053 src[src_cnt - 1]); 3053 src[src_cnt - 1]);
3054 if (qdest) { 3054 if (qdest) {
3055 mult_dst = (dst_cnt - 1) ? DMA_CDB_SG_DST2 : 3055 mult_dst = (dst_cnt - 1) ? DMA_CDB_SG_DST2 :
3056 DMA_CDB_SG_DST1; 3056 DMA_CDB_SG_DST1;
3057 ppc440spe_desc_set_src_mult(iter, chan, 3057 ppc440spe_desc_set_src_mult(iter, chan,
3058 DMA_CUED_MULT1_OFF, 3058 DMA_CUED_MULT1_OFF,
3059 mult_dst, 3059 mult_dst,
3060 scf[src_cnt - 1]); 3060 scf[src_cnt - 1]);
3061 } 3061 }
3062 if (!(--src_cnt)) 3062 if (!(--src_cnt))
3063 break; 3063 break;
3064 } 3064 }
3065 } 3065 }
3066 spin_unlock_bh(&ppc440spe_chan->lock); 3066 spin_unlock_bh(&ppc440spe_chan->lock);
3067 return sw_desc ? &sw_desc->async_tx : NULL; 3067 return sw_desc ? &sw_desc->async_tx : NULL;
3068 } 3068 }
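/*
 * Resulting CDB chain for the full P+Q zero-sum case (sketch):
 *
 *   [copy P -> chan->pdest] [copy Q -> chan->qdest]
 *     -> [WXOR src[0]] ... [WXOR src[src_cnt-1]]
 *     -> [DCHECK P] [DCHECK Q]
 *
 * i.e. src_cnt + 2 * dst_cnt slots in total: the leading copies
 * preserve the caller's P/Q, the WXOR CDBs accumulate the sources on
 * top of those copies, and the trailing DCHECK CDBs compare the
 * result against zero, updating *pqres on mismatch.
 */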
3069 3069
3070 /** 3070 /**
3071 * ppc440spe_adma_prep_dma_xor_zero_sum - prepare CDB group for 3071 * ppc440spe_adma_prep_dma_xor_zero_sum - prepare CDB group for
3072 * XOR ZERO_SUM operation 3072 * XOR ZERO_SUM operation
3073 */ 3073 */
3074 static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_xor_zero_sum( 3074 static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_xor_zero_sum(
3075 struct dma_chan *chan, dma_addr_t *src, unsigned int src_cnt, 3075 struct dma_chan *chan, dma_addr_t *src, unsigned int src_cnt,
3076 size_t len, enum sum_check_flags *result, unsigned long flags) 3076 size_t len, enum sum_check_flags *result, unsigned long flags)
3077 { 3077 {
3078 struct dma_async_tx_descriptor *tx; 3078 struct dma_async_tx_descriptor *tx;
3079 dma_addr_t pq[2]; 3079 dma_addr_t pq[2];
3080 3080
3081 /* validate P, disable Q */ 3081 /* validate P, disable Q */
3082 pq[0] = src[0]; 3082 pq[0] = src[0];
3083 pq[1] = 0; 3083 pq[1] = 0;
3084 flags |= DMA_PREP_PQ_DISABLE_Q; 3084 flags |= DMA_PREP_PQ_DISABLE_Q;
3085 3085
3086 tx = ppc440spe_adma_prep_dma_pqzero_sum(chan, pq, &src[1], 3086 tx = ppc440spe_adma_prep_dma_pqzero_sum(chan, pq, &src[1],
3087 src_cnt - 1, 0, len, 3087 src_cnt - 1, 0, len,
3088 result, flags); 3088 result, flags);
3089 return tx; 3089 return tx;
3090 } 3090 }
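/*
 * In other words (summary, not new behaviour): for sources S0..Sn-1
 * this verifies that S0 ^ S1 ^ ... ^ Sn-1 == 0 by treating S0 as the
 * P "destination" of a P-only PQ_ZERO_SUM over S1..Sn-1; *result is
 * pre-set to zero and only updated if the check fails.
 */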
3091 3091
3092 /** 3092 /**
3093 * ppc440spe_adma_set_dest - set destination address into descriptor 3093 * ppc440spe_adma_set_dest - set destination address into descriptor
3094 */ 3094 */
3095 static void ppc440spe_adma_set_dest(struct ppc440spe_adma_desc_slot *sw_desc, 3095 static void ppc440spe_adma_set_dest(struct ppc440spe_adma_desc_slot *sw_desc,
3096 dma_addr_t addr, int index) 3096 dma_addr_t addr, int index)
3097 { 3097 {
3098 struct ppc440spe_adma_chan *chan; 3098 struct ppc440spe_adma_chan *chan;
3099 3099
3100 BUG_ON(index >= sw_desc->dst_cnt); 3100 BUG_ON(index >= sw_desc->dst_cnt);
3101 3101
3102 chan = to_ppc440spe_adma_chan(sw_desc->async_tx.chan); 3102 chan = to_ppc440spe_adma_chan(sw_desc->async_tx.chan);
3103 3103
3104 switch (chan->device->id) { 3104 switch (chan->device->id) {
3105 case PPC440SPE_DMA0_ID: 3105 case PPC440SPE_DMA0_ID:
3106 case PPC440SPE_DMA1_ID: 3106 case PPC440SPE_DMA1_ID:
3107 /* to do: support transfer lengths > 3107 /* to do: support transfer lengths >
3108 * PPC440SPE_ADMA_DMA/XOR_MAX_BYTE_COUNT 3108 * PPC440SPE_ADMA_DMA/XOR_MAX_BYTE_COUNT
3109 */ 3109 */
3110 ppc440spe_desc_set_dest_addr(sw_desc->group_head, 3110 ppc440spe_desc_set_dest_addr(sw_desc->group_head,
3111 chan, 0, addr, index); 3111 chan, 0, addr, index);
3112 break; 3112 break;
3113 case PPC440SPE_XOR_ID: 3113 case PPC440SPE_XOR_ID:
3114 sw_desc = ppc440spe_get_group_entry(sw_desc, index); 3114 sw_desc = ppc440spe_get_group_entry(sw_desc, index);
3115 ppc440spe_desc_set_dest_addr(sw_desc, 3115 ppc440spe_desc_set_dest_addr(sw_desc,
3116 chan, 0, addr, index); 3116 chan, 0, addr, index);
3117 break; 3117 break;
3118 } 3118 }
3119 } 3119 }
3120 3120
3121 static void ppc440spe_adma_pq_zero_op(struct ppc440spe_adma_desc_slot *iter, 3121 static void ppc440spe_adma_pq_zero_op(struct ppc440spe_adma_desc_slot *iter,
3122 struct ppc440spe_adma_chan *chan, dma_addr_t addr) 3122 struct ppc440spe_adma_chan *chan, dma_addr_t addr)
3123 { 3123 {
3124 /* To clear a destination, update the descriptor 3124 /* To clear a destination, update the descriptor
3125 * (P or Q, depending on the index) as follows: 3125 * (P or Q, depending on the index) as follows:
3126 * addr is the destination (0 corresponds to SG2): 3126 * addr is the destination (0 corresponds to SG2):
3127 */ 3127 */
3128 ppc440spe_desc_set_dest_addr(iter, chan, DMA_CUED_XOR_BASE, addr, 0); 3128 ppc440spe_desc_set_dest_addr(iter, chan, DMA_CUED_XOR_BASE, addr, 0);
3129 3129
3130 /* ... and the addr is source: */ 3130 /* ... and the addr is source: */
3131 ppc440spe_desc_set_src_addr(iter, chan, 0, DMA_CUED_XOR_HB, addr); 3131 ppc440spe_desc_set_src_addr(iter, chan, 0, DMA_CUED_XOR_HB, addr);
3132 3132
3133 /* addr is always SG2, so the mult is always DST1 */ 3133 /* addr is always SG2, so the mult is always DST1 */
3134 ppc440spe_desc_set_src_mult(iter, chan, DMA_CUED_MULT1_OFF, 3134 ppc440spe_desc_set_src_mult(iter, chan, DMA_CUED_MULT1_OFF,
3135 DMA_CDB_SG_DST1, 1); 3135 DMA_CDB_SG_DST1, 1);
3136 } 3136 }
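/*
 * Net effect of the zero op above (sketch): the destination block is
 * programmed as both the WXOR target and its only source with
 * mult = 1, so it is GF-XORed with itself, i.e. D ^ D = 0, leaving
 * the block cleared before the real P/Q accumulation runs.
 */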
3137 3137
3138 /** 3138 /**
3139 * ppc440spe_adma_pq_set_dest - set destination address into descriptor 3139 * ppc440spe_adma_pq_set_dest - set destination address into descriptor
3140 * for the PQXOR operation 3140 * for the PQXOR operation
3141 */ 3141 */
3142 static void ppc440spe_adma_pq_set_dest(struct ppc440spe_adma_desc_slot *sw_desc, 3142 static void ppc440spe_adma_pq_set_dest(struct ppc440spe_adma_desc_slot *sw_desc,
3143 dma_addr_t *addrs, unsigned long flags) 3143 dma_addr_t *addrs, unsigned long flags)
3144 { 3144 {
3145 struct ppc440spe_adma_desc_slot *iter; 3145 struct ppc440spe_adma_desc_slot *iter;
3146 struct ppc440spe_adma_chan *chan; 3146 struct ppc440spe_adma_chan *chan;
3147 dma_addr_t paddr, qaddr; 3147 dma_addr_t paddr, qaddr;
3148 dma_addr_t addr = 0, ppath, qpath; 3148 dma_addr_t addr = 0, ppath, qpath;
3149 int index = 0, i; 3149 int index = 0, i;
3150 3150
3151 chan = to_ppc440spe_adma_chan(sw_desc->async_tx.chan); 3151 chan = to_ppc440spe_adma_chan(sw_desc->async_tx.chan);
3152 3152
3153 if (flags & DMA_PREP_PQ_DISABLE_P) 3153 if (flags & DMA_PREP_PQ_DISABLE_P)
3154 paddr = 0; 3154 paddr = 0;
3155 else 3155 else
3156 paddr = addrs[0]; 3156 paddr = addrs[0];
3157 3157
3158 if (flags & DMA_PREP_PQ_DISABLE_Q) 3158 if (flags & DMA_PREP_PQ_DISABLE_Q)
3159 qaddr = 0; 3159 qaddr = 0;
3160 else 3160 else
3161 qaddr = addrs[1]; 3161 qaddr = addrs[1];
3162 3162
3163 if (!paddr || !qaddr) 3163 if (!paddr || !qaddr)
3164 addr = paddr ? paddr : qaddr; 3164 addr = paddr ? paddr : qaddr;
3165 3165
3166 switch (chan->device->id) { 3166 switch (chan->device->id) {
3167 case PPC440SPE_DMA0_ID: 3167 case PPC440SPE_DMA0_ID:
3168 case PPC440SPE_DMA1_ID: 3168 case PPC440SPE_DMA1_ID:
3169 /* walk through the WXOR source list and set P/Q-destinations 3169 /* walk through the WXOR source list and set P/Q-destinations
3170 * for each slot: 3170 * for each slot:
3171 */ 3171 */
3172 if (!test_bit(PPC440SPE_DESC_RXOR, &sw_desc->flags)) { 3172 if (!test_bit(PPC440SPE_DESC_RXOR, &sw_desc->flags)) {
3173 /* This is WXOR-only chain; may have 1/2 zero descs */ 3173 /* This is WXOR-only chain; may have 1/2 zero descs */
3174 if (test_bit(PPC440SPE_ZERO_P, &sw_desc->flags)) 3174 if (test_bit(PPC440SPE_ZERO_P, &sw_desc->flags))
3175 index++; 3175 index++;
3176 if (test_bit(PPC440SPE_ZERO_Q, &sw_desc->flags)) 3176 if (test_bit(PPC440SPE_ZERO_Q, &sw_desc->flags))
3177 index++; 3177 index++;
3178 3178
3179 iter = ppc440spe_get_group_entry(sw_desc, index); 3179 iter = ppc440spe_get_group_entry(sw_desc, index);
3180 if (addr) { 3180 if (addr) {
3181 /* one destination */ 3181 /* one destination */
3182 list_for_each_entry_from(iter, 3182 list_for_each_entry_from(iter,
3183 &sw_desc->group_list, chain_node) 3183 &sw_desc->group_list, chain_node)
3184 ppc440spe_desc_set_dest_addr(iter, chan, 3184 ppc440spe_desc_set_dest_addr(iter, chan,
3185 DMA_CUED_XOR_BASE, addr, 0); 3185 DMA_CUED_XOR_BASE, addr, 0);
3186 } else { 3186 } else {
3187 /* two destinations */ 3187 /* two destinations */
3188 list_for_each_entry_from(iter, 3188 list_for_each_entry_from(iter,
3189 &sw_desc->group_list, chain_node) { 3189 &sw_desc->group_list, chain_node) {
3190 ppc440spe_desc_set_dest_addr(iter, chan, 3190 ppc440spe_desc_set_dest_addr(iter, chan,
3191 DMA_CUED_XOR_BASE, paddr, 0); 3191 DMA_CUED_XOR_BASE, paddr, 0);
3192 ppc440spe_desc_set_dest_addr(iter, chan, 3192 ppc440spe_desc_set_dest_addr(iter, chan,
3193 DMA_CUED_XOR_BASE, qaddr, 1); 3193 DMA_CUED_XOR_BASE, qaddr, 1);
3194 } 3194 }
3195 } 3195 }
3196 3196
3197 if (index) { 3197 if (index) {
3198 /* To clear destinations, update the descriptor 3198 /* To clear destinations, update the descriptor
3199 * (1st, 2nd, or both, depending on flags) 3199 * (1st, 2nd, or both, depending on flags)
3200 */ 3200 */
3201 index = 0; 3201 index = 0;
3202 if (test_bit(PPC440SPE_ZERO_P, 3202 if (test_bit(PPC440SPE_ZERO_P,
3203 &sw_desc->flags)) { 3203 &sw_desc->flags)) {
3204 iter = ppc440spe_get_group_entry( 3204 iter = ppc440spe_get_group_entry(
3205 sw_desc, index++); 3205 sw_desc, index++);
3206 ppc440spe_adma_pq_zero_op(iter, chan, 3206 ppc440spe_adma_pq_zero_op(iter, chan,
3207 paddr); 3207 paddr);
3208 } 3208 }
3209 3209
3210 if (test_bit(PPC440SPE_ZERO_Q, 3210 if (test_bit(PPC440SPE_ZERO_Q,
3211 &sw_desc->flags)) { 3211 &sw_desc->flags)) {
3212 iter = ppc440spe_get_group_entry( 3212 iter = ppc440spe_get_group_entry(
3213 sw_desc, index++); 3213 sw_desc, index++);
3214 ppc440spe_adma_pq_zero_op(iter, chan, 3214 ppc440spe_adma_pq_zero_op(iter, chan,
3215 qaddr); 3215 qaddr);
3216 } 3216 }
3217 3217
3218 return; 3218 return;
3219 } 3219 }
3220 } else { 3220 } else {
3221 /* This is RXOR-only or RXOR/WXOR mixed chain */ 3221 /* This is RXOR-only or RXOR/WXOR mixed chain */
3222 3222
3223 /* If we want to include destination into calculations, 3223 /* If we want to include destination into calculations,
3224 * then make dest addresses cued with mult=1 (XOR). 3224 * then make dest addresses cued with mult=1 (XOR).
3225 */ 3225 */
3226 ppath = test_bit(PPC440SPE_ZERO_P, &sw_desc->flags) ? 3226 ppath = test_bit(PPC440SPE_ZERO_P, &sw_desc->flags) ?
3227 DMA_CUED_XOR_HB : 3227 DMA_CUED_XOR_HB :
3228 DMA_CUED_XOR_BASE | 3228 DMA_CUED_XOR_BASE |
3229 (1 << DMA_CUED_MULT1_OFF); 3229 (1 << DMA_CUED_MULT1_OFF);
3230 qpath = test_bit(PPC440SPE_ZERO_Q, &sw_desc->flags) ? 3230 qpath = test_bit(PPC440SPE_ZERO_Q, &sw_desc->flags) ?
3231 DMA_CUED_XOR_HB : 3231 DMA_CUED_XOR_HB :
3232 DMA_CUED_XOR_BASE | 3232 DMA_CUED_XOR_BASE |
3233 (1 << DMA_CUED_MULT1_OFF); 3233 (1 << DMA_CUED_MULT1_OFF);
3234 3234
3235 /* Setup destination(s) in RXOR slot(s) */ 3235 /* Setup destination(s) in RXOR slot(s) */
3236 iter = ppc440spe_get_group_entry(sw_desc, index++); 3236 iter = ppc440spe_get_group_entry(sw_desc, index++);
3237 ppc440spe_desc_set_dest_addr(iter, chan, 3237 ppc440spe_desc_set_dest_addr(iter, chan,
3238 paddr ? ppath : qpath, 3238 paddr ? ppath : qpath,
3239 paddr ? paddr : qaddr, 0); 3239 paddr ? paddr : qaddr, 0);
3240 if (!addr) { 3240 if (!addr) {
3241 /* two destinations */ 3241 /* two destinations */
3242 iter = ppc440spe_get_group_entry(sw_desc, 3242 iter = ppc440spe_get_group_entry(sw_desc,
3243 index++); 3243 index++);
3244 ppc440spe_desc_set_dest_addr(iter, chan, 3244 ppc440spe_desc_set_dest_addr(iter, chan,
3245 qpath, qaddr, 0); 3245 qpath, qaddr, 0);
3246 } 3246 }
3247 3247
3248 if (test_bit(PPC440SPE_DESC_WXOR, &sw_desc->flags)) { 3248 if (test_bit(PPC440SPE_DESC_WXOR, &sw_desc->flags)) {
3249 /* Setup destination(s) in remaining WXOR 3249 /* Setup destination(s) in remaining WXOR
3250 * slots 3250 * slots
3251 */ 3251 */
3252 iter = ppc440spe_get_group_entry(sw_desc, 3252 iter = ppc440spe_get_group_entry(sw_desc,
3253 index); 3253 index);
3254 if (addr) { 3254 if (addr) {
3255 /* one destination */ 3255 /* one destination */
3256 list_for_each_entry_from(iter, 3256 list_for_each_entry_from(iter,
3257 &sw_desc->group_list, 3257 &sw_desc->group_list,
3258 chain_node) 3258 chain_node)
3259 ppc440spe_desc_set_dest_addr( 3259 ppc440spe_desc_set_dest_addr(
3260 iter, chan, 3260 iter, chan,
3261 DMA_CUED_XOR_BASE, 3261 DMA_CUED_XOR_BASE,
3262 addr, 0); 3262 addr, 0);
3263 3263
3264 } else { 3264 } else {
3265 /* two destinations */ 3265 /* two destinations */
3266 list_for_each_entry_from(iter, 3266 list_for_each_entry_from(iter,
3267 &sw_desc->group_list, 3267 &sw_desc->group_list,
3268 chain_node) { 3268 chain_node) {
3269 ppc440spe_desc_set_dest_addr( 3269 ppc440spe_desc_set_dest_addr(
3270 iter, chan, 3270 iter, chan,
3271 DMA_CUED_XOR_BASE, 3271 DMA_CUED_XOR_BASE,
3272 paddr, 0); 3272 paddr, 0);
3273 ppc440spe_desc_set_dest_addr( 3273 ppc440spe_desc_set_dest_addr(
3274 iter, chan, 3274 iter, chan,
3275 DMA_CUED_XOR_BASE, 3275 DMA_CUED_XOR_BASE,
3276 qaddr, 1); 3276 qaddr, 1);
3277 } 3277 }
3278 } 3278 }
3279 } 3279 }
3280 3280
3281 } 3281 }
3282 break; 3282 break;
3283 3283
3284 case PPC440SPE_XOR_ID: 3284 case PPC440SPE_XOR_ID:
3285 /* DMA2 descriptors have only 1 destination, so there are 3285 /* DMA2 descriptors have only 1 destination, so there are
3286 * two chains - one for each dest. 3286 * two chains - one for each dest.
3287 * If we want to include destination into calculations, 3287 * If we want to include destination into calculations,
3288 * then make dest addresses cued with mult=1 (XOR). 3288 * then make dest addresses cued with mult=1 (XOR).
3289 */ 3289 */
3290 ppath = test_bit(PPC440SPE_ZERO_P, &sw_desc->flags) ? 3290 ppath = test_bit(PPC440SPE_ZERO_P, &sw_desc->flags) ?
3291 DMA_CUED_XOR_HB : 3291 DMA_CUED_XOR_HB :
3292 DMA_CUED_XOR_BASE | 3292 DMA_CUED_XOR_BASE |
3293 (1 << DMA_CUED_MULT1_OFF); 3293 (1 << DMA_CUED_MULT1_OFF);
3294 3294
3295 qpath = test_bit(PPC440SPE_ZERO_Q, &sw_desc->flags) ? 3295 qpath = test_bit(PPC440SPE_ZERO_Q, &sw_desc->flags) ?
3296 DMA_CUED_XOR_HB : 3296 DMA_CUED_XOR_HB :
3297 DMA_CUED_XOR_BASE | 3297 DMA_CUED_XOR_BASE |
3298 (1 << DMA_CUED_MULT1_OFF); 3298 (1 << DMA_CUED_MULT1_OFF);
3299 3299
3300 iter = ppc440spe_get_group_entry(sw_desc, 0); 3300 iter = ppc440spe_get_group_entry(sw_desc, 0);
3301 for (i = 0; i < sw_desc->descs_per_op; i++) { 3301 for (i = 0; i < sw_desc->descs_per_op; i++) {
3302 ppc440spe_desc_set_dest_addr(iter, chan, 3302 ppc440spe_desc_set_dest_addr(iter, chan,
3303 paddr ? ppath : qpath, 3303 paddr ? ppath : qpath,
3304 paddr ? paddr : qaddr, 0); 3304 paddr ? paddr : qaddr, 0);
3305 iter = list_entry(iter->chain_node.next, 3305 iter = list_entry(iter->chain_node.next,
3306 struct ppc440spe_adma_desc_slot, 3306 struct ppc440spe_adma_desc_slot,
3307 chain_node); 3307 chain_node);
3308 } 3308 }
3309 3309
3310 if (!addr) { 3310 if (!addr) {
3311 /* Two destinations; setup Q here */ 3311 /* Two destinations; setup Q here */
3312 iter = ppc440spe_get_group_entry(sw_desc, 3312 iter = ppc440spe_get_group_entry(sw_desc,
3313 sw_desc->descs_per_op); 3313 sw_desc->descs_per_op);
3314 for (i = 0; i < sw_desc->descs_per_op; i++) { 3314 for (i = 0; i < sw_desc->descs_per_op; i++) {
3315 ppc440spe_desc_set_dest_addr(iter, 3315 ppc440spe_desc_set_dest_addr(iter,
3316 chan, qpath, qaddr, 0); 3316 chan, qpath, qaddr, 0);
3317 iter = list_entry(iter->chain_node.next, 3317 iter = list_entry(iter->chain_node.next,
3318 struct ppc440spe_adma_desc_slot, 3318 struct ppc440spe_adma_desc_slot,
3319 chain_node); 3319 chain_node);
3320 } 3320 }
3321 } 3321 }
3322 3322
3323 break; 3323 break;
3324 } 3324 }
3325 } 3325 }
3326 3326
3327 /** 3327 /**
3328 * ppc440spe_adma_pqzero_sum_set_dest - set destination address into descriptor 3328 * ppc440spe_adma_pqzero_sum_set_dest - set destination address into descriptor
3329 * for the PQ_ZERO_SUM operation 3329 * for the PQ_ZERO_SUM operation
3330 */ 3330 */
3331 static void ppc440spe_adma_pqzero_sum_set_dest( 3331 static void ppc440spe_adma_pqzero_sum_set_dest(
3332 struct ppc440spe_adma_desc_slot *sw_desc, 3332 struct ppc440spe_adma_desc_slot *sw_desc,
3333 dma_addr_t paddr, dma_addr_t qaddr) 3333 dma_addr_t paddr, dma_addr_t qaddr)
3334 { 3334 {
3335 struct ppc440spe_adma_desc_slot *iter, *end; 3335 struct ppc440spe_adma_desc_slot *iter, *end;
3336 struct ppc440spe_adma_chan *chan; 3336 struct ppc440spe_adma_chan *chan;
3337 dma_addr_t addr = 0; 3337 dma_addr_t addr = 0;
3338 int idx; 3338 int idx;
3339 3339
3340 chan = to_ppc440spe_adma_chan(sw_desc->async_tx.chan); 3340 chan = to_ppc440spe_adma_chan(sw_desc->async_tx.chan);
3341 3341
3342 /* walk through the WXOR source list and set P/Q-destinations 3342 /* walk through the WXOR source list and set P/Q-destinations
3343 * for each slot 3343 * for each slot
3344 */ 3344 */
3345 idx = (paddr && qaddr) ? 2 : 1; 3345 idx = (paddr && qaddr) ? 2 : 1;
3346 /* set end */ 3346 /* set end */
3347 list_for_each_entry_reverse(end, &sw_desc->group_list, 3347 list_for_each_entry_reverse(end, &sw_desc->group_list,
3348 chain_node) { 3348 chain_node) {
3349 if (!(--idx)) 3349 if (!(--idx))
3350 break; 3350 break;
3351 } 3351 }
3352 /* set start */ 3352 /* set start */
3353 idx = (paddr && qaddr) ? 2 : 1; 3353 idx = (paddr && qaddr) ? 2 : 1;
3354 iter = ppc440spe_get_group_entry(sw_desc, idx); 3354 iter = ppc440spe_get_group_entry(sw_desc, idx);
3355 3355
3356 if (paddr && qaddr) { 3356 if (paddr && qaddr) {
3357 /* two destinations */ 3357 /* two destinations */
3358 list_for_each_entry_from(iter, &sw_desc->group_list, 3358 list_for_each_entry_from(iter, &sw_desc->group_list,
3359 chain_node) { 3359 chain_node) {
3360 if (unlikely(iter == end)) 3360 if (unlikely(iter == end))
3361 break; 3361 break;
3362 ppc440spe_desc_set_dest_addr(iter, chan, 3362 ppc440spe_desc_set_dest_addr(iter, chan,
3363 DMA_CUED_XOR_BASE, paddr, 0); 3363 DMA_CUED_XOR_BASE, paddr, 0);
3364 ppc440spe_desc_set_dest_addr(iter, chan, 3364 ppc440spe_desc_set_dest_addr(iter, chan,
3365 DMA_CUED_XOR_BASE, qaddr, 1); 3365 DMA_CUED_XOR_BASE, qaddr, 1);
3366 } 3366 }
3367 } else { 3367 } else {
3368 /* one destination */ 3368 /* one destination */
3369 addr = paddr ? paddr : qaddr; 3369 addr = paddr ? paddr : qaddr;
3370 list_for_each_entry_from(iter, &sw_desc->group_list, 3370 list_for_each_entry_from(iter, &sw_desc->group_list,
3371 chain_node) { 3371 chain_node) {
3372 if (unlikely(iter == end)) 3372 if (unlikely(iter == end))
3373 break; 3373 break;
3374 ppc440spe_desc_set_dest_addr(iter, chan, 3374 ppc440spe_desc_set_dest_addr(iter, chan,
3375 DMA_CUED_XOR_BASE, addr, 0); 3375 DMA_CUED_XOR_BASE, addr, 0);
3376 } 3376 }
3377 } 3377 }
3378 3378
3379 /* The remaining descriptors are DATACHECK. They do not need a 3379 /* The remaining descriptors are DATACHECK. They do not need a
3380 * destination; their destination fields are actually used as 3380 * destination; their destination fields are actually used as
3381 * sources for the check operation. So, set addr as a source. 3381 * sources for the check operation. So, set addr as a source.
3382 */ 3382 */
3383 ppc440spe_desc_set_src_addr(end, chan, 0, 0, addr ? addr : paddr); 3383 ppc440spe_desc_set_src_addr(end, chan, 0, 0, addr ? addr : paddr);
3384 3384
3385 if (!addr) { 3385 if (!addr) {
3386 end = list_entry(end->chain_node.next, 3386 end = list_entry(end->chain_node.next,
3387 struct ppc440spe_adma_desc_slot, chain_node); 3387 struct ppc440spe_adma_desc_slot, chain_node);
3388 ppc440spe_desc_set_src_addr(end, chan, 0, 0, qaddr); 3388 ppc440spe_desc_set_src_addr(end, chan, 0, 0, qaddr);
3389 } 3389 }
3390 } 3390 }
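/*
 * Chain walk summary for the function above (sketch): the first one
 * or two slots (the P/Q clones) are skipped via the "idx" offset,
 * the middle WXOR slots receive paddr/qaddr as destinations, and the
 * trailing one or two DCHECK slots get the same addresses programmed
 * as sources instead, since they only read the result.
 */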
3391 3391
3392 /** 3392 /**
3393 * ppc440spe_desc_set_xor_src_cnt - set source count into descriptor 3393 * ppc440spe_desc_set_xor_src_cnt - set source count into descriptor
3394 */ 3394 */
3395 static inline void ppc440spe_desc_set_xor_src_cnt( 3395 static inline void ppc440spe_desc_set_xor_src_cnt(
3396 struct ppc440spe_adma_desc_slot *desc, 3396 struct ppc440spe_adma_desc_slot *desc,
3397 int src_cnt) 3397 int src_cnt)
3398 { 3398 {
3399 struct xor_cb *hw_desc = desc->hw_desc; 3399 struct xor_cb *hw_desc = desc->hw_desc;
3400 3400
3401 hw_desc->cbc &= ~XOR_CDCR_OAC_MSK; 3401 hw_desc->cbc &= ~XOR_CDCR_OAC_MSK;
3402 hw_desc->cbc |= src_cnt; 3402 hw_desc->cbc |= src_cnt;
3403 } 3403 }
3404 3404
3405 /** 3405 /**
3406 * ppc440spe_adma_pq_set_src - set source address into descriptor 3406 * ppc440spe_adma_pq_set_src - set source address into descriptor
3407 */ 3407 */
3408 static void ppc440spe_adma_pq_set_src(struct ppc440spe_adma_desc_slot *sw_desc, 3408 static void ppc440spe_adma_pq_set_src(struct ppc440spe_adma_desc_slot *sw_desc,
3409 dma_addr_t addr, int index) 3409 dma_addr_t addr, int index)
3410 { 3410 {
3411 struct ppc440spe_adma_chan *chan; 3411 struct ppc440spe_adma_chan *chan;
3412 dma_addr_t haddr = 0; 3412 dma_addr_t haddr = 0;
3413 struct ppc440spe_adma_desc_slot *iter = NULL; 3413 struct ppc440spe_adma_desc_slot *iter = NULL;
3414 3414
3415 chan = to_ppc440spe_adma_chan(sw_desc->async_tx.chan); 3415 chan = to_ppc440spe_adma_chan(sw_desc->async_tx.chan);
3416 3416
3417 switch (chan->device->id) { 3417 switch (chan->device->id) {
3418 case PPC440SPE_DMA0_ID: 3418 case PPC440SPE_DMA0_ID:
3419 case PPC440SPE_DMA1_ID: 3419 case PPC440SPE_DMA1_ID:
3420 /* DMA0,1 may do: WXOR, RXOR, RXOR+WXORs chain 3420 /* DMA0,1 may do: WXOR, RXOR, RXOR+WXORs chain
3421 */ 3421 */
3422 if (test_bit(PPC440SPE_DESC_RXOR, &sw_desc->flags)) { 3422 if (test_bit(PPC440SPE_DESC_RXOR, &sw_desc->flags)) {
3423 /* RXOR-only or RXOR/WXOR operation */ 3423 /* RXOR-only or RXOR/WXOR operation */
3424 int iskip = test_bit(PPC440SPE_DESC_RXOR12, 3424 int iskip = test_bit(PPC440SPE_DESC_RXOR12,
3425 &sw_desc->flags) ? 2 : 3; 3425 &sw_desc->flags) ? 2 : 3;
3426 3426
3427 if (index == 0) { 3427 if (index == 0) {
3428 /* 1st slot (RXOR) */ 3428 /* 1st slot (RXOR) */
3429 /* setup sources region (R1-2-3, R1-2-4, 3429 /* setup sources region (R1-2-3, R1-2-4,
3430 * or R1-2-5) 3430 * or R1-2-5)
3431 */ 3431 */
3432 if (test_bit(PPC440SPE_DESC_RXOR12, 3432 if (test_bit(PPC440SPE_DESC_RXOR12,
3433 &sw_desc->flags)) 3433 &sw_desc->flags))
3434 haddr = DMA_RXOR12 << 3434 haddr = DMA_RXOR12 <<
3435 DMA_CUED_REGION_OFF; 3435 DMA_CUED_REGION_OFF;
3436 else if (test_bit(PPC440SPE_DESC_RXOR123, 3436 else if (test_bit(PPC440SPE_DESC_RXOR123,
3437 &sw_desc->flags)) 3437 &sw_desc->flags))
3438 haddr = DMA_RXOR123 << 3438 haddr = DMA_RXOR123 <<
3439 DMA_CUED_REGION_OFF; 3439 DMA_CUED_REGION_OFF;
3440 else if (test_bit(PPC440SPE_DESC_RXOR124, 3440 else if (test_bit(PPC440SPE_DESC_RXOR124,
3441 &sw_desc->flags)) 3441 &sw_desc->flags))
3442 haddr = DMA_RXOR124 << 3442 haddr = DMA_RXOR124 <<
3443 DMA_CUED_REGION_OFF; 3443 DMA_CUED_REGION_OFF;
3444 else if (test_bit(PPC440SPE_DESC_RXOR125, 3444 else if (test_bit(PPC440SPE_DESC_RXOR125,
3445 &sw_desc->flags)) 3445 &sw_desc->flags))
3446 haddr = DMA_RXOR125 << 3446 haddr = DMA_RXOR125 <<
3447 DMA_CUED_REGION_OFF; 3447 DMA_CUED_REGION_OFF;
3448 else 3448 else
3449 BUG(); 3449 BUG();
3450 haddr |= DMA_CUED_XOR_BASE; 3450 haddr |= DMA_CUED_XOR_BASE;
3451 iter = ppc440spe_get_group_entry(sw_desc, 0); 3451 iter = ppc440spe_get_group_entry(sw_desc, 0);
3452 } else if (index < iskip) { 3452 } else if (index < iskip) {
3453 /* 1st slot (RXOR): 3453 /* 1st slot (RXOR):
3454 * the source address actually has to be set only once, 3454 * the source address actually has to be set only once,
3455 * not for each of the first <iskip> sources 3455 * not for each of the first <iskip> sources
3456 */ 3456 */
3457 iter = NULL; 3457 iter = NULL;
3458 } else { 3458 } else {
3459 /* 2nd/3rd and subsequent slots (WXOR); 3459 /* 2nd/3rd and subsequent slots (WXOR);
3460 * skip the first slot with RXOR 3460 * skip the first slot with RXOR
3461 */ 3461 */
3462 haddr = DMA_CUED_XOR_HB; 3462 haddr = DMA_CUED_XOR_HB;
3463 iter = ppc440spe_get_group_entry(sw_desc, 3463 iter = ppc440spe_get_group_entry(sw_desc,
3464 index - iskip + sw_desc->dst_cnt); 3464 index - iskip + sw_desc->dst_cnt);
3465 } 3465 }
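			/*
			 * Example of the index mapping above (hypothetical
			 * numbers): with RXOR123 (iskip = 3) and dst_cnt = 2,
			 * sources 0..2 are consumed by the RXOR slot, while
			 * source 3 becomes a WXOR source in group entry
			 * 3 - 3 + 2 = 2, source 4 in entry 3, and so on.
			 */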
3466 } else { 3466 } else {
3467 int znum = 0; 3467 int znum = 0;
3468 3468
3469 /* WXOR-only operation; skip first slots with 3469 /* WXOR-only operation; skip first slots with
3470 * zeroing destinations 3470 * zeroing destinations
3471 */ 3471 */
3472 if (test_bit(PPC440SPE_ZERO_P, &sw_desc->flags)) 3472 if (test_bit(PPC440SPE_ZERO_P, &sw_desc->flags))
3473 znum++; 3473 znum++;
3474 if (test_bit(PPC440SPE_ZERO_Q, &sw_desc->flags)) 3474 if (test_bit(PPC440SPE_ZERO_Q, &sw_desc->flags))
3475 znum++; 3475 znum++;
3476 3476
3477 haddr = DMA_CUED_XOR_HB; 3477 haddr = DMA_CUED_XOR_HB;
3478 iter = ppc440spe_get_group_entry(sw_desc, 3478 iter = ppc440spe_get_group_entry(sw_desc,
3479 index + znum); 3479 index + znum);
3480 } 3480 }
3481 3481
3482 if (likely(iter)) { 3482 if (likely(iter)) {
3483 ppc440spe_desc_set_src_addr(iter, chan, 0, haddr, addr); 3483 ppc440spe_desc_set_src_addr(iter, chan, 0, haddr, addr);
3484 3484
3485 if (!index && 3485 if (!index &&
3486 test_bit(PPC440SPE_DESC_RXOR, &sw_desc->flags) && 3486 test_bit(PPC440SPE_DESC_RXOR, &sw_desc->flags) &&
3487 sw_desc->dst_cnt == 2) { 3487 sw_desc->dst_cnt == 2) {
3488 /* if we have two destinations for RXOR, then 3488 /* if we have two destinations for RXOR, then
3489 * setup the source in the second descriptor too 3489 * setup the source in the second descriptor too
3490 */ 3490 */
3491 iter = ppc440spe_get_group_entry(sw_desc, 1); 3491 iter = ppc440spe_get_group_entry(sw_desc, 1);
3492 ppc440spe_desc_set_src_addr(iter, chan, 0, 3492 ppc440spe_desc_set_src_addr(iter, chan, 0,
3493 haddr, addr); 3493 haddr, addr);
3494 } 3494 }
3495 } 3495 }
3496 break; 3496 break;
3497 3497
3498 case PPC440SPE_XOR_ID: 3498 case PPC440SPE_XOR_ID:
3499 /* DMA2 may do Biskup */ 3499 /* DMA2 may do Biskup */
3500 iter = sw_desc->group_head; 3500 iter = sw_desc->group_head;
3501 if (iter->dst_cnt == 2) { 3501 if (iter->dst_cnt == 2) {
3502 /* both P & Q calculations required; set P src here */ 3502 /* both P & Q calculations required; set P src here */
3503 ppc440spe_adma_dma2rxor_set_src(iter, index, addr); 3503 ppc440spe_adma_dma2rxor_set_src(iter, index, addr);
3504 3504
3505 /* this is for Q */ 3505 /* this is for Q */
3506 iter = ppc440spe_get_group_entry(sw_desc, 3506 iter = ppc440spe_get_group_entry(sw_desc,
3507 sw_desc->descs_per_op); 3507 sw_desc->descs_per_op);
3508 } 3508 }
3509 ppc440spe_adma_dma2rxor_set_src(iter, index, addr); 3509 ppc440spe_adma_dma2rxor_set_src(iter, index, addr);
3510 break; 3510 break;
3511 } 3511 }
3512 } 3512 }
3513 3513
3514 /** 3514 /**
3515 * ppc440spe_adma_memcpy_xor_set_src - set source address into descriptor 3515 * ppc440spe_adma_memcpy_xor_set_src - set source address into descriptor
3516 */ 3516 */
3517 static void ppc440spe_adma_memcpy_xor_set_src( 3517 static void ppc440spe_adma_memcpy_xor_set_src(
3518 struct ppc440spe_adma_desc_slot *sw_desc, 3518 struct ppc440spe_adma_desc_slot *sw_desc,
3519 dma_addr_t addr, int index) 3519 dma_addr_t addr, int index)
3520 { 3520 {
3521 struct ppc440spe_adma_chan *chan; 3521 struct ppc440spe_adma_chan *chan;
3522 3522
3523 chan = to_ppc440spe_adma_chan(sw_desc->async_tx.chan); 3523 chan = to_ppc440spe_adma_chan(sw_desc->async_tx.chan);
3524 sw_desc = sw_desc->group_head; 3524 sw_desc = sw_desc->group_head;
3525 3525
3526 if (likely(sw_desc)) 3526 if (likely(sw_desc))
3527 ppc440spe_desc_set_src_addr(sw_desc, chan, index, 0, addr); 3527 ppc440spe_desc_set_src_addr(sw_desc, chan, index, 0, addr);
3528 } 3528 }
3529 3529
3530 /** 3530 /**
3531 * ppc440spe_adma_dma2rxor_inc_addr - account for one more source address in the RXOR cursor 3531 * ppc440spe_adma_dma2rxor_inc_addr - account for one more source address in the RXOR cursor
3532 */ 3532 */
3533 static void ppc440spe_adma_dma2rxor_inc_addr( 3533 static void ppc440spe_adma_dma2rxor_inc_addr(
3534 struct ppc440spe_adma_desc_slot *desc, 3534 struct ppc440spe_adma_desc_slot *desc,
3535 struct ppc440spe_rxor *cursor, int index, int src_cnt) 3535 struct ppc440spe_rxor *cursor, int index, int src_cnt)
3536 { 3536 {
3537 cursor->addr_count++; 3537 cursor->addr_count++;
3538 if (index == src_cnt - 1) { 3538 if (index == src_cnt - 1) {
3539 ppc440spe_desc_set_xor_src_cnt(desc, cursor->addr_count); 3539 ppc440spe_desc_set_xor_src_cnt(desc, cursor->addr_count);
3540 } else if (cursor->addr_count == XOR_MAX_OPS) { 3540 } else if (cursor->addr_count == XOR_MAX_OPS) {
3541 ppc440spe_desc_set_xor_src_cnt(desc, cursor->addr_count); 3541 ppc440spe_desc_set_xor_src_cnt(desc, cursor->addr_count);
3542 cursor->addr_count = 0; 3542 cursor->addr_count = 0;
3543 cursor->desc_count++; 3543 cursor->desc_count++;
3544 } 3544 }
3545 } 3545 }
3546 3546
3547 /** 3547 /**
3548 * ppc440spe_adma_dma2rxor_prep_src - setup RXOR types in DMA2 CDB 3548 * ppc440spe_adma_dma2rxor_prep_src - setup RXOR types in DMA2 CDB
3549 */ 3549 */
3550 static int ppc440spe_adma_dma2rxor_prep_src( 3550 static int ppc440spe_adma_dma2rxor_prep_src(
3551 struct ppc440spe_adma_desc_slot *hdesc, 3551 struct ppc440spe_adma_desc_slot *hdesc,
3552 struct ppc440spe_rxor *cursor, int index, 3552 struct ppc440spe_rxor *cursor, int index,
3553 int src_cnt, u32 addr) 3553 int src_cnt, u32 addr)
3554 { 3554 {
3555 int rval = 0; 3555 int rval = 0;
3556 u32 sign; 3556 u32 sign;
3557 struct ppc440spe_adma_desc_slot *desc = hdesc; 3557 struct ppc440spe_adma_desc_slot *desc = hdesc;
3558 int i; 3558 int i;
3559 3559
3560 for (i = 0; i < cursor->desc_count; i++) { 3560 for (i = 0; i < cursor->desc_count; i++) {
3561 desc = list_entry(hdesc->chain_node.next, 3561 desc = list_entry(hdesc->chain_node.next,
3562 struct ppc440spe_adma_desc_slot, 3562 struct ppc440spe_adma_desc_slot,
3563 chain_node); 3563 chain_node);
3564 } 3564 }
3565 3565
3566 switch (cursor->state) { 3566 switch (cursor->state) {
3567 case 0: 3567 case 0:
3568 if (addr == cursor->addrl + cursor->len) { 3568 if (addr == cursor->addrl + cursor->len) {
3569 /* direct RXOR */ 3569 /* direct RXOR */
3570 cursor->state = 1; 3570 cursor->state = 1;
3571 cursor->xor_count++; 3571 cursor->xor_count++;
3572 if (index == src_cnt-1) { 3572 if (index == src_cnt-1) {
3573 ppc440spe_rxor_set_region(desc, 3573 ppc440spe_rxor_set_region(desc,
3574 cursor->addr_count, 3574 cursor->addr_count,
3575 DMA_RXOR12 << DMA_CUED_REGION_OFF); 3575 DMA_RXOR12 << DMA_CUED_REGION_OFF);
3576 ppc440spe_adma_dma2rxor_inc_addr( 3576 ppc440spe_adma_dma2rxor_inc_addr(
3577 desc, cursor, index, src_cnt); 3577 desc, cursor, index, src_cnt);
3578 } 3578 }
3579 } else if (cursor->addrl == addr + cursor->len) { 3579 } else if (cursor->addrl == addr + cursor->len) {
3580 /* reverse RXOR */ 3580 /* reverse RXOR */
3581 cursor->state = 1; 3581 cursor->state = 1;
3582 cursor->xor_count++; 3582 cursor->xor_count++;
3583 set_bit(cursor->addr_count, &desc->reverse_flags[0]); 3583 set_bit(cursor->addr_count, &desc->reverse_flags[0]);
3584 if (index == src_cnt-1) { 3584 if (index == src_cnt-1) {
3585 ppc440spe_rxor_set_region(desc, 3585 ppc440spe_rxor_set_region(desc,
3586 cursor->addr_count, 3586 cursor->addr_count,
3587 DMA_RXOR12 << DMA_CUED_REGION_OFF); 3587 DMA_RXOR12 << DMA_CUED_REGION_OFF);
3588 ppc440spe_adma_dma2rxor_inc_addr( 3588 ppc440spe_adma_dma2rxor_inc_addr(
3589 desc, cursor, index, src_cnt); 3589 desc, cursor, index, src_cnt);
3590 } 3590 }
3591 } else { 3591 } else {
3592 printk(KERN_ERR "Cannot build " 3592 printk(KERN_ERR "Cannot build "
3593 "DMA2 RXOR command block.\n"); 3593 "DMA2 RXOR command block.\n");
3594 BUG(); 3594 BUG();
3595 } 3595 }
3596 break; 3596 break;
3597 case 1: 3597 case 1:
3598 sign = test_bit(cursor->addr_count, 3598 sign = test_bit(cursor->addr_count,
3599 desc->reverse_flags) 3599 desc->reverse_flags)
3600 ? -1 : 1; 3600 ? -1 : 1;
3601 if (index == src_cnt-2 || (sign == -1 3601 if (index == src_cnt-2 || (sign == -1
3602 && addr != cursor->addrl - 2*cursor->len)) { 3602 && addr != cursor->addrl - 2*cursor->len)) {
3603 cursor->state = 0; 3603 cursor->state = 0;
3604 cursor->xor_count = 1; 3604 cursor->xor_count = 1;
3605 cursor->addrl = addr; 3605 cursor->addrl = addr;
3606 ppc440spe_rxor_set_region(desc, 3606 ppc440spe_rxor_set_region(desc,
3607 cursor->addr_count, 3607 cursor->addr_count,
3608 DMA_RXOR12 << DMA_CUED_REGION_OFF); 3608 DMA_RXOR12 << DMA_CUED_REGION_OFF);
3609 ppc440spe_adma_dma2rxor_inc_addr( 3609 ppc440spe_adma_dma2rxor_inc_addr(
3610 desc, cursor, index, src_cnt); 3610 desc, cursor, index, src_cnt);
3611 } else if (addr == cursor->addrl + 2*sign*cursor->len) { 3611 } else if (addr == cursor->addrl + 2*sign*cursor->len) {
3612 cursor->state = 2; 3612 cursor->state = 2;
3613 cursor->xor_count = 0; 3613 cursor->xor_count = 0;
3614 ppc440spe_rxor_set_region(desc, 3614 ppc440spe_rxor_set_region(desc,
3615 cursor->addr_count, 3615 cursor->addr_count,
3616 DMA_RXOR123 << DMA_CUED_REGION_OFF); 3616 DMA_RXOR123 << DMA_CUED_REGION_OFF);
3617 if (index == src_cnt-1) { 3617 if (index == src_cnt-1) {
3618 ppc440spe_adma_dma2rxor_inc_addr( 3618 ppc440spe_adma_dma2rxor_inc_addr(
3619 desc, cursor, index, src_cnt); 3619 desc, cursor, index, src_cnt);
3620 } 3620 }
3621 } else if (addr == cursor->addrl + 3*cursor->len) { 3621 } else if (addr == cursor->addrl + 3*cursor->len) {
3622 cursor->state = 2; 3622 cursor->state = 2;
3623 cursor->xor_count = 0; 3623 cursor->xor_count = 0;
3624 ppc440spe_rxor_set_region(desc, 3624 ppc440spe_rxor_set_region(desc,
3625 cursor->addr_count, 3625 cursor->addr_count,
3626 DMA_RXOR124 << DMA_CUED_REGION_OFF); 3626 DMA_RXOR124 << DMA_CUED_REGION_OFF);
3627 if (index == src_cnt-1) { 3627 if (index == src_cnt-1) {
3628 ppc440spe_adma_dma2rxor_inc_addr( 3628 ppc440spe_adma_dma2rxor_inc_addr(
3629 desc, cursor, index, src_cnt); 3629 desc, cursor, index, src_cnt);
3630 } 3630 }
3631 } else if (addr == cursor->addrl + 4*cursor->len) { 3631 } else if (addr == cursor->addrl + 4*cursor->len) {
3632 cursor->state = 2; 3632 cursor->state = 2;
3633 cursor->xor_count = 0; 3633 cursor->xor_count = 0;
3634 ppc440spe_rxor_set_region(desc, 3634 ppc440spe_rxor_set_region(desc,
3635 cursor->addr_count, 3635 cursor->addr_count,
3636 DMA_RXOR125 << DMA_CUED_REGION_OFF); 3636 DMA_RXOR125 << DMA_CUED_REGION_OFF);
3637 if (index == src_cnt-1) { 3637 if (index == src_cnt-1) {
3638 ppc440spe_adma_dma2rxor_inc_addr( 3638 ppc440spe_adma_dma2rxor_inc_addr(
3639 desc, cursor, index, src_cnt); 3639 desc, cursor, index, src_cnt);
3640 } 3640 }
3641 } else { 3641 } else {
3642 cursor->state = 0; 3642 cursor->state = 0;
3643 cursor->xor_count = 1; 3643 cursor->xor_count = 1;
3644 cursor->addrl = addr; 3644 cursor->addrl = addr;
3645 ppc440spe_rxor_set_region(desc, 3645 ppc440spe_rxor_set_region(desc,
3646 cursor->addr_count, 3646 cursor->addr_count,
3647 DMA_RXOR12 << DMA_CUED_REGION_OFF); 3647 DMA_RXOR12 << DMA_CUED_REGION_OFF);
3648 ppc440spe_adma_dma2rxor_inc_addr( 3648 ppc440spe_adma_dma2rxor_inc_addr(
3649 desc, cursor, index, src_cnt); 3649 desc, cursor, index, src_cnt);
3650 } 3650 }
3651 break; 3651 break;
3652 case 2: 3652 case 2:
3653 cursor->state = 0; 3653 cursor->state = 0;
3654 cursor->addrl = addr; 3654 cursor->addrl = addr;
3655 cursor->xor_count++; 3655 cursor->xor_count++;
3656 if (index) { 3656 if (index) {
3657 ppc440spe_adma_dma2rxor_inc_addr( 3657 ppc440spe_adma_dma2rxor_inc_addr(
3658 desc, cursor, index, src_cnt); 3658 desc, cursor, index, src_cnt);
3659 } 3659 }
3660 break; 3660 break;
3661 } 3661 }
3662 3662
3663 return rval; 3663 return rval;
3664 } 3664 }
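The switch above is a three-state scanner over the PQ source addresses: state 0 inspects the second address of a candidate region and decides between a direct (ascending) and a reverse (descending) RXOR pair; state 1 inspects the third address and, depending on its distance from the region base, either extends the pair to an RXOR123/RXOR124/RXOR125 triplet or closes it as an RXOR12 pair and starts over; state 2 simply opens the next region. The driver also records a per-operand reverse flag (the set_bit on reverse_flags) so descending runs reuse the same regions. A minimal sketch of the state-1 classification for the direct case, detached from the descriptor plumbing; classify_third, bus_addr_t and the local enum are illustrative names, not driver symbols:

    #include <stdint.h>

    typedef uint64_t bus_addr_t;            /* stand-in for dma_addr_t */

    enum region { RXOR12, RXOR123, RXOR124, RXOR125 };

    /* Given the base address of a region, the common block length and the
     * third source address, report which RXOR region could cover it; any
     * other spacing falls back to the two-operand RXOR12 region. */
    static enum region classify_third(bus_addr_t base, uint64_t len,
                                      bus_addr_t third)
    {
            if (third == base + 2 * len)
                    return RXOR123;         /* three contiguous blocks      */
            if (third == base + 3 * len)
                    return RXOR124;         /* third block one stride away  */
            if (third == base + 4 * len)
                    return RXOR125;         /* third block two strides away */
            return RXOR12;                  /* only the first pair matches  */
    }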
3665 3665
3666 /** 3666 /**
3667 * ppc440spe_adma_dma2rxor_set_src - set RXOR source address; it's assumed that 3667 * ppc440spe_adma_dma2rxor_set_src - set RXOR source address; it's assumed that
3668 * ppc440spe_adma_dma2rxor_prep_src() has already been called prior to this 3668 * ppc440spe_adma_dma2rxor_prep_src() has already been called prior to this
3669 */ 3669 */
3670 static void ppc440spe_adma_dma2rxor_set_src( 3670 static void ppc440spe_adma_dma2rxor_set_src(
3671 struct ppc440spe_adma_desc_slot *desc, 3671 struct ppc440spe_adma_desc_slot *desc,
3672 int index, dma_addr_t addr) 3672 int index, dma_addr_t addr)
3673 { 3673 {
3674 struct xor_cb *xcb = desc->hw_desc; 3674 struct xor_cb *xcb = desc->hw_desc;
3675 int k = 0, op = 0, lop = 0; 3675 int k = 0, op = 0, lop = 0;
3676 3676
3677 /* get the RXOR operand which corresponds to index addr */ 3677 /* get the RXOR operand which corresponds to index addr */
3678 while (op <= index) { 3678 while (op <= index) {
3679 lop = op; 3679 lop = op;
3680 if (k == XOR_MAX_OPS) { 3680 if (k == XOR_MAX_OPS) {
3681 k = 0; 3681 k = 0;
3682 desc = list_entry(desc->chain_node.next, 3682 desc = list_entry(desc->chain_node.next,
3683 struct ppc440spe_adma_desc_slot, chain_node); 3683 struct ppc440spe_adma_desc_slot, chain_node);
3684 xcb = desc->hw_desc; 3684 xcb = desc->hw_desc;
3685 3685
3686 } 3686 }
3687 if ((xcb->ops[k++].h & (DMA_RXOR12 << DMA_CUED_REGION_OFF)) == 3687 if ((xcb->ops[k++].h & (DMA_RXOR12 << DMA_CUED_REGION_OFF)) ==
3688 (DMA_RXOR12 << DMA_CUED_REGION_OFF)) 3688 (DMA_RXOR12 << DMA_CUED_REGION_OFF))
3689 op += 2; 3689 op += 2;
3690 else 3690 else
3691 op += 3; 3691 op += 3;
3692 } 3692 }
3693 3693
3694 BUG_ON(k < 1); 3694 BUG_ON(k < 1);
3695 3695
3696 if (test_bit(k-1, desc->reverse_flags)) { 3696 if (test_bit(k-1, desc->reverse_flags)) {
3697 /* reverse operand order; put last op in RXOR group */ 3697 /* reverse operand order; put last op in RXOR group */
3698 if (index == op - 1) 3698 if (index == op - 1)
3699 ppc440spe_rxor_set_src(desc, k - 1, addr); 3699 ppc440spe_rxor_set_src(desc, k - 1, addr);
3700 } else { 3700 } else {
3701 /* direct operand order; put first op in RXOR group */ 3701 /* direct operand order; put first op in RXOR group */
3702 if (index == lop) 3702 if (index == lop)
3703 ppc440spe_rxor_set_src(desc, k - 1, addr); 3703 ppc440spe_rxor_set_src(desc, k - 1, addr);
3704 } 3704 }
3705 } 3705 }
3706 3706
3707 /** 3707 /**
3708 * ppc440spe_adma_dma2rxor_set_mult - set RXOR multipliers; it's assumed that 3708 * ppc440spe_adma_dma2rxor_set_mult - set RXOR multipliers; it's assumed that
3709 * ppc440spe_adma_dma2rxor_prep_src() has already been called prior to this 3709 * ppc440spe_adma_dma2rxor_prep_src() has already been called prior to this
3710 */ 3710 */
3711 static void ppc440spe_adma_dma2rxor_set_mult( 3711 static void ppc440spe_adma_dma2rxor_set_mult(
3712 struct ppc440spe_adma_desc_slot *desc, 3712 struct ppc440spe_adma_desc_slot *desc,
3713 int index, u8 mult) 3713 int index, u8 mult)
3714 { 3714 {
3715 struct xor_cb *xcb = desc->hw_desc; 3715 struct xor_cb *xcb = desc->hw_desc;
3716 int k = 0, op = 0, lop = 0; 3716 int k = 0, op = 0, lop = 0;
3717 3717
3718 /* get the RXOR operand which corresponds to index mult */ 3718 /* get the RXOR operand which corresponds to index mult */
3719 while (op <= index) { 3719 while (op <= index) {
3720 lop = op; 3720 lop = op;
3721 if (k == XOR_MAX_OPS) { 3721 if (k == XOR_MAX_OPS) {
3722 k = 0; 3722 k = 0;
3723 desc = list_entry(desc->chain_node.next, 3723 desc = list_entry(desc->chain_node.next,
3724 struct ppc440spe_adma_desc_slot, 3724 struct ppc440spe_adma_desc_slot,
3725 chain_node); 3725 chain_node);
3726 xcb = desc->hw_desc; 3726 xcb = desc->hw_desc;
3727 3727
3728 } 3728 }
3729 if ((xcb->ops[k++].h & (DMA_RXOR12 << DMA_CUED_REGION_OFF)) == 3729 if ((xcb->ops[k++].h & (DMA_RXOR12 << DMA_CUED_REGION_OFF)) ==
3730 (DMA_RXOR12 << DMA_CUED_REGION_OFF)) 3730 (DMA_RXOR12 << DMA_CUED_REGION_OFF))
3731 op += 2; 3731 op += 2;
3732 else 3732 else
3733 op += 3; 3733 op += 3;
3734 } 3734 }
3735 3735
3736 BUG_ON(k < 1); 3736 BUG_ON(k < 1);
3737 if (test_bit(k-1, desc->reverse_flags)) { 3737 if (test_bit(k-1, desc->reverse_flags)) {
3738 /* reverse order */ 3738 /* reverse order */
3739 ppc440spe_rxor_set_mult(desc, k - 1, op - index - 1, mult); 3739 ppc440spe_rxor_set_mult(desc, k - 1, op - index - 1, mult);
3740 } else { 3740 } else {
3741 /* direct order */ 3741 /* direct order */
3742 ppc440spe_rxor_set_mult(desc, k - 1, index - lop, mult); 3742 ppc440spe_rxor_set_mult(desc, k - 1, index - lop, mult);
3743 } 3743 }
3744 } 3744 }
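Both ppc440spe_adma_dma2rxor_set_src() and ppc440spe_adma_dma2rxor_set_mult() above have to map a flat source index onto (a) the RXOR operand slot k - 1 in the CDB chain and (b) the position inside that operand. The walk works because an RXOR12 operand consumes two sources while RXOR123/124/125 consume three: op counts how many sources the operands seen so far cover, and lop remembers where the current operand starts; direct order then uses offset index - lop, reverse order uses op - index - 1. A hedged worked example of that mapping, with the descriptor-hopping at XOR_MAX_OPS left out; index_to_slot is an illustrative helper, not part of the driver:

    #include <stdio.h>

    /* widths[i] is 2 for an RXOR12 operand and 3 for an RXOR123/124/125
     * operand.  Mirrors the op/lop/k walk above: advance until op > index,
     * then source "index" lives in operand slot k - 1, at offset
     * index - lop for direct order or op - index - 1 for reverse order. */
    static void index_to_slot(const int *widths, int nslots, int index,
                              int reverse, int *slot, int *offset)
    {
            int k = 0, op = 0, lop = 0;

            while (op <= index && k < nslots) {
                    lop = op;
                    op += widths[k++];
            }
            *slot = k - 1;
            *offset = reverse ? op - index - 1 : index - lop;
    }

    int main(void)
    {
            /* e.g. one RXOR123 operand (3 sources) then one RXOR12 (2 sources) */
            int widths[] = { 3, 2 };
            int slot, offset;

            index_to_slot(widths, 2, 4, 0, &slot, &offset);
            printf("source 4 -> slot %d, offset %d\n", slot, offset); /* 1, 1 */
            return 0;
    }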
3745 3745
3746 /** 3746 /**
3747 * ppc440spe_init_rxor_cursor - initialize the cursor used to scan RXOR source addresses 3747 * ppc440spe_init_rxor_cursor - initialize the cursor used to scan RXOR source addresses
3748 */ 3748 */
3749 static void ppc440spe_init_rxor_cursor(struct ppc440spe_rxor *cursor) 3749 static void ppc440spe_init_rxor_cursor(struct ppc440spe_rxor *cursor)
3750 { 3750 {
3751 memset(cursor, 0, sizeof(struct ppc440spe_rxor)); 3751 memset(cursor, 0, sizeof(struct ppc440spe_rxor));
3752 cursor->state = 2; 3752 cursor->state = 2;
3753 } 3753 }
3754 3754
3755 /** 3755 /**
3756 * ppc440spe_adma_pq_set_src_mult - set multiplication coefficient into 3756 * ppc440spe_adma_pq_set_src_mult - set multiplication coefficient into
3757 * descriptor for the PQXOR operation 3757 * descriptor for the PQXOR operation
3758 */ 3758 */
3759 static void ppc440spe_adma_pq_set_src_mult( 3759 static void ppc440spe_adma_pq_set_src_mult(
3760 struct ppc440spe_adma_desc_slot *sw_desc, 3760 struct ppc440spe_adma_desc_slot *sw_desc,
3761 unsigned char mult, int index, int dst_pos) 3761 unsigned char mult, int index, int dst_pos)
3762 { 3762 {
3763 struct ppc440spe_adma_chan *chan; 3763 struct ppc440spe_adma_chan *chan;
3764 u32 mult_idx, mult_dst; 3764 u32 mult_idx, mult_dst;
3765 struct ppc440spe_adma_desc_slot *iter = NULL, *iter1 = NULL; 3765 struct ppc440spe_adma_desc_slot *iter = NULL, *iter1 = NULL;
3766 3766
3767 chan = to_ppc440spe_adma_chan(sw_desc->async_tx.chan); 3767 chan = to_ppc440spe_adma_chan(sw_desc->async_tx.chan);
3768 3768
3769 switch (chan->device->id) { 3769 switch (chan->device->id) {
3770 case PPC440SPE_DMA0_ID: 3770 case PPC440SPE_DMA0_ID:
3771 case PPC440SPE_DMA1_ID: 3771 case PPC440SPE_DMA1_ID:
3772 if (test_bit(PPC440SPE_DESC_RXOR, &sw_desc->flags)) { 3772 if (test_bit(PPC440SPE_DESC_RXOR, &sw_desc->flags)) {
3773 int region = test_bit(PPC440SPE_DESC_RXOR12, 3773 int region = test_bit(PPC440SPE_DESC_RXOR12,
3774 &sw_desc->flags) ? 2 : 3; 3774 &sw_desc->flags) ? 2 : 3;
3775 3775
3776 if (index < region) { 3776 if (index < region) {
3777 /* RXOR multipliers */ 3777 /* RXOR multipliers */
3778 iter = ppc440spe_get_group_entry(sw_desc, 3778 iter = ppc440spe_get_group_entry(sw_desc,
3779 sw_desc->dst_cnt - 1); 3779 sw_desc->dst_cnt - 1);
3780 if (sw_desc->dst_cnt == 2) 3780 if (sw_desc->dst_cnt == 2)
3781 iter1 = ppc440spe_get_group_entry( 3781 iter1 = ppc440spe_get_group_entry(
3782 sw_desc, 0); 3782 sw_desc, 0);
3783 3783
3784 mult_idx = DMA_CUED_MULT1_OFF + (index << 3); 3784 mult_idx = DMA_CUED_MULT1_OFF + (index << 3);
3785 mult_dst = DMA_CDB_SG_SRC; 3785 mult_dst = DMA_CDB_SG_SRC;
3786 } else { 3786 } else {
3787 /* WXOR multiplier */ 3787 /* WXOR multiplier */
3788 iter = ppc440spe_get_group_entry(sw_desc, 3788 iter = ppc440spe_get_group_entry(sw_desc,
3789 index - region + 3789 index - region +
3790 sw_desc->dst_cnt); 3790 sw_desc->dst_cnt);
3791 mult_idx = DMA_CUED_MULT1_OFF; 3791 mult_idx = DMA_CUED_MULT1_OFF;
3792 mult_dst = dst_pos ? DMA_CDB_SG_DST2 : 3792 mult_dst = dst_pos ? DMA_CDB_SG_DST2 :
3793 DMA_CDB_SG_DST1; 3793 DMA_CDB_SG_DST1;
3794 } 3794 }
3795 } else { 3795 } else {
3796 int znum = 0; 3796 int znum = 0;
3797 3797
3798 /* WXOR-only; 3798 /* WXOR-only;
3799 * skip first slots with destinations (if ZERO_DST takes 3799 * skip first slots with destinations (if ZERO_DST takes
3800 * place) 3800 * place)
3801 */ 3801 */
3802 if (test_bit(PPC440SPE_ZERO_P, &sw_desc->flags)) 3802 if (test_bit(PPC440SPE_ZERO_P, &sw_desc->flags))
3803 znum++; 3803 znum++;
3804 if (test_bit(PPC440SPE_ZERO_Q, &sw_desc->flags)) 3804 if (test_bit(PPC440SPE_ZERO_Q, &sw_desc->flags))
3805 znum++; 3805 znum++;
3806 3806
3807 iter = ppc440spe_get_group_entry(sw_desc, index + znum); 3807 iter = ppc440spe_get_group_entry(sw_desc, index + znum);
3808 mult_idx = DMA_CUED_MULT1_OFF; 3808 mult_idx = DMA_CUED_MULT1_OFF;
3809 mult_dst = dst_pos ? DMA_CDB_SG_DST2 : DMA_CDB_SG_DST1; 3809 mult_dst = dst_pos ? DMA_CDB_SG_DST2 : DMA_CDB_SG_DST1;
3810 } 3810 }
3811 3811
3812 if (likely(iter)) { 3812 if (likely(iter)) {
3813 ppc440spe_desc_set_src_mult(iter, chan, 3813 ppc440spe_desc_set_src_mult(iter, chan,
3814 mult_idx, mult_dst, mult); 3814 mult_idx, mult_dst, mult);
3815 3815
3816 if (unlikely(iter1)) { 3816 if (unlikely(iter1)) {
3817 /* if we have two destinations for RXOR, then 3817 /* if we have two destinations for RXOR, then
3818 * we've just set Q mult. Set-up P now. 3818 * we've just set Q mult. Set-up P now.
3819 */ 3819 */
3820 ppc440spe_desc_set_src_mult(iter1, chan, 3820 ppc440spe_desc_set_src_mult(iter1, chan,
3821 mult_idx, mult_dst, 1); 3821 mult_idx, mult_dst, 1);
3822 } 3822 }
3823 3823
3824 } 3824 }
3825 break; 3825 break;
3826 3826
3827 case PPC440SPE_XOR_ID: 3827 case PPC440SPE_XOR_ID:
3828 iter = sw_desc->group_head; 3828 iter = sw_desc->group_head;
3829 if (sw_desc->dst_cnt == 2) { 3829 if (sw_desc->dst_cnt == 2) {
3830 /* both P & Q calculations required; set P mult here */ 3830 /* both P & Q calculations required; set P mult here */
3831 ppc440spe_adma_dma2rxor_set_mult(iter, index, 1); 3831 ppc440spe_adma_dma2rxor_set_mult(iter, index, 1);
3832 3832
3833 /* and then set Q mult */ 3833 /* and then set Q mult */
3834 iter = ppc440spe_get_group_entry(sw_desc, 3834 iter = ppc440spe_get_group_entry(sw_desc,
3835 sw_desc->descs_per_op); 3835 sw_desc->descs_per_op);
3836 } 3836 }
3837 ppc440spe_adma_dma2rxor_set_mult(iter, index, mult); 3837 ppc440spe_adma_dma2rxor_set_mult(iter, index, mult);
3838 break; 3838 break;
3839 } 3839 }
3840 } 3840 }
3841 3841
3842 /** 3842 /**
3843 * ppc440spe_adma_free_chan_resources - free the resources allocated 3843 * ppc440spe_adma_free_chan_resources - free the resources allocated
3844 */ 3844 */
3845 static void ppc440spe_adma_free_chan_resources(struct dma_chan *chan) 3845 static void ppc440spe_adma_free_chan_resources(struct dma_chan *chan)
3846 { 3846 {
3847 struct ppc440spe_adma_chan *ppc440spe_chan; 3847 struct ppc440spe_adma_chan *ppc440spe_chan;
3848 struct ppc440spe_adma_desc_slot *iter, *_iter; 3848 struct ppc440spe_adma_desc_slot *iter, *_iter;
3849 int in_use_descs = 0; 3849 int in_use_descs = 0;
3850 3850
3851 ppc440spe_chan = to_ppc440spe_adma_chan(chan); 3851 ppc440spe_chan = to_ppc440spe_adma_chan(chan);
3852 ppc440spe_adma_slot_cleanup(ppc440spe_chan); 3852 ppc440spe_adma_slot_cleanup(ppc440spe_chan);
3853 3853
3854 spin_lock_bh(&ppc440spe_chan->lock); 3854 spin_lock_bh(&ppc440spe_chan->lock);
3855 list_for_each_entry_safe(iter, _iter, &ppc440spe_chan->chain, 3855 list_for_each_entry_safe(iter, _iter, &ppc440spe_chan->chain,
3856 chain_node) { 3856 chain_node) {
3857 in_use_descs++; 3857 in_use_descs++;
3858 list_del(&iter->chain_node); 3858 list_del(&iter->chain_node);
3859 } 3859 }
3860 list_for_each_entry_safe_reverse(iter, _iter, 3860 list_for_each_entry_safe_reverse(iter, _iter,
3861 &ppc440spe_chan->all_slots, slot_node) { 3861 &ppc440spe_chan->all_slots, slot_node) {
3862 list_del(&iter->slot_node); 3862 list_del(&iter->slot_node);
3863 kfree(iter); 3863 kfree(iter);
3864 ppc440spe_chan->slots_allocated--; 3864 ppc440spe_chan->slots_allocated--;
3865 } 3865 }
3866 ppc440spe_chan->last_used = NULL; 3866 ppc440spe_chan->last_used = NULL;
3867 3867
3868 dev_dbg(ppc440spe_chan->device->common.dev, 3868 dev_dbg(ppc440spe_chan->device->common.dev,
3869 "ppc440spe adma%d %s slots_allocated %d\n", 3869 "ppc440spe adma%d %s slots_allocated %d\n",
3870 ppc440spe_chan->device->id, 3870 ppc440spe_chan->device->id,
3871 __func__, ppc440spe_chan->slots_allocated); 3871 __func__, ppc440spe_chan->slots_allocated);
3872 spin_unlock_bh(&ppc440spe_chan->lock); 3872 spin_unlock_bh(&ppc440spe_chan->lock);
3873 3873
3874 /* one is ok since we left it there on purpose */ 3874 /* one is ok since we left it there on purpose */
3875 if (in_use_descs > 1) 3875 if (in_use_descs > 1)
3876 printk(KERN_ERR "SPE: Freeing %d in use descriptors!\n", 3876 printk(KERN_ERR "SPE: Freeing %d in use descriptors!\n",
3877 in_use_descs - 1); 3877 in_use_descs - 1);
3878 } 3878 }
3879 3879
3880 /** 3880 /**
3881 * ppc440spe_adma_tx_status - poll the status of an ADMA transaction 3881 * ppc440spe_adma_tx_status - poll the status of an ADMA transaction
3882 * @chan: ADMA channel handle 3882 * @chan: ADMA channel handle
3883 * @cookie: ADMA transaction identifier 3883 * @cookie: ADMA transaction identifier
3884 * @txstate: a holder for the current state of the channel 3884 * @txstate: a holder for the current state of the channel
3885 */ 3885 */
3886 static enum dma_status ppc440spe_adma_tx_status(struct dma_chan *chan, 3886 static enum dma_status ppc440spe_adma_tx_status(struct dma_chan *chan,
3887 dma_cookie_t cookie, struct dma_tx_state *txstate) 3887 dma_cookie_t cookie, struct dma_tx_state *txstate)
3888 { 3888 {
3889 struct ppc440spe_adma_chan *ppc440spe_chan; 3889 struct ppc440spe_adma_chan *ppc440spe_chan;
3890 enum dma_status ret; 3890 enum dma_status ret;
3891 3891
3892 ppc440spe_chan = to_ppc440spe_adma_chan(chan); 3892 ppc440spe_chan = to_ppc440spe_adma_chan(chan);
3893 ret = dma_cookie_status(chan, cookie, txstate); 3893 ret = dma_cookie_status(chan, cookie, txstate);
3894 if (ret == DMA_SUCCESS) 3894 if (ret == DMA_COMPLETE)
3895 return ret; 3895 return ret;
3896 3896
3897 ppc440spe_adma_slot_cleanup(ppc440spe_chan); 3897 ppc440spe_adma_slot_cleanup(ppc440spe_chan);
3898 3898
3899 return dma_cookie_status(chan, cookie, txstate); 3899 return dma_cookie_status(chan, cookie, txstate);
3900 } 3900 }
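ppc440spe_adma_tx_status() follows the usual dmaengine pattern: consult dma_cookie_status() first, and only if the cookie has not yet reached DMA_COMPLETE run the channel cleanup and ask again. On the client side the same information is normally consumed through the generic helpers; the sketch below is a hedged illustration only (submit_and_wait and my_done are made-up names, the channel and descriptor are assumed to come from dma_request_channel() and one of the device_prep_dma_* callbacks, and *done is assumed to have been set up with init_completion() by the caller):

    #include <linux/completion.h>
    #include <linux/dmaengine.h>
    #include <linux/errno.h>

    static void my_done(void *param)
    {
            complete(param);                        /* signal the waiter below */
    }

    static int submit_and_wait(struct dma_chan *chan,
                               struct dma_async_tx_descriptor *tx,
                               struct completion *done)
    {
            dma_cookie_t cookie;

            tx->callback = my_done;
            tx->callback_param = done;
            cookie = dmaengine_submit(tx);          /* assign a cookie        */
            dma_async_issue_pending(chan);          /* kick the pending chain */

            wait_for_completion(done);              /* interrupt-driven wait  */

            /* ends up in ppc440spe_adma_tx_status() via device_tx_status */
            return dma_async_is_tx_complete(chan, cookie, NULL, NULL) ==
                   DMA_COMPLETE ? 0 : -EIO;
    }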
3901 3901
3902 /** 3902 /**
3903 * ppc440spe_adma_eot_handler - end of transfer interrupt handler 3903 * ppc440spe_adma_eot_handler - end of transfer interrupt handler
3904 */ 3904 */
3905 static irqreturn_t ppc440spe_adma_eot_handler(int irq, void *data) 3905 static irqreturn_t ppc440spe_adma_eot_handler(int irq, void *data)
3906 { 3906 {
3907 struct ppc440spe_adma_chan *chan = data; 3907 struct ppc440spe_adma_chan *chan = data;
3908 3908
3909 dev_dbg(chan->device->common.dev, 3909 dev_dbg(chan->device->common.dev,
3910 "ppc440spe adma%d: %s\n", chan->device->id, __func__); 3910 "ppc440spe adma%d: %s\n", chan->device->id, __func__);
3911 3911
3912 tasklet_schedule(&chan->irq_tasklet); 3912 tasklet_schedule(&chan->irq_tasklet);
3913 ppc440spe_adma_device_clear_eot_status(chan); 3913 ppc440spe_adma_device_clear_eot_status(chan);
3914 3914
3915 return IRQ_HANDLED; 3915 return IRQ_HANDLED;
3916 } 3916 }
3917 3917
3918 /** 3918 /**
3919 * ppc440spe_adma_err_handler - DMA error interrupt handler; 3919 * ppc440spe_adma_err_handler - DMA error interrupt handler;
3920 * do the same things as an eot handler 3920 * do the same things as an eot handler
3921 */ 3921 */
3922 static irqreturn_t ppc440spe_adma_err_handler(int irq, void *data) 3922 static irqreturn_t ppc440spe_adma_err_handler(int irq, void *data)
3923 { 3923 {
3924 struct ppc440spe_adma_chan *chan = data; 3924 struct ppc440spe_adma_chan *chan = data;
3925 3925
3926 dev_dbg(chan->device->common.dev, 3926 dev_dbg(chan->device->common.dev,
3927 "ppc440spe adma%d: %s\n", chan->device->id, __func__); 3927 "ppc440spe adma%d: %s\n", chan->device->id, __func__);
3928 3928
3929 tasklet_schedule(&chan->irq_tasklet); 3929 tasklet_schedule(&chan->irq_tasklet);
3930 ppc440spe_adma_device_clear_eot_status(chan); 3930 ppc440spe_adma_device_clear_eot_status(chan);
3931 3931
3932 return IRQ_HANDLED; 3932 return IRQ_HANDLED;
3933 } 3933 }
3934 3934
3935 /** 3935 /**
3936 * ppc440spe_test_callback - called when test operation has been done 3936 * ppc440spe_test_callback - called when test operation has been done
3937 */ 3937 */
3938 static void ppc440spe_test_callback(void *unused) 3938 static void ppc440spe_test_callback(void *unused)
3939 { 3939 {
3940 complete(&ppc440spe_r6_test_comp); 3940 complete(&ppc440spe_r6_test_comp);
3941 } 3941 }
3942 3942
3943 /** 3943 /**
3944 * ppc440spe_adma_issue_pending - flush all pending descriptors to h/w 3944 * ppc440spe_adma_issue_pending - flush all pending descriptors to h/w
3945 */ 3945 */
3946 static void ppc440spe_adma_issue_pending(struct dma_chan *chan) 3946 static void ppc440spe_adma_issue_pending(struct dma_chan *chan)
3947 { 3947 {
3948 struct ppc440spe_adma_chan *ppc440spe_chan; 3948 struct ppc440spe_adma_chan *ppc440spe_chan;
3949 3949
3950 ppc440spe_chan = to_ppc440spe_adma_chan(chan); 3950 ppc440spe_chan = to_ppc440spe_adma_chan(chan);
3951 dev_dbg(ppc440spe_chan->device->common.dev, 3951 dev_dbg(ppc440spe_chan->device->common.dev,
3952 "ppc440spe adma%d: %s %d \n", ppc440spe_chan->device->id, 3952 "ppc440spe adma%d: %s %d \n", ppc440spe_chan->device->id,
3953 __func__, ppc440spe_chan->pending); 3953 __func__, ppc440spe_chan->pending);
3954 3954
3955 if (ppc440spe_chan->pending) { 3955 if (ppc440spe_chan->pending) {
3956 ppc440spe_chan->pending = 0; 3956 ppc440spe_chan->pending = 0;
3957 ppc440spe_chan_append(ppc440spe_chan); 3957 ppc440spe_chan_append(ppc440spe_chan);
3958 } 3958 }
3959 } 3959 }
3960 3960
3961 /** 3961 /**
3962 * ppc440spe_chan_start_null_xor - initiate the first XOR operation (DMA engines 3962 * ppc440spe_chan_start_null_xor - initiate the first XOR operation (DMA engines
3963 * use FIFOs (as opposed to the chains used in XOR) so this is a XOR 3963 * use FIFOs (as opposed to the chains used in XOR) so this is a XOR
3964 * specific operation) 3964 * specific operation)
3965 */ 3965 */
3966 static void ppc440spe_chan_start_null_xor(struct ppc440spe_adma_chan *chan) 3966 static void ppc440spe_chan_start_null_xor(struct ppc440spe_adma_chan *chan)
3967 { 3967 {
3968 struct ppc440spe_adma_desc_slot *sw_desc, *group_start; 3968 struct ppc440spe_adma_desc_slot *sw_desc, *group_start;
3969 dma_cookie_t cookie; 3969 dma_cookie_t cookie;
3970 int slot_cnt, slots_per_op; 3970 int slot_cnt, slots_per_op;
3971 3971
3972 dev_dbg(chan->device->common.dev, 3972 dev_dbg(chan->device->common.dev,
3973 "ppc440spe adma%d: %s\n", chan->device->id, __func__); 3973 "ppc440spe adma%d: %s\n", chan->device->id, __func__);
3974 3974
3975 spin_lock_bh(&chan->lock); 3975 spin_lock_bh(&chan->lock);
3976 slot_cnt = ppc440spe_chan_xor_slot_count(0, 2, &slots_per_op); 3976 slot_cnt = ppc440spe_chan_xor_slot_count(0, 2, &slots_per_op);
3977 sw_desc = ppc440spe_adma_alloc_slots(chan, slot_cnt, slots_per_op); 3977 sw_desc = ppc440spe_adma_alloc_slots(chan, slot_cnt, slots_per_op);
3978 if (sw_desc) { 3978 if (sw_desc) {
3979 group_start = sw_desc->group_head; 3979 group_start = sw_desc->group_head;
3980 list_splice_init(&sw_desc->group_list, &chan->chain); 3980 list_splice_init(&sw_desc->group_list, &chan->chain);
3981 async_tx_ack(&sw_desc->async_tx); 3981 async_tx_ack(&sw_desc->async_tx);
3982 ppc440spe_desc_init_null_xor(group_start); 3982 ppc440spe_desc_init_null_xor(group_start);
3983 3983
3984 cookie = dma_cookie_assign(&sw_desc->async_tx); 3984 cookie = dma_cookie_assign(&sw_desc->async_tx);
3985 3985
3986 /* initialize the completed cookie to be less than 3986 /* initialize the completed cookie to be less than
3987 * the most recently used cookie 3987 * the most recently used cookie
3988 */ 3988 */
3989 chan->common.completed_cookie = cookie - 1; 3989 chan->common.completed_cookie = cookie - 1;
3990 3990
3991 /* channel should not be busy */ 3991 /* channel should not be busy */
3992 BUG_ON(ppc440spe_chan_is_busy(chan)); 3992 BUG_ON(ppc440spe_chan_is_busy(chan));
3993 3993
3994 /* set the descriptor address */ 3994 /* set the descriptor address */
3995 ppc440spe_chan_set_first_xor_descriptor(chan, sw_desc); 3995 ppc440spe_chan_set_first_xor_descriptor(chan, sw_desc);
3996 3996
3997 /* run the descriptor */ 3997 /* run the descriptor */
3998 ppc440spe_chan_run(chan); 3998 ppc440spe_chan_run(chan);
3999 } else 3999 } else
4000 printk(KERN_ERR "ppc440spe adma%d" 4000 printk(KERN_ERR "ppc440spe adma%d"
4001 " failed to allocate null descriptor\n", 4001 " failed to allocate null descriptor\n",
4002 chan->device->id); 4002 chan->device->id);
4003 spin_unlock_bh(&chan->lock); 4003 spin_unlock_bh(&chan->lock);
4004 } 4004 }
4005 4005
4006 /** 4006 /**
4007 * ppc440spe_test_raid6 - test whether RAID-6 capabilities were enabled successfully. 4007 * ppc440spe_test_raid6 - test whether RAID-6 capabilities were enabled successfully.
4008 * For this we just perform one WXOR operation with the same source 4008 * For this we just perform one WXOR operation with the same source
4009 * and destination addresses and a GF-multiplier of 1; so if RAID-6 4009 * and destination addresses and a GF-multiplier of 1; so if RAID-6
4010 * capabilities are enabled then we'll get src/dst filled with zero. 4010 * capabilities are enabled then we'll get src/dst filled with zero.
4011 */ 4011 */
4012 static int ppc440spe_test_raid6(struct ppc440spe_adma_chan *chan) 4012 static int ppc440spe_test_raid6(struct ppc440spe_adma_chan *chan)
4013 { 4013 {
4014 struct ppc440spe_adma_desc_slot *sw_desc, *iter; 4014 struct ppc440spe_adma_desc_slot *sw_desc, *iter;
4015 struct page *pg; 4015 struct page *pg;
4016 char *a; 4016 char *a;
4017 dma_addr_t dma_addr, addrs[2]; 4017 dma_addr_t dma_addr, addrs[2];
4018 unsigned long op = 0; 4018 unsigned long op = 0;
4019 int rval = 0; 4019 int rval = 0;
4020 4020
4021 set_bit(PPC440SPE_DESC_WXOR, &op); 4021 set_bit(PPC440SPE_DESC_WXOR, &op);
4022 4022
4023 pg = alloc_page(GFP_KERNEL); 4023 pg = alloc_page(GFP_KERNEL);
4024 if (!pg) 4024 if (!pg)
4025 return -ENOMEM; 4025 return -ENOMEM;
4026 4026
4027 spin_lock_bh(&chan->lock); 4027 spin_lock_bh(&chan->lock);
4028 sw_desc = ppc440spe_adma_alloc_slots(chan, 1, 1); 4028 sw_desc = ppc440spe_adma_alloc_slots(chan, 1, 1);
4029 if (sw_desc) { 4029 if (sw_desc) {
4030 /* 1 src, 1 dst, int_ena, WXOR */ 4030 /* 1 src, 1 dst, int_ena, WXOR */
4031 ppc440spe_desc_init_dma01pq(sw_desc, 1, 1, 1, op); 4031 ppc440spe_desc_init_dma01pq(sw_desc, 1, 1, 1, op);
4032 list_for_each_entry(iter, &sw_desc->group_list, chain_node) { 4032 list_for_each_entry(iter, &sw_desc->group_list, chain_node) {
4033 ppc440spe_desc_set_byte_count(iter, chan, PAGE_SIZE); 4033 ppc440spe_desc_set_byte_count(iter, chan, PAGE_SIZE);
4034 iter->unmap_len = PAGE_SIZE; 4034 iter->unmap_len = PAGE_SIZE;
4035 } 4035 }
4036 } else { 4036 } else {
4037 rval = -EFAULT; 4037 rval = -EFAULT;
4038 spin_unlock_bh(&chan->lock); 4038 spin_unlock_bh(&chan->lock);
4039 goto exit; 4039 goto exit;
4040 } 4040 }
4041 spin_unlock_bh(&chan->lock); 4041 spin_unlock_bh(&chan->lock);
4042 4042
4043 /* Fill the test page with ones */ 4043 /* Fill the test page with ones */
4044 memset(page_address(pg), 0xFF, PAGE_SIZE); 4044 memset(page_address(pg), 0xFF, PAGE_SIZE);
4045 dma_addr = dma_map_page(chan->device->dev, pg, 0, 4045 dma_addr = dma_map_page(chan->device->dev, pg, 0,
4046 PAGE_SIZE, DMA_BIDIRECTIONAL); 4046 PAGE_SIZE, DMA_BIDIRECTIONAL);
4047 4047
4048 /* Setup addresses */ 4048 /* Setup addresses */
4049 ppc440spe_adma_pq_set_src(sw_desc, dma_addr, 0); 4049 ppc440spe_adma_pq_set_src(sw_desc, dma_addr, 0);
4050 ppc440spe_adma_pq_set_src_mult(sw_desc, 1, 0, 0); 4050 ppc440spe_adma_pq_set_src_mult(sw_desc, 1, 0, 0);
4051 addrs[0] = dma_addr; 4051 addrs[0] = dma_addr;
4052 addrs[1] = 0; 4052 addrs[1] = 0;
4053 ppc440spe_adma_pq_set_dest(sw_desc, addrs, DMA_PREP_PQ_DISABLE_Q); 4053 ppc440spe_adma_pq_set_dest(sw_desc, addrs, DMA_PREP_PQ_DISABLE_Q);
4054 4054
4055 async_tx_ack(&sw_desc->async_tx); 4055 async_tx_ack(&sw_desc->async_tx);
4056 sw_desc->async_tx.callback = ppc440spe_test_callback; 4056 sw_desc->async_tx.callback = ppc440spe_test_callback;
4057 sw_desc->async_tx.callback_param = NULL; 4057 sw_desc->async_tx.callback_param = NULL;
4058 4058
4059 init_completion(&ppc440spe_r6_test_comp); 4059 init_completion(&ppc440spe_r6_test_comp);
4060 4060
4061 ppc440spe_adma_tx_submit(&sw_desc->async_tx); 4061 ppc440spe_adma_tx_submit(&sw_desc->async_tx);
4062 ppc440spe_adma_issue_pending(&chan->common); 4062 ppc440spe_adma_issue_pending(&chan->common);
4063 4063
4064 wait_for_completion(&ppc440spe_r6_test_comp); 4064 wait_for_completion(&ppc440spe_r6_test_comp);
4065 4065
4066 /* Now check if the test page is zeroed */ 4066 /* Now check if the test page is zeroed */
4067 a = page_address(pg); 4067 a = page_address(pg);
4068 if ((*(u32 *)a) == 0 && memcmp(a, a+4, PAGE_SIZE-4) == 0) { 4068 if ((*(u32 *)a) == 0 && memcmp(a, a+4, PAGE_SIZE-4) == 0) {
4069 /* page is zero - RAID-6 enabled */ 4069 /* page is zero - RAID-6 enabled */
4070 rval = 0; 4070 rval = 0;
4071 } else { 4071 } else {
4072 /* RAID-6 was not enabled */ 4072 /* RAID-6 was not enabled */
4073 rval = -EINVAL; 4073 rval = -EINVAL;
4074 } 4074 }
4075 exit: 4075 exit:
4076 __free_page(pg); 4076 __free_page(pg);
4077 return rval; 4077 return rval;
4078 } 4078 }
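The self-test works because of a simple GF(2^8) identity: WXOR accumulates dst ^= gfmul(coef, src), and with a coefficient of 1 the multiplication is the identity, so pointing source and destination at the same 0xFF-filled page must leave the page all zero when the RAID-6 datapath is really enabled. A small, hedged stand-alone illustration of that identity (no hardware involved, wxor_coef1 is an illustrative name):

    #include <assert.h>
    #include <stddef.h>
    #include <string.h>

    /* gfmul() with a coefficient of 1 is the identity in GF(2^8), so the
     * WXOR step dst[i] ^= 1 * src[i] zeroes the buffer when dst == src. */
    static void wxor_coef1(unsigned char *dst, const unsigned char *src, size_t n)
    {
            for (size_t i = 0; i < n; i++)
                    dst[i] ^= src[i];       /* 1 * src[i] == src[i] */
    }

    int main(void)
    {
            unsigned char page[64];
            unsigned char zero[64] = { 0 };

            memset(page, 0xFF, sizeof(page));       /* mirrors the 0xFF test fill */
            wxor_coef1(page, page, sizeof(page));
            assert(memcmp(page, zero, sizeof(page)) == 0);
            return 0;
    }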
4079 4079
4080 static void ppc440spe_adma_init_capabilities(struct ppc440spe_adma_device *adev) 4080 static void ppc440spe_adma_init_capabilities(struct ppc440spe_adma_device *adev)
4081 { 4081 {
4082 switch (adev->id) { 4082 switch (adev->id) {
4083 case PPC440SPE_DMA0_ID: 4083 case PPC440SPE_DMA0_ID:
4084 case PPC440SPE_DMA1_ID: 4084 case PPC440SPE_DMA1_ID:
4085 dma_cap_set(DMA_MEMCPY, adev->common.cap_mask); 4085 dma_cap_set(DMA_MEMCPY, adev->common.cap_mask);
4086 dma_cap_set(DMA_INTERRUPT, adev->common.cap_mask); 4086 dma_cap_set(DMA_INTERRUPT, adev->common.cap_mask);
4087 dma_cap_set(DMA_PQ, adev->common.cap_mask); 4087 dma_cap_set(DMA_PQ, adev->common.cap_mask);
4088 dma_cap_set(DMA_PQ_VAL, adev->common.cap_mask); 4088 dma_cap_set(DMA_PQ_VAL, adev->common.cap_mask);
4089 dma_cap_set(DMA_XOR_VAL, adev->common.cap_mask); 4089 dma_cap_set(DMA_XOR_VAL, adev->common.cap_mask);
4090 break; 4090 break;
4091 case PPC440SPE_XOR_ID: 4091 case PPC440SPE_XOR_ID:
4092 dma_cap_set(DMA_XOR, adev->common.cap_mask); 4092 dma_cap_set(DMA_XOR, adev->common.cap_mask);
4093 dma_cap_set(DMA_PQ, adev->common.cap_mask); 4093 dma_cap_set(DMA_PQ, adev->common.cap_mask);
4094 dma_cap_set(DMA_INTERRUPT, adev->common.cap_mask); 4094 dma_cap_set(DMA_INTERRUPT, adev->common.cap_mask);
4095 adev->common.cap_mask = adev->common.cap_mask; 4095 adev->common.cap_mask = adev->common.cap_mask;
4096 break; 4096 break;
4097 } 4097 }
4098 4098
4099 /* Set base routines */ 4099 /* Set base routines */
4100 adev->common.device_alloc_chan_resources = 4100 adev->common.device_alloc_chan_resources =
4101 ppc440spe_adma_alloc_chan_resources; 4101 ppc440spe_adma_alloc_chan_resources;
4102 adev->common.device_free_chan_resources = 4102 adev->common.device_free_chan_resources =
4103 ppc440spe_adma_free_chan_resources; 4103 ppc440spe_adma_free_chan_resources;
4104 adev->common.device_tx_status = ppc440spe_adma_tx_status; 4104 adev->common.device_tx_status = ppc440spe_adma_tx_status;
4105 adev->common.device_issue_pending = ppc440spe_adma_issue_pending; 4105 adev->common.device_issue_pending = ppc440spe_adma_issue_pending;
4106 4106
4107 /* Set prep routines based on capability */ 4107 /* Set prep routines based on capability */
4108 if (dma_has_cap(DMA_MEMCPY, adev->common.cap_mask)) { 4108 if (dma_has_cap(DMA_MEMCPY, adev->common.cap_mask)) {
4109 adev->common.device_prep_dma_memcpy = 4109 adev->common.device_prep_dma_memcpy =
4110 ppc440spe_adma_prep_dma_memcpy; 4110 ppc440spe_adma_prep_dma_memcpy;
4111 } 4111 }
4112 if (dma_has_cap(DMA_XOR, adev->common.cap_mask)) { 4112 if (dma_has_cap(DMA_XOR, adev->common.cap_mask)) {
4113 adev->common.max_xor = XOR_MAX_OPS; 4113 adev->common.max_xor = XOR_MAX_OPS;
4114 adev->common.device_prep_dma_xor = 4114 adev->common.device_prep_dma_xor =
4115 ppc440spe_adma_prep_dma_xor; 4115 ppc440spe_adma_prep_dma_xor;
4116 } 4116 }
4117 if (dma_has_cap(DMA_PQ, adev->common.cap_mask)) { 4117 if (dma_has_cap(DMA_PQ, adev->common.cap_mask)) {
4118 switch (adev->id) { 4118 switch (adev->id) {
4119 case PPC440SPE_DMA0_ID: 4119 case PPC440SPE_DMA0_ID:
4120 dma_set_maxpq(&adev->common, 4120 dma_set_maxpq(&adev->common,
4121 DMA0_FIFO_SIZE / sizeof(struct dma_cdb), 0); 4121 DMA0_FIFO_SIZE / sizeof(struct dma_cdb), 0);
4122 break; 4122 break;
4123 case PPC440SPE_DMA1_ID: 4123 case PPC440SPE_DMA1_ID:
4124 dma_set_maxpq(&adev->common, 4124 dma_set_maxpq(&adev->common,
4125 DMA1_FIFO_SIZE / sizeof(struct dma_cdb), 0); 4125 DMA1_FIFO_SIZE / sizeof(struct dma_cdb), 0);
4126 break; 4126 break;
4127 case PPC440SPE_XOR_ID: 4127 case PPC440SPE_XOR_ID:
4128 adev->common.max_pq = XOR_MAX_OPS * 3; 4128 adev->common.max_pq = XOR_MAX_OPS * 3;
4129 break; 4129 break;
4130 } 4130 }
4131 adev->common.device_prep_dma_pq = 4131 adev->common.device_prep_dma_pq =
4132 ppc440spe_adma_prep_dma_pq; 4132 ppc440spe_adma_prep_dma_pq;
4133 } 4133 }
4134 if (dma_has_cap(DMA_PQ_VAL, adev->common.cap_mask)) { 4134 if (dma_has_cap(DMA_PQ_VAL, adev->common.cap_mask)) {
4135 switch (adev->id) { 4135 switch (adev->id) {
4136 case PPC440SPE_DMA0_ID: 4136 case PPC440SPE_DMA0_ID:
4137 adev->common.max_pq = DMA0_FIFO_SIZE / 4137 adev->common.max_pq = DMA0_FIFO_SIZE /
4138 sizeof(struct dma_cdb); 4138 sizeof(struct dma_cdb);
4139 break; 4139 break;
4140 case PPC440SPE_DMA1_ID: 4140 case PPC440SPE_DMA1_ID:
4141 adev->common.max_pq = DMA1_FIFO_SIZE / 4141 adev->common.max_pq = DMA1_FIFO_SIZE /
4142 sizeof(struct dma_cdb); 4142 sizeof(struct dma_cdb);
4143 break; 4143 break;
4144 } 4144 }
4145 adev->common.device_prep_dma_pq_val = 4145 adev->common.device_prep_dma_pq_val =
4146 ppc440spe_adma_prep_dma_pqzero_sum; 4146 ppc440spe_adma_prep_dma_pqzero_sum;
4147 } 4147 }
4148 if (dma_has_cap(DMA_XOR_VAL, adev->common.cap_mask)) { 4148 if (dma_has_cap(DMA_XOR_VAL, adev->common.cap_mask)) {
4149 switch (adev->id) { 4149 switch (adev->id) {
4150 case PPC440SPE_DMA0_ID: 4150 case PPC440SPE_DMA0_ID:
4151 adev->common.max_xor = DMA0_FIFO_SIZE / 4151 adev->common.max_xor = DMA0_FIFO_SIZE /
4152 sizeof(struct dma_cdb); 4152 sizeof(struct dma_cdb);
4153 break; 4153 break;
4154 case PPC440SPE_DMA1_ID: 4154 case PPC440SPE_DMA1_ID:
4155 adev->common.max_xor = DMA1_FIFO_SIZE / 4155 adev->common.max_xor = DMA1_FIFO_SIZE /
4156 sizeof(struct dma_cdb); 4156 sizeof(struct dma_cdb);
4157 break; 4157 break;
4158 } 4158 }
4159 adev->common.device_prep_dma_xor_val = 4159 adev->common.device_prep_dma_xor_val =
4160 ppc440spe_adma_prep_dma_xor_zero_sum; 4160 ppc440spe_adma_prep_dma_xor_zero_sum;
4161 } 4161 }
4162 if (dma_has_cap(DMA_INTERRUPT, adev->common.cap_mask)) { 4162 if (dma_has_cap(DMA_INTERRUPT, adev->common.cap_mask)) {
4163 adev->common.device_prep_dma_interrupt = 4163 adev->common.device_prep_dma_interrupt =
4164 ppc440spe_adma_prep_dma_interrupt; 4164 ppc440spe_adma_prep_dma_interrupt;
4165 } 4165 }
4166 pr_info("%s: AMCC(R) PPC440SP(E) ADMA Engine: " 4166 pr_info("%s: AMCC(R) PPC440SP(E) ADMA Engine: "
4167 "( %s%s%s%s%s%s%s)\n", 4167 "( %s%s%s%s%s%s%s)\n",
4168 dev_name(adev->dev), 4168 dev_name(adev->dev),
4169 dma_has_cap(DMA_PQ, adev->common.cap_mask) ? "pq " : "", 4169 dma_has_cap(DMA_PQ, adev->common.cap_mask) ? "pq " : "",
4170 dma_has_cap(DMA_PQ_VAL, adev->common.cap_mask) ? "pq_val " : "", 4170 dma_has_cap(DMA_PQ_VAL, adev->common.cap_mask) ? "pq_val " : "",
4171 dma_has_cap(DMA_XOR, adev->common.cap_mask) ? "xor " : "", 4171 dma_has_cap(DMA_XOR, adev->common.cap_mask) ? "xor " : "",
4172 dma_has_cap(DMA_XOR_VAL, adev->common.cap_mask) ? "xor_val " : "", 4172 dma_has_cap(DMA_XOR_VAL, adev->common.cap_mask) ? "xor_val " : "",
4173 dma_has_cap(DMA_MEMCPY, adev->common.cap_mask) ? "memcpy " : "", 4173 dma_has_cap(DMA_MEMCPY, adev->common.cap_mask) ? "memcpy " : "",
4174 dma_has_cap(DMA_INTERRUPT, adev->common.cap_mask) ? "intr " : ""); 4174 dma_has_cap(DMA_INTERRUPT, adev->common.cap_mask) ? "intr " : "");
4175 } 4175 }
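The capability mask filled in above is what generic dmaengine clients match against: DMA0/DMA1 channels advertise MEMCPY, PQ, PQ_VAL, XOR_VAL and INTERRUPT, while the XOR engine advertises XOR, PQ and INTERRUPT. A hedged sketch of how a client would obtain a PQ-capable channel through the core API; grab_pq_channel is an illustrative name, not something in this driver:

    #include <linux/dmaengine.h>
    #include <linux/errno.h>

    /* Minimal sketch: grab any channel advertising DMA_PQ, then release it.
     * A real user would keep the channel and call its device_prep_dma_pq()
     * callback (or the async_tx layer) to build descriptors. */
    static int grab_pq_channel(void)
    {
            dma_cap_mask_t mask;
            struct dma_chan *chan;

            dma_cap_zero(mask);
            dma_cap_set(DMA_PQ, mask);

            chan = dma_request_channel(mask, NULL, NULL);
            if (!chan)
                    return -ENODEV;

            pr_info("got PQ channel %s\n", dma_chan_name(chan));
            dma_release_channel(chan);
            return 0;
    }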
4176 4176
4177 static int ppc440spe_adma_setup_irqs(struct ppc440spe_adma_device *adev, 4177 static int ppc440spe_adma_setup_irqs(struct ppc440spe_adma_device *adev,
4178 struct ppc440spe_adma_chan *chan, 4178 struct ppc440spe_adma_chan *chan,
4179 int *initcode) 4179 int *initcode)
4180 { 4180 {
4181 struct platform_device *ofdev; 4181 struct platform_device *ofdev;
4182 struct device_node *np; 4182 struct device_node *np;
4183 int ret; 4183 int ret;
4184 4184
4185 ofdev = container_of(adev->dev, struct platform_device, dev); 4185 ofdev = container_of(adev->dev, struct platform_device, dev);
4186 np = ofdev->dev.of_node; 4186 np = ofdev->dev.of_node;
4187 if (adev->id != PPC440SPE_XOR_ID) { 4187 if (adev->id != PPC440SPE_XOR_ID) {
4188 adev->err_irq = irq_of_parse_and_map(np, 1); 4188 adev->err_irq = irq_of_parse_and_map(np, 1);
4189 if (adev->err_irq == NO_IRQ) { 4189 if (adev->err_irq == NO_IRQ) {
4190 dev_warn(adev->dev, "no err irq resource?\n"); 4190 dev_warn(adev->dev, "no err irq resource?\n");
4191 *initcode = PPC_ADMA_INIT_IRQ2; 4191 *initcode = PPC_ADMA_INIT_IRQ2;
4192 adev->err_irq = -ENXIO; 4192 adev->err_irq = -ENXIO;
4193 } else 4193 } else
4194 atomic_inc(&ppc440spe_adma_err_irq_ref); 4194 atomic_inc(&ppc440spe_adma_err_irq_ref);
4195 } else { 4195 } else {
4196 adev->err_irq = -ENXIO; 4196 adev->err_irq = -ENXIO;
4197 } 4197 }
4198 4198
4199 adev->irq = irq_of_parse_and_map(np, 0); 4199 adev->irq = irq_of_parse_and_map(np, 0);
4200 if (adev->irq == NO_IRQ) { 4200 if (adev->irq == NO_IRQ) {
4201 dev_err(adev->dev, "no irq resource\n"); 4201 dev_err(adev->dev, "no irq resource\n");
4202 *initcode = PPC_ADMA_INIT_IRQ1; 4202 *initcode = PPC_ADMA_INIT_IRQ1;
4203 ret = -ENXIO; 4203 ret = -ENXIO;
4204 goto err_irq_map; 4204 goto err_irq_map;
4205 } 4205 }
4206 dev_dbg(adev->dev, "irq %d, err irq %d\n", 4206 dev_dbg(adev->dev, "irq %d, err irq %d\n",
4207 adev->irq, adev->err_irq); 4207 adev->irq, adev->err_irq);
4208 4208
4209 ret = request_irq(adev->irq, ppc440spe_adma_eot_handler, 4209 ret = request_irq(adev->irq, ppc440spe_adma_eot_handler,
4210 0, dev_driver_string(adev->dev), chan); 4210 0, dev_driver_string(adev->dev), chan);
4211 if (ret) { 4211 if (ret) {
4212 dev_err(adev->dev, "can't request irq %d\n", 4212 dev_err(adev->dev, "can't request irq %d\n",
4213 adev->irq); 4213 adev->irq);
4214 *initcode = PPC_ADMA_INIT_IRQ1; 4214 *initcode = PPC_ADMA_INIT_IRQ1;
4215 ret = -EIO; 4215 ret = -EIO;
4216 goto err_req1; 4216 goto err_req1;
4217 } 4217 }
4218 4218
4219 /* only DMA engines have a separate error IRQ 4219 /* only DMA engines have a separate error IRQ
4220 * so it's Ok if err_irq < 0 in XOR engine case. 4220 * so it's Ok if err_irq < 0 in XOR engine case.
4221 */ 4221 */
4222 if (adev->err_irq > 0) { 4222 if (adev->err_irq > 0) {
4223 /* both DMA engines share common error IRQ */ 4223 /* both DMA engines share common error IRQ */
4224 ret = request_irq(adev->err_irq, 4224 ret = request_irq(adev->err_irq,
4225 ppc440spe_adma_err_handler, 4225 ppc440spe_adma_err_handler,
4226 IRQF_SHARED, 4226 IRQF_SHARED,
4227 dev_driver_string(adev->dev), 4227 dev_driver_string(adev->dev),
4228 chan); 4228 chan);
4229 if (ret) { 4229 if (ret) {
4230 dev_err(adev->dev, "can't request irq %d\n", 4230 dev_err(adev->dev, "can't request irq %d\n",
4231 adev->err_irq); 4231 adev->err_irq);
4232 *initcode = PPC_ADMA_INIT_IRQ2; 4232 *initcode = PPC_ADMA_INIT_IRQ2;
4233 ret = -EIO; 4233 ret = -EIO;
4234 goto err_req2; 4234 goto err_req2;
4235 } 4235 }
4236 } 4236 }
4237 4237
4238 if (adev->id == PPC440SPE_XOR_ID) { 4238 if (adev->id == PPC440SPE_XOR_ID) {
4239 /* enable XOR engine interrupts */ 4239 /* enable XOR engine interrupts */
4240 iowrite32be(XOR_IE_CBCIE_BIT | XOR_IE_ICBIE_BIT | 4240 iowrite32be(XOR_IE_CBCIE_BIT | XOR_IE_ICBIE_BIT |
4241 XOR_IE_ICIE_BIT | XOR_IE_RPTIE_BIT, 4241 XOR_IE_ICIE_BIT | XOR_IE_RPTIE_BIT,
4242 &adev->xor_reg->ier); 4242 &adev->xor_reg->ier);
4243 } else { 4243 } else {
4244 u32 mask, enable; 4244 u32 mask, enable;
4245 4245
4246 np = of_find_compatible_node(NULL, NULL, "ibm,i2o-440spe"); 4246 np = of_find_compatible_node(NULL, NULL, "ibm,i2o-440spe");
4247 if (!np) { 4247 if (!np) {
4248 pr_err("%s: can't find I2O device tree node\n", 4248 pr_err("%s: can't find I2O device tree node\n",
4249 __func__); 4249 __func__);
4250 ret = -ENODEV; 4250 ret = -ENODEV;
4251 goto err_req2; 4251 goto err_req2;
4252 } 4252 }
4253 adev->i2o_reg = of_iomap(np, 0); 4253 adev->i2o_reg = of_iomap(np, 0);
4254 if (!adev->i2o_reg) { 4254 if (!adev->i2o_reg) {
4255 pr_err("%s: failed to map I2O registers\n", __func__); 4255 pr_err("%s: failed to map I2O registers\n", __func__);
4256 of_node_put(np); 4256 of_node_put(np);
4257 ret = -EINVAL; 4257 ret = -EINVAL;
4258 goto err_req2; 4258 goto err_req2;
4259 } 4259 }
4260 of_node_put(np); 4260 of_node_put(np);
4261 /* Unmask 'CS FIFO Attention' interrupts and 4261 /* Unmask 'CS FIFO Attention' interrupts and
4262 * enable generating interrupts on errors 4262 * enable generating interrupts on errors
4263 */ 4263 */
4264 enable = (adev->id == PPC440SPE_DMA0_ID) ? 4264 enable = (adev->id == PPC440SPE_DMA0_ID) ?
4265 ~(I2O_IOPIM_P0SNE | I2O_IOPIM_P0EM) : 4265 ~(I2O_IOPIM_P0SNE | I2O_IOPIM_P0EM) :
4266 ~(I2O_IOPIM_P1SNE | I2O_IOPIM_P1EM); 4266 ~(I2O_IOPIM_P1SNE | I2O_IOPIM_P1EM);
4267 mask = ioread32(&adev->i2o_reg->iopim) & enable; 4267 mask = ioread32(&adev->i2o_reg->iopim) & enable;
4268 iowrite32(mask, &adev->i2o_reg->iopim); 4268 iowrite32(mask, &adev->i2o_reg->iopim);
4269 } 4269 }
4270 return 0; 4270 return 0;
4271 4271
4272 err_req2: 4272 err_req2:
4273 free_irq(adev->irq, chan); 4273 free_irq(adev->irq, chan);
4274 err_req1: 4274 err_req1:
4275 irq_dispose_mapping(adev->irq); 4275 irq_dispose_mapping(adev->irq);
4276 err_irq_map: 4276 err_irq_map:
4277 if (adev->err_irq > 0) { 4277 if (adev->err_irq > 0) {
4278 if (atomic_dec_and_test(&ppc440spe_adma_err_irq_ref)) 4278 if (atomic_dec_and_test(&ppc440spe_adma_err_irq_ref))
4279 irq_dispose_mapping(adev->err_irq); 4279 irq_dispose_mapping(adev->err_irq);
4280 } 4280 }
4281 return ret; 4281 return ret;
4282 } 4282 }
4283 4283
4284 static void ppc440spe_adma_release_irqs(struct ppc440spe_adma_device *adev, 4284 static void ppc440spe_adma_release_irqs(struct ppc440spe_adma_device *adev,
4285 struct ppc440spe_adma_chan *chan) 4285 struct ppc440spe_adma_chan *chan)
4286 { 4286 {
4287 u32 mask, disable; 4287 u32 mask, disable;
4288 4288
4289 if (adev->id == PPC440SPE_XOR_ID) { 4289 if (adev->id == PPC440SPE_XOR_ID) {
4290 /* disable XOR engine interrupts */ 4290 /* disable XOR engine interrupts */
4291 mask = ioread32be(&adev->xor_reg->ier); 4291 mask = ioread32be(&adev->xor_reg->ier);
4292 mask &= ~(XOR_IE_CBCIE_BIT | XOR_IE_ICBIE_BIT | 4292 mask &= ~(XOR_IE_CBCIE_BIT | XOR_IE_ICBIE_BIT |
4293 XOR_IE_ICIE_BIT | XOR_IE_RPTIE_BIT); 4293 XOR_IE_ICIE_BIT | XOR_IE_RPTIE_BIT);
4294 iowrite32be(mask, &adev->xor_reg->ier); 4294 iowrite32be(mask, &adev->xor_reg->ier);
4295 } else { 4295 } else {
4296 /* disable DMAx engine interrupts */ 4296 /* disable DMAx engine interrupts */
4297 disable = (adev->id == PPC440SPE_DMA0_ID) ? 4297 disable = (adev->id == PPC440SPE_DMA0_ID) ?
4298 (I2O_IOPIM_P0SNE | I2O_IOPIM_P0EM) : 4298 (I2O_IOPIM_P0SNE | I2O_IOPIM_P0EM) :
4299 (I2O_IOPIM_P1SNE | I2O_IOPIM_P1EM); 4299 (I2O_IOPIM_P1SNE | I2O_IOPIM_P1EM);
4300 mask = ioread32(&adev->i2o_reg->iopim) | disable; 4300 mask = ioread32(&adev->i2o_reg->iopim) | disable;
4301 iowrite32(mask, &adev->i2o_reg->iopim); 4301 iowrite32(mask, &adev->i2o_reg->iopim);
4302 } 4302 }
4303 free_irq(adev->irq, chan); 4303 free_irq(adev->irq, chan);
4304 irq_dispose_mapping(adev->irq); 4304 irq_dispose_mapping(adev->irq);
4305 if (adev->err_irq > 0) { 4305 if (adev->err_irq > 0) {
4306 free_irq(adev->err_irq, chan); 4306 free_irq(adev->err_irq, chan);
4307 if (atomic_dec_and_test(&ppc440spe_adma_err_irq_ref)) { 4307 if (atomic_dec_and_test(&ppc440spe_adma_err_irq_ref)) {
4308 irq_dispose_mapping(adev->err_irq); 4308 irq_dispose_mapping(adev->err_irq);
4309 iounmap(adev->i2o_reg); 4309 iounmap(adev->i2o_reg);
4310 } 4310 }
4311 } 4311 }
4312 } 4312 }
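Both ppc440spe_adma_setup_irqs() and ppc440spe_adma_release_irqs() drive the I2O interrupt mask register the same way: iopim is a mask register, so clearing the P0SNE/P0EM (or P1SNE/P1EM) bits unmasks the 'CS FIFO Attention' and error interrupts, and setting them masks those sources again. A toy model of that read-modify-write, with made-up bit positions (the real I2O_IOPIM_* values live in the driver headers):

    #include <stdint.h>
    #include <stdio.h>

    /* iopim is an interrupt *mask* register: a cleared bit means the source
     * is unmasked (may raise an interrupt), a set bit means it is masked.
     * The bit positions below are illustrative only. */
    #define IOPIM_P0SNE     (1u << 3)
    #define IOPIM_P0EM      (1u << 7)

    static uint32_t unmask_dma0(uint32_t iopim)
    {
            return iopim & ~(IOPIM_P0SNE | IOPIM_P0EM);     /* setup_irqs path   */
    }

    static uint32_t mask_dma0(uint32_t iopim)
    {
            return iopim | (IOPIM_P0SNE | IOPIM_P0EM);      /* release_irqs path */
    }

    int main(void)
    {
            uint32_t reg = 0xffffffffu;             /* everything masked      */

            reg = unmask_dma0(reg);
            printf("after unmask: %#x\n", reg);     /* bits 3 and 7 now clear */
            reg = mask_dma0(reg);
            printf("after mask:   %#x\n", reg);     /* fully masked again     */
            return 0;
    }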
4313 4313
4314 /** 4314 /**
4315 * ppc440spe_adma_probe - probe the asynch device 4315 * ppc440spe_adma_probe - probe the asynch device
4316 */ 4316 */
4317 static int ppc440spe_adma_probe(struct platform_device *ofdev) 4317 static int ppc440spe_adma_probe(struct platform_device *ofdev)
4318 { 4318 {
4319 struct device_node *np = ofdev->dev.of_node; 4319 struct device_node *np = ofdev->dev.of_node;
4320 struct resource res; 4320 struct resource res;
4321 struct ppc440spe_adma_device *adev; 4321 struct ppc440spe_adma_device *adev;
4322 struct ppc440spe_adma_chan *chan; 4322 struct ppc440spe_adma_chan *chan;
4323 struct ppc_dma_chan_ref *ref, *_ref; 4323 struct ppc_dma_chan_ref *ref, *_ref;
4324 int ret = 0, initcode = PPC_ADMA_INIT_OK; 4324 int ret = 0, initcode = PPC_ADMA_INIT_OK;
4325 const u32 *idx; 4325 const u32 *idx;
4326 int len; 4326 int len;
4327 void *regs; 4327 void *regs;
4328 u32 id, pool_size; 4328 u32 id, pool_size;
4329 4329
4330 if (of_device_is_compatible(np, "amcc,xor-accelerator")) { 4330 if (of_device_is_compatible(np, "amcc,xor-accelerator")) {
4331 id = PPC440SPE_XOR_ID; 4331 id = PPC440SPE_XOR_ID;
4332 /* As far as the XOR engine is concerned, it does not 4332 /* As far as the XOR engine is concerned, it does not
4333 * use FIFOs but a linked list. So there is no dependency 4333 * use FIFOs but a linked list. So there is no dependency
4334 * between the pool size to allocate and the engine configuration. 4334 * between the pool size to allocate and the engine configuration.
4335 */ 4335 */
4336 pool_size = PAGE_SIZE << 1; 4336 pool_size = PAGE_SIZE << 1;
4337 } else { 4337 } else {
4338 /* it is DMA0 or DMA1 */ 4338 /* it is DMA0 or DMA1 */
4339 idx = of_get_property(np, "cell-index", &len); 4339 idx = of_get_property(np, "cell-index", &len);
4340 if (!idx || (len != sizeof(u32))) { 4340 if (!idx || (len != sizeof(u32))) {
4341 dev_err(&ofdev->dev, "Device node %s has missing " 4341 dev_err(&ofdev->dev, "Device node %s has missing "
4342 "or invalid cell-index property\n", 4342 "or invalid cell-index property\n",
4343 np->full_name); 4343 np->full_name);
4344 return -EINVAL; 4344 return -EINVAL;
4345 } 4345 }
4346 id = *idx; 4346 id = *idx;
4347 /* DMA0,1 engines use a FIFO to maintain CDBs, so we 4347 /* DMA0,1 engines use a FIFO to maintain CDBs, so we
4348 * should size the pool according to the size of this 4348 * should size the pool according to the size of this
4349 * FIFO. Thus, the pool size depends on the FIFO depth: 4349 * FIFO. Thus, the pool size depends on the FIFO depth:
4350 * the pool must provide as many CDBs as the FIFO can 4350 * the pool must provide as many CDBs as the FIFO can
4351 * hold pointers to. 4351 * hold pointers to.
4352 * That is 4352 * That is
4353 * CDB size = 32B; 4353 * CDB size = 32B;
4354 * CDBs number = (DMA0_FIFO_SIZE >> 3); 4354 * CDBs number = (DMA0_FIFO_SIZE >> 3);
4355 * Pool size = CDBs number * CDB size = 4355 * Pool size = CDBs number * CDB size =
4356 * = (DMA0_FIFO_SIZE >> 3) << 5 = DMA0_FIFO_SIZE << 2. 4356 * = (DMA0_FIFO_SIZE >> 3) << 5 = DMA0_FIFO_SIZE << 2.
4357 */ 4357 */
4358 pool_size = (id == PPC440SPE_DMA0_ID) ? 4358 pool_size = (id == PPC440SPE_DMA0_ID) ?
4359 DMA0_FIFO_SIZE : DMA1_FIFO_SIZE; 4359 DMA0_FIFO_SIZE : DMA1_FIFO_SIZE;
4360 pool_size <<= 2; 4360 pool_size <<= 2;
4361 } 4361 }
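    /*
     * Worked example for the DMA0/DMA1 pool sizing above, assuming for
     * illustration a 4 KiB CDB FIFO: such a FIFO holds 4096 >> 3 = 512
     * CDB pointers, so the pool has to carry 512 CDBs of 32 bytes each,
     * i.e. 512 << 5 = 16 KiB = DMA0_FIFO_SIZE << 2, which is exactly
     * what the "pool_size <<= 2" shift computes.
     */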
4362 4362
4363 if (of_address_to_resource(np, 0, &res)) { 4363 if (of_address_to_resource(np, 0, &res)) {
4364 dev_err(&ofdev->dev, "failed to get memory resource\n"); 4364 dev_err(&ofdev->dev, "failed to get memory resource\n");
4365 initcode = PPC_ADMA_INIT_MEMRES; 4365 initcode = PPC_ADMA_INIT_MEMRES;
4366 ret = -ENODEV; 4366 ret = -ENODEV;
4367 goto out; 4367 goto out;
4368 } 4368 }
4369 4369
4370 if (!request_mem_region(res.start, resource_size(&res), 4370 if (!request_mem_region(res.start, resource_size(&res),
4371 dev_driver_string(&ofdev->dev))) { 4371 dev_driver_string(&ofdev->dev))) {
4372 dev_err(&ofdev->dev, "failed to request memory region %pR\n", 4372 dev_err(&ofdev->dev, "failed to request memory region %pR\n",
4373 &res); 4373 &res);
4374 initcode = PPC_ADMA_INIT_MEMREG; 4374 initcode = PPC_ADMA_INIT_MEMREG;
4375 ret = -EBUSY; 4375 ret = -EBUSY;
4376 goto out; 4376 goto out;
4377 } 4377 }
4378 4378
4379 /* create a device */ 4379 /* create a device */
4380 adev = kzalloc(sizeof(*adev), GFP_KERNEL); 4380 adev = kzalloc(sizeof(*adev), GFP_KERNEL);
4381 if (!adev) { 4381 if (!adev) {
4382 dev_err(&ofdev->dev, "failed to allocate device\n"); 4382 dev_err(&ofdev->dev, "failed to allocate device\n");
4383 initcode = PPC_ADMA_INIT_ALLOC; 4383 initcode = PPC_ADMA_INIT_ALLOC;
4384 ret = -ENOMEM; 4384 ret = -ENOMEM;
4385 goto err_adev_alloc; 4385 goto err_adev_alloc;
4386 } 4386 }
4387 4387
4388 adev->id = id; 4388 adev->id = id;
4389 adev->pool_size = pool_size; 4389 adev->pool_size = pool_size;
4390 /* allocate coherent memory for hardware descriptors */ 4390 /* allocate coherent memory for hardware descriptors */
4391 adev->dma_desc_pool_virt = dma_alloc_coherent(&ofdev->dev, 4391 adev->dma_desc_pool_virt = dma_alloc_coherent(&ofdev->dev,
4392 adev->pool_size, &adev->dma_desc_pool, 4392 adev->pool_size, &adev->dma_desc_pool,
4393 GFP_KERNEL); 4393 GFP_KERNEL);
4394 if (adev->dma_desc_pool_virt == NULL) { 4394 if (adev->dma_desc_pool_virt == NULL) {
4395 dev_err(&ofdev->dev, "failed to allocate %d bytes of coherent " 4395 dev_err(&ofdev->dev, "failed to allocate %d bytes of coherent "
4396 "memory for hardware descriptors\n", 4396 "memory for hardware descriptors\n",
4397 adev->pool_size); 4397 adev->pool_size);
4398 initcode = PPC_ADMA_INIT_COHERENT; 4398 initcode = PPC_ADMA_INIT_COHERENT;
4399 ret = -ENOMEM; 4399 ret = -ENOMEM;
4400 goto err_dma_alloc; 4400 goto err_dma_alloc;
4401 } 4401 }
4402 dev_dbg(&ofdev->dev, "allocated descriptor pool virt 0x%p phys 0x%llx\n", 4402 dev_dbg(&ofdev->dev, "allocated descriptor pool virt 0x%p phys 0x%llx\n",
4403 adev->dma_desc_pool_virt, (u64)adev->dma_desc_pool); 4403 adev->dma_desc_pool_virt, (u64)adev->dma_desc_pool);
4404 4404
4405 regs = ioremap(res.start, resource_size(&res)); 4405 regs = ioremap(res.start, resource_size(&res));
4406 if (!regs) { 4406 if (!regs) {
4407 dev_err(&ofdev->dev, "failed to ioremap regs!\n"); 4407 dev_err(&ofdev->dev, "failed to ioremap regs!\n");
4408 goto err_regs_alloc; 4408 goto err_regs_alloc;
4409 } 4409 }
4410 4410
4411 if (adev->id == PPC440SPE_XOR_ID) { 4411 if (adev->id == PPC440SPE_XOR_ID) {
4412 adev->xor_reg = regs; 4412 adev->xor_reg = regs;
4413 /* Reset XOR */ 4413 /* Reset XOR */
4414 iowrite32be(XOR_CRSR_XASR_BIT, &adev->xor_reg->crsr); 4414 iowrite32be(XOR_CRSR_XASR_BIT, &adev->xor_reg->crsr);
4415 iowrite32be(XOR_CRSR_64BA_BIT, &adev->xor_reg->crrr); 4415 iowrite32be(XOR_CRSR_64BA_BIT, &adev->xor_reg->crrr);
4416 } else { 4416 } else {
4417 size_t fifo_size = (adev->id == PPC440SPE_DMA0_ID) ? 4417 size_t fifo_size = (adev->id == PPC440SPE_DMA0_ID) ?
4418 DMA0_FIFO_SIZE : DMA1_FIFO_SIZE; 4418 DMA0_FIFO_SIZE : DMA1_FIFO_SIZE;
4419 adev->dma_reg = regs; 4419 adev->dma_reg = regs;
4420 /* DMAx_FIFO_SIZE is defined in bytes, 4420 /* DMAx_FIFO_SIZE is defined in bytes,
4421 * <fsiz> is defined as a number of CDB pointers (8 bytes each). 4421 * <fsiz> is defined as a number of CDB pointers (8 bytes each).
4422 * DMA FIFO Length = CSlength + CPlength, where 4422 * DMA FIFO Length = CSlength + CPlength, where
4423 * CSlength = CPlength = (fsiz + 1) * 8. 4423 * CSlength = CPlength = (fsiz + 1) * 8.
4424 */ 4424 */
4425 iowrite32(DMA_FIFO_ENABLE | ((fifo_size >> 3) - 2), 4425 iowrite32(DMA_FIFO_ENABLE | ((fifo_size >> 3) - 2),
4426 &adev->dma_reg->fsiz); 4426 &adev->dma_reg->fsiz);
4427 /* Configure DMA engine */ 4427 /* Configure DMA engine */
4428 iowrite32(DMA_CFG_DXEPR_HP | DMA_CFG_DFMPP_HP | DMA_CFG_FALGN, 4428 iowrite32(DMA_CFG_DXEPR_HP | DMA_CFG_DFMPP_HP | DMA_CFG_FALGN,
4429 &adev->dma_reg->cfg); 4429 &adev->dma_reg->cfg);
4430 /* Clear Status */ 4430 /* Clear Status */
4431 iowrite32(~0, &adev->dma_reg->dsts); 4431 iowrite32(~0, &adev->dma_reg->dsts);
4432 } 4432 }
4433 4433
4434 adev->dev = &ofdev->dev; 4434 adev->dev = &ofdev->dev;
4435 adev->common.dev = &ofdev->dev; 4435 adev->common.dev = &ofdev->dev;
4436 INIT_LIST_HEAD(&adev->common.channels); 4436 INIT_LIST_HEAD(&adev->common.channels);
4437 platform_set_drvdata(ofdev, adev); 4437 platform_set_drvdata(ofdev, adev);
4438 4438
4439 /* create a channel */ 4439 /* create a channel */
4440 chan = kzalloc(sizeof(*chan), GFP_KERNEL); 4440 chan = kzalloc(sizeof(*chan), GFP_KERNEL);
4441 if (!chan) { 4441 if (!chan) {
4442 dev_err(&ofdev->dev, "can't allocate channel structure\n"); 4442 dev_err(&ofdev->dev, "can't allocate channel structure\n");
4443 initcode = PPC_ADMA_INIT_CHANNEL; 4443 initcode = PPC_ADMA_INIT_CHANNEL;
4444 ret = -ENOMEM; 4444 ret = -ENOMEM;
4445 goto err_chan_alloc; 4445 goto err_chan_alloc;
4446 } 4446 }
4447 4447
4448 spin_lock_init(&chan->lock); 4448 spin_lock_init(&chan->lock);
4449 INIT_LIST_HEAD(&chan->chain); 4449 INIT_LIST_HEAD(&chan->chain);
4450 INIT_LIST_HEAD(&chan->all_slots); 4450 INIT_LIST_HEAD(&chan->all_slots);
4451 chan->device = adev; 4451 chan->device = adev;
4452 chan->common.device = &adev->common; 4452 chan->common.device = &adev->common;
4453 dma_cookie_init(&chan->common); 4453 dma_cookie_init(&chan->common);
4454 list_add_tail(&chan->common.device_node, &adev->common.channels); 4454 list_add_tail(&chan->common.device_node, &adev->common.channels);
4455 tasklet_init(&chan->irq_tasklet, ppc440spe_adma_tasklet, 4455 tasklet_init(&chan->irq_tasklet, ppc440spe_adma_tasklet,
4456 (unsigned long)chan); 4456 (unsigned long)chan);
4457 4457
4458 /* allocate and map helper pages for async validation or 4458 /* allocate and map helper pages for async validation or
4459 * async_mult/async_sum_product operations on DMA0/1. 4459 * async_mult/async_sum_product operations on DMA0/1.
4460 */ 4460 */
4461 if (adev->id != PPC440SPE_XOR_ID) { 4461 if (adev->id != PPC440SPE_XOR_ID) {
4462 chan->pdest_page = alloc_page(GFP_KERNEL); 4462 chan->pdest_page = alloc_page(GFP_KERNEL);
4463 chan->qdest_page = alloc_page(GFP_KERNEL); 4463 chan->qdest_page = alloc_page(GFP_KERNEL);
4464 if (!chan->pdest_page || 4464 if (!chan->pdest_page ||
4465 !chan->qdest_page) { 4465 !chan->qdest_page) {
4466 if (chan->pdest_page) 4466 if (chan->pdest_page)
4467 __free_page(chan->pdest_page); 4467 __free_page(chan->pdest_page);
4468 if (chan->qdest_page) 4468 if (chan->qdest_page)
4469 __free_page(chan->qdest_page); 4469 __free_page(chan->qdest_page);
4470 ret = -ENOMEM; 4470 ret = -ENOMEM;
4471 goto err_page_alloc; 4471 goto err_page_alloc;
4472 } 4472 }
4473 chan->pdest = dma_map_page(&ofdev->dev, chan->pdest_page, 0, 4473 chan->pdest = dma_map_page(&ofdev->dev, chan->pdest_page, 0,
4474 PAGE_SIZE, DMA_BIDIRECTIONAL); 4474 PAGE_SIZE, DMA_BIDIRECTIONAL);
4475 chan->qdest = dma_map_page(&ofdev->dev, chan->qdest_page, 0, 4475 chan->qdest = dma_map_page(&ofdev->dev, chan->qdest_page, 0,
4476 PAGE_SIZE, DMA_BIDIRECTIONAL); 4476 PAGE_SIZE, DMA_BIDIRECTIONAL);
4477 } 4477 }
4478 4478
4479 ref = kmalloc(sizeof(*ref), GFP_KERNEL); 4479 ref = kmalloc(sizeof(*ref), GFP_KERNEL);
4480 if (ref) { 4480 if (ref) {
4481 ref->chan = &chan->common; 4481 ref->chan = &chan->common;
4482 INIT_LIST_HEAD(&ref->node); 4482 INIT_LIST_HEAD(&ref->node);
4483 list_add_tail(&ref->node, &ppc440spe_adma_chan_list); 4483 list_add_tail(&ref->node, &ppc440spe_adma_chan_list);
4484 } else { 4484 } else {
4485 dev_err(&ofdev->dev, "failed to allocate channel reference!\n"); 4485 dev_err(&ofdev->dev, "failed to allocate channel reference!\n");
4486 ret = -ENOMEM; 4486 ret = -ENOMEM;
4487 goto err_ref_alloc; 4487 goto err_ref_alloc;
4488 } 4488 }
4489 4489
4490 ret = ppc440spe_adma_setup_irqs(adev, chan, &initcode); 4490 ret = ppc440spe_adma_setup_irqs(adev, chan, &initcode);
4491 if (ret) 4491 if (ret)
4492 goto err_irq; 4492 goto err_irq;
4493 4493
4494 ppc440spe_adma_init_capabilities(adev); 4494 ppc440spe_adma_init_capabilities(adev);
4495 4495
4496 ret = dma_async_device_register(&adev->common); 4496 ret = dma_async_device_register(&adev->common);
4497 if (ret) { 4497 if (ret) {
4498 initcode = PPC_ADMA_INIT_REGISTER; 4498 initcode = PPC_ADMA_INIT_REGISTER;
4499 dev_err(&ofdev->dev, "failed to register dma device\n"); 4499 dev_err(&ofdev->dev, "failed to register dma device\n");
4500 goto err_dev_reg; 4500 goto err_dev_reg;
4501 } 4501 }
4502 4502
4503 goto out; 4503 goto out;
4504 4504
4505 err_dev_reg: 4505 err_dev_reg:
4506 ppc440spe_adma_release_irqs(adev, chan); 4506 ppc440spe_adma_release_irqs(adev, chan);
4507 err_irq: 4507 err_irq:
4508 list_for_each_entry_safe(ref, _ref, &ppc440spe_adma_chan_list, node) { 4508 list_for_each_entry_safe(ref, _ref, &ppc440spe_adma_chan_list, node) {
4509 if (chan == to_ppc440spe_adma_chan(ref->chan)) { 4509 if (chan == to_ppc440spe_adma_chan(ref->chan)) {
4510 list_del(&ref->node); 4510 list_del(&ref->node);
4511 kfree(ref); 4511 kfree(ref);
4512 } 4512 }
4513 } 4513 }
4514 err_ref_alloc: 4514 err_ref_alloc:
4515 if (adev->id != PPC440SPE_XOR_ID) { 4515 if (adev->id != PPC440SPE_XOR_ID) {
4516 dma_unmap_page(&ofdev->dev, chan->pdest, 4516 dma_unmap_page(&ofdev->dev, chan->pdest,
4517 PAGE_SIZE, DMA_BIDIRECTIONAL); 4517 PAGE_SIZE, DMA_BIDIRECTIONAL);
4518 dma_unmap_page(&ofdev->dev, chan->qdest, 4518 dma_unmap_page(&ofdev->dev, chan->qdest,
4519 PAGE_SIZE, DMA_BIDIRECTIONAL); 4519 PAGE_SIZE, DMA_BIDIRECTIONAL);
4520 __free_page(chan->pdest_page); 4520 __free_page(chan->pdest_page);
4521 __free_page(chan->qdest_page); 4521 __free_page(chan->qdest_page);
4522 } 4522 }
4523 err_page_alloc: 4523 err_page_alloc:
4524 kfree(chan); 4524 kfree(chan);
4525 err_chan_alloc: 4525 err_chan_alloc:
4526 if (adev->id == PPC440SPE_XOR_ID) 4526 if (adev->id == PPC440SPE_XOR_ID)
4527 iounmap(adev->xor_reg); 4527 iounmap(adev->xor_reg);
4528 else 4528 else
4529 iounmap(adev->dma_reg); 4529 iounmap(adev->dma_reg);
4530 err_regs_alloc: 4530 err_regs_alloc:
4531 dma_free_coherent(adev->dev, adev->pool_size, 4531 dma_free_coherent(adev->dev, adev->pool_size,
4532 adev->dma_desc_pool_virt, 4532 adev->dma_desc_pool_virt,
4533 adev->dma_desc_pool); 4533 adev->dma_desc_pool);
4534 err_dma_alloc: 4534 err_dma_alloc:
4535 kfree(adev); 4535 kfree(adev);
4536 err_adev_alloc: 4536 err_adev_alloc:
4537 release_mem_region(res.start, resource_size(&res)); 4537 release_mem_region(res.start, resource_size(&res));
4538 out: 4538 out:
4539 if (id < PPC440SPE_ADMA_ENGINES_NUM) 4539 if (id < PPC440SPE_ADMA_ENGINES_NUM)
4540 ppc440spe_adma_devices[id] = initcode; 4540 ppc440spe_adma_devices[id] = initcode;
4541 4541
4542 return ret; 4542 return ret;
4543 } 4543 }
4544 4544
4545 /** 4545 /**
4546 * ppc440spe_adma_remove - remove the asynchronous device 4546 * ppc440spe_adma_remove - remove the asynchronous device
4547 */ 4547 */
4548 static int ppc440spe_adma_remove(struct platform_device *ofdev) 4548 static int ppc440spe_adma_remove(struct platform_device *ofdev)
4549 { 4549 {
4550 struct ppc440spe_adma_device *adev = platform_get_drvdata(ofdev); 4550 struct ppc440spe_adma_device *adev = platform_get_drvdata(ofdev);
4551 struct device_node *np = ofdev->dev.of_node; 4551 struct device_node *np = ofdev->dev.of_node;
4552 struct resource res; 4552 struct resource res;
4553 struct dma_chan *chan, *_chan; 4553 struct dma_chan *chan, *_chan;
4554 struct ppc_dma_chan_ref *ref, *_ref; 4554 struct ppc_dma_chan_ref *ref, *_ref;
4555 struct ppc440spe_adma_chan *ppc440spe_chan; 4555 struct ppc440spe_adma_chan *ppc440spe_chan;
4556 4556
4557 if (adev->id < PPC440SPE_ADMA_ENGINES_NUM) 4557 if (adev->id < PPC440SPE_ADMA_ENGINES_NUM)
4558 ppc440spe_adma_devices[adev->id] = -1; 4558 ppc440spe_adma_devices[adev->id] = -1;
4559 4559
4560 dma_async_device_unregister(&adev->common); 4560 dma_async_device_unregister(&adev->common);
4561 4561
4562 list_for_each_entry_safe(chan, _chan, &adev->common.channels, 4562 list_for_each_entry_safe(chan, _chan, &adev->common.channels,
4563 device_node) { 4563 device_node) {
4564 ppc440spe_chan = to_ppc440spe_adma_chan(chan); 4564 ppc440spe_chan = to_ppc440spe_adma_chan(chan);
4565 ppc440spe_adma_release_irqs(adev, ppc440spe_chan); 4565 ppc440spe_adma_release_irqs(adev, ppc440spe_chan);
4566 tasklet_kill(&ppc440spe_chan->irq_tasklet); 4566 tasklet_kill(&ppc440spe_chan->irq_tasklet);
4567 if (adev->id != PPC440SPE_XOR_ID) { 4567 if (adev->id != PPC440SPE_XOR_ID) {
4568 dma_unmap_page(&ofdev->dev, ppc440spe_chan->pdest, 4568 dma_unmap_page(&ofdev->dev, ppc440spe_chan->pdest,
4569 PAGE_SIZE, DMA_BIDIRECTIONAL); 4569 PAGE_SIZE, DMA_BIDIRECTIONAL);
4570 dma_unmap_page(&ofdev->dev, ppc440spe_chan->qdest, 4570 dma_unmap_page(&ofdev->dev, ppc440spe_chan->qdest,
4571 PAGE_SIZE, DMA_BIDIRECTIONAL); 4571 PAGE_SIZE, DMA_BIDIRECTIONAL);
4572 __free_page(ppc440spe_chan->pdest_page); 4572 __free_page(ppc440spe_chan->pdest_page);
4573 __free_page(ppc440spe_chan->qdest_page); 4573 __free_page(ppc440spe_chan->qdest_page);
4574 } 4574 }
4575 list_for_each_entry_safe(ref, _ref, &ppc440spe_adma_chan_list, 4575 list_for_each_entry_safe(ref, _ref, &ppc440spe_adma_chan_list,
4576 node) { 4576 node) {
4577 if (ppc440spe_chan == 4577 if (ppc440spe_chan ==
4578 to_ppc440spe_adma_chan(ref->chan)) { 4578 to_ppc440spe_adma_chan(ref->chan)) {
4579 list_del(&ref->node); 4579 list_del(&ref->node);
4580 kfree(ref); 4580 kfree(ref);
4581 } 4581 }
4582 } 4582 }
4583 list_del(&chan->device_node); 4583 list_del(&chan->device_node);
4584 kfree(ppc440spe_chan); 4584 kfree(ppc440spe_chan);
4585 } 4585 }
4586 4586
4587 dma_free_coherent(adev->dev, adev->pool_size, 4587 dma_free_coherent(adev->dev, adev->pool_size,
4588 adev->dma_desc_pool_virt, adev->dma_desc_pool); 4588 adev->dma_desc_pool_virt, adev->dma_desc_pool);
4589 if (adev->id == PPC440SPE_XOR_ID) 4589 if (adev->id == PPC440SPE_XOR_ID)
4590 iounmap(adev->xor_reg); 4590 iounmap(adev->xor_reg);
4591 else 4591 else
4592 iounmap(adev->dma_reg); 4592 iounmap(adev->dma_reg);
4593 of_address_to_resource(np, 0, &res); 4593 of_address_to_resource(np, 0, &res);
4594 release_mem_region(res.start, resource_size(&res)); 4594 release_mem_region(res.start, resource_size(&res));
4595 kfree(adev); 4595 kfree(adev);
4596 return 0; 4596 return 0;
4597 } 4597 }
4598 4598
4599 /* 4599 /*
4600 * /sys driver interface to enable h/w RAID-6 capabilities 4600 * /sys driver interface to enable h/w RAID-6 capabilities
4601 * Files created in e.g. the /sys/devices/plb.0/400100100.dma0/driver/ 4601 * Files created in e.g. the /sys/devices/plb.0/400100100.dma0/driver/
4602 * directory are "devices", "enable" and "poly". 4602 * directory are "devices", "enable" and "poly".
4603 * "devices" shows available engines. 4603 * "devices" shows available engines.
4604 * "enable" is used to enable RAID-6 capabilities or to check 4604 * "enable" is used to enable RAID-6 capabilities or to check
4605 * whether they have been activated. 4605 * whether they have been activated.
4606 * "poly" allows setting/checking used polynomial (for PPC440SPe only). 4606 * "poly" allows setting/checking used polynomial (for PPC440SPe only).
4607 */ 4607 */
4608 4608
4609 static ssize_t show_ppc440spe_devices(struct device_driver *dev, char *buf) 4609 static ssize_t show_ppc440spe_devices(struct device_driver *dev, char *buf)
4610 { 4610 {
4611 ssize_t size = 0; 4611 ssize_t size = 0;
4612 int i; 4612 int i;
4613 4613
4614 for (i = 0; i < PPC440SPE_ADMA_ENGINES_NUM; i++) { 4614 for (i = 0; i < PPC440SPE_ADMA_ENGINES_NUM; i++) {
4615 if (ppc440spe_adma_devices[i] == -1) 4615 if (ppc440spe_adma_devices[i] == -1)
4616 continue; 4616 continue;
4617 size += snprintf(buf + size, PAGE_SIZE - size, 4617 size += snprintf(buf + size, PAGE_SIZE - size,
4618 "PPC440SP(E)-ADMA.%d: %s\n", i, 4618 "PPC440SP(E)-ADMA.%d: %s\n", i,
4619 ppc_adma_errors[ppc440spe_adma_devices[i]]); 4619 ppc_adma_errors[ppc440spe_adma_devices[i]]);
4620 } 4620 }
4621 return size; 4621 return size;
4622 } 4622 }
4623 4623
4624 static ssize_t show_ppc440spe_r6enable(struct device_driver *dev, char *buf) 4624 static ssize_t show_ppc440spe_r6enable(struct device_driver *dev, char *buf)
4625 { 4625 {
4626 return snprintf(buf, PAGE_SIZE, 4626 return snprintf(buf, PAGE_SIZE,
4627 "PPC440SP(e) RAID-6 capabilities are %sABLED.\n", 4627 "PPC440SP(e) RAID-6 capabilities are %sABLED.\n",
4628 ppc440spe_r6_enabled ? "EN" : "DIS"); 4628 ppc440spe_r6_enabled ? "EN" : "DIS");
4629 } 4629 }
4630 4630
4631 static ssize_t store_ppc440spe_r6enable(struct device_driver *dev, 4631 static ssize_t store_ppc440spe_r6enable(struct device_driver *dev,
4632 const char *buf, size_t count) 4632 const char *buf, size_t count)
4633 { 4633 {
4634 unsigned long val; 4634 unsigned long val;
4635 4635
4636 if (!count || count > 11) 4636 if (!count || count > 11)
4637 return -EINVAL; 4637 return -EINVAL;
4638 4638
4639 if (!ppc440spe_r6_tchan) 4639 if (!ppc440spe_r6_tchan)
4640 return -EFAULT; 4640 return -EFAULT;
4641 4641
4642 /* Write a key */ 4642 /* Write a key */
4643 sscanf(buf, "%lx", &val); 4643 sscanf(buf, "%lx", &val);
4644 dcr_write(ppc440spe_mq_dcr_host, DCRN_MQ0_XORBA, val); 4644 dcr_write(ppc440spe_mq_dcr_host, DCRN_MQ0_XORBA, val);
4645 isync(); 4645 isync();
4646 4646
4647 /* Verify whether it really works now */ 4647 /* Verify whether it really works now */
4648 if (ppc440spe_test_raid6(ppc440spe_r6_tchan) == 0) { 4648 if (ppc440spe_test_raid6(ppc440spe_r6_tchan) == 0) {
4649 pr_info("PPC440SP(e) RAID-6 has been activated " 4649 pr_info("PPC440SP(e) RAID-6 has been activated "
4650 "successfully\n"); 4650 "successfully\n");
4651 ppc440spe_r6_enabled = 1; 4651 ppc440spe_r6_enabled = 1;
4652 } else { 4652 } else {
4653 pr_info("PPC440SP(e) RAID-6 hasn't been activated!" 4653 pr_info("PPC440SP(e) RAID-6 hasn't been activated!"
4654 " Error key ?\n"); 4654 " Error key ?\n");
4655 ppc440spe_r6_enabled = 0; 4655 ppc440spe_r6_enabled = 0;
4656 } 4656 }
4657 return count; 4657 return count;
4658 } 4658 }
4659 4659
4660 static ssize_t show_ppc440spe_r6poly(struct device_driver *dev, char *buf) 4660 static ssize_t show_ppc440spe_r6poly(struct device_driver *dev, char *buf)
4661 { 4661 {
4662 ssize_t size = 0; 4662 ssize_t size = 0;
4663 u32 reg; 4663 u32 reg;
4664 4664
4665 #ifdef CONFIG_440SP 4665 #ifdef CONFIG_440SP
4666 /* 440SP has fixed polynomial */ 4666 /* 440SP has fixed polynomial */
4667 reg = 0x4d; 4667 reg = 0x4d;
4668 #else 4668 #else
4669 reg = dcr_read(ppc440spe_mq_dcr_host, DCRN_MQ0_CFBHL); 4669 reg = dcr_read(ppc440spe_mq_dcr_host, DCRN_MQ0_CFBHL);
4670 reg >>= MQ0_CFBHL_POLY; 4670 reg >>= MQ0_CFBHL_POLY;
4671 reg &= 0xFF; 4671 reg &= 0xFF;
4672 #endif 4672 #endif
4673 4673
4674 size = snprintf(buf, PAGE_SIZE, "PPC440SP(e) RAID-6 driver " 4674 size = snprintf(buf, PAGE_SIZE, "PPC440SP(e) RAID-6 driver "
4675 "uses 0x1%02x polynomial.\n", reg); 4675 "uses 0x1%02x polynomial.\n", reg);
4676 return size; 4676 return size;
4677 } 4677 }
4678 4678
4679 static ssize_t store_ppc440spe_r6poly(struct device_driver *dev, 4679 static ssize_t store_ppc440spe_r6poly(struct device_driver *dev,
4680 const char *buf, size_t count) 4680 const char *buf, size_t count)
4681 { 4681 {
4682 unsigned long reg, val; 4682 unsigned long reg, val;
4683 4683
4684 #ifdef CONFIG_440SP 4684 #ifdef CONFIG_440SP
4685 /* 440SP uses default 0x14D polynomial only */ 4685 /* 440SP uses default 0x14D polynomial only */
4686 return -EINVAL; 4686 return -EINVAL;
4687 #endif 4687 #endif
4688 4688
4689 if (!count || count > 6) 4689 if (!count || count > 6)
4690 return -EINVAL; 4690 return -EINVAL;
4691 4691
4692 /* e.g., 0x14D or 0x11D */ 4692 /* e.g., 0x14D or 0x11D */
4693 sscanf(buf, "%lx", &val); 4693 sscanf(buf, "%lx", &val);
4694 4694
4695 if (val & ~0x1FF) 4695 if (val & ~0x1FF)
4696 return -EINVAL; 4696 return -EINVAL;
4697 4697
4698 val &= 0xFF; 4698 val &= 0xFF;
4699 reg = dcr_read(ppc440spe_mq_dcr_host, DCRN_MQ0_CFBHL); 4699 reg = dcr_read(ppc440spe_mq_dcr_host, DCRN_MQ0_CFBHL);
4700 reg &= ~(0xFF << MQ0_CFBHL_POLY); 4700 reg &= ~(0xFF << MQ0_CFBHL_POLY);
4701 reg |= val << MQ0_CFBHL_POLY; 4701 reg |= val << MQ0_CFBHL_POLY;
4702 dcr_write(ppc440spe_mq_dcr_host, DCRN_MQ0_CFBHL, reg); 4702 dcr_write(ppc440spe_mq_dcr_host, DCRN_MQ0_CFBHL, reg);
4703 4703
4704 return count; 4704 return count;
4705 } 4705 }
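The store/show pair above keeps only the low eight bits of the 9-bit Galois polynomial; the leading 1 of values such as 0x14D or 0x11D is implied, which is why show_ppc440spe_r6poly() prints it back with a hard-coded "0x1" prefix. A minimal stand-alone sketch of that packing follows. It is editorial, not part of this driver, and MQ0_CFBHL_POLY is not defined in this hunk, so an assumed shift of 8 is used purely for illustration.

	/* Editorial sketch of the CFBHL polynomial field packing; assumes
	 * MQ0_CFBHL_POLY == 8 for illustration only. */
	#include <stdio.h>

	#define MQ0_CFBHL_POLY	8	/* assumed shift, illustration only */

	static unsigned long set_poly(unsigned long reg, unsigned long val)
	{
		if (val & ~0x1FF)	/* same range check as the store hook */
			return reg;
		val &= 0xFF;		/* leading 1 of 0x1xx is implied */
		reg &= ~(0xFFUL << MQ0_CFBHL_POLY);
		reg |= val << MQ0_CFBHL_POLY;
		return reg;
	}

	int main(void)
	{
		unsigned long reg = set_poly(0, 0x11D);
		unsigned long poly = (reg >> MQ0_CFBHL_POLY) & 0xFF;

		/* Mirrors show_ppc440spe_r6poly(): prints "... uses 0x11d polynomial." */
		printf("PPC440SP(e) RAID-6 driver uses 0x1%02lx polynomial.\n", poly);
		return 0;
	}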
4706 4706
4707 static DRIVER_ATTR(devices, S_IRUGO, show_ppc440spe_devices, NULL); 4707 static DRIVER_ATTR(devices, S_IRUGO, show_ppc440spe_devices, NULL);
4708 static DRIVER_ATTR(enable, S_IRUGO | S_IWUSR, show_ppc440spe_r6enable, 4708 static DRIVER_ATTR(enable, S_IRUGO | S_IWUSR, show_ppc440spe_r6enable,
4709 store_ppc440spe_r6enable); 4709 store_ppc440spe_r6enable);
4710 static DRIVER_ATTR(poly, S_IRUGO | S_IWUSR, show_ppc440spe_r6poly, 4710 static DRIVER_ATTR(poly, S_IRUGO | S_IWUSR, show_ppc440spe_r6poly,
4711 store_ppc440spe_r6poly); 4711 store_ppc440spe_r6poly);
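The driver attributes declared above can be exercised from user space with plain file I/O. The sketch below is editorial rather than part of this patch: the sysfs directory is an assumption following the pattern given in the comment block above (e.g. /sys/devices/plb.0/400100100.dma0/driver/), and the key written to "enable" is a board-specific placeholder.

	/* Editorial user-space sketch; paths and key value are assumptions. */
	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	#define ADMA_SYSFS_DIR "/sys/devices/plb.0/400100100.dma0/driver"

	int main(void)
	{
		char buf[256];
		ssize_t n;
		int fd;

		/* "devices" lists the probed ADMA engines and their init status. */
		fd = open(ADMA_SYSFS_DIR "/devices", O_RDONLY);
		if (fd >= 0) {
			n = read(fd, buf, sizeof(buf) - 1);
			if (n > 0) {
				buf[n] = '\0';
				fputs(buf, stdout);
			}
			close(fd);
		}

		/* Writing a hex key to "enable" asks the driver to turn on h/w RAID-6. */
		fd = open(ADMA_SYSFS_DIR "/enable", O_WRONLY);
		if (fd >= 0) {
			const char *key = "0x0";	/* placeholder, board specific */

			if (write(fd, key, strlen(key)) < 0)
				perror("enable");
			close(fd);
		}
		return 0;
	}

Note that a write to "enable" only has an effect once a test channel has been probed (ppc440spe_r6_tchan); otherwise the store hook above returns -EFAULT.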
4712 4712
4713 /* 4713 /*
4714 * Common initialisation for RAID engines; allocate memory for 4714 * Common initialisation for RAID engines; allocate memory for
4715 * DMAx FIFOs, perform configuration common for all DMA engines. 4715 * DMAx FIFOs, perform configuration common for all DMA engines.
4716 * Further DMA engine specific configuration is done at probe time. 4716 * Further DMA engine specific configuration is done at probe time.
4717 */ 4717 */
4718 static int ppc440spe_configure_raid_devices(void) 4718 static int ppc440spe_configure_raid_devices(void)
4719 { 4719 {
4720 struct device_node *np; 4720 struct device_node *np;
4721 struct resource i2o_res; 4721 struct resource i2o_res;
4722 struct i2o_regs __iomem *i2o_reg; 4722 struct i2o_regs __iomem *i2o_reg;
4723 dcr_host_t i2o_dcr_host; 4723 dcr_host_t i2o_dcr_host;
4724 unsigned int dcr_base, dcr_len; 4724 unsigned int dcr_base, dcr_len;
4725 int i, ret; 4725 int i, ret;
4726 4726
4727 np = of_find_compatible_node(NULL, NULL, "ibm,i2o-440spe"); 4727 np = of_find_compatible_node(NULL, NULL, "ibm,i2o-440spe");
4728 if (!np) { 4728 if (!np) {
4729 pr_err("%s: can't find I2O device tree node\n", 4729 pr_err("%s: can't find I2O device tree node\n",
4730 __func__); 4730 __func__);
4731 return -ENODEV; 4731 return -ENODEV;
4732 } 4732 }
4733 4733
4734 if (of_address_to_resource(np, 0, &i2o_res)) { 4734 if (of_address_to_resource(np, 0, &i2o_res)) {
4735 of_node_put(np); 4735 of_node_put(np);
4736 return -EINVAL; 4736 return -EINVAL;
4737 } 4737 }
4738 4738
4739 i2o_reg = of_iomap(np, 0); 4739 i2o_reg = of_iomap(np, 0);
4740 if (!i2o_reg) { 4740 if (!i2o_reg) {
4741 pr_err("%s: failed to map I2O registers\n", __func__); 4741 pr_err("%s: failed to map I2O registers\n", __func__);
4742 of_node_put(np); 4742 of_node_put(np);
4743 return -EINVAL; 4743 return -EINVAL;
4744 } 4744 }
4745 4745
4746 /* Get I2O DCRs base */ 4746 /* Get I2O DCRs base */
4747 dcr_base = dcr_resource_start(np, 0); 4747 dcr_base = dcr_resource_start(np, 0);
4748 dcr_len = dcr_resource_len(np, 0); 4748 dcr_len = dcr_resource_len(np, 0);
4749 if (!dcr_base && !dcr_len) { 4749 if (!dcr_base && !dcr_len) {
4750 pr_err("%s: can't get DCR registers base/len!\n", 4750 pr_err("%s: can't get DCR registers base/len!\n",
4751 np->full_name); 4751 np->full_name);
4752 of_node_put(np); 4752 of_node_put(np);
4753 iounmap(i2o_reg); 4753 iounmap(i2o_reg);
4754 return -ENODEV; 4754 return -ENODEV;
4755 } 4755 }
4756 4756
4757 i2o_dcr_host = dcr_map(np, dcr_base, dcr_len); 4757 i2o_dcr_host = dcr_map(np, dcr_base, dcr_len);
4758 if (!DCR_MAP_OK(i2o_dcr_host)) { 4758 if (!DCR_MAP_OK(i2o_dcr_host)) {
4759 pr_err("%s: failed to map DCRs!\n", np->full_name); 4759 pr_err("%s: failed to map DCRs!\n", np->full_name);
4760 of_node_put(np); 4760 of_node_put(np);
4761 iounmap(i2o_reg); 4761 iounmap(i2o_reg);
4762 return -ENODEV; 4762 return -ENODEV;
4763 } 4763 }
4764 of_node_put(np); 4764 of_node_put(np);
4765 4765
4766 /* Provide memory regions for DMA's FIFOs: I2O, DMA0 and DMA1 share 4766 /* Provide memory regions for DMA's FIFOs: I2O, DMA0 and DMA1 share
4767 * the base address of FIFO memory space. 4767 * the base address of FIFO memory space.
4768 * Actually we need twice as much physical memory as programmed in the 4768 * Actually we need twice as much physical memory as programmed in the
4769 * <fsiz> register (because there are two FIFOs for each DMA: CP and CS) 4769 * <fsiz> register (because there are two FIFOs for each DMA: CP and CS)
4770 */ 4770 */
4771 ppc440spe_dma_fifo_buf = kmalloc((DMA0_FIFO_SIZE + DMA1_FIFO_SIZE) << 1, 4771 ppc440spe_dma_fifo_buf = kmalloc((DMA0_FIFO_SIZE + DMA1_FIFO_SIZE) << 1,
4772 GFP_KERNEL); 4772 GFP_KERNEL);
4773 if (!ppc440spe_dma_fifo_buf) { 4773 if (!ppc440spe_dma_fifo_buf) {
4774 pr_err("%s: DMA FIFO buffer allocation failed.\n", __func__); 4774 pr_err("%s: DMA FIFO buffer allocation failed.\n", __func__);
4775 iounmap(i2o_reg); 4775 iounmap(i2o_reg);
4776 dcr_unmap(i2o_dcr_host, dcr_len); 4776 dcr_unmap(i2o_dcr_host, dcr_len);
4777 return -ENOMEM; 4777 return -ENOMEM;
4778 } 4778 }
4779 4779
4780 /* 4780 /*
4781 * Configure h/w 4781 * Configure h/w
4782 */ 4782 */
4783 /* Reset I2O/DMA */ 4783 /* Reset I2O/DMA */
4784 mtdcri(SDR0, DCRN_SDR0_SRST, DCRN_SDR0_SRST_I2ODMA); 4784 mtdcri(SDR0, DCRN_SDR0_SRST, DCRN_SDR0_SRST_I2ODMA);
4785 mtdcri(SDR0, DCRN_SDR0_SRST, 0); 4785 mtdcri(SDR0, DCRN_SDR0_SRST, 0);
4786 4786
4787 /* Set up the base address of the memory-mapped registers */ 4787 /* Set up the base address of the memory-mapped registers */
4788 dcr_write(i2o_dcr_host, DCRN_I2O0_IBAH, (u32)(i2o_res.start >> 32)); 4788 dcr_write(i2o_dcr_host, DCRN_I2O0_IBAH, (u32)(i2o_res.start >> 32));
4789 dcr_write(i2o_dcr_host, DCRN_I2O0_IBAL, (u32)(i2o_res.start) | 4789 dcr_write(i2o_dcr_host, DCRN_I2O0_IBAL, (u32)(i2o_res.start) |
4790 I2O_REG_ENABLE); 4790 I2O_REG_ENABLE);
4791 dcr_unmap(i2o_dcr_host, dcr_len); 4791 dcr_unmap(i2o_dcr_host, dcr_len);
4792 4792
4793 /* Set up the FIFO memory space base address */ 4793 /* Set up the FIFO memory space base address */
4794 iowrite32(0, &i2o_reg->ifbah); 4794 iowrite32(0, &i2o_reg->ifbah);
4795 iowrite32(((u32)__pa(ppc440spe_dma_fifo_buf)), &i2o_reg->ifbal); 4795 iowrite32(((u32)__pa(ppc440spe_dma_fifo_buf)), &i2o_reg->ifbal);
4796 4796
4797 /* set zero FIFO size for I2O, so the whole 4797 /* set zero FIFO size for I2O, so the whole
4798 * ppc440spe_dma_fifo_buf is used by DMAs. 4798 * ppc440spe_dma_fifo_buf is used by DMAs.
4799 * DMAx FIFOs will be configured at probe time. 4799 * DMAx FIFOs will be configured at probe time.
4800 */ 4800 */
4801 iowrite32(0, &i2o_reg->ifsiz); 4801 iowrite32(0, &i2o_reg->ifsiz);
4802 iounmap(i2o_reg); 4802 iounmap(i2o_reg);
4803 4803
4804 /* To prepare WXOR/RXOR functionality we need access to 4804 /* To prepare WXOR/RXOR functionality we need access to
4805 * Memory Queue Module DCRs (it will eventually be enabled 4805 * Memory Queue Module DCRs (it will eventually be enabled
4806 * via the /sys interface of the ppc440spe ADMA driver). 4806 * via the /sys interface of the ppc440spe ADMA driver).
4807 */ 4807 */
4808 np = of_find_compatible_node(NULL, NULL, "ibm,mq-440spe"); 4808 np = of_find_compatible_node(NULL, NULL, "ibm,mq-440spe");
4809 if (!np) { 4809 if (!np) {
4810 pr_err("%s: can't find MQ device tree node\n", 4810 pr_err("%s: can't find MQ device tree node\n",
4811 __func__); 4811 __func__);
4812 ret = -ENODEV; 4812 ret = -ENODEV;
4813 goto out_free; 4813 goto out_free;
4814 } 4814 }
4815 4815
4816 /* Get MQ DCRs base */ 4816 /* Get MQ DCRs base */
4817 dcr_base = dcr_resource_start(np, 0); 4817 dcr_base = dcr_resource_start(np, 0);
4818 dcr_len = dcr_resource_len(np, 0); 4818 dcr_len = dcr_resource_len(np, 0);
4819 if (!dcr_base && !dcr_len) { 4819 if (!dcr_base && !dcr_len) {
4820 pr_err("%s: can't get DCR registers base/len!\n", 4820 pr_err("%s: can't get DCR registers base/len!\n",
4821 np->full_name); 4821 np->full_name);
4822 ret = -ENODEV; 4822 ret = -ENODEV;
4823 goto out_mq; 4823 goto out_mq;
4824 } 4824 }
4825 4825
4826 ppc440spe_mq_dcr_host = dcr_map(np, dcr_base, dcr_len); 4826 ppc440spe_mq_dcr_host = dcr_map(np, dcr_base, dcr_len);
4827 if (!DCR_MAP_OK(ppc440spe_mq_dcr_host)) { 4827 if (!DCR_MAP_OK(ppc440spe_mq_dcr_host)) {
4828 pr_err("%s: failed to map DCRs!\n", np->full_name); 4828 pr_err("%s: failed to map DCRs!\n", np->full_name);
4829 ret = -ENODEV; 4829 ret = -ENODEV;
4830 goto out_mq; 4830 goto out_mq;
4831 } 4831 }
4832 of_node_put(np); 4832 of_node_put(np);
4833 ppc440spe_mq_dcr_len = dcr_len; 4833 ppc440spe_mq_dcr_len = dcr_len;
4834 4834
4835 /* Set HB alias */ 4835 /* Set HB alias */
4836 dcr_write(ppc440spe_mq_dcr_host, DCRN_MQ0_BAUH, DMA_CUED_XOR_HB); 4836 dcr_write(ppc440spe_mq_dcr_host, DCRN_MQ0_BAUH, DMA_CUED_XOR_HB);
4837 4837
4838 /* Set: 4838 /* Set:
4839 * - LL transaction passing limit to 1; 4839 * - LL transaction passing limit to 1;
4840 * - Memory controller cycle limit to 1; 4840 * - Memory controller cycle limit to 1;
4841 * - Galois Polynomial to 0x14d (default) 4841 * - Galois Polynomial to 0x14d (default)
4842 */ 4842 */
4843 dcr_write(ppc440spe_mq_dcr_host, DCRN_MQ0_CFBHL, 4843 dcr_write(ppc440spe_mq_dcr_host, DCRN_MQ0_CFBHL,
4844 (1 << MQ0_CFBHL_TPLM) | (1 << MQ0_CFBHL_HBCL) | 4844 (1 << MQ0_CFBHL_TPLM) | (1 << MQ0_CFBHL_HBCL) |
4845 (PPC440SPE_DEFAULT_POLY << MQ0_CFBHL_POLY)); 4845 (PPC440SPE_DEFAULT_POLY << MQ0_CFBHL_POLY));
4846 4846
4847 atomic_set(&ppc440spe_adma_err_irq_ref, 0); 4847 atomic_set(&ppc440spe_adma_err_irq_ref, 0);
4848 for (i = 0; i < PPC440SPE_ADMA_ENGINES_NUM; i++) 4848 for (i = 0; i < PPC440SPE_ADMA_ENGINES_NUM; i++)
4849 ppc440spe_adma_devices[i] = -1; 4849 ppc440spe_adma_devices[i] = -1;
4850 4850
4851 return 0; 4851 return 0;
4852 4852
4853 out_mq: 4853 out_mq:
4854 of_node_put(np); 4854 of_node_put(np);
4855 out_free: 4855 out_free:
4856 kfree(ppc440spe_dma_fifo_buf); 4856 kfree(ppc440spe_dma_fifo_buf);
4857 return ret; 4857 return ret;
4858 } 4858 }
4859 4859
4860 static const struct of_device_id ppc440spe_adma_of_match[] = { 4860 static const struct of_device_id ppc440spe_adma_of_match[] = {
4861 { .compatible = "ibm,dma-440spe", }, 4861 { .compatible = "ibm,dma-440spe", },
4862 { .compatible = "amcc,xor-accelerator", }, 4862 { .compatible = "amcc,xor-accelerator", },
4863 {}, 4863 {},
4864 }; 4864 };
4865 MODULE_DEVICE_TABLE(of, ppc440spe_adma_of_match); 4865 MODULE_DEVICE_TABLE(of, ppc440spe_adma_of_match);
4866 4866
4867 static struct platform_driver ppc440spe_adma_driver = { 4867 static struct platform_driver ppc440spe_adma_driver = {
4868 .probe = ppc440spe_adma_probe, 4868 .probe = ppc440spe_adma_probe,
4869 .remove = ppc440spe_adma_remove, 4869 .remove = ppc440spe_adma_remove,
4870 .driver = { 4870 .driver = {
4871 .name = "PPC440SP(E)-ADMA", 4871 .name = "PPC440SP(E)-ADMA",
4872 .owner = THIS_MODULE, 4872 .owner = THIS_MODULE,
4873 .of_match_table = ppc440spe_adma_of_match, 4873 .of_match_table = ppc440spe_adma_of_match,
4874 }, 4874 },
4875 }; 4875 };
4876 4876
4877 static __init int ppc440spe_adma_init(void) 4877 static __init int ppc440spe_adma_init(void)
4878 { 4878 {
4879 int ret; 4879 int ret;
4880 4880
4881 ret = ppc440spe_configure_raid_devices(); 4881 ret = ppc440spe_configure_raid_devices();
4882 if (ret) 4882 if (ret)
4883 return ret; 4883 return ret;
4884 4884
4885 ret = platform_driver_register(&ppc440spe_adma_driver); 4885 ret = platform_driver_register(&ppc440spe_adma_driver);
4886 if (ret) { 4886 if (ret) {
4887 pr_err("%s: failed to register platform driver\n", 4887 pr_err("%s: failed to register platform driver\n",
4888 __func__); 4888 __func__);
4889 goto out_reg; 4889 goto out_reg;
4890 } 4890 }
4891 4891
4892 /* Initialization status */ 4892 /* Initialization status */
4893 ret = driver_create_file(&ppc440spe_adma_driver.driver, 4893 ret = driver_create_file(&ppc440spe_adma_driver.driver,
4894 &driver_attr_devices); 4894 &driver_attr_devices);
4895 if (ret) 4895 if (ret)
4896 goto out_dev; 4896 goto out_dev;
4897 4897
4898 /* RAID-6 h/w enable entry */ 4898 /* RAID-6 h/w enable entry */
4899 ret = driver_create_file(&ppc440spe_adma_driver.driver, 4899 ret = driver_create_file(&ppc440spe_adma_driver.driver,
4900 &driver_attr_enable); 4900 &driver_attr_enable);
4901 if (ret) 4901 if (ret)
4902 goto out_en; 4902 goto out_en;
4903 4903
4904 /* GF polynomial to use */ 4904 /* GF polynomial to use */
4905 ret = driver_create_file(&ppc440spe_adma_driver.driver, 4905 ret = driver_create_file(&ppc440spe_adma_driver.driver,
4906 &driver_attr_poly); 4906 &driver_attr_poly);
4907 if (!ret) 4907 if (!ret)
4908 return ret; 4908 return ret;
4909 4909
4910 driver_remove_file(&ppc440spe_adma_driver.driver, 4910 driver_remove_file(&ppc440spe_adma_driver.driver,
4911 &driver_attr_enable); 4911 &driver_attr_enable);
4912 out_en: 4912 out_en:
4913 driver_remove_file(&ppc440spe_adma_driver.driver, 4913 driver_remove_file(&ppc440spe_adma_driver.driver,
4914 &driver_attr_devices); 4914 &driver_attr_devices);
4915 out_dev: 4915 out_dev:
4916 /* User will not be able to enable h/w RAID-6 */ 4916 /* User will not be able to enable h/w RAID-6 */
4917 pr_err("%s: failed to create RAID-6 driver interface\n", 4917 pr_err("%s: failed to create RAID-6 driver interface\n",
4918 __func__); 4918 __func__);
4919 platform_driver_unregister(&ppc440spe_adma_driver); 4919 platform_driver_unregister(&ppc440spe_adma_driver);
4920 out_reg: 4920 out_reg:
4921 dcr_unmap(ppc440spe_mq_dcr_host, ppc440spe_mq_dcr_len); 4921 dcr_unmap(ppc440spe_mq_dcr_host, ppc440spe_mq_dcr_len);
4922 kfree(ppc440spe_dma_fifo_buf); 4922 kfree(ppc440spe_dma_fifo_buf);
4923 return ret; 4923 return ret;
4924 } 4924 }
4925 4925
4926 static void __exit ppc440spe_adma_exit(void) 4926 static void __exit ppc440spe_adma_exit(void)
4927 { 4927 {
4928 driver_remove_file(&ppc440spe_adma_driver.driver, 4928 driver_remove_file(&ppc440spe_adma_driver.driver,
4929 &driver_attr_poly); 4929 &driver_attr_poly);
4930 driver_remove_file(&ppc440spe_adma_driver.driver, 4930 driver_remove_file(&ppc440spe_adma_driver.driver,
4931 &driver_attr_enable); 4931 &driver_attr_enable);
4932 driver_remove_file(&ppc440spe_adma_driver.driver, 4932 driver_remove_file(&ppc440spe_adma_driver.driver,
4933 &driver_attr_devices); 4933 &driver_attr_devices);
4934 platform_driver_unregister(&ppc440spe_adma_driver); 4934 platform_driver_unregister(&ppc440spe_adma_driver);
4935 dcr_unmap(ppc440spe_mq_dcr_host, ppc440spe_mq_dcr_len); 4935 dcr_unmap(ppc440spe_mq_dcr_host, ppc440spe_mq_dcr_len);
4936 kfree(ppc440spe_dma_fifo_buf); 4936 kfree(ppc440spe_dma_fifo_buf);
4937 } 4937 }
4938 4938
4939 arch_initcall(ppc440spe_adma_init); 4939 arch_initcall(ppc440spe_adma_init);
4940 module_exit(ppc440spe_adma_exit); 4940 module_exit(ppc440spe_adma_exit);
4941 4941
4942 MODULE_AUTHOR("Yuri Tikhonov <yur@emcraft.com>"); 4942 MODULE_AUTHOR("Yuri Tikhonov <yur@emcraft.com>");
4943 MODULE_DESCRIPTION("PPC440SPE ADMA Engine Driver"); 4943 MODULE_DESCRIPTION("PPC440SPE ADMA Engine Driver");
4944 MODULE_LICENSE("GPL"); 4944 MODULE_LICENSE("GPL");
4945 4945
drivers/dma/sa11x0-dma.c
1 /* 1 /*
2 * SA11x0 DMAengine support 2 * SA11x0 DMAengine support
3 * 3 *
4 * Copyright (C) 2012 Russell King 4 * Copyright (C) 2012 Russell King
5 * Derived in part from arch/arm/mach-sa1100/dma.c, 5 * Derived in part from arch/arm/mach-sa1100/dma.c,
6 * Copyright (C) 2000, 2001 by Nicolas Pitre 6 * Copyright (C) 2000, 2001 by Nicolas Pitre
7 * 7 *
8 * This program is free software; you can redistribute it and/or modify 8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as 9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation. 10 * published by the Free Software Foundation.
11 */ 11 */
12 #include <linux/sched.h> 12 #include <linux/sched.h>
13 #include <linux/device.h> 13 #include <linux/device.h>
14 #include <linux/dmaengine.h> 14 #include <linux/dmaengine.h>
15 #include <linux/init.h> 15 #include <linux/init.h>
16 #include <linux/interrupt.h> 16 #include <linux/interrupt.h>
17 #include <linux/kernel.h> 17 #include <linux/kernel.h>
18 #include <linux/module.h> 18 #include <linux/module.h>
19 #include <linux/platform_device.h> 19 #include <linux/platform_device.h>
20 #include <linux/sa11x0-dma.h> 20 #include <linux/sa11x0-dma.h>
21 #include <linux/slab.h> 21 #include <linux/slab.h>
22 #include <linux/spinlock.h> 22 #include <linux/spinlock.h>
23 23
24 #include "virt-dma.h" 24 #include "virt-dma.h"
25 25
26 #define NR_PHY_CHAN 6 26 #define NR_PHY_CHAN 6
27 #define DMA_ALIGN 3 27 #define DMA_ALIGN 3
28 #define DMA_MAX_SIZE 0x1fff 28 #define DMA_MAX_SIZE 0x1fff
29 #define DMA_CHUNK_SIZE 0x1000 29 #define DMA_CHUNK_SIZE 0x1000
30 30
31 #define DMA_DDAR 0x00 31 #define DMA_DDAR 0x00
32 #define DMA_DCSR_S 0x04 32 #define DMA_DCSR_S 0x04
33 #define DMA_DCSR_C 0x08 33 #define DMA_DCSR_C 0x08
34 #define DMA_DCSR_R 0x0c 34 #define DMA_DCSR_R 0x0c
35 #define DMA_DBSA 0x10 35 #define DMA_DBSA 0x10
36 #define DMA_DBTA 0x14 36 #define DMA_DBTA 0x14
37 #define DMA_DBSB 0x18 37 #define DMA_DBSB 0x18
38 #define DMA_DBTB 0x1c 38 #define DMA_DBTB 0x1c
39 #define DMA_SIZE 0x20 39 #define DMA_SIZE 0x20
40 40
41 #define DCSR_RUN (1 << 0) 41 #define DCSR_RUN (1 << 0)
42 #define DCSR_IE (1 << 1) 42 #define DCSR_IE (1 << 1)
43 #define DCSR_ERROR (1 << 2) 43 #define DCSR_ERROR (1 << 2)
44 #define DCSR_DONEA (1 << 3) 44 #define DCSR_DONEA (1 << 3)
45 #define DCSR_STRTA (1 << 4) 45 #define DCSR_STRTA (1 << 4)
46 #define DCSR_DONEB (1 << 5) 46 #define DCSR_DONEB (1 << 5)
47 #define DCSR_STRTB (1 << 6) 47 #define DCSR_STRTB (1 << 6)
48 #define DCSR_BIU (1 << 7) 48 #define DCSR_BIU (1 << 7)
49 49
50 #define DDAR_RW (1 << 0) /* 0 = W, 1 = R */ 50 #define DDAR_RW (1 << 0) /* 0 = W, 1 = R */
51 #define DDAR_E (1 << 1) /* 0 = LE, 1 = BE */ 51 #define DDAR_E (1 << 1) /* 0 = LE, 1 = BE */
52 #define DDAR_BS (1 << 2) /* 0 = BS4, 1 = BS8 */ 52 #define DDAR_BS (1 << 2) /* 0 = BS4, 1 = BS8 */
53 #define DDAR_DW (1 << 3) /* 0 = 8b, 1 = 16b */ 53 #define DDAR_DW (1 << 3) /* 0 = 8b, 1 = 16b */
54 #define DDAR_Ser0UDCTr (0x0 << 4) 54 #define DDAR_Ser0UDCTr (0x0 << 4)
55 #define DDAR_Ser0UDCRc (0x1 << 4) 55 #define DDAR_Ser0UDCRc (0x1 << 4)
56 #define DDAR_Ser1SDLCTr (0x2 << 4) 56 #define DDAR_Ser1SDLCTr (0x2 << 4)
57 #define DDAR_Ser1SDLCRc (0x3 << 4) 57 #define DDAR_Ser1SDLCRc (0x3 << 4)
58 #define DDAR_Ser1UARTTr (0x4 << 4) 58 #define DDAR_Ser1UARTTr (0x4 << 4)
59 #define DDAR_Ser1UARTRc (0x5 << 4) 59 #define DDAR_Ser1UARTRc (0x5 << 4)
60 #define DDAR_Ser2ICPTr (0x6 << 4) 60 #define DDAR_Ser2ICPTr (0x6 << 4)
61 #define DDAR_Ser2ICPRc (0x7 << 4) 61 #define DDAR_Ser2ICPRc (0x7 << 4)
62 #define DDAR_Ser3UARTTr (0x8 << 4) 62 #define DDAR_Ser3UARTTr (0x8 << 4)
63 #define DDAR_Ser3UARTRc (0x9 << 4) 63 #define DDAR_Ser3UARTRc (0x9 << 4)
64 #define DDAR_Ser4MCP0Tr (0xa << 4) 64 #define DDAR_Ser4MCP0Tr (0xa << 4)
65 #define DDAR_Ser4MCP0Rc (0xb << 4) 65 #define DDAR_Ser4MCP0Rc (0xb << 4)
66 #define DDAR_Ser4MCP1Tr (0xc << 4) 66 #define DDAR_Ser4MCP1Tr (0xc << 4)
67 #define DDAR_Ser4MCP1Rc (0xd << 4) 67 #define DDAR_Ser4MCP1Rc (0xd << 4)
68 #define DDAR_Ser4SSPTr (0xe << 4) 68 #define DDAR_Ser4SSPTr (0xe << 4)
69 #define DDAR_Ser4SSPRc (0xf << 4) 69 #define DDAR_Ser4SSPRc (0xf << 4)
70 70
71 struct sa11x0_dma_sg { 71 struct sa11x0_dma_sg {
72 u32 addr; 72 u32 addr;
73 u32 len; 73 u32 len;
74 }; 74 };
75 75
76 struct sa11x0_dma_desc { 76 struct sa11x0_dma_desc {
77 struct virt_dma_desc vd; 77 struct virt_dma_desc vd;
78 78
79 u32 ddar; 79 u32 ddar;
80 size_t size; 80 size_t size;
81 unsigned period; 81 unsigned period;
82 bool cyclic; 82 bool cyclic;
83 83
84 unsigned sglen; 84 unsigned sglen;
85 struct sa11x0_dma_sg sg[0]; 85 struct sa11x0_dma_sg sg[0];
86 }; 86 };
87 87
88 struct sa11x0_dma_phy; 88 struct sa11x0_dma_phy;
89 89
90 struct sa11x0_dma_chan { 90 struct sa11x0_dma_chan {
91 struct virt_dma_chan vc; 91 struct virt_dma_chan vc;
92 92
93 /* protected by c->vc.lock */ 93 /* protected by c->vc.lock */
94 struct sa11x0_dma_phy *phy; 94 struct sa11x0_dma_phy *phy;
95 enum dma_status status; 95 enum dma_status status;
96 96
97 /* protected by d->lock */ 97 /* protected by d->lock */
98 struct list_head node; 98 struct list_head node;
99 99
100 u32 ddar; 100 u32 ddar;
101 const char *name; 101 const char *name;
102 }; 102 };
103 103
104 struct sa11x0_dma_phy { 104 struct sa11x0_dma_phy {
105 void __iomem *base; 105 void __iomem *base;
106 struct sa11x0_dma_dev *dev; 106 struct sa11x0_dma_dev *dev;
107 unsigned num; 107 unsigned num;
108 108
109 struct sa11x0_dma_chan *vchan; 109 struct sa11x0_dma_chan *vchan;
110 110
111 /* Protected by c->vc.lock */ 111 /* Protected by c->vc.lock */
112 unsigned sg_load; 112 unsigned sg_load;
113 struct sa11x0_dma_desc *txd_load; 113 struct sa11x0_dma_desc *txd_load;
114 unsigned sg_done; 114 unsigned sg_done;
115 struct sa11x0_dma_desc *txd_done; 115 struct sa11x0_dma_desc *txd_done;
116 #ifdef CONFIG_PM_SLEEP 116 #ifdef CONFIG_PM_SLEEP
117 u32 dbs[2]; 117 u32 dbs[2];
118 u32 dbt[2]; 118 u32 dbt[2];
119 u32 dcsr; 119 u32 dcsr;
120 #endif 120 #endif
121 }; 121 };
122 122
123 struct sa11x0_dma_dev { 123 struct sa11x0_dma_dev {
124 struct dma_device slave; 124 struct dma_device slave;
125 void __iomem *base; 125 void __iomem *base;
126 spinlock_t lock; 126 spinlock_t lock;
127 struct tasklet_struct task; 127 struct tasklet_struct task;
128 struct list_head chan_pending; 128 struct list_head chan_pending;
129 struct sa11x0_dma_phy phy[NR_PHY_CHAN]; 129 struct sa11x0_dma_phy phy[NR_PHY_CHAN];
130 }; 130 };
131 131
132 static struct sa11x0_dma_chan *to_sa11x0_dma_chan(struct dma_chan *chan) 132 static struct sa11x0_dma_chan *to_sa11x0_dma_chan(struct dma_chan *chan)
133 { 133 {
134 return container_of(chan, struct sa11x0_dma_chan, vc.chan); 134 return container_of(chan, struct sa11x0_dma_chan, vc.chan);
135 } 135 }
136 136
137 static struct sa11x0_dma_dev *to_sa11x0_dma(struct dma_device *dmadev) 137 static struct sa11x0_dma_dev *to_sa11x0_dma(struct dma_device *dmadev)
138 { 138 {
139 return container_of(dmadev, struct sa11x0_dma_dev, slave); 139 return container_of(dmadev, struct sa11x0_dma_dev, slave);
140 } 140 }
141 141
142 static struct sa11x0_dma_desc *sa11x0_dma_next_desc(struct sa11x0_dma_chan *c) 142 static struct sa11x0_dma_desc *sa11x0_dma_next_desc(struct sa11x0_dma_chan *c)
143 { 143 {
144 struct virt_dma_desc *vd = vchan_next_desc(&c->vc); 144 struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
145 145
146 return vd ? container_of(vd, struct sa11x0_dma_desc, vd) : NULL; 146 return vd ? container_of(vd, struct sa11x0_dma_desc, vd) : NULL;
147 } 147 }
148 148
149 static void sa11x0_dma_free_desc(struct virt_dma_desc *vd) 149 static void sa11x0_dma_free_desc(struct virt_dma_desc *vd)
150 { 150 {
151 kfree(container_of(vd, struct sa11x0_dma_desc, vd)); 151 kfree(container_of(vd, struct sa11x0_dma_desc, vd));
152 } 152 }
153 153
154 static void sa11x0_dma_start_desc(struct sa11x0_dma_phy *p, struct sa11x0_dma_desc *txd) 154 static void sa11x0_dma_start_desc(struct sa11x0_dma_phy *p, struct sa11x0_dma_desc *txd)
155 { 155 {
156 list_del(&txd->vd.node); 156 list_del(&txd->vd.node);
157 p->txd_load = txd; 157 p->txd_load = txd;
158 p->sg_load = 0; 158 p->sg_load = 0;
159 159
160 dev_vdbg(p->dev->slave.dev, "pchan %u: txd %p[%x]: starting: DDAR:%x\n", 160 dev_vdbg(p->dev->slave.dev, "pchan %u: txd %p[%x]: starting: DDAR:%x\n",
161 p->num, &txd->vd, txd->vd.tx.cookie, txd->ddar); 161 p->num, &txd->vd, txd->vd.tx.cookie, txd->ddar);
162 } 162 }
163 163
164 static void noinline sa11x0_dma_start_sg(struct sa11x0_dma_phy *p, 164 static void noinline sa11x0_dma_start_sg(struct sa11x0_dma_phy *p,
165 struct sa11x0_dma_chan *c) 165 struct sa11x0_dma_chan *c)
166 { 166 {
167 struct sa11x0_dma_desc *txd = p->txd_load; 167 struct sa11x0_dma_desc *txd = p->txd_load;
168 struct sa11x0_dma_sg *sg; 168 struct sa11x0_dma_sg *sg;
169 void __iomem *base = p->base; 169 void __iomem *base = p->base;
170 unsigned dbsx, dbtx; 170 unsigned dbsx, dbtx;
171 u32 dcsr; 171 u32 dcsr;
172 172
173 if (!txd) 173 if (!txd)
174 return; 174 return;
175 175
176 dcsr = readl_relaxed(base + DMA_DCSR_R); 176 dcsr = readl_relaxed(base + DMA_DCSR_R);
177 177
178 /* Don't try to load the next transfer if both buffers are started */ 178 /* Don't try to load the next transfer if both buffers are started */
179 if ((dcsr & (DCSR_STRTA | DCSR_STRTB)) == (DCSR_STRTA | DCSR_STRTB)) 179 if ((dcsr & (DCSR_STRTA | DCSR_STRTB)) == (DCSR_STRTA | DCSR_STRTB))
180 return; 180 return;
181 181
182 if (p->sg_load == txd->sglen) { 182 if (p->sg_load == txd->sglen) {
183 if (!txd->cyclic) { 183 if (!txd->cyclic) {
184 struct sa11x0_dma_desc *txn = sa11x0_dma_next_desc(c); 184 struct sa11x0_dma_desc *txn = sa11x0_dma_next_desc(c);
185 185
186 /* 186 /*
187 * We have reached the end of the current descriptor. 187 * We have reached the end of the current descriptor.
188 * Peek at the next descriptor, and if compatible with 188 * Peek at the next descriptor, and if compatible with
189 * the current, start processing it. 189 * the current, start processing it.
190 */ 190 */
191 if (txn && txn->ddar == txd->ddar) { 191 if (txn && txn->ddar == txd->ddar) {
192 txd = txn; 192 txd = txn;
193 sa11x0_dma_start_desc(p, txn); 193 sa11x0_dma_start_desc(p, txn);
194 } else { 194 } else {
195 p->txd_load = NULL; 195 p->txd_load = NULL;
196 return; 196 return;
197 } 197 }
198 } else { 198 } else {
199 /* Cyclic: reset back to beginning */ 199 /* Cyclic: reset back to beginning */
200 p->sg_load = 0; 200 p->sg_load = 0;
201 } 201 }
202 } 202 }
203 203
204 sg = &txd->sg[p->sg_load++]; 204 sg = &txd->sg[p->sg_load++];
205 205
206 /* Select buffer to load according to channel status */ 206 /* Select buffer to load according to channel status */
207 if (((dcsr & (DCSR_BIU | DCSR_STRTB)) == (DCSR_BIU | DCSR_STRTB)) || 207 if (((dcsr & (DCSR_BIU | DCSR_STRTB)) == (DCSR_BIU | DCSR_STRTB)) ||
208 ((dcsr & (DCSR_BIU | DCSR_STRTA)) == 0)) { 208 ((dcsr & (DCSR_BIU | DCSR_STRTA)) == 0)) {
209 dbsx = DMA_DBSA; 209 dbsx = DMA_DBSA;
210 dbtx = DMA_DBTA; 210 dbtx = DMA_DBTA;
211 dcsr = DCSR_STRTA | DCSR_IE | DCSR_RUN; 211 dcsr = DCSR_STRTA | DCSR_IE | DCSR_RUN;
212 } else { 212 } else {
213 dbsx = DMA_DBSB; 213 dbsx = DMA_DBSB;
214 dbtx = DMA_DBTB; 214 dbtx = DMA_DBTB;
215 dcsr = DCSR_STRTB | DCSR_IE | DCSR_RUN; 215 dcsr = DCSR_STRTB | DCSR_IE | DCSR_RUN;
216 } 216 }
217 217
218 writel_relaxed(sg->addr, base + dbsx); 218 writel_relaxed(sg->addr, base + dbsx);
219 writel_relaxed(sg->len, base + dbtx); 219 writel_relaxed(sg->len, base + dbtx);
220 writel(dcsr, base + DMA_DCSR_S); 220 writel(dcsr, base + DMA_DCSR_S);
221 221
222 dev_dbg(p->dev->slave.dev, "pchan %u: load: DCSR:%02x DBS%c:%08x DBT%c:%08x\n", 222 dev_dbg(p->dev->slave.dev, "pchan %u: load: DCSR:%02x DBS%c:%08x DBT%c:%08x\n",
223 p->num, dcsr, 223 p->num, dcsr,
224 'A' + (dbsx == DMA_DBSB), sg->addr, 224 'A' + (dbsx == DMA_DBSB), sg->addr,
225 'A' + (dbtx == DMA_DBTB), sg->len); 225 'A' + (dbtx == DMA_DBTB), sg->len);
226 } 226 }
227 227
228 static void noinline sa11x0_dma_complete(struct sa11x0_dma_phy *p, 228 static void noinline sa11x0_dma_complete(struct sa11x0_dma_phy *p,
229 struct sa11x0_dma_chan *c) 229 struct sa11x0_dma_chan *c)
230 { 230 {
231 struct sa11x0_dma_desc *txd = p->txd_done; 231 struct sa11x0_dma_desc *txd = p->txd_done;
232 232
233 if (++p->sg_done == txd->sglen) { 233 if (++p->sg_done == txd->sglen) {
234 if (!txd->cyclic) { 234 if (!txd->cyclic) {
235 vchan_cookie_complete(&txd->vd); 235 vchan_cookie_complete(&txd->vd);
236 236
237 p->sg_done = 0; 237 p->sg_done = 0;
238 p->txd_done = p->txd_load; 238 p->txd_done = p->txd_load;
239 239
240 if (!p->txd_done) 240 if (!p->txd_done)
241 tasklet_schedule(&p->dev->task); 241 tasklet_schedule(&p->dev->task);
242 } else { 242 } else {
243 if ((p->sg_done % txd->period) == 0) 243 if ((p->sg_done % txd->period) == 0)
244 vchan_cyclic_callback(&txd->vd); 244 vchan_cyclic_callback(&txd->vd);
245 245
246 /* Cyclic: reset back to beginning */ 246 /* Cyclic: reset back to beginning */
247 p->sg_done = 0; 247 p->sg_done = 0;
248 } 248 }
249 } 249 }
250 250
251 sa11x0_dma_start_sg(p, c); 251 sa11x0_dma_start_sg(p, c);
252 } 252 }
253 253
254 static irqreturn_t sa11x0_dma_irq(int irq, void *dev_id) 254 static irqreturn_t sa11x0_dma_irq(int irq, void *dev_id)
255 { 255 {
256 struct sa11x0_dma_phy *p = dev_id; 256 struct sa11x0_dma_phy *p = dev_id;
257 struct sa11x0_dma_dev *d = p->dev; 257 struct sa11x0_dma_dev *d = p->dev;
258 struct sa11x0_dma_chan *c; 258 struct sa11x0_dma_chan *c;
259 u32 dcsr; 259 u32 dcsr;
260 260
261 dcsr = readl_relaxed(p->base + DMA_DCSR_R); 261 dcsr = readl_relaxed(p->base + DMA_DCSR_R);
262 if (!(dcsr & (DCSR_ERROR | DCSR_DONEA | DCSR_DONEB))) 262 if (!(dcsr & (DCSR_ERROR | DCSR_DONEA | DCSR_DONEB)))
263 return IRQ_NONE; 263 return IRQ_NONE;
264 264
265 /* Clear reported status bits */ 265 /* Clear reported status bits */
266 writel_relaxed(dcsr & (DCSR_ERROR | DCSR_DONEA | DCSR_DONEB), 266 writel_relaxed(dcsr & (DCSR_ERROR | DCSR_DONEA | DCSR_DONEB),
267 p->base + DMA_DCSR_C); 267 p->base + DMA_DCSR_C);
268 268
269 dev_dbg(d->slave.dev, "pchan %u: irq: DCSR:%02x\n", p->num, dcsr); 269 dev_dbg(d->slave.dev, "pchan %u: irq: DCSR:%02x\n", p->num, dcsr);
270 270
271 if (dcsr & DCSR_ERROR) { 271 if (dcsr & DCSR_ERROR) {
272 dev_err(d->slave.dev, "pchan %u: error. DCSR:%02x DDAR:%08x DBSA:%08x DBTA:%08x DBSB:%08x DBTB:%08x\n", 272 dev_err(d->slave.dev, "pchan %u: error. DCSR:%02x DDAR:%08x DBSA:%08x DBTA:%08x DBSB:%08x DBTB:%08x\n",
273 p->num, dcsr, 273 p->num, dcsr,
274 readl_relaxed(p->base + DMA_DDAR), 274 readl_relaxed(p->base + DMA_DDAR),
275 readl_relaxed(p->base + DMA_DBSA), 275 readl_relaxed(p->base + DMA_DBSA),
276 readl_relaxed(p->base + DMA_DBTA), 276 readl_relaxed(p->base + DMA_DBTA),
277 readl_relaxed(p->base + DMA_DBSB), 277 readl_relaxed(p->base + DMA_DBSB),
278 readl_relaxed(p->base + DMA_DBTB)); 278 readl_relaxed(p->base + DMA_DBTB));
279 } 279 }
280 280
281 c = p->vchan; 281 c = p->vchan;
282 if (c) { 282 if (c) {
283 unsigned long flags; 283 unsigned long flags;
284 284
285 spin_lock_irqsave(&c->vc.lock, flags); 285 spin_lock_irqsave(&c->vc.lock, flags);
286 /* 286 /*
287 * Now that we're holding the lock, check that the vchan 287 * Now that we're holding the lock, check that the vchan
288 * really is associated with this pchan before touching the 288 * really is associated with this pchan before touching the
289 * hardware. This should always succeed, because we won't 289 * hardware. This should always succeed, because we won't
290 * change p->vchan or c->phy while the channel is actively 290 * change p->vchan or c->phy while the channel is actively
291 * transferring. 291 * transferring.
292 */ 292 */
293 if (c->phy == p) { 293 if (c->phy == p) {
294 if (dcsr & DCSR_DONEA) 294 if (dcsr & DCSR_DONEA)
295 sa11x0_dma_complete(p, c); 295 sa11x0_dma_complete(p, c);
296 if (dcsr & DCSR_DONEB) 296 if (dcsr & DCSR_DONEB)
297 sa11x0_dma_complete(p, c); 297 sa11x0_dma_complete(p, c);
298 } 298 }
299 spin_unlock_irqrestore(&c->vc.lock, flags); 299 spin_unlock_irqrestore(&c->vc.lock, flags);
300 } 300 }
301 301
302 return IRQ_HANDLED; 302 return IRQ_HANDLED;
303 } 303 }
304 304
305 static void sa11x0_dma_start_txd(struct sa11x0_dma_chan *c) 305 static void sa11x0_dma_start_txd(struct sa11x0_dma_chan *c)
306 { 306 {
307 struct sa11x0_dma_desc *txd = sa11x0_dma_next_desc(c); 307 struct sa11x0_dma_desc *txd = sa11x0_dma_next_desc(c);
308 308
309 /* If the issued list is empty, we have no further txds to process */ 309 /* If the issued list is empty, we have no further txds to process */
310 if (txd) { 310 if (txd) {
311 struct sa11x0_dma_phy *p = c->phy; 311 struct sa11x0_dma_phy *p = c->phy;
312 312
313 sa11x0_dma_start_desc(p, txd); 313 sa11x0_dma_start_desc(p, txd);
314 p->txd_done = txd; 314 p->txd_done = txd;
315 p->sg_done = 0; 315 p->sg_done = 0;
316 316
317 /* The channel should not have any transfers started */ 317 /* The channel should not have any transfers started */
318 WARN_ON(readl_relaxed(p->base + DMA_DCSR_R) & 318 WARN_ON(readl_relaxed(p->base + DMA_DCSR_R) &
319 (DCSR_STRTA | DCSR_STRTB)); 319 (DCSR_STRTA | DCSR_STRTB));
320 320
321 /* Clear the run and start bits before changing DDAR */ 321 /* Clear the run and start bits before changing DDAR */
322 writel_relaxed(DCSR_RUN | DCSR_STRTA | DCSR_STRTB, 322 writel_relaxed(DCSR_RUN | DCSR_STRTA | DCSR_STRTB,
323 p->base + DMA_DCSR_C); 323 p->base + DMA_DCSR_C);
324 writel_relaxed(txd->ddar, p->base + DMA_DDAR); 324 writel_relaxed(txd->ddar, p->base + DMA_DDAR);
325 325
326 /* Try to start both buffers */ 326 /* Try to start both buffers */
327 sa11x0_dma_start_sg(p, c); 327 sa11x0_dma_start_sg(p, c);
328 sa11x0_dma_start_sg(p, c); 328 sa11x0_dma_start_sg(p, c);
329 } 329 }
330 } 330 }
331 331
332 static void sa11x0_dma_tasklet(unsigned long arg) 332 static void sa11x0_dma_tasklet(unsigned long arg)
333 { 333 {
334 struct sa11x0_dma_dev *d = (struct sa11x0_dma_dev *)arg; 334 struct sa11x0_dma_dev *d = (struct sa11x0_dma_dev *)arg;
335 struct sa11x0_dma_phy *p; 335 struct sa11x0_dma_phy *p;
336 struct sa11x0_dma_chan *c; 336 struct sa11x0_dma_chan *c;
337 unsigned pch, pch_alloc = 0; 337 unsigned pch, pch_alloc = 0;
338 338
339 dev_dbg(d->slave.dev, "tasklet enter\n"); 339 dev_dbg(d->slave.dev, "tasklet enter\n");
340 340
341 list_for_each_entry(c, &d->slave.channels, vc.chan.device_node) { 341 list_for_each_entry(c, &d->slave.channels, vc.chan.device_node) {
342 spin_lock_irq(&c->vc.lock); 342 spin_lock_irq(&c->vc.lock);
343 p = c->phy; 343 p = c->phy;
344 if (p && !p->txd_done) { 344 if (p && !p->txd_done) {
345 sa11x0_dma_start_txd(c); 345 sa11x0_dma_start_txd(c);
346 if (!p->txd_done) { 346 if (!p->txd_done) {
347 /* No current txd associated with this channel */ 347 /* No current txd associated with this channel */
348 dev_dbg(d->slave.dev, "pchan %u: free\n", p->num); 348 dev_dbg(d->slave.dev, "pchan %u: free\n", p->num);
349 349
350 /* Mark this channel free */ 350 /* Mark this channel free */
351 c->phy = NULL; 351 c->phy = NULL;
352 p->vchan = NULL; 352 p->vchan = NULL;
353 } 353 }
354 } 354 }
355 spin_unlock_irq(&c->vc.lock); 355 spin_unlock_irq(&c->vc.lock);
356 } 356 }
357 357
358 spin_lock_irq(&d->lock); 358 spin_lock_irq(&d->lock);
359 for (pch = 0; pch < NR_PHY_CHAN; pch++) { 359 for (pch = 0; pch < NR_PHY_CHAN; pch++) {
360 p = &d->phy[pch]; 360 p = &d->phy[pch];
361 361
362 if (p->vchan == NULL && !list_empty(&d->chan_pending)) { 362 if (p->vchan == NULL && !list_empty(&d->chan_pending)) {
363 c = list_first_entry(&d->chan_pending, 363 c = list_first_entry(&d->chan_pending,
364 struct sa11x0_dma_chan, node); 364 struct sa11x0_dma_chan, node);
365 list_del_init(&c->node); 365 list_del_init(&c->node);
366 366
367 pch_alloc |= 1 << pch; 367 pch_alloc |= 1 << pch;
368 368
369 /* Mark this channel allocated */ 369 /* Mark this channel allocated */
370 p->vchan = c; 370 p->vchan = c;
371 371
372 dev_dbg(d->slave.dev, "pchan %u: alloc vchan %p\n", pch, &c->vc); 372 dev_dbg(d->slave.dev, "pchan %u: alloc vchan %p\n", pch, &c->vc);
373 } 373 }
374 } 374 }
375 spin_unlock_irq(&d->lock); 375 spin_unlock_irq(&d->lock);
376 376
377 for (pch = 0; pch < NR_PHY_CHAN; pch++) { 377 for (pch = 0; pch < NR_PHY_CHAN; pch++) {
378 if (pch_alloc & (1 << pch)) { 378 if (pch_alloc & (1 << pch)) {
379 p = &d->phy[pch]; 379 p = &d->phy[pch];
380 c = p->vchan; 380 c = p->vchan;
381 381
382 spin_lock_irq(&c->vc.lock); 382 spin_lock_irq(&c->vc.lock);
383 c->phy = p; 383 c->phy = p;
384 384
385 sa11x0_dma_start_txd(c); 385 sa11x0_dma_start_txd(c);
386 spin_unlock_irq(&c->vc.lock); 386 spin_unlock_irq(&c->vc.lock);
387 } 387 }
388 } 388 }
389 389
390 dev_dbg(d->slave.dev, "tasklet exit\n"); 390 dev_dbg(d->slave.dev, "tasklet exit\n");
391 } 391 }
392 392
393 393
394 static int sa11x0_dma_alloc_chan_resources(struct dma_chan *chan) 394 static int sa11x0_dma_alloc_chan_resources(struct dma_chan *chan)
395 { 395 {
396 return 0; 396 return 0;
397 } 397 }
398 398
399 static void sa11x0_dma_free_chan_resources(struct dma_chan *chan) 399 static void sa11x0_dma_free_chan_resources(struct dma_chan *chan)
400 { 400 {
401 struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan); 401 struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
402 struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device); 402 struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
403 unsigned long flags; 403 unsigned long flags;
404 404
405 spin_lock_irqsave(&d->lock, flags); 405 spin_lock_irqsave(&d->lock, flags);
406 list_del_init(&c->node); 406 list_del_init(&c->node);
407 spin_unlock_irqrestore(&d->lock, flags); 407 spin_unlock_irqrestore(&d->lock, flags);
408 408
409 vchan_free_chan_resources(&c->vc); 409 vchan_free_chan_resources(&c->vc);
410 } 410 }
411 411
412 static dma_addr_t sa11x0_dma_pos(struct sa11x0_dma_phy *p) 412 static dma_addr_t sa11x0_dma_pos(struct sa11x0_dma_phy *p)
413 { 413 {
414 unsigned reg; 414 unsigned reg;
415 u32 dcsr; 415 u32 dcsr;
416 416
417 dcsr = readl_relaxed(p->base + DMA_DCSR_R); 417 dcsr = readl_relaxed(p->base + DMA_DCSR_R);
418 418
419 if ((dcsr & (DCSR_BIU | DCSR_STRTA)) == DCSR_STRTA || 419 if ((dcsr & (DCSR_BIU | DCSR_STRTA)) == DCSR_STRTA ||
420 (dcsr & (DCSR_BIU | DCSR_STRTB)) == DCSR_BIU) 420 (dcsr & (DCSR_BIU | DCSR_STRTB)) == DCSR_BIU)
421 reg = DMA_DBSA; 421 reg = DMA_DBSA;
422 else 422 else
423 reg = DMA_DBSB; 423 reg = DMA_DBSB;
424 424
425 return readl_relaxed(p->base + reg); 425 return readl_relaxed(p->base + reg);
426 } 426 }
427 427
428 static enum dma_status sa11x0_dma_tx_status(struct dma_chan *chan, 428 static enum dma_status sa11x0_dma_tx_status(struct dma_chan *chan,
429 dma_cookie_t cookie, struct dma_tx_state *state) 429 dma_cookie_t cookie, struct dma_tx_state *state)
430 { 430 {
431 struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan); 431 struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
432 struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device); 432 struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
433 struct sa11x0_dma_phy *p; 433 struct sa11x0_dma_phy *p;
434 struct virt_dma_desc *vd; 434 struct virt_dma_desc *vd;
435 unsigned long flags; 435 unsigned long flags;
436 enum dma_status ret; 436 enum dma_status ret;
437 437
438 ret = dma_cookie_status(&c->vc.chan, cookie, state); 438 ret = dma_cookie_status(&c->vc.chan, cookie, state);
439 if (ret == DMA_SUCCESS) 439 if (ret == DMA_COMPLETE)
440 return ret; 440 return ret;
441 441
442 if (!state) 442 if (!state)
443 return c->status; 443 return c->status;
444 444
445 spin_lock_irqsave(&c->vc.lock, flags); 445 spin_lock_irqsave(&c->vc.lock, flags);
446 p = c->phy; 446 p = c->phy;
447 447
448 /* 448 /*
449 * If the cookie is on our issue queue, then the residue is 449 * If the cookie is on our issue queue, then the residue is
450 * its total size. 450 * its total size.
451 */ 451 */
452 vd = vchan_find_desc(&c->vc, cookie); 452 vd = vchan_find_desc(&c->vc, cookie);
453 if (vd) { 453 if (vd) {
454 state->residue = container_of(vd, struct sa11x0_dma_desc, vd)->size; 454 state->residue = container_of(vd, struct sa11x0_dma_desc, vd)->size;
455 } else if (!p) { 455 } else if (!p) {
456 state->residue = 0; 456 state->residue = 0;
457 } else { 457 } else {
458 struct sa11x0_dma_desc *txd; 458 struct sa11x0_dma_desc *txd;
459 size_t bytes = 0; 459 size_t bytes = 0;
460 460
461 if (p->txd_done && p->txd_done->vd.tx.cookie == cookie) 461 if (p->txd_done && p->txd_done->vd.tx.cookie == cookie)
462 txd = p->txd_done; 462 txd = p->txd_done;
463 else if (p->txd_load && p->txd_load->vd.tx.cookie == cookie) 463 else if (p->txd_load && p->txd_load->vd.tx.cookie == cookie)
464 txd = p->txd_load; 464 txd = p->txd_load;
465 else 465 else
466 txd = NULL; 466 txd = NULL;
467 467
468 ret = c->status; 468 ret = c->status;
469 if (txd) { 469 if (txd) {
470 dma_addr_t addr = sa11x0_dma_pos(p); 470 dma_addr_t addr = sa11x0_dma_pos(p);
471 unsigned i; 471 unsigned i;
472 472
473 dev_vdbg(d->slave.dev, "tx_status: addr:%x\n", addr); 473 dev_vdbg(d->slave.dev, "tx_status: addr:%x\n", addr);
474 474
475 for (i = 0; i < txd->sglen; i++) { 475 for (i = 0; i < txd->sglen; i++) {
476 dev_vdbg(d->slave.dev, "tx_status: [%u] %x+%x\n", 476 dev_vdbg(d->slave.dev, "tx_status: [%u] %x+%x\n",
477 i, txd->sg[i].addr, txd->sg[i].len); 477 i, txd->sg[i].addr, txd->sg[i].len);
478 if (addr >= txd->sg[i].addr && 478 if (addr >= txd->sg[i].addr &&
479 addr < txd->sg[i].addr + txd->sg[i].len) { 479 addr < txd->sg[i].addr + txd->sg[i].len) {
480 unsigned len; 480 unsigned len;
481 481
482 len = txd->sg[i].len - 482 len = txd->sg[i].len -
483 (addr - txd->sg[i].addr); 483 (addr - txd->sg[i].addr);
484 dev_vdbg(d->slave.dev, "tx_status: [%u] +%x\n", 484 dev_vdbg(d->slave.dev, "tx_status: [%u] +%x\n",
485 i, len); 485 i, len);
486 bytes += len; 486 bytes += len;
487 i++; 487 i++;
488 break; 488 break;
489 } 489 }
490 } 490 }
491 for (; i < txd->sglen; i++) { 491 for (; i < txd->sglen; i++) {
492 dev_vdbg(d->slave.dev, "tx_status: [%u] %x+%x ++\n", 492 dev_vdbg(d->slave.dev, "tx_status: [%u] %x+%x ++\n",
493 i, txd->sg[i].addr, txd->sg[i].len); 493 i, txd->sg[i].addr, txd->sg[i].len);
494 bytes += txd->sg[i].len; 494 bytes += txd->sg[i].len;
495 } 495 }
496 } 496 }
497 state->residue = bytes; 497 state->residue = bytes;
498 } 498 }
499 spin_unlock_irqrestore(&c->vc.lock, flags); 499 spin_unlock_irqrestore(&c->vc.lock, flags);
500 500
501 dev_vdbg(d->slave.dev, "tx_status: bytes 0x%zx\n", state->residue); 501 dev_vdbg(d->slave.dev, "tx_status: bytes 0x%zx\n", state->residue);
502 502
503 return ret; 503 return ret;
504 } 504 }
505 505
506 /* 506 /*
507 * Move pending txds to the issued list, and re-init pending list. 507 * Move pending txds to the issued list, and re-init pending list.
508 * If not already pending, add this channel to the list of pending 508 * If not already pending, add this channel to the list of pending
509 * channels and trigger the tasklet to run. 509 * channels and trigger the tasklet to run.
510 */ 510 */
511 static void sa11x0_dma_issue_pending(struct dma_chan *chan) 511 static void sa11x0_dma_issue_pending(struct dma_chan *chan)
512 { 512 {
513 struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan); 513 struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
514 struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device); 514 struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
515 unsigned long flags; 515 unsigned long flags;
516 516
517 spin_lock_irqsave(&c->vc.lock, flags); 517 spin_lock_irqsave(&c->vc.lock, flags);
518 if (vchan_issue_pending(&c->vc)) { 518 if (vchan_issue_pending(&c->vc)) {
519 if (!c->phy) { 519 if (!c->phy) {
520 spin_lock(&d->lock); 520 spin_lock(&d->lock);
521 if (list_empty(&c->node)) { 521 if (list_empty(&c->node)) {
522 list_add_tail(&c->node, &d->chan_pending); 522 list_add_tail(&c->node, &d->chan_pending);
523 tasklet_schedule(&d->task); 523 tasklet_schedule(&d->task);
524 dev_dbg(d->slave.dev, "vchan %p: issued\n", &c->vc); 524 dev_dbg(d->slave.dev, "vchan %p: issued\n", &c->vc);
525 } 525 }
526 spin_unlock(&d->lock); 526 spin_unlock(&d->lock);
527 } 527 }
528 } else 528 } else
529 dev_dbg(d->slave.dev, "vchan %p: nothing to issue\n", &c->vc); 529 dev_dbg(d->slave.dev, "vchan %p: nothing to issue\n", &c->vc);
530 spin_unlock_irqrestore(&c->vc.lock, flags); 530 spin_unlock_irqrestore(&c->vc.lock, flags);
531 } 531 }
532 532
533 static struct dma_async_tx_descriptor *sa11x0_dma_prep_slave_sg( 533 static struct dma_async_tx_descriptor *sa11x0_dma_prep_slave_sg(
534 struct dma_chan *chan, struct scatterlist *sg, unsigned int sglen, 534 struct dma_chan *chan, struct scatterlist *sg, unsigned int sglen,
535 enum dma_transfer_direction dir, unsigned long flags, void *context) 535 enum dma_transfer_direction dir, unsigned long flags, void *context)
536 { 536 {
537 struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan); 537 struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
538 struct sa11x0_dma_desc *txd; 538 struct sa11x0_dma_desc *txd;
539 struct scatterlist *sgent; 539 struct scatterlist *sgent;
540 unsigned i, j = sglen; 540 unsigned i, j = sglen;
541 size_t size = 0; 541 size_t size = 0;
542 542
543 /* SA11x0 channels can only operate in their native direction */ 543 /* SA11x0 channels can only operate in their native direction */
544 if (dir != (c->ddar & DDAR_RW ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV)) { 544 if (dir != (c->ddar & DDAR_RW ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV)) {
545 dev_err(chan->device->dev, "vchan %p: bad DMA direction: DDAR:%08x dir:%u\n", 545 dev_err(chan->device->dev, "vchan %p: bad DMA direction: DDAR:%08x dir:%u\n",
546 &c->vc, c->ddar, dir); 546 &c->vc, c->ddar, dir);
547 return NULL; 547 return NULL;
548 } 548 }
549 549
550 /* Do not allow zero-sized txds */ 550 /* Do not allow zero-sized txds */
551 if (sglen == 0) 551 if (sglen == 0)
552 return NULL; 552 return NULL;
553 553
554 for_each_sg(sg, sgent, sglen, i) { 554 for_each_sg(sg, sgent, sglen, i) {
555 dma_addr_t addr = sg_dma_address(sgent); 555 dma_addr_t addr = sg_dma_address(sgent);
556 unsigned int len = sg_dma_len(sgent); 556 unsigned int len = sg_dma_len(sgent);
557 557
558 if (len > DMA_MAX_SIZE) 558 if (len > DMA_MAX_SIZE)
559 j += DIV_ROUND_UP(len, DMA_MAX_SIZE & ~DMA_ALIGN) - 1; 559 j += DIV_ROUND_UP(len, DMA_MAX_SIZE & ~DMA_ALIGN) - 1;
560 if (addr & DMA_ALIGN) { 560 if (addr & DMA_ALIGN) {
561 dev_dbg(chan->device->dev, "vchan %p: bad buffer alignment: %08x\n", 561 dev_dbg(chan->device->dev, "vchan %p: bad buffer alignment: %08x\n",
562 &c->vc, addr); 562 &c->vc, addr);
563 return NULL; 563 return NULL;
564 } 564 }
565 } 565 }
566 566
567 txd = kzalloc(sizeof(*txd) + j * sizeof(txd->sg[0]), GFP_ATOMIC); 567 txd = kzalloc(sizeof(*txd) + j * sizeof(txd->sg[0]), GFP_ATOMIC);
568 if (!txd) { 568 if (!txd) {
569 dev_dbg(chan->device->dev, "vchan %p: kzalloc failed\n", &c->vc); 569 dev_dbg(chan->device->dev, "vchan %p: kzalloc failed\n", &c->vc);
570 return NULL; 570 return NULL;
571 } 571 }
572 572
573 j = 0; 573 j = 0;
574 for_each_sg(sg, sgent, sglen, i) { 574 for_each_sg(sg, sgent, sglen, i) {
575 dma_addr_t addr = sg_dma_address(sgent); 575 dma_addr_t addr = sg_dma_address(sgent);
576 unsigned len = sg_dma_len(sgent); 576 unsigned len = sg_dma_len(sgent);
577 577
578 size += len; 578 size += len;
579 579
580 do { 580 do {
581 unsigned tlen = len; 581 unsigned tlen = len;
582 582
583 /* 583 /*
584 * Check whether the transfer will fit. If not, try 584 * Check whether the transfer will fit. If not, try
585 * to split the transfer up such that we end up with 585 * to split the transfer up such that we end up with
586 * equal chunks - but make sure that we preserve the 586 * equal chunks - but make sure that we preserve the
587 * alignment. This avoids small segments. 587 * alignment. This avoids small segments.
588 */ 588 */
589 if (tlen > DMA_MAX_SIZE) { 589 if (tlen > DMA_MAX_SIZE) {
590 unsigned mult = DIV_ROUND_UP(tlen, 590 unsigned mult = DIV_ROUND_UP(tlen,
591 DMA_MAX_SIZE & ~DMA_ALIGN); 591 DMA_MAX_SIZE & ~DMA_ALIGN);
592 592
593 tlen = (tlen / mult) & ~DMA_ALIGN; 593 tlen = (tlen / mult) & ~DMA_ALIGN;
594 } 594 }
595 595
596 txd->sg[j].addr = addr; 596 txd->sg[j].addr = addr;
597 txd->sg[j].len = tlen; 597 txd->sg[j].len = tlen;
598 598
599 addr += tlen; 599 addr += tlen;
600 len -= tlen; 600 len -= tlen;
601 j++; 601 j++;
602 } while (len); 602 } while (len);
603 } 603 }
604 604
605 txd->ddar = c->ddar; 605 txd->ddar = c->ddar;
606 txd->size = size; 606 txd->size = size;
607 txd->sglen = j; 607 txd->sglen = j;
608 608
609 dev_dbg(chan->device->dev, "vchan %p: txd %p: size %u nr %u\n", 609 dev_dbg(chan->device->dev, "vchan %p: txd %p: size %u nr %u\n",
610 &c->vc, &txd->vd, txd->size, txd->sglen); 610 &c->vc, &txd->vd, txd->size, txd->sglen);
611 611
612 return vchan_tx_prep(&c->vc, &txd->vd, flags); 612 return vchan_tx_prep(&c->vc, &txd->vd, flags);
613 } 613 }
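The chunk-splitting arithmetic in the loop above (and repeated in the cyclic variant below) is easiest to check in isolation. Below is a minimal, stand-alone sketch of the same arithmetic; DMA_MAX_SIZE and DMA_ALIGN are assumed values chosen only for illustration, not necessarily the driver's real constants.

/* Illustrative only: mirrors the chunk splitting in the prep routines above. */
#include <stdio.h>

#define DMA_MAX_SIZE	0x1fff			/* assumed value for illustration */
#define DMA_ALIGN	0x7			/* assumed value for illustration */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int addr = 0x100000;		/* hypothetical sg entry address */
	unsigned int len = 0x5000;		/* hypothetical sg entry length */

	/* Allocation estimate for this oversized entry, as in the first pass above */
	printf("estimated chunks: %u\n",
	       DIV_ROUND_UP(len, DMA_MAX_SIZE & ~DMA_ALIGN));

	while (len) {
		unsigned int tlen = len;

		if (tlen > DMA_MAX_SIZE) {
			unsigned int mult = DIV_ROUND_UP(tlen,
					DMA_MAX_SIZE & ~DMA_ALIGN);

			/* near-equal chunks, rounded down to the alignment */
			tlen = (tlen / mult) & ~DMA_ALIGN;
		}
		printf("chunk: addr=%#x len=%#x\n", addr, tlen);
		addr += tlen;
		len -= tlen;
	}
	return 0;
}

With these assumed constants the 0x5000-byte entry splits into three aligned chunks of roughly equal size rather than two full-size chunks plus a small remainder, which is exactly what the comment in the driver is after.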
614 614
615 static struct dma_async_tx_descriptor *sa11x0_dma_prep_dma_cyclic( 615 static struct dma_async_tx_descriptor *sa11x0_dma_prep_dma_cyclic(
616 struct dma_chan *chan, dma_addr_t addr, size_t size, size_t period, 616 struct dma_chan *chan, dma_addr_t addr, size_t size, size_t period,
617 enum dma_transfer_direction dir, unsigned long flags, void *context) 617 enum dma_transfer_direction dir, unsigned long flags, void *context)
618 { 618 {
619 struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan); 619 struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
620 struct sa11x0_dma_desc *txd; 620 struct sa11x0_dma_desc *txd;
621 unsigned i, j, k, sglen, sgperiod; 621 unsigned i, j, k, sglen, sgperiod;
622 622
623 /* SA11x0 channels can only operate in their native direction */ 623 /* SA11x0 channels can only operate in their native direction */
624 if (dir != (c->ddar & DDAR_RW ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV)) { 624 if (dir != (c->ddar & DDAR_RW ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV)) {
625 dev_err(chan->device->dev, "vchan %p: bad DMA direction: DDAR:%08x dir:%u\n", 625 dev_err(chan->device->dev, "vchan %p: bad DMA direction: DDAR:%08x dir:%u\n",
626 &c->vc, c->ddar, dir); 626 &c->vc, c->ddar, dir);
627 return NULL; 627 return NULL;
628 } 628 }
629 629
630 sgperiod = DIV_ROUND_UP(period, DMA_MAX_SIZE & ~DMA_ALIGN); 630 sgperiod = DIV_ROUND_UP(period, DMA_MAX_SIZE & ~DMA_ALIGN);
631 sglen = size * sgperiod / period; 631 sglen = size * sgperiod / period;
632 632
633 /* Do not allow zero-sized txds */ 633 /* Do not allow zero-sized txds */
634 if (sglen == 0) 634 if (sglen == 0)
635 return NULL; 635 return NULL;
636 636
637 txd = kzalloc(sizeof(*txd) + sglen * sizeof(txd->sg[0]), GFP_ATOMIC); 637 txd = kzalloc(sizeof(*txd) + sglen * sizeof(txd->sg[0]), GFP_ATOMIC);
638 if (!txd) { 638 if (!txd) {
639 dev_dbg(chan->device->dev, "vchan %p: kzalloc failed\n", &c->vc); 639 dev_dbg(chan->device->dev, "vchan %p: kzalloc failed\n", &c->vc);
640 return NULL; 640 return NULL;
641 } 641 }
642 642
643 for (i = k = 0; i < size / period; i++) { 643 for (i = k = 0; i < size / period; i++) {
644 size_t tlen, len = period; 644 size_t tlen, len = period;
645 645
646 for (j = 0; j < sgperiod; j++, k++) { 646 for (j = 0; j < sgperiod; j++, k++) {
647 tlen = len; 647 tlen = len;
648 648
649 if (tlen > DMA_MAX_SIZE) { 649 if (tlen > DMA_MAX_SIZE) {
650 unsigned mult = DIV_ROUND_UP(tlen, DMA_MAX_SIZE & ~DMA_ALIGN); 650 unsigned mult = DIV_ROUND_UP(tlen, DMA_MAX_SIZE & ~DMA_ALIGN);
651 tlen = (tlen / mult) & ~DMA_ALIGN; 651 tlen = (tlen / mult) & ~DMA_ALIGN;
652 } 652 }
653 653
654 txd->sg[k].addr = addr; 654 txd->sg[k].addr = addr;
655 txd->sg[k].len = tlen; 655 txd->sg[k].len = tlen;
656 addr += tlen; 656 addr += tlen;
657 len -= tlen; 657 len -= tlen;
658 } 658 }
659 659
660 WARN_ON(len != 0); 660 WARN_ON(len != 0);
661 } 661 }
662 662
663 WARN_ON(k != sglen); 663 WARN_ON(k != sglen);
664 664
665 txd->ddar = c->ddar; 665 txd->ddar = c->ddar;
666 txd->size = size; 666 txd->size = size;
667 txd->sglen = sglen; 667 txd->sglen = sglen;
668 txd->cyclic = 1; 668 txd->cyclic = 1;
669 txd->period = sgperiod; 669 txd->period = sgperiod;
670 670
671 return vchan_tx_prep(&c->vc, &txd->vd, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 671 return vchan_tx_prep(&c->vc, &txd->vd, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
672 } 672 }
673 673
674 static int sa11x0_dma_slave_config(struct sa11x0_dma_chan *c, struct dma_slave_config *cfg) 674 static int sa11x0_dma_slave_config(struct sa11x0_dma_chan *c, struct dma_slave_config *cfg)
675 { 675 {
676 u32 ddar = c->ddar & ((0xf << 4) | DDAR_RW); 676 u32 ddar = c->ddar & ((0xf << 4) | DDAR_RW);
677 dma_addr_t addr; 677 dma_addr_t addr;
678 enum dma_slave_buswidth width; 678 enum dma_slave_buswidth width;
679 u32 maxburst; 679 u32 maxburst;
680 680
681 if (ddar & DDAR_RW) { 681 if (ddar & DDAR_RW) {
682 addr = cfg->src_addr; 682 addr = cfg->src_addr;
683 width = cfg->src_addr_width; 683 width = cfg->src_addr_width;
684 maxburst = cfg->src_maxburst; 684 maxburst = cfg->src_maxburst;
685 } else { 685 } else {
686 addr = cfg->dst_addr; 686 addr = cfg->dst_addr;
687 width = cfg->dst_addr_width; 687 width = cfg->dst_addr_width;
688 maxburst = cfg->dst_maxburst; 688 maxburst = cfg->dst_maxburst;
689 } 689 }
690 690
691 if ((width != DMA_SLAVE_BUSWIDTH_1_BYTE && 691 if ((width != DMA_SLAVE_BUSWIDTH_1_BYTE &&
692 width != DMA_SLAVE_BUSWIDTH_2_BYTES) || 692 width != DMA_SLAVE_BUSWIDTH_2_BYTES) ||
693 (maxburst != 4 && maxburst != 8)) 693 (maxburst != 4 && maxburst != 8))
694 return -EINVAL; 694 return -EINVAL;
695 695
696 if (width == DMA_SLAVE_BUSWIDTH_2_BYTES) 696 if (width == DMA_SLAVE_BUSWIDTH_2_BYTES)
697 ddar |= DDAR_DW; 697 ddar |= DDAR_DW;
698 if (maxburst == 8) 698 if (maxburst == 8)
699 ddar |= DDAR_BS; 699 ddar |= DDAR_BS;
700 700
701 dev_dbg(c->vc.chan.device->dev, "vchan %p: dma_slave_config addr %x width %u burst %u\n", 701 dev_dbg(c->vc.chan.device->dev, "vchan %p: dma_slave_config addr %x width %u burst %u\n",
702 &c->vc, addr, width, maxburst); 702 &c->vc, addr, width, maxburst);
703 703
704 c->ddar = ddar | (addr & 0xf0000000) | (addr & 0x003ffffc) << 6; 704 c->ddar = ddar | (addr & 0xf0000000) | (addr & 0x003ffffc) << 6;
705 705
706 return 0; 706 return 0;
707 } 707 }
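For reference, the checks above only accept a narrow set of slave configurations: a 1- or 2-byte bus width and a maxburst of 4 or 8. A minimal, hedged sketch of a client-side configuration that would pass them, assuming a hypothetical device FIFO address supplied by the caller:

/* Hedged sketch, not part of this driver: a config sa11x0_dma_slave_config() accepts. */
#include <linux/dmaengine.h>

static int example_cfg_sa11x0(struct dma_chan *chan, dma_addr_t dev_fifo)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_MEM_TO_DEV,
		.dst_addr	= dev_fifo,			/* hypothetical device address */
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_2_BYTES,	/* 1 or 2 bytes only */
		.dst_maxburst	= 8,				/* 4 or 8 only */
	};

	return dmaengine_slave_config(chan, &cfg);
}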
708 708
709 static int sa11x0_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, 709 static int sa11x0_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
710 unsigned long arg) 710 unsigned long arg)
711 { 711 {
712 struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan); 712 struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
713 struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device); 713 struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
714 struct sa11x0_dma_phy *p; 714 struct sa11x0_dma_phy *p;
715 LIST_HEAD(head); 715 LIST_HEAD(head);
716 unsigned long flags; 716 unsigned long flags;
717 int ret; 717 int ret;
718 718
719 switch (cmd) { 719 switch (cmd) {
720 case DMA_SLAVE_CONFIG: 720 case DMA_SLAVE_CONFIG:
721 return sa11x0_dma_slave_config(c, (struct dma_slave_config *)arg); 721 return sa11x0_dma_slave_config(c, (struct dma_slave_config *)arg);
722 722
723 case DMA_TERMINATE_ALL: 723 case DMA_TERMINATE_ALL:
724 dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc); 724 dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc);
725 /* Clear the tx descriptor lists */ 725 /* Clear the tx descriptor lists */
726 spin_lock_irqsave(&c->vc.lock, flags); 726 spin_lock_irqsave(&c->vc.lock, flags);
727 vchan_get_all_descriptors(&c->vc, &head); 727 vchan_get_all_descriptors(&c->vc, &head);
728 728
729 p = c->phy; 729 p = c->phy;
730 if (p) { 730 if (p) {
731 dev_dbg(d->slave.dev, "pchan %u: terminating\n", p->num); 731 dev_dbg(d->slave.dev, "pchan %u: terminating\n", p->num);
732 /* vchan is assigned to a pchan - stop the channel */ 732 /* vchan is assigned to a pchan - stop the channel */
733 writel(DCSR_RUN | DCSR_IE | 733 writel(DCSR_RUN | DCSR_IE |
734 DCSR_STRTA | DCSR_DONEA | 734 DCSR_STRTA | DCSR_DONEA |
735 DCSR_STRTB | DCSR_DONEB, 735 DCSR_STRTB | DCSR_DONEB,
736 p->base + DMA_DCSR_C); 736 p->base + DMA_DCSR_C);
737 737
738 if (p->txd_load) { 738 if (p->txd_load) {
739 if (p->txd_load != p->txd_done) 739 if (p->txd_load != p->txd_done)
740 list_add_tail(&p->txd_load->vd.node, &head); 740 list_add_tail(&p->txd_load->vd.node, &head);
741 p->txd_load = NULL; 741 p->txd_load = NULL;
742 } 742 }
743 if (p->txd_done) { 743 if (p->txd_done) {
744 list_add_tail(&p->txd_done->vd.node, &head); 744 list_add_tail(&p->txd_done->vd.node, &head);
745 p->txd_done = NULL; 745 p->txd_done = NULL;
746 } 746 }
747 c->phy = NULL; 747 c->phy = NULL;
748 spin_lock(&d->lock); 748 spin_lock(&d->lock);
749 p->vchan = NULL; 749 p->vchan = NULL;
750 spin_unlock(&d->lock); 750 spin_unlock(&d->lock);
751 tasklet_schedule(&d->task); 751 tasklet_schedule(&d->task);
752 } 752 }
753 spin_unlock_irqrestore(&c->vc.lock, flags); 753 spin_unlock_irqrestore(&c->vc.lock, flags);
754 vchan_dma_desc_free_list(&c->vc, &head); 754 vchan_dma_desc_free_list(&c->vc, &head);
755 ret = 0; 755 ret = 0;
756 break; 756 break;
757 757
758 case DMA_PAUSE: 758 case DMA_PAUSE:
759 dev_dbg(d->slave.dev, "vchan %p: pause\n", &c->vc); 759 dev_dbg(d->slave.dev, "vchan %p: pause\n", &c->vc);
760 spin_lock_irqsave(&c->vc.lock, flags); 760 spin_lock_irqsave(&c->vc.lock, flags);
761 if (c->status == DMA_IN_PROGRESS) { 761 if (c->status == DMA_IN_PROGRESS) {
762 c->status = DMA_PAUSED; 762 c->status = DMA_PAUSED;
763 763
764 p = c->phy; 764 p = c->phy;
765 if (p) { 765 if (p) {
766 writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_C); 766 writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_C);
767 } else { 767 } else {
768 spin_lock(&d->lock); 768 spin_lock(&d->lock);
769 list_del_init(&c->node); 769 list_del_init(&c->node);
770 spin_unlock(&d->lock); 770 spin_unlock(&d->lock);
771 } 771 }
772 } 772 }
773 spin_unlock_irqrestore(&c->vc.lock, flags); 773 spin_unlock_irqrestore(&c->vc.lock, flags);
774 ret = 0; 774 ret = 0;
775 break; 775 break;
776 776
777 case DMA_RESUME: 777 case DMA_RESUME:
778 dev_dbg(d->slave.dev, "vchan %p: resume\n", &c->vc); 778 dev_dbg(d->slave.dev, "vchan %p: resume\n", &c->vc);
779 spin_lock_irqsave(&c->vc.lock, flags); 779 spin_lock_irqsave(&c->vc.lock, flags);
780 if (c->status == DMA_PAUSED) { 780 if (c->status == DMA_PAUSED) {
781 c->status = DMA_IN_PROGRESS; 781 c->status = DMA_IN_PROGRESS;
782 782
783 p = c->phy; 783 p = c->phy;
784 if (p) { 784 if (p) {
785 writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_S); 785 writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_S);
786 } else if (!list_empty(&c->vc.desc_issued)) { 786 } else if (!list_empty(&c->vc.desc_issued)) {
787 spin_lock(&d->lock); 787 spin_lock(&d->lock);
788 list_add_tail(&c->node, &d->chan_pending); 788 list_add_tail(&c->node, &d->chan_pending);
789 spin_unlock(&d->lock); 789 spin_unlock(&d->lock);
790 } 790 }
791 } 791 }
792 spin_unlock_irqrestore(&c->vc.lock, flags); 792 spin_unlock_irqrestore(&c->vc.lock, flags);
793 ret = 0; 793 ret = 0;
794 break; 794 break;
795 795
796 default: 796 default:
797 ret = -ENXIO; 797 ret = -ENXIO;
798 break; 798 break;
799 } 799 }
800 800
801 return ret; 801 return ret;
802 } 802 }
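Client drivers do not invoke device_control() directly; in this kernel generation the generic dmaengine wrappers issue the commands handled by the switch above. A minimal, hedged illustration:

/* Hedged sketch: the generic wrappers map onto the cases handled above. */
#include <linux/dmaengine.h>

static void example_control(struct dma_chan *chan, struct dma_slave_config *cfg)
{
	dmaengine_slave_config(chan, cfg);	/* DMA_SLAVE_CONFIG */
	dmaengine_pause(chan);			/* DMA_PAUSE */
	dmaengine_resume(chan);			/* DMA_RESUME */
	dmaengine_terminate_all(chan);		/* DMA_TERMINATE_ALL */
}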
803 803
804 struct sa11x0_dma_channel_desc { 804 struct sa11x0_dma_channel_desc {
805 u32 ddar; 805 u32 ddar;
806 const char *name; 806 const char *name;
807 }; 807 };
808 808
809 #define CD(d1, d2) { .ddar = DDAR_##d1 | d2, .name = #d1 } 809 #define CD(d1, d2) { .ddar = DDAR_##d1 | d2, .name = #d1 }
810 static const struct sa11x0_dma_channel_desc chan_desc[] = { 810 static const struct sa11x0_dma_channel_desc chan_desc[] = {
811 CD(Ser0UDCTr, 0), 811 CD(Ser0UDCTr, 0),
812 CD(Ser0UDCRc, DDAR_RW), 812 CD(Ser0UDCRc, DDAR_RW),
813 CD(Ser1SDLCTr, 0), 813 CD(Ser1SDLCTr, 0),
814 CD(Ser1SDLCRc, DDAR_RW), 814 CD(Ser1SDLCRc, DDAR_RW),
815 CD(Ser1UARTTr, 0), 815 CD(Ser1UARTTr, 0),
816 CD(Ser1UARTRc, DDAR_RW), 816 CD(Ser1UARTRc, DDAR_RW),
817 CD(Ser2ICPTr, 0), 817 CD(Ser2ICPTr, 0),
818 CD(Ser2ICPRc, DDAR_RW), 818 CD(Ser2ICPRc, DDAR_RW),
819 CD(Ser3UARTTr, 0), 819 CD(Ser3UARTTr, 0),
820 CD(Ser3UARTRc, DDAR_RW), 820 CD(Ser3UARTRc, DDAR_RW),
821 CD(Ser4MCP0Tr, 0), 821 CD(Ser4MCP0Tr, 0),
822 CD(Ser4MCP0Rc, DDAR_RW), 822 CD(Ser4MCP0Rc, DDAR_RW),
823 CD(Ser4MCP1Tr, 0), 823 CD(Ser4MCP1Tr, 0),
824 CD(Ser4MCP1Rc, DDAR_RW), 824 CD(Ser4MCP1Rc, DDAR_RW),
825 CD(Ser4SSPTr, 0), 825 CD(Ser4SSPTr, 0),
826 CD(Ser4SSPRc, DDAR_RW), 826 CD(Ser4SSPRc, DDAR_RW),
827 }; 827 };
828 828
829 static int sa11x0_dma_init_dmadev(struct dma_device *dmadev, 829 static int sa11x0_dma_init_dmadev(struct dma_device *dmadev,
830 struct device *dev) 830 struct device *dev)
831 { 831 {
832 unsigned i; 832 unsigned i;
833 833
834 dmadev->chancnt = ARRAY_SIZE(chan_desc); 834 dmadev->chancnt = ARRAY_SIZE(chan_desc);
835 INIT_LIST_HEAD(&dmadev->channels); 835 INIT_LIST_HEAD(&dmadev->channels);
836 dmadev->dev = dev; 836 dmadev->dev = dev;
837 dmadev->device_alloc_chan_resources = sa11x0_dma_alloc_chan_resources; 837 dmadev->device_alloc_chan_resources = sa11x0_dma_alloc_chan_resources;
838 dmadev->device_free_chan_resources = sa11x0_dma_free_chan_resources; 838 dmadev->device_free_chan_resources = sa11x0_dma_free_chan_resources;
839 dmadev->device_control = sa11x0_dma_control; 839 dmadev->device_control = sa11x0_dma_control;
840 dmadev->device_tx_status = sa11x0_dma_tx_status; 840 dmadev->device_tx_status = sa11x0_dma_tx_status;
841 dmadev->device_issue_pending = sa11x0_dma_issue_pending; 841 dmadev->device_issue_pending = sa11x0_dma_issue_pending;
842 842
843 for (i = 0; i < dmadev->chancnt; i++) { 843 for (i = 0; i < dmadev->chancnt; i++) {
844 struct sa11x0_dma_chan *c; 844 struct sa11x0_dma_chan *c;
845 845
846 c = kzalloc(sizeof(*c), GFP_KERNEL); 846 c = kzalloc(sizeof(*c), GFP_KERNEL);
847 if (!c) { 847 if (!c) {
848 dev_err(dev, "no memory for channel %u\n", i); 848 dev_err(dev, "no memory for channel %u\n", i);
849 return -ENOMEM; 849 return -ENOMEM;
850 } 850 }
851 851
852 c->status = DMA_IN_PROGRESS; 852 c->status = DMA_IN_PROGRESS;
853 c->ddar = chan_desc[i].ddar; 853 c->ddar = chan_desc[i].ddar;
854 c->name = chan_desc[i].name; 854 c->name = chan_desc[i].name;
855 INIT_LIST_HEAD(&c->node); 855 INIT_LIST_HEAD(&c->node);
856 856
857 c->vc.desc_free = sa11x0_dma_free_desc; 857 c->vc.desc_free = sa11x0_dma_free_desc;
858 vchan_init(&c->vc, dmadev); 858 vchan_init(&c->vc, dmadev);
859 } 859 }
860 860
861 return dma_async_device_register(dmadev); 861 return dma_async_device_register(dmadev);
862 } 862 }
863 863
864 static int sa11x0_dma_request_irq(struct platform_device *pdev, int nr, 864 static int sa11x0_dma_request_irq(struct platform_device *pdev, int nr,
865 void *data) 865 void *data)
866 { 866 {
867 int irq = platform_get_irq(pdev, nr); 867 int irq = platform_get_irq(pdev, nr);
868 868
869 if (irq <= 0) 869 if (irq <= 0)
870 return -ENXIO; 870 return -ENXIO;
871 871
872 return request_irq(irq, sa11x0_dma_irq, 0, dev_name(&pdev->dev), data); 872 return request_irq(irq, sa11x0_dma_irq, 0, dev_name(&pdev->dev), data);
873 } 873 }
874 874
875 static void sa11x0_dma_free_irq(struct platform_device *pdev, int nr, 875 static void sa11x0_dma_free_irq(struct platform_device *pdev, int nr,
876 void *data) 876 void *data)
877 { 877 {
878 int irq = platform_get_irq(pdev, nr); 878 int irq = platform_get_irq(pdev, nr);
879 if (irq > 0) 879 if (irq > 0)
880 free_irq(irq, data); 880 free_irq(irq, data);
881 } 881 }
882 882
883 static void sa11x0_dma_free_channels(struct dma_device *dmadev) 883 static void sa11x0_dma_free_channels(struct dma_device *dmadev)
884 { 884 {
885 struct sa11x0_dma_chan *c, *cn; 885 struct sa11x0_dma_chan *c, *cn;
886 886
887 list_for_each_entry_safe(c, cn, &dmadev->channels, vc.chan.device_node) { 887 list_for_each_entry_safe(c, cn, &dmadev->channels, vc.chan.device_node) {
888 list_del(&c->vc.chan.device_node); 888 list_del(&c->vc.chan.device_node);
889 tasklet_kill(&c->vc.task); 889 tasklet_kill(&c->vc.task);
890 kfree(c); 890 kfree(c);
891 } 891 }
892 } 892 }
893 893
894 static int sa11x0_dma_probe(struct platform_device *pdev) 894 static int sa11x0_dma_probe(struct platform_device *pdev)
895 { 895 {
896 struct sa11x0_dma_dev *d; 896 struct sa11x0_dma_dev *d;
897 struct resource *res; 897 struct resource *res;
898 unsigned i; 898 unsigned i;
899 int ret; 899 int ret;
900 900
901 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 901 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
902 if (!res) 902 if (!res)
903 return -ENXIO; 903 return -ENXIO;
904 904
905 d = kzalloc(sizeof(*d), GFP_KERNEL); 905 d = kzalloc(sizeof(*d), GFP_KERNEL);
906 if (!d) { 906 if (!d) {
907 ret = -ENOMEM; 907 ret = -ENOMEM;
908 goto err_alloc; 908 goto err_alloc;
909 } 909 }
910 910
911 spin_lock_init(&d->lock); 911 spin_lock_init(&d->lock);
912 INIT_LIST_HEAD(&d->chan_pending); 912 INIT_LIST_HEAD(&d->chan_pending);
913 913
914 d->base = ioremap(res->start, resource_size(res)); 914 d->base = ioremap(res->start, resource_size(res));
915 if (!d->base) { 915 if (!d->base) {
916 ret = -ENOMEM; 916 ret = -ENOMEM;
917 goto err_ioremap; 917 goto err_ioremap;
918 } 918 }
919 919
920 tasklet_init(&d->task, sa11x0_dma_tasklet, (unsigned long)d); 920 tasklet_init(&d->task, sa11x0_dma_tasklet, (unsigned long)d);
921 921
922 for (i = 0; i < NR_PHY_CHAN; i++) { 922 for (i = 0; i < NR_PHY_CHAN; i++) {
923 struct sa11x0_dma_phy *p = &d->phy[i]; 923 struct sa11x0_dma_phy *p = &d->phy[i];
924 924
925 p->dev = d; 925 p->dev = d;
926 p->num = i; 926 p->num = i;
927 p->base = d->base + i * DMA_SIZE; 927 p->base = d->base + i * DMA_SIZE;
928 writel_relaxed(DCSR_RUN | DCSR_IE | DCSR_ERROR | 928 writel_relaxed(DCSR_RUN | DCSR_IE | DCSR_ERROR |
929 DCSR_DONEA | DCSR_STRTA | DCSR_DONEB | DCSR_STRTB, 929 DCSR_DONEA | DCSR_STRTA | DCSR_DONEB | DCSR_STRTB,
930 p->base + DMA_DCSR_C); 930 p->base + DMA_DCSR_C);
931 writel_relaxed(0, p->base + DMA_DDAR); 931 writel_relaxed(0, p->base + DMA_DDAR);
932 932
933 ret = sa11x0_dma_request_irq(pdev, i, p); 933 ret = sa11x0_dma_request_irq(pdev, i, p);
934 if (ret) { 934 if (ret) {
935 while (i) { 935 while (i) {
936 i--; 936 i--;
937 sa11x0_dma_free_irq(pdev, i, &d->phy[i]); 937 sa11x0_dma_free_irq(pdev, i, &d->phy[i]);
938 } 938 }
939 goto err_irq; 939 goto err_irq;
940 } 940 }
941 } 941 }
942 942
943 dma_cap_set(DMA_SLAVE, d->slave.cap_mask); 943 dma_cap_set(DMA_SLAVE, d->slave.cap_mask);
944 dma_cap_set(DMA_CYCLIC, d->slave.cap_mask); 944 dma_cap_set(DMA_CYCLIC, d->slave.cap_mask);
945 d->slave.device_prep_slave_sg = sa11x0_dma_prep_slave_sg; 945 d->slave.device_prep_slave_sg = sa11x0_dma_prep_slave_sg;
946 d->slave.device_prep_dma_cyclic = sa11x0_dma_prep_dma_cyclic; 946 d->slave.device_prep_dma_cyclic = sa11x0_dma_prep_dma_cyclic;
947 ret = sa11x0_dma_init_dmadev(&d->slave, &pdev->dev); 947 ret = sa11x0_dma_init_dmadev(&d->slave, &pdev->dev);
948 if (ret) { 948 if (ret) {
949 dev_warn(d->slave.dev, "failed to register slave async device: %d\n", 949 dev_warn(d->slave.dev, "failed to register slave async device: %d\n",
950 ret); 950 ret);
951 goto err_slave_reg; 951 goto err_slave_reg;
952 } 952 }
953 953
954 platform_set_drvdata(pdev, d); 954 platform_set_drvdata(pdev, d);
955 return 0; 955 return 0;
956 956
957 err_slave_reg: 957 err_slave_reg:
958 sa11x0_dma_free_channels(&d->slave); 958 sa11x0_dma_free_channels(&d->slave);
959 for (i = 0; i < NR_PHY_CHAN; i++) 959 for (i = 0; i < NR_PHY_CHAN; i++)
960 sa11x0_dma_free_irq(pdev, i, &d->phy[i]); 960 sa11x0_dma_free_irq(pdev, i, &d->phy[i]);
961 err_irq: 961 err_irq:
962 tasklet_kill(&d->task); 962 tasklet_kill(&d->task);
963 iounmap(d->base); 963 iounmap(d->base);
964 err_ioremap: 964 err_ioremap:
965 kfree(d); 965 kfree(d);
966 err_alloc: 966 err_alloc:
967 return ret; 967 return ret;
968 } 968 }
969 969
970 static int sa11x0_dma_remove(struct platform_device *pdev) 970 static int sa11x0_dma_remove(struct platform_device *pdev)
971 { 971 {
972 struct sa11x0_dma_dev *d = platform_get_drvdata(pdev); 972 struct sa11x0_dma_dev *d = platform_get_drvdata(pdev);
973 unsigned pch; 973 unsigned pch;
974 974
975 dma_async_device_unregister(&d->slave); 975 dma_async_device_unregister(&d->slave);
976 976
977 sa11x0_dma_free_channels(&d->slave); 977 sa11x0_dma_free_channels(&d->slave);
978 for (pch = 0; pch < NR_PHY_CHAN; pch++) 978 for (pch = 0; pch < NR_PHY_CHAN; pch++)
979 sa11x0_dma_free_irq(pdev, pch, &d->phy[pch]); 979 sa11x0_dma_free_irq(pdev, pch, &d->phy[pch]);
980 tasklet_kill(&d->task); 980 tasklet_kill(&d->task);
981 iounmap(d->base); 981 iounmap(d->base);
982 kfree(d); 982 kfree(d);
983 983
984 return 0; 984 return 0;
985 } 985 }
986 986
987 #ifdef CONFIG_PM_SLEEP 987 #ifdef CONFIG_PM_SLEEP
988 static int sa11x0_dma_suspend(struct device *dev) 988 static int sa11x0_dma_suspend(struct device *dev)
989 { 989 {
990 struct sa11x0_dma_dev *d = dev_get_drvdata(dev); 990 struct sa11x0_dma_dev *d = dev_get_drvdata(dev);
991 unsigned pch; 991 unsigned pch;
992 992
993 for (pch = 0; pch < NR_PHY_CHAN; pch++) { 993 for (pch = 0; pch < NR_PHY_CHAN; pch++) {
994 struct sa11x0_dma_phy *p = &d->phy[pch]; 994 struct sa11x0_dma_phy *p = &d->phy[pch];
995 u32 dcsr, saved_dcsr; 995 u32 dcsr, saved_dcsr;
996 996
997 dcsr = saved_dcsr = readl_relaxed(p->base + DMA_DCSR_R); 997 dcsr = saved_dcsr = readl_relaxed(p->base + DMA_DCSR_R);
998 if (dcsr & DCSR_RUN) { 998 if (dcsr & DCSR_RUN) {
999 writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_C); 999 writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_C);
1000 dcsr = readl_relaxed(p->base + DMA_DCSR_R); 1000 dcsr = readl_relaxed(p->base + DMA_DCSR_R);
1001 } 1001 }
1002 1002
1003 saved_dcsr &= DCSR_RUN | DCSR_IE; 1003 saved_dcsr &= DCSR_RUN | DCSR_IE;
1004 if (dcsr & DCSR_BIU) { 1004 if (dcsr & DCSR_BIU) {
1005 p->dbs[0] = readl_relaxed(p->base + DMA_DBSB); 1005 p->dbs[0] = readl_relaxed(p->base + DMA_DBSB);
1006 p->dbt[0] = readl_relaxed(p->base + DMA_DBTB); 1006 p->dbt[0] = readl_relaxed(p->base + DMA_DBTB);
1007 p->dbs[1] = readl_relaxed(p->base + DMA_DBSA); 1007 p->dbs[1] = readl_relaxed(p->base + DMA_DBSA);
1008 p->dbt[1] = readl_relaxed(p->base + DMA_DBTA); 1008 p->dbt[1] = readl_relaxed(p->base + DMA_DBTA);
1009 saved_dcsr |= (dcsr & DCSR_STRTA ? DCSR_STRTB : 0) | 1009 saved_dcsr |= (dcsr & DCSR_STRTA ? DCSR_STRTB : 0) |
1010 (dcsr & DCSR_STRTB ? DCSR_STRTA : 0); 1010 (dcsr & DCSR_STRTB ? DCSR_STRTA : 0);
1011 } else { 1011 } else {
1012 p->dbs[0] = readl_relaxed(p->base + DMA_DBSA); 1012 p->dbs[0] = readl_relaxed(p->base + DMA_DBSA);
1013 p->dbt[0] = readl_relaxed(p->base + DMA_DBTA); 1013 p->dbt[0] = readl_relaxed(p->base + DMA_DBTA);
1014 p->dbs[1] = readl_relaxed(p->base + DMA_DBSB); 1014 p->dbs[1] = readl_relaxed(p->base + DMA_DBSB);
1015 p->dbt[1] = readl_relaxed(p->base + DMA_DBTB); 1015 p->dbt[1] = readl_relaxed(p->base + DMA_DBTB);
1016 saved_dcsr |= dcsr & (DCSR_STRTA | DCSR_STRTB); 1016 saved_dcsr |= dcsr & (DCSR_STRTA | DCSR_STRTB);
1017 } 1017 }
1018 p->dcsr = saved_dcsr; 1018 p->dcsr = saved_dcsr;
1019 1019
1020 writel(DCSR_STRTA | DCSR_STRTB, p->base + DMA_DCSR_C); 1020 writel(DCSR_STRTA | DCSR_STRTB, p->base + DMA_DCSR_C);
1021 } 1021 }
1022 1022
1023 return 0; 1023 return 0;
1024 } 1024 }
1025 1025
1026 static int sa11x0_dma_resume(struct device *dev) 1026 static int sa11x0_dma_resume(struct device *dev)
1027 { 1027 {
1028 struct sa11x0_dma_dev *d = dev_get_drvdata(dev); 1028 struct sa11x0_dma_dev *d = dev_get_drvdata(dev);
1029 unsigned pch; 1029 unsigned pch;
1030 1030
1031 for (pch = 0; pch < NR_PHY_CHAN; pch++) { 1031 for (pch = 0; pch < NR_PHY_CHAN; pch++) {
1032 struct sa11x0_dma_phy *p = &d->phy[pch]; 1032 struct sa11x0_dma_phy *p = &d->phy[pch];
1033 struct sa11x0_dma_desc *txd = NULL; 1033 struct sa11x0_dma_desc *txd = NULL;
1034 u32 dcsr = readl_relaxed(p->base + DMA_DCSR_R); 1034 u32 dcsr = readl_relaxed(p->base + DMA_DCSR_R);
1035 1035
1036 WARN_ON(dcsr & (DCSR_BIU | DCSR_STRTA | DCSR_STRTB | DCSR_RUN)); 1036 WARN_ON(dcsr & (DCSR_BIU | DCSR_STRTA | DCSR_STRTB | DCSR_RUN));
1037 1037
1038 if (p->txd_done) 1038 if (p->txd_done)
1039 txd = p->txd_done; 1039 txd = p->txd_done;
1040 else if (p->txd_load) 1040 else if (p->txd_load)
1041 txd = p->txd_load; 1041 txd = p->txd_load;
1042 1042
1043 if (!txd) 1043 if (!txd)
1044 continue; 1044 continue;
1045 1045
1046 writel_relaxed(txd->ddar, p->base + DMA_DDAR); 1046 writel_relaxed(txd->ddar, p->base + DMA_DDAR);
1047 1047
1048 writel_relaxed(p->dbs[0], p->base + DMA_DBSA); 1048 writel_relaxed(p->dbs[0], p->base + DMA_DBSA);
1049 writel_relaxed(p->dbt[0], p->base + DMA_DBTA); 1049 writel_relaxed(p->dbt[0], p->base + DMA_DBTA);
1050 writel_relaxed(p->dbs[1], p->base + DMA_DBSB); 1050 writel_relaxed(p->dbs[1], p->base + DMA_DBSB);
1051 writel_relaxed(p->dbt[1], p->base + DMA_DBTB); 1051 writel_relaxed(p->dbt[1], p->base + DMA_DBTB);
1052 writel_relaxed(p->dcsr, p->base + DMA_DCSR_S); 1052 writel_relaxed(p->dcsr, p->base + DMA_DCSR_S);
1053 } 1053 }
1054 1054
1055 return 0; 1055 return 0;
1056 } 1056 }
1057 #endif 1057 #endif
1058 1058
1059 static const struct dev_pm_ops sa11x0_dma_pm_ops = { 1059 static const struct dev_pm_ops sa11x0_dma_pm_ops = {
1060 .suspend_noirq = sa11x0_dma_suspend, 1060 .suspend_noirq = sa11x0_dma_suspend,
1061 .resume_noirq = sa11x0_dma_resume, 1061 .resume_noirq = sa11x0_dma_resume,
1062 .freeze_noirq = sa11x0_dma_suspend, 1062 .freeze_noirq = sa11x0_dma_suspend,
1063 .thaw_noirq = sa11x0_dma_resume, 1063 .thaw_noirq = sa11x0_dma_resume,
1064 .poweroff_noirq = sa11x0_dma_suspend, 1064 .poweroff_noirq = sa11x0_dma_suspend,
1065 .restore_noirq = sa11x0_dma_resume, 1065 .restore_noirq = sa11x0_dma_resume,
1066 }; 1066 };
1067 1067
1068 static struct platform_driver sa11x0_dma_driver = { 1068 static struct platform_driver sa11x0_dma_driver = {
1069 .driver = { 1069 .driver = {
1070 .name = "sa11x0-dma", 1070 .name = "sa11x0-dma",
1071 .owner = THIS_MODULE, 1071 .owner = THIS_MODULE,
1072 .pm = &sa11x0_dma_pm_ops, 1072 .pm = &sa11x0_dma_pm_ops,
1073 }, 1073 },
1074 .probe = sa11x0_dma_probe, 1074 .probe = sa11x0_dma_probe,
1075 .remove = sa11x0_dma_remove, 1075 .remove = sa11x0_dma_remove,
1076 }; 1076 };
1077 1077
1078 bool sa11x0_dma_filter_fn(struct dma_chan *chan, void *param) 1078 bool sa11x0_dma_filter_fn(struct dma_chan *chan, void *param)
1079 { 1079 {
1080 if (chan->device->dev->driver == &sa11x0_dma_driver.driver) { 1080 if (chan->device->dev->driver == &sa11x0_dma_driver.driver) {
1081 struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan); 1081 struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
1082 const char *p = param; 1082 const char *p = param;
1083 1083
1084 return !strcmp(c->name, p); 1084 return !strcmp(c->name, p);
1085 } 1085 }
1086 return false; 1086 return false;
1087 } 1087 }
1088 EXPORT_SYMBOL(sa11x0_dma_filter_fn); 1088 EXPORT_SYMBOL(sa11x0_dma_filter_fn);
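A peripheral driver would pair this filter with one of the names from chan_desc[] above. A hedged sketch, with error handling elided; "Ser4SSPTr" is simply one of the listed names:

/* Hedged sketch: requesting a named sa11x0 channel through the filter. */
#include <linux/dmaengine.h>

static struct dma_chan *example_get_ssp_tx(void)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	return dma_request_channel(mask, sa11x0_dma_filter_fn, "Ser4SSPTr");
}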
1089 1089
1090 static int __init sa11x0_dma_init(void) 1090 static int __init sa11x0_dma_init(void)
1091 { 1091 {
1092 return platform_driver_register(&sa11x0_dma_driver); 1092 return platform_driver_register(&sa11x0_dma_driver);
1093 } 1093 }
1094 subsys_initcall(sa11x0_dma_init); 1094 subsys_initcall(sa11x0_dma_init);
1095 1095
1096 static void __exit sa11x0_dma_exit(void) 1096 static void __exit sa11x0_dma_exit(void)
1097 { 1097 {
1098 platform_driver_unregister(&sa11x0_dma_driver); 1098 platform_driver_unregister(&sa11x0_dma_driver);
1099 } 1099 }
1100 module_exit(sa11x0_dma_exit); 1100 module_exit(sa11x0_dma_exit);
1101 1101
1102 MODULE_AUTHOR("Russell King"); 1102 MODULE_AUTHOR("Russell King");
1103 MODULE_DESCRIPTION("SA-11x0 DMA driver"); 1103 MODULE_DESCRIPTION("SA-11x0 DMA driver");
1104 MODULE_LICENSE("GPL v2"); 1104 MODULE_LICENSE("GPL v2");
1105 MODULE_ALIAS("platform:sa11x0-dma"); 1105 MODULE_ALIAS("platform:sa11x0-dma");
1106 1106
drivers/dma/sh/shdma-base.c
1 /* 1 /*
2 * Dmaengine driver base library for DMA controllers, found on SH-based SoCs 2 * Dmaengine driver base library for DMA controllers, found on SH-based SoCs
3 * 3 *
4 * extracted from shdma.c 4 * extracted from shdma.c
5 * 5 *
6 * Copyright (C) 2011-2012 Guennadi Liakhovetski <g.liakhovetski@gmx.de> 6 * Copyright (C) 2011-2012 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
7 * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com> 7 * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
8 * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved. 8 * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
9 * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved. 9 * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
10 * 10 *
11 * This is free software; you can redistribute it and/or modify 11 * This is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as 12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation. 13 * published by the Free Software Foundation.
14 */ 14 */
15 15
16 #include <linux/delay.h> 16 #include <linux/delay.h>
17 #include <linux/shdma-base.h> 17 #include <linux/shdma-base.h>
18 #include <linux/dmaengine.h> 18 #include <linux/dmaengine.h>
19 #include <linux/init.h> 19 #include <linux/init.h>
20 #include <linux/interrupt.h> 20 #include <linux/interrupt.h>
21 #include <linux/module.h> 21 #include <linux/module.h>
22 #include <linux/pm_runtime.h> 22 #include <linux/pm_runtime.h>
23 #include <linux/slab.h> 23 #include <linux/slab.h>
24 #include <linux/spinlock.h> 24 #include <linux/spinlock.h>
25 25
26 #include "../dmaengine.h" 26 #include "../dmaengine.h"
27 27
28 /* DMA descriptor control */ 28 /* DMA descriptor control */
29 enum shdma_desc_status { 29 enum shdma_desc_status {
30 DESC_IDLE, 30 DESC_IDLE,
31 DESC_PREPARED, 31 DESC_PREPARED,
32 DESC_SUBMITTED, 32 DESC_SUBMITTED,
33 DESC_COMPLETED, /* completed, have to call callback */ 33 DESC_COMPLETED, /* completed, have to call callback */
34 DESC_WAITING, /* callback called, waiting for ack / re-submit */ 34 DESC_WAITING, /* callback called, waiting for ack / re-submit */
35 }; 35 };
36 36
37 #define NR_DESCS_PER_CHANNEL 32 37 #define NR_DESCS_PER_CHANNEL 32
38 38
39 #define to_shdma_chan(c) container_of(c, struct shdma_chan, dma_chan) 39 #define to_shdma_chan(c) container_of(c, struct shdma_chan, dma_chan)
40 #define to_shdma_dev(d) container_of(d, struct shdma_dev, dma_dev) 40 #define to_shdma_dev(d) container_of(d, struct shdma_dev, dma_dev)
41 41
42 /* 42 /*
43 * For slave DMA we assume that there is a finite number of DMA slaves in the 43 * For slave DMA we assume that there is a finite number of DMA slaves in the
44 * system, and that each such slave can only use a finite number of channels. 44 * system, and that each such slave can only use a finite number of channels.
45 * We use slave channel IDs to make sure that no such slave channel ID is 45 * We use slave channel IDs to make sure that no such slave channel ID is
46 * allocated more than once. 46 * allocated more than once.
47 */ 47 */
48 static unsigned int slave_num = 256; 48 static unsigned int slave_num = 256;
49 module_param(slave_num, uint, 0444); 49 module_param(slave_num, uint, 0444);
50 50
51 /* A bitmask with slave_num bits */ 51 /* A bitmask with slave_num bits */
52 static unsigned long *shdma_slave_used; 52 static unsigned long *shdma_slave_used;
53 53
54 /* Called under spin_lock_irq(&schan->chan_lock) */ 54 /* Called under spin_lock_irq(&schan->chan_lock) */
55 static void shdma_chan_xfer_ld_queue(struct shdma_chan *schan) 55 static void shdma_chan_xfer_ld_queue(struct shdma_chan *schan)
56 { 56 {
57 struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device); 57 struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
58 const struct shdma_ops *ops = sdev->ops; 58 const struct shdma_ops *ops = sdev->ops;
59 struct shdma_desc *sdesc; 59 struct shdma_desc *sdesc;
60 60
61 /* DMA work check */ 61 /* DMA work check */
62 if (ops->channel_busy(schan)) 62 if (ops->channel_busy(schan))
63 return; 63 return;
64 64
65 /* Find the first descriptor that has not been transferred */ 65 /* Find the first descriptor that has not been transferred */
66 list_for_each_entry(sdesc, &schan->ld_queue, node) 66 list_for_each_entry(sdesc, &schan->ld_queue, node)
67 if (sdesc->mark == DESC_SUBMITTED) { 67 if (sdesc->mark == DESC_SUBMITTED) {
68 ops->start_xfer(schan, sdesc); 68 ops->start_xfer(schan, sdesc);
69 break; 69 break;
70 } 70 }
71 } 71 }
72 72
73 static dma_cookie_t shdma_tx_submit(struct dma_async_tx_descriptor *tx) 73 static dma_cookie_t shdma_tx_submit(struct dma_async_tx_descriptor *tx)
74 { 74 {
75 struct shdma_desc *chunk, *c, *desc = 75 struct shdma_desc *chunk, *c, *desc =
76 container_of(tx, struct shdma_desc, async_tx), 76 container_of(tx, struct shdma_desc, async_tx),
77 *last = desc; 77 *last = desc;
78 struct shdma_chan *schan = to_shdma_chan(tx->chan); 78 struct shdma_chan *schan = to_shdma_chan(tx->chan);
79 dma_async_tx_callback callback = tx->callback; 79 dma_async_tx_callback callback = tx->callback;
80 dma_cookie_t cookie; 80 dma_cookie_t cookie;
81 bool power_up; 81 bool power_up;
82 82
83 spin_lock_irq(&schan->chan_lock); 83 spin_lock_irq(&schan->chan_lock);
84 84
85 power_up = list_empty(&schan->ld_queue); 85 power_up = list_empty(&schan->ld_queue);
86 86
87 cookie = dma_cookie_assign(tx); 87 cookie = dma_cookie_assign(tx);
88 88
89 /* Mark all chunks of this descriptor as submitted, move to the queue */ 89 /* Mark all chunks of this descriptor as submitted, move to the queue */
90 list_for_each_entry_safe(chunk, c, desc->node.prev, node) { 90 list_for_each_entry_safe(chunk, c, desc->node.prev, node) {
91 /* 91 /*
92 * All chunks are on the global ld_free, so we have to find 92 * All chunks are on the global ld_free, so we have to find
93 * the end of the chain ourselves 93 * the end of the chain ourselves
94 */ 94 */
95 if (chunk != desc && (chunk->mark == DESC_IDLE || 95 if (chunk != desc && (chunk->mark == DESC_IDLE ||
96 chunk->async_tx.cookie > 0 || 96 chunk->async_tx.cookie > 0 ||
97 chunk->async_tx.cookie == -EBUSY || 97 chunk->async_tx.cookie == -EBUSY ||
98 &chunk->node == &schan->ld_free)) 98 &chunk->node == &schan->ld_free))
99 break; 99 break;
100 chunk->mark = DESC_SUBMITTED; 100 chunk->mark = DESC_SUBMITTED;
101 /* Callback goes to the last chunk */ 101 /* Callback goes to the last chunk */
102 chunk->async_tx.callback = NULL; 102 chunk->async_tx.callback = NULL;
103 chunk->cookie = cookie; 103 chunk->cookie = cookie;
104 list_move_tail(&chunk->node, &schan->ld_queue); 104 list_move_tail(&chunk->node, &schan->ld_queue);
105 last = chunk; 105 last = chunk;
106 106
107 dev_dbg(schan->dev, "submit #%d@%p on %d\n", 107 dev_dbg(schan->dev, "submit #%d@%p on %d\n",
108 tx->cookie, &last->async_tx, schan->id); 108 tx->cookie, &last->async_tx, schan->id);
109 } 109 }
110 110
111 last->async_tx.callback = callback; 111 last->async_tx.callback = callback;
112 last->async_tx.callback_param = tx->callback_param; 112 last->async_tx.callback_param = tx->callback_param;
113 113
114 if (power_up) { 114 if (power_up) {
115 int ret; 115 int ret;
116 schan->pm_state = SHDMA_PM_BUSY; 116 schan->pm_state = SHDMA_PM_BUSY;
117 117
118 ret = pm_runtime_get(schan->dev); 118 ret = pm_runtime_get(schan->dev);
119 119
120 spin_unlock_irq(&schan->chan_lock); 120 spin_unlock_irq(&schan->chan_lock);
121 if (ret < 0) 121 if (ret < 0)
122 dev_err(schan->dev, "%s(): GET = %d\n", __func__, ret); 122 dev_err(schan->dev, "%s(): GET = %d\n", __func__, ret);
123 123
124 pm_runtime_barrier(schan->dev); 124 pm_runtime_barrier(schan->dev);
125 125
126 spin_lock_irq(&schan->chan_lock); 126 spin_lock_irq(&schan->chan_lock);
127 127
128 /* Have we been reset while waiting? */ 128 /* Have we been reset while waiting? */
129 if (schan->pm_state != SHDMA_PM_ESTABLISHED) { 129 if (schan->pm_state != SHDMA_PM_ESTABLISHED) {
130 struct shdma_dev *sdev = 130 struct shdma_dev *sdev =
131 to_shdma_dev(schan->dma_chan.device); 131 to_shdma_dev(schan->dma_chan.device);
132 const struct shdma_ops *ops = sdev->ops; 132 const struct shdma_ops *ops = sdev->ops;
133 dev_dbg(schan->dev, "Bring up channel %d\n", 133 dev_dbg(schan->dev, "Bring up channel %d\n",
134 schan->id); 134 schan->id);
135 /* 135 /*
136 * TODO: .xfer_setup() might fail on some platforms. 136 * TODO: .xfer_setup() might fail on some platforms.
137 * Make it int then, on error remove chunks from the 137 * Make it int then, on error remove chunks from the
138 * queue again 138 * queue again
139 */ 139 */
140 ops->setup_xfer(schan, schan->slave_id); 140 ops->setup_xfer(schan, schan->slave_id);
141 141
142 if (schan->pm_state == SHDMA_PM_PENDING) 142 if (schan->pm_state == SHDMA_PM_PENDING)
143 shdma_chan_xfer_ld_queue(schan); 143 shdma_chan_xfer_ld_queue(schan);
144 schan->pm_state = SHDMA_PM_ESTABLISHED; 144 schan->pm_state = SHDMA_PM_ESTABLISHED;
145 } 145 }
146 } else { 146 } else {
147 /* 147 /*
148 * Tell .device_issue_pending() not to run the queue; interrupts 148 * Tell .device_issue_pending() not to run the queue; interrupts
149 * will do it anyway 149 * will do it anyway
150 */ 150 */
151 schan->pm_state = SHDMA_PM_PENDING; 151 schan->pm_state = SHDMA_PM_PENDING;
152 } 152 }
153 153
154 spin_unlock_irq(&schan->chan_lock); 154 spin_unlock_irq(&schan->chan_lock);
155 155
156 return cookie; 156 return cookie;
157 } 157 }
158 158
159 /* Called with desc_lock held */ 159 /* Called with desc_lock held */
160 static struct shdma_desc *shdma_get_desc(struct shdma_chan *schan) 160 static struct shdma_desc *shdma_get_desc(struct shdma_chan *schan)
161 { 161 {
162 struct shdma_desc *sdesc; 162 struct shdma_desc *sdesc;
163 163
164 list_for_each_entry(sdesc, &schan->ld_free, node) 164 list_for_each_entry(sdesc, &schan->ld_free, node)
165 if (sdesc->mark != DESC_PREPARED) { 165 if (sdesc->mark != DESC_PREPARED) {
166 BUG_ON(sdesc->mark != DESC_IDLE); 166 BUG_ON(sdesc->mark != DESC_IDLE);
167 list_del(&sdesc->node); 167 list_del(&sdesc->node);
168 return sdesc; 168 return sdesc;
169 } 169 }
170 170
171 return NULL; 171 return NULL;
172 } 172 }
173 173
174 static int shdma_setup_slave(struct shdma_chan *schan, int slave_id, 174 static int shdma_setup_slave(struct shdma_chan *schan, int slave_id,
175 dma_addr_t slave_addr) 175 dma_addr_t slave_addr)
176 { 176 {
177 struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device); 177 struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
178 const struct shdma_ops *ops = sdev->ops; 178 const struct shdma_ops *ops = sdev->ops;
179 int ret, match; 179 int ret, match;
180 180
181 if (schan->dev->of_node) { 181 if (schan->dev->of_node) {
182 match = schan->hw_req; 182 match = schan->hw_req;
183 ret = ops->set_slave(schan, match, slave_addr, true); 183 ret = ops->set_slave(schan, match, slave_addr, true);
184 if (ret < 0) 184 if (ret < 0)
185 return ret; 185 return ret;
186 186
187 slave_id = schan->slave_id; 187 slave_id = schan->slave_id;
188 } else { 188 } else {
189 match = slave_id; 189 match = slave_id;
190 } 190 }
191 191
192 if (slave_id < 0 || slave_id >= slave_num) 192 if (slave_id < 0 || slave_id >= slave_num)
193 return -EINVAL; 193 return -EINVAL;
194 194
195 if (test_and_set_bit(slave_id, shdma_slave_used)) 195 if (test_and_set_bit(slave_id, shdma_slave_used))
196 return -EBUSY; 196 return -EBUSY;
197 197
198 ret = ops->set_slave(schan, match, slave_addr, false); 198 ret = ops->set_slave(schan, match, slave_addr, false);
199 if (ret < 0) { 199 if (ret < 0) {
200 clear_bit(slave_id, shdma_slave_used); 200 clear_bit(slave_id, shdma_slave_used);
201 return ret; 201 return ret;
202 } 202 }
203 203
204 schan->slave_id = slave_id; 204 schan->slave_id = slave_id;
205 205
206 return 0; 206 return 0;
207 } 207 }
208 208
209 /* 209 /*
210 * This is the standard shdma filter function to be used as a replacement for the 210 * This is the standard shdma filter function to be used as a replacement for the
211 * "old" method, using the .private pointer. If for some reason you allocate a 211 * "old" method, using the .private pointer. If for some reason you allocate a
212 * channel without slave data, use something like ERR_PTR(-EINVAL) as a filter 212 * channel without slave data, use something like ERR_PTR(-EINVAL) as a filter
213 * parameter. If this filter is used, the slave driver, after calling 213 * parameter. If this filter is used, the slave driver, after calling
214 * dma_request_channel(), will also have to call dmaengine_slave_config() with 214 * dma_request_channel(), will also have to call dmaengine_slave_config() with
215 * .slave_id, .direction, and either .src_addr or .dst_addr set. 215 * .slave_id, .direction, and either .src_addr or .dst_addr set.
216 * NOTE: this filter doesn't support multiple DMAC drivers with the DMA_SLAVE 216 * NOTE: this filter doesn't support multiple DMAC drivers with the DMA_SLAVE
217 * capability! If this becomes a requirement, hardware glue drivers using these 217 * capability! If this becomes a requirement, hardware glue drivers using these
218 * services would have to provide their own filters, which would first check 218 * services would have to provide their own filters, which would first check
219 * the device driver, similar to how other DMAC drivers, e.g., sa11x0-dma.c, do 219 * the device driver, similar to how other DMAC drivers, e.g., sa11x0-dma.c, do
220 * this, and only then, in case of a match, call this common filter. 220 * this, and only then, in case of a match, call this common filter.
221 * NOTE 2: This filter function is also used in the DT case by shdma_of_xlate(). 221 * NOTE 2: This filter function is also used in the DT case by shdma_of_xlate().
222 * In that case the MID-RID value is used for slave channel filtering and is 222 * In that case the MID-RID value is used for slave channel filtering and is
223 * passed to this function in the "arg" parameter. 223 * passed to this function in the "arg" parameter.
224 */ 224 */
225 bool shdma_chan_filter(struct dma_chan *chan, void *arg) 225 bool shdma_chan_filter(struct dma_chan *chan, void *arg)
226 { 226 {
227 struct shdma_chan *schan = to_shdma_chan(chan); 227 struct shdma_chan *schan = to_shdma_chan(chan);
228 struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device); 228 struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
229 const struct shdma_ops *ops = sdev->ops; 229 const struct shdma_ops *ops = sdev->ops;
230 int match = (int)arg; 230 int match = (int)arg;
231 int ret; 231 int ret;
232 232
233 if (match < 0) 233 if (match < 0)
234 /* No slave requested - arbitrary channel */ 234 /* No slave requested - arbitrary channel */
235 return true; 235 return true;
236 236
237 if (!schan->dev->of_node && match >= slave_num) 237 if (!schan->dev->of_node && match >= slave_num)
238 return false; 238 return false;
239 239
240 ret = ops->set_slave(schan, match, 0, true); 240 ret = ops->set_slave(schan, match, 0, true);
241 if (ret < 0) 241 if (ret < 0)
242 return false; 242 return false;
243 243
244 return true; 244 return true;
245 } 245 }
246 EXPORT_SYMBOL(shdma_chan_filter); 246 EXPORT_SYMBOL(shdma_chan_filter);
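Following the comment above, a hedged sketch of the intended usage: the slave id is handed to the filter and then repeated, together with the direction and device address, in the slave config. The id and FIFO address are hypothetical placeholders supplied by the caller.

/* Hedged sketch of the usage described in the comment above. */
#include <linux/dmaengine.h>

static struct dma_chan *example_request(int slave_id, dma_addr_t dev_fifo)
{
	struct dma_slave_config cfg = {
		.slave_id	= slave_id,
		.direction	= DMA_DEV_TO_MEM,
		.src_addr	= dev_fifo,		/* hypothetical device FIFO */
		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
	};
	dma_cap_mask_t mask;
	struct dma_chan *chan;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	chan = dma_request_channel(mask, shdma_chan_filter,
				   (void *)(unsigned long)slave_id);
	if (chan && dmaengine_slave_config(chan, &cfg) < 0) {
		dma_release_channel(chan);
		chan = NULL;
	}
	return chan;
}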
247 247
248 static int shdma_alloc_chan_resources(struct dma_chan *chan) 248 static int shdma_alloc_chan_resources(struct dma_chan *chan)
249 { 249 {
250 struct shdma_chan *schan = to_shdma_chan(chan); 250 struct shdma_chan *schan = to_shdma_chan(chan);
251 struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device); 251 struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
252 const struct shdma_ops *ops = sdev->ops; 252 const struct shdma_ops *ops = sdev->ops;
253 struct shdma_desc *desc; 253 struct shdma_desc *desc;
254 struct shdma_slave *slave = chan->private; 254 struct shdma_slave *slave = chan->private;
255 int ret, i; 255 int ret, i;
256 256
257 /* 257 /*
258 * This relies on the guarantee from dmaengine that alloc_chan_resources 258 * This relies on the guarantee from dmaengine that alloc_chan_resources
259 * never runs concurrently with itself or free_chan_resources. 259 * never runs concurrently with itself or free_chan_resources.
260 */ 260 */
261 if (slave) { 261 if (slave) {
262 /* Legacy mode: .private is set in filter */ 262 /* Legacy mode: .private is set in filter */
263 ret = shdma_setup_slave(schan, slave->slave_id, 0); 263 ret = shdma_setup_slave(schan, slave->slave_id, 0);
264 if (ret < 0) 264 if (ret < 0)
265 goto esetslave; 265 goto esetslave;
266 } else { 266 } else {
267 schan->slave_id = -EINVAL; 267 schan->slave_id = -EINVAL;
268 } 268 }
269 269
270 schan->desc = kcalloc(NR_DESCS_PER_CHANNEL, 270 schan->desc = kcalloc(NR_DESCS_PER_CHANNEL,
271 sdev->desc_size, GFP_KERNEL); 271 sdev->desc_size, GFP_KERNEL);
272 if (!schan->desc) { 272 if (!schan->desc) {
273 ret = -ENOMEM; 273 ret = -ENOMEM;
274 goto edescalloc; 274 goto edescalloc;
275 } 275 }
276 schan->desc_num = NR_DESCS_PER_CHANNEL; 276 schan->desc_num = NR_DESCS_PER_CHANNEL;
277 277
278 for (i = 0; i < NR_DESCS_PER_CHANNEL; i++) { 278 for (i = 0; i < NR_DESCS_PER_CHANNEL; i++) {
279 desc = ops->embedded_desc(schan->desc, i); 279 desc = ops->embedded_desc(schan->desc, i);
280 dma_async_tx_descriptor_init(&desc->async_tx, 280 dma_async_tx_descriptor_init(&desc->async_tx,
281 &schan->dma_chan); 281 &schan->dma_chan);
282 desc->async_tx.tx_submit = shdma_tx_submit; 282 desc->async_tx.tx_submit = shdma_tx_submit;
283 desc->mark = DESC_IDLE; 283 desc->mark = DESC_IDLE;
284 284
285 list_add(&desc->node, &schan->ld_free); 285 list_add(&desc->node, &schan->ld_free);
286 } 286 }
287 287
288 return NR_DESCS_PER_CHANNEL; 288 return NR_DESCS_PER_CHANNEL;
289 289
290 edescalloc: 290 edescalloc:
291 if (slave) 291 if (slave)
292 esetslave: 292 esetslave:
293 clear_bit(slave->slave_id, shdma_slave_used); 293 clear_bit(slave->slave_id, shdma_slave_used);
294 chan->private = NULL; 294 chan->private = NULL;
295 return ret; 295 return ret;
296 } 296 }
297 297
298 static dma_async_tx_callback __ld_cleanup(struct shdma_chan *schan, bool all) 298 static dma_async_tx_callback __ld_cleanup(struct shdma_chan *schan, bool all)
299 { 299 {
300 struct shdma_desc *desc, *_desc; 300 struct shdma_desc *desc, *_desc;
301 /* Is the "exposed" head of a chain acked? */ 301 /* Is the "exposed" head of a chain acked? */
302 bool head_acked = false; 302 bool head_acked = false;
303 dma_cookie_t cookie = 0; 303 dma_cookie_t cookie = 0;
304 dma_async_tx_callback callback = NULL; 304 dma_async_tx_callback callback = NULL;
305 void *param = NULL; 305 void *param = NULL;
306 unsigned long flags; 306 unsigned long flags;
307 307
308 spin_lock_irqsave(&schan->chan_lock, flags); 308 spin_lock_irqsave(&schan->chan_lock, flags);
309 list_for_each_entry_safe(desc, _desc, &schan->ld_queue, node) { 309 list_for_each_entry_safe(desc, _desc, &schan->ld_queue, node) {
310 struct dma_async_tx_descriptor *tx = &desc->async_tx; 310 struct dma_async_tx_descriptor *tx = &desc->async_tx;
311 311
312 BUG_ON(tx->cookie > 0 && tx->cookie != desc->cookie); 312 BUG_ON(tx->cookie > 0 && tx->cookie != desc->cookie);
313 BUG_ON(desc->mark != DESC_SUBMITTED && 313 BUG_ON(desc->mark != DESC_SUBMITTED &&
314 desc->mark != DESC_COMPLETED && 314 desc->mark != DESC_COMPLETED &&
315 desc->mark != DESC_WAITING); 315 desc->mark != DESC_WAITING);
316 316
317 /* 317 /*
318 * queue is ordered, and we use this loop to (1) clean up all 318 * queue is ordered, and we use this loop to (1) clean up all
319 * completed descriptors, and to (2) update descriptor flags of 319 * completed descriptors, and to (2) update descriptor flags of
320 * any chunks in a (partially) completed chain 320 * any chunks in a (partially) completed chain
321 */ 321 */
322 if (!all && desc->mark == DESC_SUBMITTED && 322 if (!all && desc->mark == DESC_SUBMITTED &&
323 desc->cookie != cookie) 323 desc->cookie != cookie)
324 break; 324 break;
325 325
326 if (tx->cookie > 0) 326 if (tx->cookie > 0)
327 cookie = tx->cookie; 327 cookie = tx->cookie;
328 328
329 if (desc->mark == DESC_COMPLETED && desc->chunks == 1) { 329 if (desc->mark == DESC_COMPLETED && desc->chunks == 1) {
330 if (schan->dma_chan.completed_cookie != desc->cookie - 1) 330 if (schan->dma_chan.completed_cookie != desc->cookie - 1)
331 dev_dbg(schan->dev, 331 dev_dbg(schan->dev,
332 "Completing cookie %d, expected %d\n", 332 "Completing cookie %d, expected %d\n",
333 desc->cookie, 333 desc->cookie,
334 schan->dma_chan.completed_cookie + 1); 334 schan->dma_chan.completed_cookie + 1);
335 schan->dma_chan.completed_cookie = desc->cookie; 335 schan->dma_chan.completed_cookie = desc->cookie;
336 } 336 }
337 337
338 /* Call callback on the last chunk */ 338 /* Call callback on the last chunk */
339 if (desc->mark == DESC_COMPLETED && tx->callback) { 339 if (desc->mark == DESC_COMPLETED && tx->callback) {
340 desc->mark = DESC_WAITING; 340 desc->mark = DESC_WAITING;
341 callback = tx->callback; 341 callback = tx->callback;
342 param = tx->callback_param; 342 param = tx->callback_param;
343 dev_dbg(schan->dev, "descriptor #%d@%p on %d callback\n", 343 dev_dbg(schan->dev, "descriptor #%d@%p on %d callback\n",
344 tx->cookie, tx, schan->id); 344 tx->cookie, tx, schan->id);
345 BUG_ON(desc->chunks != 1); 345 BUG_ON(desc->chunks != 1);
346 break; 346 break;
347 } 347 }
348 348
349 if (tx->cookie > 0 || tx->cookie == -EBUSY) { 349 if (tx->cookie > 0 || tx->cookie == -EBUSY) {
350 if (desc->mark == DESC_COMPLETED) { 350 if (desc->mark == DESC_COMPLETED) {
351 BUG_ON(tx->cookie < 0); 351 BUG_ON(tx->cookie < 0);
352 desc->mark = DESC_WAITING; 352 desc->mark = DESC_WAITING;
353 } 353 }
354 head_acked = async_tx_test_ack(tx); 354 head_acked = async_tx_test_ack(tx);
355 } else { 355 } else {
356 switch (desc->mark) { 356 switch (desc->mark) {
357 case DESC_COMPLETED: 357 case DESC_COMPLETED:
358 desc->mark = DESC_WAITING; 358 desc->mark = DESC_WAITING;
359 /* Fall through */ 359 /* Fall through */
360 case DESC_WAITING: 360 case DESC_WAITING:
361 if (head_acked) 361 if (head_acked)
362 async_tx_ack(&desc->async_tx); 362 async_tx_ack(&desc->async_tx);
363 } 363 }
364 } 364 }
365 365
366 dev_dbg(schan->dev, "descriptor %p #%d completed.\n", 366 dev_dbg(schan->dev, "descriptor %p #%d completed.\n",
367 tx, tx->cookie); 367 tx, tx->cookie);
368 368
369 if (((desc->mark == DESC_COMPLETED || 369 if (((desc->mark == DESC_COMPLETED ||
370 desc->mark == DESC_WAITING) && 370 desc->mark == DESC_WAITING) &&
371 async_tx_test_ack(&desc->async_tx)) || all) { 371 async_tx_test_ack(&desc->async_tx)) || all) {
372 /* Remove from ld_queue list */ 372 /* Remove from ld_queue list */
373 desc->mark = DESC_IDLE; 373 desc->mark = DESC_IDLE;
374 374
375 list_move(&desc->node, &schan->ld_free); 375 list_move(&desc->node, &schan->ld_free);
376 376
377 if (list_empty(&schan->ld_queue)) { 377 if (list_empty(&schan->ld_queue)) {
378 dev_dbg(schan->dev, "Bring down channel %d\n", schan->id); 378 dev_dbg(schan->dev, "Bring down channel %d\n", schan->id);
379 pm_runtime_put(schan->dev); 379 pm_runtime_put(schan->dev);
380 schan->pm_state = SHDMA_PM_ESTABLISHED; 380 schan->pm_state = SHDMA_PM_ESTABLISHED;
381 } 381 }
382 } 382 }
383 } 383 }
384 384
385 if (all && !callback) 385 if (all && !callback)
386 /* 386 /*
387 * Terminating and the loop completed normally: forgive 387 * Terminating and the loop completed normally: forgive
388 * uncompleted cookies 388 * uncompleted cookies
389 */ 389 */
390 schan->dma_chan.completed_cookie = schan->dma_chan.cookie; 390 schan->dma_chan.completed_cookie = schan->dma_chan.cookie;
391 391
392 spin_unlock_irqrestore(&schan->chan_lock, flags); 392 spin_unlock_irqrestore(&schan->chan_lock, flags);
393 393
394 if (callback) 394 if (callback)
395 callback(param); 395 callback(param);
396 396
397 return callback; 397 return callback;
398 } 398 }
399 399
400 /* 400 /*
401 * shdma_chan_ld_cleanup - Clean up link descriptors 401 * shdma_chan_ld_cleanup - Clean up link descriptors
402 * 402 *
403 * Clean up the ld_queue of the DMA channel. 403 * Clean up the ld_queue of the DMA channel.
404 */ 404 */
405 static void shdma_chan_ld_cleanup(struct shdma_chan *schan, bool all) 405 static void shdma_chan_ld_cleanup(struct shdma_chan *schan, bool all)
406 { 406 {
407 while (__ld_cleanup(schan, all)) 407 while (__ld_cleanup(schan, all))
408 ; 408 ;
409 } 409 }
410 410
411 /* 411 /*
412 * shdma_free_chan_resources - Free all resources of the channel. 412 * shdma_free_chan_resources - Free all resources of the channel.
413 */ 413 */
414 static void shdma_free_chan_resources(struct dma_chan *chan) 414 static void shdma_free_chan_resources(struct dma_chan *chan)
415 { 415 {
416 struct shdma_chan *schan = to_shdma_chan(chan); 416 struct shdma_chan *schan = to_shdma_chan(chan);
417 struct shdma_dev *sdev = to_shdma_dev(chan->device); 417 struct shdma_dev *sdev = to_shdma_dev(chan->device);
418 const struct shdma_ops *ops = sdev->ops; 418 const struct shdma_ops *ops = sdev->ops;
419 LIST_HEAD(list); 419 LIST_HEAD(list);
420 420
421 /* Protect against ISR */ 421 /* Protect against ISR */
422 spin_lock_irq(&schan->chan_lock); 422 spin_lock_irq(&schan->chan_lock);
423 ops->halt_channel(schan); 423 ops->halt_channel(schan);
424 spin_unlock_irq(&schan->chan_lock); 424 spin_unlock_irq(&schan->chan_lock);
425 425
426 /* Now no new interrupts will occur */ 426 /* Now no new interrupts will occur */
427 427
428 /* Prepared and not submitted descriptors can still be on the queue */ 428 /* Prepared and not submitted descriptors can still be on the queue */
429 if (!list_empty(&schan->ld_queue)) 429 if (!list_empty(&schan->ld_queue))
430 shdma_chan_ld_cleanup(schan, true); 430 shdma_chan_ld_cleanup(schan, true);
431 431
432 if (schan->slave_id >= 0) { 432 if (schan->slave_id >= 0) {
433 /* The caller is holding dma_list_mutex */ 433 /* The caller is holding dma_list_mutex */
434 clear_bit(schan->slave_id, shdma_slave_used); 434 clear_bit(schan->slave_id, shdma_slave_used);
435 chan->private = NULL; 435 chan->private = NULL;
436 } 436 }
437 437
438 spin_lock_irq(&schan->chan_lock); 438 spin_lock_irq(&schan->chan_lock);
439 439
440 list_splice_init(&schan->ld_free, &list); 440 list_splice_init(&schan->ld_free, &list);
441 schan->desc_num = 0; 441 schan->desc_num = 0;
442 442
443 spin_unlock_irq(&schan->chan_lock); 443 spin_unlock_irq(&schan->chan_lock);
444 444
445 kfree(schan->desc); 445 kfree(schan->desc);
446 } 446 }
447 447
448 /** 448 /**
449 * shdma_add_desc - get, set up and return one transfer descriptor 449 * shdma_add_desc - get, set up and return one transfer descriptor
450 * @schan: DMA channel 450 * @schan: DMA channel
451 * @flags: DMA transfer flags 451 * @flags: DMA transfer flags
452 * @dst: destination DMA address, incremented when direction equals 452 * @dst: destination DMA address, incremented when direction equals
453 * DMA_DEV_TO_MEM or DMA_MEM_TO_MEM 453 * DMA_DEV_TO_MEM or DMA_MEM_TO_MEM
454 * @src: source DMA address, incremented when direction equals 454 * @src: source DMA address, incremented when direction equals
455 * DMA_MEM_TO_DEV or DMA_MEM_TO_MEM 455 * DMA_MEM_TO_DEV or DMA_MEM_TO_MEM
456 * @len: DMA transfer length 456 * @len: DMA transfer length
457 * @first: if NULL, set to the current descriptor and cookie set to -EBUSY 457 * @first: if NULL, set to the current descriptor and cookie set to -EBUSY
458 * @direction: needed for slave DMA to decide which address to keep constant, 458 * @direction: needed for slave DMA to decide which address to keep constant,
459 * equals DMA_MEM_TO_MEM for MEMCPY 459 * equals DMA_MEM_TO_MEM for MEMCPY
460 * Returns the new descriptor or NULL on failure 460 * Returns the new descriptor or NULL on failure
461 * Locks: called with desc_lock held 461 * Locks: called with desc_lock held
462 */ 462 */
463 static struct shdma_desc *shdma_add_desc(struct shdma_chan *schan, 463 static struct shdma_desc *shdma_add_desc(struct shdma_chan *schan,
464 unsigned long flags, dma_addr_t *dst, dma_addr_t *src, size_t *len, 464 unsigned long flags, dma_addr_t *dst, dma_addr_t *src, size_t *len,
465 struct shdma_desc **first, enum dma_transfer_direction direction) 465 struct shdma_desc **first, enum dma_transfer_direction direction)
466 { 466 {
467 struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device); 467 struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
468 const struct shdma_ops *ops = sdev->ops; 468 const struct shdma_ops *ops = sdev->ops;
469 struct shdma_desc *new; 469 struct shdma_desc *new;
470 size_t copy_size = *len; 470 size_t copy_size = *len;
471 471
472 if (!copy_size) 472 if (!copy_size)
473 return NULL; 473 return NULL;
474 474
475 /* Allocate the link descriptor from the free list */ 475 /* Allocate the link descriptor from the free list */
476 new = shdma_get_desc(schan); 476 new = shdma_get_desc(schan);
477 if (!new) { 477 if (!new) {
478 dev_err(schan->dev, "No free link descriptor available\n"); 478 dev_err(schan->dev, "No free link descriptor available\n");
479 return NULL; 479 return NULL;
480 } 480 }
481 481
482 ops->desc_setup(schan, new, *src, *dst, &copy_size); 482 ops->desc_setup(schan, new, *src, *dst, &copy_size);
483 483
484 if (!*first) { 484 if (!*first) {
485 /* First desc */ 485 /* First desc */
486 new->async_tx.cookie = -EBUSY; 486 new->async_tx.cookie = -EBUSY;
487 *first = new; 487 *first = new;
488 } else { 488 } else {
489 /* Other desc - invisible to the user */ 489 /* Other desc - invisible to the user */
490 new->async_tx.cookie = -EINVAL; 490 new->async_tx.cookie = -EINVAL;
491 } 491 }
492 492
493 dev_dbg(schan->dev, 493 dev_dbg(schan->dev,
494 "chaining (%u/%u)@%x -> %x with %p, cookie %d\n", 494 "chaining (%u/%u)@%x -> %x with %p, cookie %d\n",
495 copy_size, *len, *src, *dst, &new->async_tx, 495 copy_size, *len, *src, *dst, &new->async_tx,
496 new->async_tx.cookie); 496 new->async_tx.cookie);
497 497
498 new->mark = DESC_PREPARED; 498 new->mark = DESC_PREPARED;
499 new->async_tx.flags = flags; 499 new->async_tx.flags = flags;
500 new->direction = direction; 500 new->direction = direction;
501 new->partial = 0; 501 new->partial = 0;
502 502
503 *len -= copy_size; 503 *len -= copy_size;
504 if (direction == DMA_MEM_TO_MEM || direction == DMA_MEM_TO_DEV) 504 if (direction == DMA_MEM_TO_MEM || direction == DMA_MEM_TO_DEV)
505 *src += copy_size; 505 *src += copy_size;
506 if (direction == DMA_MEM_TO_MEM || direction == DMA_DEV_TO_MEM) 506 if (direction == DMA_MEM_TO_MEM || direction == DMA_DEV_TO_MEM)
507 *dst += copy_size; 507 *dst += copy_size;
508 508
509 return new; 509 return new;
510 } 510 }
511 511
512 /* 512 /*
513 * shdma_prep_sg - prepare transfer descriptors from an SG list 513 * shdma_prep_sg - prepare transfer descriptors from an SG list
514 * 514 *
515 * Common routine for public (MEMCPY) and slave DMA. The MEMCPY case is also 515 * Common routine for public (MEMCPY) and slave DMA. The MEMCPY case is also
516 * converted to scatter-gather to guarantee consistent locking and a correct 516 * converted to scatter-gather to guarantee consistent locking and a correct
517 * list manipulation. For slave DMA, direction carries the usual meaning and, 517 * list manipulation. For slave DMA, direction carries the usual meaning and,
518 * logically, the SG list is RAM and the addr variable holds the slave address, 518 * logically, the SG list is RAM and the addr variable holds the slave address,
519 * e.g., the FIFO I/O register. For MEMCPY, direction equals DMA_MEM_TO_MEM 519 * e.g., the FIFO I/O register. For MEMCPY, direction equals DMA_MEM_TO_MEM
520 * and the SG list contains only one element, pointing at the source buffer. 520 * and the SG list contains only one element, pointing at the source buffer.
521 */ 521 */
522 static struct dma_async_tx_descriptor *shdma_prep_sg(struct shdma_chan *schan, 522 static struct dma_async_tx_descriptor *shdma_prep_sg(struct shdma_chan *schan,
523 struct scatterlist *sgl, unsigned int sg_len, dma_addr_t *addr, 523 struct scatterlist *sgl, unsigned int sg_len, dma_addr_t *addr,
524 enum dma_transfer_direction direction, unsigned long flags) 524 enum dma_transfer_direction direction, unsigned long flags)
525 { 525 {
526 struct scatterlist *sg; 526 struct scatterlist *sg;
527 struct shdma_desc *first = NULL, *new = NULL /* compiler... */; 527 struct shdma_desc *first = NULL, *new = NULL /* compiler... */;
528 LIST_HEAD(tx_list); 528 LIST_HEAD(tx_list);
529 int chunks = 0; 529 int chunks = 0;
530 unsigned long irq_flags; 530 unsigned long irq_flags;
531 int i; 531 int i;
532 532
533 for_each_sg(sgl, sg, sg_len, i) 533 for_each_sg(sgl, sg, sg_len, i)
534 chunks += DIV_ROUND_UP(sg_dma_len(sg), schan->max_xfer_len); 534 chunks += DIV_ROUND_UP(sg_dma_len(sg), schan->max_xfer_len);
535 535
536 /* Have to lock the whole loop to protect against concurrent release */ 536 /* Have to lock the whole loop to protect against concurrent release */
537 spin_lock_irqsave(&schan->chan_lock, irq_flags); 537 spin_lock_irqsave(&schan->chan_lock, irq_flags);
538 538
539 /* 539 /*
540 * Chaining: 540 * Chaining:
541 * the first descriptor is what the user deals with in all API calls; its 541 * the first descriptor is what the user deals with in all API calls; its
542 * cookie is at first set to -EBUSY and at tx-submit to a positive 542 * cookie is at first set to -EBUSY and at tx-submit to a positive
543 * number 543 * number
544 * if more than one chunk is needed further chunks have cookie = -EINVAL 544 * if more than one chunk is needed further chunks have cookie = -EINVAL
545 * the last chunk, if not equal to the first, has cookie = -ENOSPC 545 * the last chunk, if not equal to the first, has cookie = -ENOSPC
546 * all chunks are linked onto the tx_list head with their .node heads 546 * all chunks are linked onto the tx_list head with their .node heads
547 * only during this function, then they are immediately spliced 547 * only during this function, then they are immediately spliced
548 * back onto the free list in form of a chain 548 * back onto the free list in form of a chain
549 */ 549 */
550 for_each_sg(sgl, sg, sg_len, i) { 550 for_each_sg(sgl, sg, sg_len, i) {
551 dma_addr_t sg_addr = sg_dma_address(sg); 551 dma_addr_t sg_addr = sg_dma_address(sg);
552 size_t len = sg_dma_len(sg); 552 size_t len = sg_dma_len(sg);
553 553
554 if (!len) 554 if (!len)
555 goto err_get_desc; 555 goto err_get_desc;
556 556
557 do { 557 do {
558 dev_dbg(schan->dev, "Add SG #%d@%p[%d], dma %llx\n", 558 dev_dbg(schan->dev, "Add SG #%d@%p[%d], dma %llx\n",
559 i, sg, len, (unsigned long long)sg_addr); 559 i, sg, len, (unsigned long long)sg_addr);
560 560
561 if (direction == DMA_DEV_TO_MEM) 561 if (direction == DMA_DEV_TO_MEM)
562 new = shdma_add_desc(schan, flags, 562 new = shdma_add_desc(schan, flags,
563 &sg_addr, addr, &len, &first, 563 &sg_addr, addr, &len, &first,
564 direction); 564 direction);
565 else 565 else
566 new = shdma_add_desc(schan, flags, 566 new = shdma_add_desc(schan, flags,
567 addr, &sg_addr, &len, &first, 567 addr, &sg_addr, &len, &first,
568 direction); 568 direction);
569 if (!new) 569 if (!new)
570 goto err_get_desc; 570 goto err_get_desc;
571 571
572 new->chunks = chunks--; 572 new->chunks = chunks--;
573 list_add_tail(&new->node, &tx_list); 573 list_add_tail(&new->node, &tx_list);
574 } while (len); 574 } while (len);
575 } 575 }
576 576
577 if (new != first) 577 if (new != first)
578 new->async_tx.cookie = -ENOSPC; 578 new->async_tx.cookie = -ENOSPC;
579 579
580 /* Put them back on the free list so they don't get lost */ 580 /* Put them back on the free list so they don't get lost */
581 list_splice_tail(&tx_list, &schan->ld_free); 581 list_splice_tail(&tx_list, &schan->ld_free);
582 582
583 spin_unlock_irqrestore(&schan->chan_lock, irq_flags); 583 spin_unlock_irqrestore(&schan->chan_lock, irq_flags);
584 584
585 return &first->async_tx; 585 return &first->async_tx;
586 586
587 err_get_desc: 587 err_get_desc:
588 list_for_each_entry(new, &tx_list, node) 588 list_for_each_entry(new, &tx_list, node)
589 new->mark = DESC_IDLE; 589 new->mark = DESC_IDLE;
590 list_splice(&tx_list, &schan->ld_free); 590 list_splice(&tx_list, &schan->ld_free);
591 591
592 spin_unlock_irqrestore(&schan->chan_lock, irq_flags); 592 spin_unlock_irqrestore(&schan->chan_lock, irq_flags);
593 593
594 return NULL; 594 return NULL;
595 } 595 }
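For context, the descriptor chain built above is consumed through the standard dmaengine client API. The following is a minimal, illustrative sketch (not part of this file); the my_* names are hypothetical, the scatterlist is assumed to be already DMA-mapped, and error handling is reduced to the bare minimum.

#include <linux/completion.h>
#include <linux/dmaengine.h>
#include <linux/scatterlist.h>

static void my_dma_done(void *arg)
{
	complete(arg);				/* wake up the waiting thread */
}

static int my_start_rx(struct dma_chan *chan, struct scatterlist *sgl,
		       unsigned int nents, struct completion *done)
{
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	/* Ends up in shdma_prep_slave_sg() -> shdma_prep_sg() */
	desc = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_DEV_TO_MEM,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -EIO;

	desc->callback = my_dma_done;
	desc->callback_param = done;

	cookie = dmaengine_submit(desc);	/* tx_submit(): first chunk's cookie turns positive */
	if (dma_submit_error(cookie))
		return -EIO;

	dma_async_issue_pending(chan);		/* ends up in shdma_issue_pending() */
	return 0;
}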
596 596
597 static struct dma_async_tx_descriptor *shdma_prep_memcpy( 597 static struct dma_async_tx_descriptor *shdma_prep_memcpy(
598 struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src, 598 struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src,
599 size_t len, unsigned long flags) 599 size_t len, unsigned long flags)
600 { 600 {
601 struct shdma_chan *schan = to_shdma_chan(chan); 601 struct shdma_chan *schan = to_shdma_chan(chan);
602 struct scatterlist sg; 602 struct scatterlist sg;
603 603
604 if (!chan || !len) 604 if (!chan || !len)
605 return NULL; 605 return NULL;
606 606
607 BUG_ON(!schan->desc_num); 607 BUG_ON(!schan->desc_num);
608 608
609 sg_init_table(&sg, 1); 609 sg_init_table(&sg, 1);
610 sg_set_page(&sg, pfn_to_page(PFN_DOWN(dma_src)), len, 610 sg_set_page(&sg, pfn_to_page(PFN_DOWN(dma_src)), len,
611 offset_in_page(dma_src)); 611 offset_in_page(dma_src));
612 sg_dma_address(&sg) = dma_src; 612 sg_dma_address(&sg) = dma_src;
613 sg_dma_len(&sg) = len; 613 sg_dma_len(&sg) = len;
614 614
615 return shdma_prep_sg(schan, &sg, 1, &dma_dest, DMA_MEM_TO_MEM, flags); 615 return shdma_prep_sg(schan, &sg, 1, &dma_dest, DMA_MEM_TO_MEM, flags);
616 } 616 }
617 617
618 static struct dma_async_tx_descriptor *shdma_prep_slave_sg( 618 static struct dma_async_tx_descriptor *shdma_prep_slave_sg(
619 struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len, 619 struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
620 enum dma_transfer_direction direction, unsigned long flags, void *context) 620 enum dma_transfer_direction direction, unsigned long flags, void *context)
621 { 621 {
622 struct shdma_chan *schan = to_shdma_chan(chan); 622 struct shdma_chan *schan = to_shdma_chan(chan);
623 struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device); 623 struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
624 const struct shdma_ops *ops = sdev->ops; 624 const struct shdma_ops *ops = sdev->ops;
625 int slave_id = schan->slave_id; 625 int slave_id = schan->slave_id;
626 dma_addr_t slave_addr; 626 dma_addr_t slave_addr;
627 627
628 if (!chan) 628 if (!chan)
629 return NULL; 629 return NULL;
630 630
631 BUG_ON(!schan->desc_num); 631 BUG_ON(!schan->desc_num);
632 632
633 /* Someone calling slave DMA on a generic channel? */ 633 /* Someone calling slave DMA on a generic channel? */
634 if (slave_id < 0 || !sg_len) { 634 if (slave_id < 0 || !sg_len) {
635 dev_warn(schan->dev, "%s: bad parameter: len=%d, id=%d\n", 635 dev_warn(schan->dev, "%s: bad parameter: len=%d, id=%d\n",
636 __func__, sg_len, slave_id); 636 __func__, sg_len, slave_id);
637 return NULL; 637 return NULL;
638 } 638 }
639 639
640 slave_addr = ops->slave_addr(schan); 640 slave_addr = ops->slave_addr(schan);
641 641
642 return shdma_prep_sg(schan, sgl, sg_len, &slave_addr, 642 return shdma_prep_sg(schan, sgl, sg_len, &slave_addr,
643 direction, flags); 643 direction, flags);
644 } 644 }
645 645
646 static int shdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, 646 static int shdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
647 unsigned long arg) 647 unsigned long arg)
648 { 648 {
649 struct shdma_chan *schan = to_shdma_chan(chan); 649 struct shdma_chan *schan = to_shdma_chan(chan);
650 struct shdma_dev *sdev = to_shdma_dev(chan->device); 650 struct shdma_dev *sdev = to_shdma_dev(chan->device);
651 const struct shdma_ops *ops = sdev->ops; 651 const struct shdma_ops *ops = sdev->ops;
652 struct dma_slave_config *config; 652 struct dma_slave_config *config;
653 unsigned long flags; 653 unsigned long flags;
654 int ret; 654 int ret;
655 655
656 switch (cmd) { 656 switch (cmd) {
657 case DMA_TERMINATE_ALL: 657 case DMA_TERMINATE_ALL:
658 spin_lock_irqsave(&schan->chan_lock, flags); 658 spin_lock_irqsave(&schan->chan_lock, flags);
659 ops->halt_channel(schan); 659 ops->halt_channel(schan);
660 660
661 if (ops->get_partial && !list_empty(&schan->ld_queue)) { 661 if (ops->get_partial && !list_empty(&schan->ld_queue)) {
662 /* Record partial transfer */ 662 /* Record partial transfer */
663 struct shdma_desc *desc = list_first_entry(&schan->ld_queue, 663 struct shdma_desc *desc = list_first_entry(&schan->ld_queue,
664 struct shdma_desc, node); 664 struct shdma_desc, node);
665 desc->partial = ops->get_partial(schan, desc); 665 desc->partial = ops->get_partial(schan, desc);
666 } 666 }
667 667
668 spin_unlock_irqrestore(&schan->chan_lock, flags); 668 spin_unlock_irqrestore(&schan->chan_lock, flags);
669 669
670 shdma_chan_ld_cleanup(schan, true); 670 shdma_chan_ld_cleanup(schan, true);
671 break; 671 break;
672 case DMA_SLAVE_CONFIG: 672 case DMA_SLAVE_CONFIG:
673 /* 673 /*
674 * So far only .slave_id is used, but the slave drivers are 674 * So far only .slave_id is used, but the slave drivers are
675 * encouraged to also set a transfer direction and an address. 675 * encouraged to also set a transfer direction and an address.
676 */ 676 */
677 if (!arg) 677 if (!arg)
678 return -EINVAL; 678 return -EINVAL;
679 /* 679 /*
680 * We could lock this, but you shouldn't be configuring the 680 * We could lock this, but you shouldn't be configuring the
681 * channel, while using it... 681 * channel, while using it...
682 */ 682 */
683 config = (struct dma_slave_config *)arg; 683 config = (struct dma_slave_config *)arg;
684 ret = shdma_setup_slave(schan, config->slave_id, 684 ret = shdma_setup_slave(schan, config->slave_id,
685 config->direction == DMA_DEV_TO_MEM ? 685 config->direction == DMA_DEV_TO_MEM ?
686 config->src_addr : config->dst_addr); 686 config->src_addr : config->dst_addr);
687 if (ret < 0) 687 if (ret < 0)
688 return ret; 688 return ret;
689 break; 689 break;
690 default: 690 default:
691 return -ENXIO; 691 return -ENXIO;
692 } 692 }
693 693
694 return 0; 694 return 0;
695 } 695 }
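The DMA_SLAVE_CONFIG branch above only consumes .slave_id and one of the two addresses. A client normally reaches it through dmaengine_slave_config(); a hedged sketch follows, with made-up slave_id and FIFO address values.

static int my_configure_tx(struct dma_chan *chan)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_MEM_TO_DEV,
		.dst_addr	= 0xe6c40024,	/* hypothetical device FIFO address */
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_1_BYTE,
		.slave_id	= 42,		/* hypothetical slave/request ID */
	};

	/* Wraps chan->device->device_control(chan, DMA_SLAVE_CONFIG, &cfg) */
	return dmaengine_slave_config(chan, &cfg);
}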
696 696
697 static void shdma_issue_pending(struct dma_chan *chan) 697 static void shdma_issue_pending(struct dma_chan *chan)
698 { 698 {
699 struct shdma_chan *schan = to_shdma_chan(chan); 699 struct shdma_chan *schan = to_shdma_chan(chan);
700 700
701 spin_lock_irq(&schan->chan_lock); 701 spin_lock_irq(&schan->chan_lock);
702 if (schan->pm_state == SHDMA_PM_ESTABLISHED) 702 if (schan->pm_state == SHDMA_PM_ESTABLISHED)
703 shdma_chan_xfer_ld_queue(schan); 703 shdma_chan_xfer_ld_queue(schan);
704 else 704 else
705 schan->pm_state = SHDMA_PM_PENDING; 705 schan->pm_state = SHDMA_PM_PENDING;
706 spin_unlock_irq(&schan->chan_lock); 706 spin_unlock_irq(&schan->chan_lock);
707 } 707 }
708 708
709 static enum dma_status shdma_tx_status(struct dma_chan *chan, 709 static enum dma_status shdma_tx_status(struct dma_chan *chan,
710 dma_cookie_t cookie, 710 dma_cookie_t cookie,
711 struct dma_tx_state *txstate) 711 struct dma_tx_state *txstate)
712 { 712 {
713 struct shdma_chan *schan = to_shdma_chan(chan); 713 struct shdma_chan *schan = to_shdma_chan(chan);
714 enum dma_status status; 714 enum dma_status status;
715 unsigned long flags; 715 unsigned long flags;
716 716
717 shdma_chan_ld_cleanup(schan, false); 717 shdma_chan_ld_cleanup(schan, false);
718 718
719 spin_lock_irqsave(&schan->chan_lock, flags); 719 spin_lock_irqsave(&schan->chan_lock, flags);
720 720
721 status = dma_cookie_status(chan, cookie, txstate); 721 status = dma_cookie_status(chan, cookie, txstate);
722 722
723 /* 723 /*
724 * If we don't find cookie on the queue, it has been aborted and we have 724 * If we don't find cookie on the queue, it has been aborted and we have
725 * to report error 725 * to report error
726 */ 726 */
727 if (status != DMA_SUCCESS) { 727 if (status != DMA_COMPLETE) {
728 struct shdma_desc *sdesc; 728 struct shdma_desc *sdesc;
729 status = DMA_ERROR; 729 status = DMA_ERROR;
730 list_for_each_entry(sdesc, &schan->ld_queue, node) 730 list_for_each_entry(sdesc, &schan->ld_queue, node)
731 if (sdesc->cookie == cookie) { 731 if (sdesc->cookie == cookie) {
732 status = DMA_IN_PROGRESS; 732 status = DMA_IN_PROGRESS;
733 break; 733 break;
734 } 734 }
735 } 735 }
736 736
737 spin_unlock_irqrestore(&schan->chan_lock, flags); 737 spin_unlock_irqrestore(&schan->chan_lock, flags);
738 738
739 return status; 739 return status;
740 } 740 }
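From the client side, the status reported above is typically polled via dmaengine_tx_status(). A minimal, busy-waiting sketch (illustrative only; a real driver would normally sleep on a completion instead):

static bool my_wait_done(struct dma_chan *chan, dma_cookie_t cookie)
{
	struct dma_tx_state state;
	enum dma_status status;

	do {
		status = dmaengine_tx_status(chan, cookie, &state);
		if (status == DMA_ERROR)	/* aborted: cookie no longer on the queue */
			return false;
		cpu_relax();
	} while (status != DMA_COMPLETE);

	return true;
}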
741 741
742 /* Called from error IRQ or NMI */ 742 /* Called from error IRQ or NMI */
743 bool shdma_reset(struct shdma_dev *sdev) 743 bool shdma_reset(struct shdma_dev *sdev)
744 { 744 {
745 const struct shdma_ops *ops = sdev->ops; 745 const struct shdma_ops *ops = sdev->ops;
746 struct shdma_chan *schan; 746 struct shdma_chan *schan;
747 unsigned int handled = 0; 747 unsigned int handled = 0;
748 int i; 748 int i;
749 749
750 /* Reset all channels */ 750 /* Reset all channels */
751 shdma_for_each_chan(schan, sdev, i) { 751 shdma_for_each_chan(schan, sdev, i) {
752 struct shdma_desc *sdesc; 752 struct shdma_desc *sdesc;
753 LIST_HEAD(dl); 753 LIST_HEAD(dl);
754 754
755 if (!schan) 755 if (!schan)
756 continue; 756 continue;
757 757
758 spin_lock(&schan->chan_lock); 758 spin_lock(&schan->chan_lock);
759 759
760 /* Stop the channel */ 760 /* Stop the channel */
761 ops->halt_channel(schan); 761 ops->halt_channel(schan);
762 762
763 list_splice_init(&schan->ld_queue, &dl); 763 list_splice_init(&schan->ld_queue, &dl);
764 764
765 if (!list_empty(&dl)) { 765 if (!list_empty(&dl)) {
766 dev_dbg(schan->dev, "Bring down channel %d\n", schan->id); 766 dev_dbg(schan->dev, "Bring down channel %d\n", schan->id);
767 pm_runtime_put(schan->dev); 767 pm_runtime_put(schan->dev);
768 } 768 }
769 schan->pm_state = SHDMA_PM_ESTABLISHED; 769 schan->pm_state = SHDMA_PM_ESTABLISHED;
770 770
771 spin_unlock(&schan->chan_lock); 771 spin_unlock(&schan->chan_lock);
772 772
773 /* Complete all */ 773 /* Complete all */
774 list_for_each_entry(sdesc, &dl, node) { 774 list_for_each_entry(sdesc, &dl, node) {
775 struct dma_async_tx_descriptor *tx = &sdesc->async_tx; 775 struct dma_async_tx_descriptor *tx = &sdesc->async_tx;
776 sdesc->mark = DESC_IDLE; 776 sdesc->mark = DESC_IDLE;
777 if (tx->callback) 777 if (tx->callback)
778 tx->callback(tx->callback_param); 778 tx->callback(tx->callback_param);
779 } 779 }
780 780
781 spin_lock(&schan->chan_lock); 781 spin_lock(&schan->chan_lock);
782 list_splice(&dl, &schan->ld_free); 782 list_splice(&dl, &schan->ld_free);
783 spin_unlock(&schan->chan_lock); 783 spin_unlock(&schan->chan_lock);
784 784
785 handled++; 785 handled++;
786 } 786 }
787 787
788 return !!handled; 788 return !!handled;
789 } 789 }
790 EXPORT_SYMBOL(shdma_reset); 790 EXPORT_SYMBOL(shdma_reset);
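A glue driver's error interrupt handler could sit on top of shdma_reset() roughly as follows; this is an illustrative sketch only, and the controller-specific masking step is merely hinted at.

static irqreturn_t my_err_irq(int irq, void *data)
{
	struct shdma_dev *sdev = data;

	/* First quiesce/acknowledge the controller-specific error source... */

	/* ...then abort all queued descriptors and run their callbacks */
	return IRQ_RETVAL(shdma_reset(sdev));
}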
791 791
792 static irqreturn_t chan_irq(int irq, void *dev) 792 static irqreturn_t chan_irq(int irq, void *dev)
793 { 793 {
794 struct shdma_chan *schan = dev; 794 struct shdma_chan *schan = dev;
795 const struct shdma_ops *ops = 795 const struct shdma_ops *ops =
796 to_shdma_dev(schan->dma_chan.device)->ops; 796 to_shdma_dev(schan->dma_chan.device)->ops;
797 irqreturn_t ret; 797 irqreturn_t ret;
798 798
799 spin_lock(&schan->chan_lock); 799 spin_lock(&schan->chan_lock);
800 800
801 ret = ops->chan_irq(schan, irq) ? IRQ_WAKE_THREAD : IRQ_NONE; 801 ret = ops->chan_irq(schan, irq) ? IRQ_WAKE_THREAD : IRQ_NONE;
802 802
803 spin_unlock(&schan->chan_lock); 803 spin_unlock(&schan->chan_lock);
804 804
805 return ret; 805 return ret;
806 } 806 }
807 807
808 static irqreturn_t chan_irqt(int irq, void *dev) 808 static irqreturn_t chan_irqt(int irq, void *dev)
809 { 809 {
810 struct shdma_chan *schan = dev; 810 struct shdma_chan *schan = dev;
811 const struct shdma_ops *ops = 811 const struct shdma_ops *ops =
812 to_shdma_dev(schan->dma_chan.device)->ops; 812 to_shdma_dev(schan->dma_chan.device)->ops;
813 struct shdma_desc *sdesc; 813 struct shdma_desc *sdesc;
814 814
815 spin_lock_irq(&schan->chan_lock); 815 spin_lock_irq(&schan->chan_lock);
816 list_for_each_entry(sdesc, &schan->ld_queue, node) { 816 list_for_each_entry(sdesc, &schan->ld_queue, node) {
817 if (sdesc->mark == DESC_SUBMITTED && 817 if (sdesc->mark == DESC_SUBMITTED &&
818 ops->desc_completed(schan, sdesc)) { 818 ops->desc_completed(schan, sdesc)) {
819 dev_dbg(schan->dev, "done #%d@%p\n", 819 dev_dbg(schan->dev, "done #%d@%p\n",
820 sdesc->async_tx.cookie, &sdesc->async_tx); 820 sdesc->async_tx.cookie, &sdesc->async_tx);
821 sdesc->mark = DESC_COMPLETED; 821 sdesc->mark = DESC_COMPLETED;
822 break; 822 break;
823 } 823 }
824 } 824 }
825 /* Next desc */ 825 /* Next desc */
826 shdma_chan_xfer_ld_queue(schan); 826 shdma_chan_xfer_ld_queue(schan);
827 spin_unlock_irq(&schan->chan_lock); 827 spin_unlock_irq(&schan->chan_lock);
828 828
829 shdma_chan_ld_cleanup(schan, false); 829 shdma_chan_ld_cleanup(schan, false);
830 830
831 return IRQ_HANDLED; 831 return IRQ_HANDLED;
832 } 832 }
833 833
834 int shdma_request_irq(struct shdma_chan *schan, int irq, 834 int shdma_request_irq(struct shdma_chan *schan, int irq,
835 unsigned long flags, const char *name) 835 unsigned long flags, const char *name)
836 { 836 {
837 int ret = devm_request_threaded_irq(schan->dev, irq, chan_irq, 837 int ret = devm_request_threaded_irq(schan->dev, irq, chan_irq,
838 chan_irqt, flags, name, schan); 838 chan_irqt, flags, name, schan);
839 839
840 schan->irq = ret < 0 ? ret : irq; 840 schan->irq = ret < 0 ? ret : irq;
841 841
842 return ret; 842 return ret;
843 } 843 }
844 EXPORT_SYMBOL(shdma_request_irq); 844 EXPORT_SYMBOL(shdma_request_irq);
845 845
846 void shdma_chan_probe(struct shdma_dev *sdev, 846 void shdma_chan_probe(struct shdma_dev *sdev,
847 struct shdma_chan *schan, int id) 847 struct shdma_chan *schan, int id)
848 { 848 {
849 schan->pm_state = SHDMA_PM_ESTABLISHED; 849 schan->pm_state = SHDMA_PM_ESTABLISHED;
850 850
851 /* reference struct dma_device */ 851 /* reference struct dma_device */
852 schan->dma_chan.device = &sdev->dma_dev; 852 schan->dma_chan.device = &sdev->dma_dev;
853 dma_cookie_init(&schan->dma_chan); 853 dma_cookie_init(&schan->dma_chan);
854 854
855 schan->dev = sdev->dma_dev.dev; 855 schan->dev = sdev->dma_dev.dev;
856 schan->id = id; 856 schan->id = id;
857 857
858 if (!schan->max_xfer_len) 858 if (!schan->max_xfer_len)
859 schan->max_xfer_len = PAGE_SIZE; 859 schan->max_xfer_len = PAGE_SIZE;
860 860
861 spin_lock_init(&schan->chan_lock); 861 spin_lock_init(&schan->chan_lock);
862 862
863 /* Init descriptor management list */ 863 /* Init descriptor management list */
864 INIT_LIST_HEAD(&schan->ld_queue); 864 INIT_LIST_HEAD(&schan->ld_queue);
865 INIT_LIST_HEAD(&schan->ld_free); 865 INIT_LIST_HEAD(&schan->ld_free);
866 866
867 /* Add the channel to DMA device channel list */ 867 /* Add the channel to DMA device channel list */
868 list_add_tail(&schan->dma_chan.device_node, 868 list_add_tail(&schan->dma_chan.device_node,
869 &sdev->dma_dev.channels); 869 &sdev->dma_dev.channels);
870 sdev->schan[sdev->dma_dev.chancnt++] = schan; 870 sdev->schan[sdev->dma_dev.chancnt++] = schan;
871 } 871 }
872 EXPORT_SYMBOL(shdma_chan_probe); 872 EXPORT_SYMBOL(shdma_chan_probe);
873 873
874 void shdma_chan_remove(struct shdma_chan *schan) 874 void shdma_chan_remove(struct shdma_chan *schan)
875 { 875 {
876 list_del(&schan->dma_chan.device_node); 876 list_del(&schan->dma_chan.device_node);
877 } 877 }
878 EXPORT_SYMBOL(shdma_chan_remove); 878 EXPORT_SYMBOL(shdma_chan_remove);
879 879
880 int shdma_init(struct device *dev, struct shdma_dev *sdev, 880 int shdma_init(struct device *dev, struct shdma_dev *sdev,
881 int chan_num) 881 int chan_num)
882 { 882 {
883 struct dma_device *dma_dev = &sdev->dma_dev; 883 struct dma_device *dma_dev = &sdev->dma_dev;
884 884
885 /* 885 /*
886 * Require all call-backs for now, they can trivially be made optional 886 * Require all call-backs for now, they can trivially be made optional
887 * later as required 887 * later as required
888 */ 888 */
889 if (!sdev->ops || 889 if (!sdev->ops ||
890 !sdev->desc_size || 890 !sdev->desc_size ||
891 !sdev->ops->embedded_desc || 891 !sdev->ops->embedded_desc ||
892 !sdev->ops->start_xfer || 892 !sdev->ops->start_xfer ||
893 !sdev->ops->setup_xfer || 893 !sdev->ops->setup_xfer ||
894 !sdev->ops->set_slave || 894 !sdev->ops->set_slave ||
895 !sdev->ops->desc_setup || 895 !sdev->ops->desc_setup ||
896 !sdev->ops->slave_addr || 896 !sdev->ops->slave_addr ||
897 !sdev->ops->channel_busy || 897 !sdev->ops->channel_busy ||
898 !sdev->ops->halt_channel || 898 !sdev->ops->halt_channel ||
899 !sdev->ops->desc_completed) 899 !sdev->ops->desc_completed)
900 return -EINVAL; 900 return -EINVAL;
901 901
902 sdev->schan = kcalloc(chan_num, sizeof(*sdev->schan), GFP_KERNEL); 902 sdev->schan = kcalloc(chan_num, sizeof(*sdev->schan), GFP_KERNEL);
903 if (!sdev->schan) 903 if (!sdev->schan)
904 return -ENOMEM; 904 return -ENOMEM;
905 905
906 INIT_LIST_HEAD(&dma_dev->channels); 906 INIT_LIST_HEAD(&dma_dev->channels);
907 907
908 /* Common and MEMCPY operations */ 908 /* Common and MEMCPY operations */
909 dma_dev->device_alloc_chan_resources 909 dma_dev->device_alloc_chan_resources
910 = shdma_alloc_chan_resources; 910 = shdma_alloc_chan_resources;
911 dma_dev->device_free_chan_resources = shdma_free_chan_resources; 911 dma_dev->device_free_chan_resources = shdma_free_chan_resources;
912 dma_dev->device_prep_dma_memcpy = shdma_prep_memcpy; 912 dma_dev->device_prep_dma_memcpy = shdma_prep_memcpy;
913 dma_dev->device_tx_status = shdma_tx_status; 913 dma_dev->device_tx_status = shdma_tx_status;
914 dma_dev->device_issue_pending = shdma_issue_pending; 914 dma_dev->device_issue_pending = shdma_issue_pending;
915 915
916 /* Compulsory fields for DMA_SLAVE */ 916 /* Compulsory fields for DMA_SLAVE */
917 dma_dev->device_prep_slave_sg = shdma_prep_slave_sg; 917 dma_dev->device_prep_slave_sg = shdma_prep_slave_sg;
918 dma_dev->device_control = shdma_control; 918 dma_dev->device_control = shdma_control;
919 919
920 dma_dev->dev = dev; 920 dma_dev->dev = dev;
921 921
922 return 0; 922 return 0;
923 } 923 }
924 EXPORT_SYMBOL(shdma_init); 924 EXPORT_SYMBOL(shdma_init);
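To make the required call-backs concrete, this is roughly how a glue driver would sit on top of shdma_init(). A hedged sketch only: the my_* types, MY_CHAN_NUM and the empty ops table are hypothetical, and capability flags, per-channel setup and error unwinding are omitted (see shdma.c for a complete user).

#include <linux/platform_device.h>
#include <linux/shdma-base.h>

#define MY_CHAN_NUM	6		/* hypothetical channel count */

struct my_desc {
	struct shdma_desc shdma_desc;	/* returned by the .embedded_desc op */
	/* hardware-specific descriptor fields would follow */
};

struct my_dmae {
	struct shdma_dev shdma_dev;
	/* register base, clocks, ... */
};

static const struct shdma_ops my_ops = {
	/* .embedded_desc, .desc_setup, .start_xfer, .setup_xfer, .set_slave,
	 * .slave_addr, .channel_busy, .halt_channel and .desc_completed must
	 * all be provided, or shdma_init() returns -EINVAL */
};

static int my_probe(struct platform_device *pdev)
{
	struct my_dmae *my_dev;
	int ret;

	my_dev = devm_kzalloc(&pdev->dev, sizeof(*my_dev), GFP_KERNEL);
	if (!my_dev)
		return -ENOMEM;

	my_dev->shdma_dev.ops = &my_ops;
	my_dev->shdma_dev.desc_size = sizeof(struct my_desc);

	ret = shdma_init(&pdev->dev, &my_dev->shdma_dev, MY_CHAN_NUM);
	if (ret < 0)
		return ret;

	/* then one shdma_chan_probe() + shdma_request_irq() per channel... */

	return dma_async_device_register(&my_dev->shdma_dev.dma_dev);
}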
925 925
926 void shdma_cleanup(struct shdma_dev *sdev) 926 void shdma_cleanup(struct shdma_dev *sdev)
927 { 927 {
928 kfree(sdev->schan); 928 kfree(sdev->schan);
929 } 929 }
930 EXPORT_SYMBOL(shdma_cleanup); 930 EXPORT_SYMBOL(shdma_cleanup);
931 931
932 static int __init shdma_enter(void) 932 static int __init shdma_enter(void)
933 { 933 {
934 shdma_slave_used = kzalloc(DIV_ROUND_UP(slave_num, BITS_PER_LONG) * 934 shdma_slave_used = kzalloc(DIV_ROUND_UP(slave_num, BITS_PER_LONG) *
935 sizeof(long), GFP_KERNEL); 935 sizeof(long), GFP_KERNEL);
936 if (!shdma_slave_used) 936 if (!shdma_slave_used)
937 return -ENOMEM; 937 return -ENOMEM;
938 return 0; 938 return 0;
939 } 939 }
940 module_init(shdma_enter); 940 module_init(shdma_enter);
941 941
942 static void __exit shdma_exit(void) 942 static void __exit shdma_exit(void)
943 { 943 {
944 kfree(shdma_slave_used); 944 kfree(shdma_slave_used);
945 } 945 }
946 module_exit(shdma_exit); 946 module_exit(shdma_exit);
947 947
948 MODULE_LICENSE("GPL v2"); 948 MODULE_LICENSE("GPL v2");
949 MODULE_DESCRIPTION("SH-DMA driver base library"); 949 MODULE_DESCRIPTION("SH-DMA driver base library");
950 MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>"); 950 MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>");
951 951
drivers/dma/ste_dma40.c
1 /* 1 /*
2 * Copyright (C) Ericsson AB 2007-2008 2 * Copyright (C) Ericsson AB 2007-2008
3 * Copyright (C) ST-Ericsson SA 2008-2010 3 * Copyright (C) ST-Ericsson SA 2008-2010
4 * Author: Per Forlin <per.forlin@stericsson.com> for ST-Ericsson 4 * Author: Per Forlin <per.forlin@stericsson.com> for ST-Ericsson
5 * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson 5 * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
6 * License terms: GNU General Public License (GPL) version 2 6 * License terms: GNU General Public License (GPL) version 2
7 */ 7 */
8 8
9 #include <linux/dma-mapping.h> 9 #include <linux/dma-mapping.h>
10 #include <linux/kernel.h> 10 #include <linux/kernel.h>
11 #include <linux/slab.h> 11 #include <linux/slab.h>
12 #include <linux/export.h> 12 #include <linux/export.h>
13 #include <linux/dmaengine.h> 13 #include <linux/dmaengine.h>
14 #include <linux/platform_device.h> 14 #include <linux/platform_device.h>
15 #include <linux/clk.h> 15 #include <linux/clk.h>
16 #include <linux/delay.h> 16 #include <linux/delay.h>
17 #include <linux/log2.h> 17 #include <linux/log2.h>
18 #include <linux/pm.h> 18 #include <linux/pm.h>
19 #include <linux/pm_runtime.h> 19 #include <linux/pm_runtime.h>
20 #include <linux/err.h> 20 #include <linux/err.h>
21 #include <linux/of.h> 21 #include <linux/of.h>
22 #include <linux/of_dma.h> 22 #include <linux/of_dma.h>
23 #include <linux/amba/bus.h> 23 #include <linux/amba/bus.h>
24 #include <linux/regulator/consumer.h> 24 #include <linux/regulator/consumer.h>
25 #include <linux/platform_data/dma-ste-dma40.h> 25 #include <linux/platform_data/dma-ste-dma40.h>
26 26
27 #include "dmaengine.h" 27 #include "dmaengine.h"
28 #include "ste_dma40_ll.h" 28 #include "ste_dma40_ll.h"
29 29
30 #define D40_NAME "dma40" 30 #define D40_NAME "dma40"
31 31
32 #define D40_PHY_CHAN -1 32 #define D40_PHY_CHAN -1
33 33
34 /* For masking out/in 2 bit channel positions */ 34 /* For masking out/in 2 bit channel positions */
35 #define D40_CHAN_POS(chan) (2 * (chan / 2)) 35 #define D40_CHAN_POS(chan) (2 * (chan / 2))
36 #define D40_CHAN_POS_MASK(chan) (0x3 << D40_CHAN_POS(chan)) 36 #define D40_CHAN_POS_MASK(chan) (0x3 << D40_CHAN_POS(chan))
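Worked example for the two macros above (illustrative, not part of the driver): for chan = 5, D40_CHAN_POS(5) = 2 * (5 / 2) = 4, so D40_CHAN_POS_MASK(5) = 0x3 << 4 = 0x30, i.e. the mask selects the 2-bit field starting at bit position 4.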
37 37
38 /* Maximum iterations taken before giving up suspending a channel */ 38 /* Maximum iterations taken before giving up suspending a channel */
39 #define D40_SUSPEND_MAX_IT 500 39 #define D40_SUSPEND_MAX_IT 500
40 40
41 /* Milliseconds */ 41 /* Milliseconds */
42 #define DMA40_AUTOSUSPEND_DELAY 100 42 #define DMA40_AUTOSUSPEND_DELAY 100
43 43
44 /* Hardware requirement on LCLA alignment */ 44 /* Hardware requirement on LCLA alignment */
45 #define LCLA_ALIGNMENT 0x40000 45 #define LCLA_ALIGNMENT 0x40000
46 46
47 /* Max number of links per event group */ 47 /* Max number of links per event group */
48 #define D40_LCLA_LINK_PER_EVENT_GRP 128 48 #define D40_LCLA_LINK_PER_EVENT_GRP 128
49 #define D40_LCLA_END D40_LCLA_LINK_PER_EVENT_GRP 49 #define D40_LCLA_END D40_LCLA_LINK_PER_EVENT_GRP
50 50
51 /* Max number of logical channels per physical channel */ 51 /* Max number of logical channels per physical channel */
52 #define D40_MAX_LOG_CHAN_PER_PHY 32 52 #define D40_MAX_LOG_CHAN_PER_PHY 32
53 53
54 /* Attempts before giving up on trying to get aligned pages */ 54 /* Attempts before giving up on trying to get aligned pages */
55 #define MAX_LCLA_ALLOC_ATTEMPTS 256 55 #define MAX_LCLA_ALLOC_ATTEMPTS 256
56 56
57 /* Bit markings for allocation map */ 57 /* Bit markings for allocation map */
58 #define D40_ALLOC_FREE BIT(31) 58 #define D40_ALLOC_FREE BIT(31)
59 #define D40_ALLOC_PHY BIT(30) 59 #define D40_ALLOC_PHY BIT(30)
60 #define D40_ALLOC_LOG_FREE 0 60 #define D40_ALLOC_LOG_FREE 0
61 61
62 #define D40_MEMCPY_MAX_CHANS 8 62 #define D40_MEMCPY_MAX_CHANS 8
63 63
64 /* Reserved event lines for memcpy only. */ 64 /* Reserved event lines for memcpy only. */
65 #define DB8500_DMA_MEMCPY_EV_0 51 65 #define DB8500_DMA_MEMCPY_EV_0 51
66 #define DB8500_DMA_MEMCPY_EV_1 56 66 #define DB8500_DMA_MEMCPY_EV_1 56
67 #define DB8500_DMA_MEMCPY_EV_2 57 67 #define DB8500_DMA_MEMCPY_EV_2 57
68 #define DB8500_DMA_MEMCPY_EV_3 58 68 #define DB8500_DMA_MEMCPY_EV_3 58
69 #define DB8500_DMA_MEMCPY_EV_4 59 69 #define DB8500_DMA_MEMCPY_EV_4 59
70 #define DB8500_DMA_MEMCPY_EV_5 60 70 #define DB8500_DMA_MEMCPY_EV_5 60
71 71
72 static int dma40_memcpy_channels[] = { 72 static int dma40_memcpy_channels[] = {
73 DB8500_DMA_MEMCPY_EV_0, 73 DB8500_DMA_MEMCPY_EV_0,
74 DB8500_DMA_MEMCPY_EV_1, 74 DB8500_DMA_MEMCPY_EV_1,
75 DB8500_DMA_MEMCPY_EV_2, 75 DB8500_DMA_MEMCPY_EV_2,
76 DB8500_DMA_MEMCPY_EV_3, 76 DB8500_DMA_MEMCPY_EV_3,
77 DB8500_DMA_MEMCPY_EV_4, 77 DB8500_DMA_MEMCPY_EV_4,
78 DB8500_DMA_MEMCPY_EV_5, 78 DB8500_DMA_MEMCPY_EV_5,
79 }; 79 };
80 80
81 /* Default configuration for physical memcpy */ 81 /* Default configuration for physical memcpy */
82 static struct stedma40_chan_cfg dma40_memcpy_conf_phy = { 82 static struct stedma40_chan_cfg dma40_memcpy_conf_phy = {
83 .mode = STEDMA40_MODE_PHYSICAL, 83 .mode = STEDMA40_MODE_PHYSICAL,
84 .dir = DMA_MEM_TO_MEM, 84 .dir = DMA_MEM_TO_MEM,
85 85
86 .src_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE, 86 .src_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
87 .src_info.psize = STEDMA40_PSIZE_PHY_1, 87 .src_info.psize = STEDMA40_PSIZE_PHY_1,
88 .src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL, 88 .src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
89 89
90 .dst_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE, 90 .dst_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
91 .dst_info.psize = STEDMA40_PSIZE_PHY_1, 91 .dst_info.psize = STEDMA40_PSIZE_PHY_1,
92 .dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL, 92 .dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
93 }; 93 };
94 94
95 /* Default configuration for logical memcpy */ 95 /* Default configuration for logical memcpy */
96 static struct stedma40_chan_cfg dma40_memcpy_conf_log = { 96 static struct stedma40_chan_cfg dma40_memcpy_conf_log = {
97 .mode = STEDMA40_MODE_LOGICAL, 97 .mode = STEDMA40_MODE_LOGICAL,
98 .dir = DMA_MEM_TO_MEM, 98 .dir = DMA_MEM_TO_MEM,
99 99
100 .src_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE, 100 .src_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
101 .src_info.psize = STEDMA40_PSIZE_LOG_1, 101 .src_info.psize = STEDMA40_PSIZE_LOG_1,
102 .src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL, 102 .src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
103 103
104 .dst_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE, 104 .dst_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
105 .dst_info.psize = STEDMA40_PSIZE_LOG_1, 105 .dst_info.psize = STEDMA40_PSIZE_LOG_1,
106 .dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL, 106 .dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
107 }; 107 };
108 108
109 /** 109 /**
110 * enum d40_command - The different commands and/or statuses. 110 * enum d40_command - The different commands and/or statuses.
111 * 111 *
112 * @D40_DMA_STOP: DMA channel command STOP or status STOPPED, 112 * @D40_DMA_STOP: DMA channel command STOP or status STOPPED,
113 * @D40_DMA_RUN: The DMA channel is RUNNING or the command RUN. 113 * @D40_DMA_RUN: The DMA channel is RUNNING or the command RUN.
114 * @D40_DMA_SUSPEND_REQ: Request the DMA to SUSPEND as soon as possible. 114 * @D40_DMA_SUSPEND_REQ: Request the DMA to SUSPEND as soon as possible.
115 * @D40_DMA_SUSPENDED: The DMA channel is SUSPENDED. 115 * @D40_DMA_SUSPENDED: The DMA channel is SUSPENDED.
116 */ 116 */
117 enum d40_command { 117 enum d40_command {
118 D40_DMA_STOP = 0, 118 D40_DMA_STOP = 0,
119 D40_DMA_RUN = 1, 119 D40_DMA_RUN = 1,
120 D40_DMA_SUSPEND_REQ = 2, 120 D40_DMA_SUSPEND_REQ = 2,
121 D40_DMA_SUSPENDED = 3 121 D40_DMA_SUSPENDED = 3
122 }; 122 };
123 123
124 /* 124 /*
125 * enum d40_events - The different Event Enables for the event lines. 125 * enum d40_events - The different Event Enables for the event lines.
126 * 126 *
127 * @D40_DEACTIVATE_EVENTLINE: De-activate Event line, stopping the logical chan. 127 * @D40_DEACTIVATE_EVENTLINE: De-activate Event line, stopping the logical chan.
128 * @D40_ACTIVATE_EVENTLINE: Activate the Event line, to start a logical chan. 128 * @D40_ACTIVATE_EVENTLINE: Activate the Event line, to start a logical chan.
129 * @D40_SUSPEND_REQ_EVENTLINE: Request to suspend an event line. 129 * @D40_SUSPEND_REQ_EVENTLINE: Request to suspend an event line.
130 * @D40_ROUND_EVENTLINE: Status check for event line. 130 * @D40_ROUND_EVENTLINE: Status check for event line.
131 */ 131 */
132 132
133 enum d40_events { 133 enum d40_events {
134 D40_DEACTIVATE_EVENTLINE = 0, 134 D40_DEACTIVATE_EVENTLINE = 0,
135 D40_ACTIVATE_EVENTLINE = 1, 135 D40_ACTIVATE_EVENTLINE = 1,
136 D40_SUSPEND_REQ_EVENTLINE = 2, 136 D40_SUSPEND_REQ_EVENTLINE = 2,
137 D40_ROUND_EVENTLINE = 3 137 D40_ROUND_EVENTLINE = 3
138 }; 138 };
139 139
140 /* 140 /*
141 * These are the registers that have to be saved and later restored 141 * These are the registers that have to be saved and later restored
142 * when the DMA hw is powered off. 142 * when the DMA hw is powered off.
143 * TODO: Add save/restore of D40_DREG_GCC on dma40 v3 or later, if that works. 143 * TODO: Add save/restore of D40_DREG_GCC on dma40 v3 or later, if that works.
144 */ 144 */
145 static u32 d40_backup_regs[] = { 145 static u32 d40_backup_regs[] = {
146 D40_DREG_LCPA, 146 D40_DREG_LCPA,
147 D40_DREG_LCLA, 147 D40_DREG_LCLA,
148 D40_DREG_PRMSE, 148 D40_DREG_PRMSE,
149 D40_DREG_PRMSO, 149 D40_DREG_PRMSO,
150 D40_DREG_PRMOE, 150 D40_DREG_PRMOE,
151 D40_DREG_PRMOO, 151 D40_DREG_PRMOO,
152 }; 152 };
153 153
154 #define BACKUP_REGS_SZ ARRAY_SIZE(d40_backup_regs) 154 #define BACKUP_REGS_SZ ARRAY_SIZE(d40_backup_regs)
155 155
156 /* 156 /*
157 * Since 9540 and 8540 have the same HW revision 157 * Since 9540 and 8540 have the same HW revision
158 * use v4a for 9540 or earlier 158 * use v4a for 9540 or earlier
159 * use v4b for 8540 or later 159 * use v4b for 8540 or later
160 * HW revision: 160 * HW revision:
161 * DB8500ed has revision 0 161 * DB8500ed has revision 0
162 * DB8500v1 has revision 2 162 * DB8500v1 has revision 2
163 * DB8500v2 has revision 3 163 * DB8500v2 has revision 3
164 * AP9540v1 has revision 4 164 * AP9540v1 has revision 4
165 * DB8540v1 has revision 4 165 * DB8540v1 has revision 4
166 * TODO: Check if all these registers have to be saved/restored on dma40 v4a 166 * TODO: Check if all these registers have to be saved/restored on dma40 v4a
167 */ 167 */
168 static u32 d40_backup_regs_v4a[] = { 168 static u32 d40_backup_regs_v4a[] = {
169 D40_DREG_PSEG1, 169 D40_DREG_PSEG1,
170 D40_DREG_PSEG2, 170 D40_DREG_PSEG2,
171 D40_DREG_PSEG3, 171 D40_DREG_PSEG3,
172 D40_DREG_PSEG4, 172 D40_DREG_PSEG4,
173 D40_DREG_PCEG1, 173 D40_DREG_PCEG1,
174 D40_DREG_PCEG2, 174 D40_DREG_PCEG2,
175 D40_DREG_PCEG3, 175 D40_DREG_PCEG3,
176 D40_DREG_PCEG4, 176 D40_DREG_PCEG4,
177 D40_DREG_RSEG1, 177 D40_DREG_RSEG1,
178 D40_DREG_RSEG2, 178 D40_DREG_RSEG2,
179 D40_DREG_RSEG3, 179 D40_DREG_RSEG3,
180 D40_DREG_RSEG4, 180 D40_DREG_RSEG4,
181 D40_DREG_RCEG1, 181 D40_DREG_RCEG1,
182 D40_DREG_RCEG2, 182 D40_DREG_RCEG2,
183 D40_DREG_RCEG3, 183 D40_DREG_RCEG3,
184 D40_DREG_RCEG4, 184 D40_DREG_RCEG4,
185 }; 185 };
186 186
187 #define BACKUP_REGS_SZ_V4A ARRAY_SIZE(d40_backup_regs_v4a) 187 #define BACKUP_REGS_SZ_V4A ARRAY_SIZE(d40_backup_regs_v4a)
188 188
189 static u32 d40_backup_regs_v4b[] = { 189 static u32 d40_backup_regs_v4b[] = {
190 D40_DREG_CPSEG1, 190 D40_DREG_CPSEG1,
191 D40_DREG_CPSEG2, 191 D40_DREG_CPSEG2,
192 D40_DREG_CPSEG3, 192 D40_DREG_CPSEG3,
193 D40_DREG_CPSEG4, 193 D40_DREG_CPSEG4,
194 D40_DREG_CPSEG5, 194 D40_DREG_CPSEG5,
195 D40_DREG_CPCEG1, 195 D40_DREG_CPCEG1,
196 D40_DREG_CPCEG2, 196 D40_DREG_CPCEG2,
197 D40_DREG_CPCEG3, 197 D40_DREG_CPCEG3,
198 D40_DREG_CPCEG4, 198 D40_DREG_CPCEG4,
199 D40_DREG_CPCEG5, 199 D40_DREG_CPCEG5,
200 D40_DREG_CRSEG1, 200 D40_DREG_CRSEG1,
201 D40_DREG_CRSEG2, 201 D40_DREG_CRSEG2,
202 D40_DREG_CRSEG3, 202 D40_DREG_CRSEG3,
203 D40_DREG_CRSEG4, 203 D40_DREG_CRSEG4,
204 D40_DREG_CRSEG5, 204 D40_DREG_CRSEG5,
205 D40_DREG_CRCEG1, 205 D40_DREG_CRCEG1,
206 D40_DREG_CRCEG2, 206 D40_DREG_CRCEG2,
207 D40_DREG_CRCEG3, 207 D40_DREG_CRCEG3,
208 D40_DREG_CRCEG4, 208 D40_DREG_CRCEG4,
209 D40_DREG_CRCEG5, 209 D40_DREG_CRCEG5,
210 }; 210 };
211 211
212 #define BACKUP_REGS_SZ_V4B ARRAY_SIZE(d40_backup_regs_v4b) 212 #define BACKUP_REGS_SZ_V4B ARRAY_SIZE(d40_backup_regs_v4b)
213 213
214 static u32 d40_backup_regs_chan[] = { 214 static u32 d40_backup_regs_chan[] = {
215 D40_CHAN_REG_SSCFG, 215 D40_CHAN_REG_SSCFG,
216 D40_CHAN_REG_SSELT, 216 D40_CHAN_REG_SSELT,
217 D40_CHAN_REG_SSPTR, 217 D40_CHAN_REG_SSPTR,
218 D40_CHAN_REG_SSLNK, 218 D40_CHAN_REG_SSLNK,
219 D40_CHAN_REG_SDCFG, 219 D40_CHAN_REG_SDCFG,
220 D40_CHAN_REG_SDELT, 220 D40_CHAN_REG_SDELT,
221 D40_CHAN_REG_SDPTR, 221 D40_CHAN_REG_SDPTR,
222 D40_CHAN_REG_SDLNK, 222 D40_CHAN_REG_SDLNK,
223 }; 223 };
224 224
225 #define BACKUP_REGS_SZ_MAX ((BACKUP_REGS_SZ_V4A > BACKUP_REGS_SZ_V4B) ? \ 225 #define BACKUP_REGS_SZ_MAX ((BACKUP_REGS_SZ_V4A > BACKUP_REGS_SZ_V4B) ? \
226 BACKUP_REGS_SZ_V4A : BACKUP_REGS_SZ_V4B) 226 BACKUP_REGS_SZ_V4A : BACKUP_REGS_SZ_V4B)
227 227
228 /** 228 /**
229 * struct d40_interrupt_lookup - lookup table for interrupt handler 229 * struct d40_interrupt_lookup - lookup table for interrupt handler
230 * 230 *
231 * @src: Interrupt mask register. 231 * @src: Interrupt mask register.
232 * @clr: Interrupt clear register. 232 * @clr: Interrupt clear register.
233 * @is_error: true if this is an error interrupt. 233 * @is_error: true if this is an error interrupt.
234 * @offset: start delta in the lookup_log_chans in d40_base. If equal to 234 * @offset: start delta in the lookup_log_chans in d40_base. If equal to
235 * D40_PHY_CHAN, the lookup_phy_chans shall be used instead. 235 * D40_PHY_CHAN, the lookup_phy_chans shall be used instead.
236 */ 236 */
237 struct d40_interrupt_lookup { 237 struct d40_interrupt_lookup {
238 u32 src; 238 u32 src;
239 u32 clr; 239 u32 clr;
240 bool is_error; 240 bool is_error;
241 int offset; 241 int offset;
242 }; 242 };
243 243
244 244
245 static struct d40_interrupt_lookup il_v4a[] = { 245 static struct d40_interrupt_lookup il_v4a[] = {
246 {D40_DREG_LCTIS0, D40_DREG_LCICR0, false, 0}, 246 {D40_DREG_LCTIS0, D40_DREG_LCICR0, false, 0},
247 {D40_DREG_LCTIS1, D40_DREG_LCICR1, false, 32}, 247 {D40_DREG_LCTIS1, D40_DREG_LCICR1, false, 32},
248 {D40_DREG_LCTIS2, D40_DREG_LCICR2, false, 64}, 248 {D40_DREG_LCTIS2, D40_DREG_LCICR2, false, 64},
249 {D40_DREG_LCTIS3, D40_DREG_LCICR3, false, 96}, 249 {D40_DREG_LCTIS3, D40_DREG_LCICR3, false, 96},
250 {D40_DREG_LCEIS0, D40_DREG_LCICR0, true, 0}, 250 {D40_DREG_LCEIS0, D40_DREG_LCICR0, true, 0},
251 {D40_DREG_LCEIS1, D40_DREG_LCICR1, true, 32}, 251 {D40_DREG_LCEIS1, D40_DREG_LCICR1, true, 32},
252 {D40_DREG_LCEIS2, D40_DREG_LCICR2, true, 64}, 252 {D40_DREG_LCEIS2, D40_DREG_LCICR2, true, 64},
253 {D40_DREG_LCEIS3, D40_DREG_LCICR3, true, 96}, 253 {D40_DREG_LCEIS3, D40_DREG_LCICR3, true, 96},
254 {D40_DREG_PCTIS, D40_DREG_PCICR, false, D40_PHY_CHAN}, 254 {D40_DREG_PCTIS, D40_DREG_PCICR, false, D40_PHY_CHAN},
255 {D40_DREG_PCEIS, D40_DREG_PCICR, true, D40_PHY_CHAN}, 255 {D40_DREG_PCEIS, D40_DREG_PCICR, true, D40_PHY_CHAN},
256 }; 256 };
257 257
258 static struct d40_interrupt_lookup il_v4b[] = { 258 static struct d40_interrupt_lookup il_v4b[] = {
259 {D40_DREG_CLCTIS1, D40_DREG_CLCICR1, false, 0}, 259 {D40_DREG_CLCTIS1, D40_DREG_CLCICR1, false, 0},
260 {D40_DREG_CLCTIS2, D40_DREG_CLCICR2, false, 32}, 260 {D40_DREG_CLCTIS2, D40_DREG_CLCICR2, false, 32},
261 {D40_DREG_CLCTIS3, D40_DREG_CLCICR3, false, 64}, 261 {D40_DREG_CLCTIS3, D40_DREG_CLCICR3, false, 64},
262 {D40_DREG_CLCTIS4, D40_DREG_CLCICR4, false, 96}, 262 {D40_DREG_CLCTIS4, D40_DREG_CLCICR4, false, 96},
263 {D40_DREG_CLCTIS5, D40_DREG_CLCICR5, false, 128}, 263 {D40_DREG_CLCTIS5, D40_DREG_CLCICR5, false, 128},
264 {D40_DREG_CLCEIS1, D40_DREG_CLCICR1, true, 0}, 264 {D40_DREG_CLCEIS1, D40_DREG_CLCICR1, true, 0},
265 {D40_DREG_CLCEIS2, D40_DREG_CLCICR2, true, 32}, 265 {D40_DREG_CLCEIS2, D40_DREG_CLCICR2, true, 32},
266 {D40_DREG_CLCEIS3, D40_DREG_CLCICR3, true, 64}, 266 {D40_DREG_CLCEIS3, D40_DREG_CLCICR3, true, 64},
267 {D40_DREG_CLCEIS4, D40_DREG_CLCICR4, true, 96}, 267 {D40_DREG_CLCEIS4, D40_DREG_CLCICR4, true, 96},
268 {D40_DREG_CLCEIS5, D40_DREG_CLCICR5, true, 128}, 268 {D40_DREG_CLCEIS5, D40_DREG_CLCICR5, true, 128},
269 {D40_DREG_CPCTIS, D40_DREG_CPCICR, false, D40_PHY_CHAN}, 269 {D40_DREG_CPCTIS, D40_DREG_CPCICR, false, D40_PHY_CHAN},
270 {D40_DREG_CPCEIS, D40_DREG_CPCICR, true, D40_PHY_CHAN}, 270 {D40_DREG_CPCEIS, D40_DREG_CPCICR, true, D40_PHY_CHAN},
271 }; 271 };
272 272
273 /** 273 /**
274 * struct d40_reg_val - simple lookup struct 274 * struct d40_reg_val - simple lookup struct
275 * 275 *
276 * @reg: The register. 276 * @reg: The register.
277 * @val: The value that belongs to the register in reg. 277 * @val: The value that belongs to the register in reg.
278 */ 278 */
279 struct d40_reg_val { 279 struct d40_reg_val {
280 unsigned int reg; 280 unsigned int reg;
281 unsigned int val; 281 unsigned int val;
282 }; 282 };
283 283
284 static __initdata struct d40_reg_val dma_init_reg_v4a[] = { 284 static __initdata struct d40_reg_val dma_init_reg_v4a[] = {
285 /* Clock every part of the DMA block from start */ 285 /* Clock every part of the DMA block from start */
286 { .reg = D40_DREG_GCC, .val = D40_DREG_GCC_ENABLE_ALL}, 286 { .reg = D40_DREG_GCC, .val = D40_DREG_GCC_ENABLE_ALL},
287 287
288 /* Interrupts on all logical channels */ 288 /* Interrupts on all logical channels */
289 { .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF}, 289 { .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF},
290 { .reg = D40_DREG_LCMIS1, .val = 0xFFFFFFFF}, 290 { .reg = D40_DREG_LCMIS1, .val = 0xFFFFFFFF},
291 { .reg = D40_DREG_LCMIS2, .val = 0xFFFFFFFF}, 291 { .reg = D40_DREG_LCMIS2, .val = 0xFFFFFFFF},
292 { .reg = D40_DREG_LCMIS3, .val = 0xFFFFFFFF}, 292 { .reg = D40_DREG_LCMIS3, .val = 0xFFFFFFFF},
293 { .reg = D40_DREG_LCICR0, .val = 0xFFFFFFFF}, 293 { .reg = D40_DREG_LCICR0, .val = 0xFFFFFFFF},
294 { .reg = D40_DREG_LCICR1, .val = 0xFFFFFFFF}, 294 { .reg = D40_DREG_LCICR1, .val = 0xFFFFFFFF},
295 { .reg = D40_DREG_LCICR2, .val = 0xFFFFFFFF}, 295 { .reg = D40_DREG_LCICR2, .val = 0xFFFFFFFF},
296 { .reg = D40_DREG_LCICR3, .val = 0xFFFFFFFF}, 296 { .reg = D40_DREG_LCICR3, .val = 0xFFFFFFFF},
297 { .reg = D40_DREG_LCTIS0, .val = 0xFFFFFFFF}, 297 { .reg = D40_DREG_LCTIS0, .val = 0xFFFFFFFF},
298 { .reg = D40_DREG_LCTIS1, .val = 0xFFFFFFFF}, 298 { .reg = D40_DREG_LCTIS1, .val = 0xFFFFFFFF},
299 { .reg = D40_DREG_LCTIS2, .val = 0xFFFFFFFF}, 299 { .reg = D40_DREG_LCTIS2, .val = 0xFFFFFFFF},
300 { .reg = D40_DREG_LCTIS3, .val = 0xFFFFFFFF} 300 { .reg = D40_DREG_LCTIS3, .val = 0xFFFFFFFF}
301 }; 301 };
302 static __initdata struct d40_reg_val dma_init_reg_v4b[] = { 302 static __initdata struct d40_reg_val dma_init_reg_v4b[] = {
303 /* Clock every part of the DMA block from start */ 303 /* Clock every part of the DMA block from start */
304 { .reg = D40_DREG_GCC, .val = D40_DREG_GCC_ENABLE_ALL}, 304 { .reg = D40_DREG_GCC, .val = D40_DREG_GCC_ENABLE_ALL},
305 305
306 /* Interrupts on all logical channels */ 306 /* Interrupts on all logical channels */
307 { .reg = D40_DREG_CLCMIS1, .val = 0xFFFFFFFF}, 307 { .reg = D40_DREG_CLCMIS1, .val = 0xFFFFFFFF},
308 { .reg = D40_DREG_CLCMIS2, .val = 0xFFFFFFFF}, 308 { .reg = D40_DREG_CLCMIS2, .val = 0xFFFFFFFF},
309 { .reg = D40_DREG_CLCMIS3, .val = 0xFFFFFFFF}, 309 { .reg = D40_DREG_CLCMIS3, .val = 0xFFFFFFFF},
310 { .reg = D40_DREG_CLCMIS4, .val = 0xFFFFFFFF}, 310 { .reg = D40_DREG_CLCMIS4, .val = 0xFFFFFFFF},
311 { .reg = D40_DREG_CLCMIS5, .val = 0xFFFFFFFF}, 311 { .reg = D40_DREG_CLCMIS5, .val = 0xFFFFFFFF},
312 { .reg = D40_DREG_CLCICR1, .val = 0xFFFFFFFF}, 312 { .reg = D40_DREG_CLCICR1, .val = 0xFFFFFFFF},
313 { .reg = D40_DREG_CLCICR2, .val = 0xFFFFFFFF}, 313 { .reg = D40_DREG_CLCICR2, .val = 0xFFFFFFFF},
314 { .reg = D40_DREG_CLCICR3, .val = 0xFFFFFFFF}, 314 { .reg = D40_DREG_CLCICR3, .val = 0xFFFFFFFF},
315 { .reg = D40_DREG_CLCICR4, .val = 0xFFFFFFFF}, 315 { .reg = D40_DREG_CLCICR4, .val = 0xFFFFFFFF},
316 { .reg = D40_DREG_CLCICR5, .val = 0xFFFFFFFF}, 316 { .reg = D40_DREG_CLCICR5, .val = 0xFFFFFFFF},
317 { .reg = D40_DREG_CLCTIS1, .val = 0xFFFFFFFF}, 317 { .reg = D40_DREG_CLCTIS1, .val = 0xFFFFFFFF},
318 { .reg = D40_DREG_CLCTIS2, .val = 0xFFFFFFFF}, 318 { .reg = D40_DREG_CLCTIS2, .val = 0xFFFFFFFF},
319 { .reg = D40_DREG_CLCTIS3, .val = 0xFFFFFFFF}, 319 { .reg = D40_DREG_CLCTIS3, .val = 0xFFFFFFFF},
320 { .reg = D40_DREG_CLCTIS4, .val = 0xFFFFFFFF}, 320 { .reg = D40_DREG_CLCTIS4, .val = 0xFFFFFFFF},
321 { .reg = D40_DREG_CLCTIS5, .val = 0xFFFFFFFF} 321 { .reg = D40_DREG_CLCTIS5, .val = 0xFFFFFFFF}
322 }; 322 };
323 323
324 /** 324 /**
325 * struct d40_lli_pool - Structure for keeping LLIs in memory 325 * struct d40_lli_pool - Structure for keeping LLIs in memory
326 * 326 *
327 * @base: Pointer to memory area when the pre_alloc_lli's are not large 327 * @base: Pointer to memory area when the pre_alloc_lli's are not large
328 * enough, i.e. bigger than the most common case, 1 dst and 1 src. NULL if 328 * enough, i.e. bigger than the most common case, 1 dst and 1 src. NULL if
329 * pre_alloc_lli is used. 329 * pre_alloc_lli is used.
330 * @dma_addr: DMA address, if mapped 330 * @dma_addr: DMA address, if mapped
331 * @size: The size in bytes of the memory at base or the size of pre_alloc_lli. 331 * @size: The size in bytes of the memory at base or the size of pre_alloc_lli.
332 * @pre_alloc_lli: Pre allocated area for the most common case of transfers, 332 * @pre_alloc_lli: Pre allocated area for the most common case of transfers,
333 * one buffer to one buffer. 333 * one buffer to one buffer.
334 */ 334 */
335 struct d40_lli_pool { 335 struct d40_lli_pool {
336 void *base; 336 void *base;
337 int size; 337 int size;
338 dma_addr_t dma_addr; 338 dma_addr_t dma_addr;
339 /* Space for dst and src, plus an extra for padding */ 339 /* Space for dst and src, plus an extra for padding */
340 u8 pre_alloc_lli[3 * sizeof(struct d40_phy_lli)]; 340 u8 pre_alloc_lli[3 * sizeof(struct d40_phy_lli)];
341 }; 341 };
342 342
343 /** 343 /**
344 * struct d40_desc - A descriptor is one DMA job. 344 * struct d40_desc - A descriptor is one DMA job.
345 * 345 *
346 * @lli_phy: LLI settings for physical channel. Both src and dst 346 * @lli_phy: LLI settings for physical channel. Both src and dst
347 * point into the lli_pool, to base if lli_len > 1 or to pre_alloc_lli if 347 * point into the lli_pool, to base if lli_len > 1 or to pre_alloc_lli if
348 * lli_len equals one. 348 * lli_len equals one.
349 * @lli_log: Same as above but for logical channels. 349 * @lli_log: Same as above but for logical channels.
350 * @lli_pool: The pool with two entries pre-allocated. 350 * @lli_pool: The pool with two entries pre-allocated.
351 * @lli_len: Number of llis of current descriptor. 351 * @lli_len: Number of llis of current descriptor.
352 * @lli_current: Number of transferred llis. 352 * @lli_current: Number of transferred llis.
353 * @lcla_alloc: Number of LCLA entries allocated. 353 * @lcla_alloc: Number of LCLA entries allocated.
354 * @txd: DMA engine struct. Used for among other things for communication 354 * @txd: DMA engine struct. Used for among other things for communication
355 * during a transfer. 355 * during a transfer.
356 * @node: List entry. 356 * @node: List entry.
357 * @is_in_client_list: true if the client owns this descriptor. 357 * @is_in_client_list: true if the client owns this descriptor.
358 * @cyclic: true if this is a cyclic job 358 * @cyclic: true if this is a cyclic job
359 * 359 *
360 * This descriptor is used for both logical and physical transfers. 360 * This descriptor is used for both logical and physical transfers.
361 */ 361 */
362 struct d40_desc { 362 struct d40_desc {
363 /* LLI physical */ 363 /* LLI physical */
364 struct d40_phy_lli_bidir lli_phy; 364 struct d40_phy_lli_bidir lli_phy;
365 /* LLI logical */ 365 /* LLI logical */
366 struct d40_log_lli_bidir lli_log; 366 struct d40_log_lli_bidir lli_log;
367 367
368 struct d40_lli_pool lli_pool; 368 struct d40_lli_pool lli_pool;
369 int lli_len; 369 int lli_len;
370 int lli_current; 370 int lli_current;
371 int lcla_alloc; 371 int lcla_alloc;
372 372
373 struct dma_async_tx_descriptor txd; 373 struct dma_async_tx_descriptor txd;
374 struct list_head node; 374 struct list_head node;
375 375
376 bool is_in_client_list; 376 bool is_in_client_list;
377 bool cyclic; 377 bool cyclic;
378 }; 378 };
379 379
380 /** 380 /**
381 * struct d40_lcla_pool - LCLA pool settings and data. 381 * struct d40_lcla_pool - LCLA pool settings and data.
382 * 382 *
383 * @base: The virtual address of LCLA. 18 bit aligned. 383 * @base: The virtual address of LCLA. 18 bit aligned.
384 * @base_unaligned: The original kmalloc pointer, if kmalloc is used. 384 * @base_unaligned: The original kmalloc pointer, if kmalloc is used.
385 * This pointer is only there for clean-up on error. 385 * This pointer is only there for clean-up on error.
386 * @pages: The number of pages needed for all physical channels. 386 * @pages: The number of pages needed for all physical channels.
387 * Only used later for clean-up on error 387 * Only used later for clean-up on error
388 * @lock: Lock to protect the content in this struct. 388 * @lock: Lock to protect the content in this struct.
389 * @alloc_map: big map of which LCLA entry is owned by which job. 389 * @alloc_map: big map of which LCLA entry is owned by which job.
390 */ 390 */
391 struct d40_lcla_pool { 391 struct d40_lcla_pool {
392 void *base; 392 void *base;
393 dma_addr_t dma_addr; 393 dma_addr_t dma_addr;
394 void *base_unaligned; 394 void *base_unaligned;
395 int pages; 395 int pages;
396 spinlock_t lock; 396 spinlock_t lock;
397 struct d40_desc **alloc_map; 397 struct d40_desc **alloc_map;
398 }; 398 };
399 399
400 /** 400 /**
401 * struct d40_phy_res - struct for handling eventlines mapped to physical 401 * struct d40_phy_res - struct for handling eventlines mapped to physical
402 * channels. 402 * channels.
403 * 403 *
404 * @lock: A lock protecting this entity. 404 * @lock: A lock protecting this entity.
405 * @reserved: True if used by secure world or otherwise. 405 * @reserved: True if used by secure world or otherwise.
406 * @num: The physical channel number of this entity. 406 * @num: The physical channel number of this entity.
407 * @allocated_src: Bit mapped to show which src event lines are mapped to 407 * @allocated_src: Bit mapped to show which src event lines are mapped to
408 * this physical channel. Can also be free or physically allocated. 408 * this physical channel. Can also be free or physically allocated.
409 * @allocated_dst: Same as for src, but for dst. 409 * @allocated_dst: Same as for src, but for dst.
410 * allocated_dst and allocated_src use the D40_ALLOC* defines as well as 410 * allocated_dst and allocated_src use the D40_ALLOC* defines as well as
411 * event line number. 411 * event line number.
412 * @use_soft_lli: To mark if the linked lists of the channel are managed by SW. 412 * @use_soft_lli: To mark if the linked lists of the channel are managed by SW.
413 */ 413 */
414 struct d40_phy_res { 414 struct d40_phy_res {
415 spinlock_t lock; 415 spinlock_t lock;
416 bool reserved; 416 bool reserved;
417 int num; 417 int num;
418 u32 allocated_src; 418 u32 allocated_src;
419 u32 allocated_dst; 419 u32 allocated_dst;
420 bool use_soft_lli; 420 bool use_soft_lli;
421 }; 421 };
422 422
423 struct d40_base; 423 struct d40_base;
424 424
425 /** 425 /**
426 * struct d40_chan - Struct that describes a channel. 426 * struct d40_chan - Struct that describes a channel.
427 * 427 *
428 * @lock: A spinlock to protect this struct. 428 * @lock: A spinlock to protect this struct.
429 * @log_num: The logical number, if any, of this channel. 429 * @log_num: The logical number, if any, of this channel.
430 * @pending_tx: The number of pending transfers. Used between interrupt handler 430 * @pending_tx: The number of pending transfers. Used between interrupt handler
431 * and tasklet. 431 * and tasklet.
432 * @busy: Set to true when transfer is ongoing on this channel. 432 * @busy: Set to true when transfer is ongoing on this channel.
433 * @phy_chan: Pointer to physical channel which this instance runs on. If this 433 * @phy_chan: Pointer to physical channel which this instance runs on. If this
434 * pointer is NULL, then the channel is not allocated. 434 * pointer is NULL, then the channel is not allocated.
435 * @chan: DMA engine handle. 435 * @chan: DMA engine handle.
436 * @tasklet: Tasklet that gets scheduled from interrupt context to complete a 436 * @tasklet: Tasklet that gets scheduled from interrupt context to complete a
437 * transfer and call client callback. 437 * transfer and call client callback.
438 * @client: Client-owned descriptor list. 438 * @client: Client-owned descriptor list.
439 * @pending_queue: Submitted jobs, to be issued by issue_pending() 439 * @pending_queue: Submitted jobs, to be issued by issue_pending()
440 * @active: Active descriptor. 440 * @active: Active descriptor.
441 * @done: Completed jobs 441 * @done: Completed jobs
442 * @queue: Queued jobs. 442 * @queue: Queued jobs.
443 * @prepare_queue: Prepared jobs. 443 * @prepare_queue: Prepared jobs.
444 * @dma_cfg: The client configuration of this dma channel. 444 * @dma_cfg: The client configuration of this dma channel.
445 * @configured: whether the dma_cfg configuration is valid 445 * @configured: whether the dma_cfg configuration is valid
446 * @base: Pointer to the device instance struct. 446 * @base: Pointer to the device instance struct.
447 * @src_def_cfg: Default cfg register setting for src. 447 * @src_def_cfg: Default cfg register setting for src.
448 * @dst_def_cfg: Default cfg register setting for dst. 448 * @dst_def_cfg: Default cfg register setting for dst.
449 * @log_def: Default logical channel settings. 449 * @log_def: Default logical channel settings.
450 * @lcpa: Pointer to dst and src lcpa settings. 450 * @lcpa: Pointer to dst and src lcpa settings.
451 * @runtime_addr: runtime configured address. 451 * @runtime_addr: runtime configured address.
452 * @runtime_direction: runtime configured direction. 452 * @runtime_direction: runtime configured direction.
453 * 453 *
454 * This struct can either "be" a logical or a physical channel. 454 * This struct can either "be" a logical or a physical channel.
455 */ 455 */
456 struct d40_chan { 456 struct d40_chan {
457 spinlock_t lock; 457 spinlock_t lock;
458 int log_num; 458 int log_num;
459 int pending_tx; 459 int pending_tx;
460 bool busy; 460 bool busy;
461 struct d40_phy_res *phy_chan; 461 struct d40_phy_res *phy_chan;
462 struct dma_chan chan; 462 struct dma_chan chan;
463 struct tasklet_struct tasklet; 463 struct tasklet_struct tasklet;
464 struct list_head client; 464 struct list_head client;
465 struct list_head pending_queue; 465 struct list_head pending_queue;
466 struct list_head active; 466 struct list_head active;
467 struct list_head done; 467 struct list_head done;
468 struct list_head queue; 468 struct list_head queue;
469 struct list_head prepare_queue; 469 struct list_head prepare_queue;
470 struct stedma40_chan_cfg dma_cfg; 470 struct stedma40_chan_cfg dma_cfg;
471 bool configured; 471 bool configured;
472 struct d40_base *base; 472 struct d40_base *base;
473 /* Default register configurations */ 473 /* Default register configurations */
474 u32 src_def_cfg; 474 u32 src_def_cfg;
475 u32 dst_def_cfg; 475 u32 dst_def_cfg;
476 struct d40_def_lcsp log_def; 476 struct d40_def_lcsp log_def;
477 struct d40_log_lli_full *lcpa; 477 struct d40_log_lli_full *lcpa;
478 /* Runtime reconfiguration */ 478 /* Runtime reconfiguration */
479 dma_addr_t runtime_addr; 479 dma_addr_t runtime_addr;
480 enum dma_transfer_direction runtime_direction; 480 enum dma_transfer_direction runtime_direction;
481 }; 481 };
482 482
483 /** 483 /**
484 * struct d40_gen_dmac - generic values to represent u8500/u8540 DMA 484 * struct d40_gen_dmac - generic values to represent u8500/u8540 DMA
485 * controller 485 * controller
486 * 486 *
487 * @backup: the pointer to the registers address array for backup 487 * @backup: the pointer to the registers address array for backup
488 * @backup_size: the size of the registers address array for backup 488 * @backup_size: the size of the registers address array for backup
489 * @realtime_en: the realtime enable register 489 * @realtime_en: the realtime enable register
490 * @realtime_clear: the realtime clear register 490 * @realtime_clear: the realtime clear register
491 * @high_prio_en: the high priority enable register 491 * @high_prio_en: the high priority enable register
492 * @high_prio_clear: the high priority clear register 492 * @high_prio_clear: the high priority clear register
493 * @interrupt_en: the interrupt enable register 493 * @interrupt_en: the interrupt enable register
494 * @interrupt_clear: the interrupt clear register 494 * @interrupt_clear: the interrupt clear register
495 * @il: the pointer to struct d40_interrupt_lookup 495 * @il: the pointer to struct d40_interrupt_lookup
496 * @il_size: the size of d40_interrupt_lookup array 496 * @il_size: the size of d40_interrupt_lookup array
497 * @init_reg: the pointer to the struct d40_reg_val 497 * @init_reg: the pointer to the struct d40_reg_val
498 * @init_reg_size: the size of d40_reg_val array 498 * @init_reg_size: the size of d40_reg_val array
499 */ 499 */
500 struct d40_gen_dmac { 500 struct d40_gen_dmac {
501 u32 *backup; 501 u32 *backup;
502 u32 backup_size; 502 u32 backup_size;
503 u32 realtime_en; 503 u32 realtime_en;
504 u32 realtime_clear; 504 u32 realtime_clear;
505 u32 high_prio_en; 505 u32 high_prio_en;
506 u32 high_prio_clear; 506 u32 high_prio_clear;
507 u32 interrupt_en; 507 u32 interrupt_en;
508 u32 interrupt_clear; 508 u32 interrupt_clear;
509 struct d40_interrupt_lookup *il; 509 struct d40_interrupt_lookup *il;
510 u32 il_size; 510 u32 il_size;
511 struct d40_reg_val *init_reg; 511 struct d40_reg_val *init_reg;
512 u32 init_reg_size; 512 u32 init_reg_size;
513 }; 513 };
514 514
515 /** 515 /**
516 * struct d40_base - The big global struct, one for each probed instance. 516 * struct d40_base - The big global struct, one for each probed instance.
517 * 517 *
518 * @interrupt_lock: Lock used to make sure one interrupt is handled at a time. 518 * @interrupt_lock: Lock used to make sure one interrupt is handled at a time.
519 * @execmd_lock: Lock for execute command usage since several channels share 519 * @execmd_lock: Lock for execute command usage since several channels share
520 * the same physical register. 520 * the same physical register.
521 * @dev: The device structure. 521 * @dev: The device structure.
522 * @virtbase: The virtual base address of the DMA's registers. 522 * @virtbase: The virtual base address of the DMA's registers.
523 * @rev: silicon revision detected. 523 * @rev: silicon revision detected.
524 * @clk: Pointer to the DMA clock structure. 524 * @clk: Pointer to the DMA clock structure.
525 * @phy_start: Physical memory start of the DMA registers. 525 * @phy_start: Physical memory start of the DMA registers.
526 * @phy_size: Size of the DMA register map. 526 * @phy_size: Size of the DMA register map.
527 * @irq: The IRQ number. 527 * @irq: The IRQ number.
528 * @num_memcpy_chans: The number of channels used for memcpy (mem-to-mem 528 * @num_memcpy_chans: The number of channels used for memcpy (mem-to-mem
529 * transfers). 529 * transfers).
530 * @num_phy_chans: The number of physical channels. Read from HW. This 530 * @num_phy_chans: The number of physical channels. Read from HW. This
531 * is the number of available channels for this driver, not counting "Secure 531 * is the number of available channels for this driver, not counting "Secure
532 * mode" allocated physical channels. 532 * mode" allocated physical channels.
533 * @num_log_chans: The number of logical channels. Calculated from 533 * @num_log_chans: The number of logical channels. Calculated from
534 * num_phy_chans. 534 * num_phy_chans.
535 * @dma_both: dma_device channels that can do both memcpy and slave transfers. 535 * @dma_both: dma_device channels that can do both memcpy and slave transfers.
536 * @dma_slave: dma_device channels that can only do slave transfers. 536 * @dma_slave: dma_device channels that can only do slave transfers.
537 * @dma_memcpy: dma_device channels that can only do memcpy transfers. 537 * @dma_memcpy: dma_device channels that can only do memcpy transfers.
538 * @phy_chans: Room for all possible physical channels in system. 538 * @phy_chans: Room for all possible physical channels in system.
539 * @log_chans: Room for all possible logical channels in system. 539 * @log_chans: Room for all possible logical channels in system.
540 * @lookup_log_chans: Used to map interrupt number to logical channel. Points 540 * @lookup_log_chans: Used to map interrupt number to logical channel. Points
541 * to log_chans entries. 541 * to log_chans entries.
542 * @lookup_phy_chans: Used to map interrupt number to physical channel. Points 542 * @lookup_phy_chans: Used to map interrupt number to physical channel. Points
543 * to phy_chans entries. 543 * to phy_chans entries.
544 * @plat_data: Pointer to provided platform_data which is the driver 544 * @plat_data: Pointer to provided platform_data which is the driver
545 * configuration. 545 * configuration.
546 * @lcpa_regulator: Pointer to the regulator for the esram bank used for lcla. 546 * @lcpa_regulator: Pointer to the regulator for the esram bank used for lcla.
547 * @phy_res: Vector containing all physical channels. 547 * @phy_res: Vector containing all physical channels.
548 * @lcla_pool: lcla pool settings and data. 548 * @lcla_pool: lcla pool settings and data.
549 * @lcpa_base: The virtual mapped address of LCPA. 549 * @lcpa_base: The virtual mapped address of LCPA.
550 * @phy_lcpa: The physical address of the LCPA. 550 * @phy_lcpa: The physical address of the LCPA.
551 * @lcpa_size: The size of the LCPA area. 551 * @lcpa_size: The size of the LCPA area.
552 * @desc_slab: cache for descriptors. 552 * @desc_slab: cache for descriptors.
553 * @reg_val_backup: Here the values of some hardware registers are stored 553 * @reg_val_backup: Here the values of some hardware registers are stored
554 * before the DMA is powered off. They are restored when the power is back on. 554 * before the DMA is powered off. They are restored when the power is back on.
555 * @reg_val_backup_v4: Backup of registers that only exist on dma40 v3 and 555 * @reg_val_backup_v4: Backup of registers that only exist on dma40 v3 and
556 * later 556 * later
557 * @reg_val_backup_chan: Backup data for standard channel parameter registers. 557 * @reg_val_backup_chan: Backup data for standard channel parameter registers.
558 * @gcc_pwr_off_mask: Mask to maintain the channels that can be turned off. 558 * @gcc_pwr_off_mask: Mask to maintain the channels that can be turned off.
559 * @initialized: true if the dma has been initialized 559 * @initialized: true if the dma has been initialized
560 * @gen_dmac: the struct for generic register values to represent u8500/8540 560 * @gen_dmac: the struct for generic register values to represent u8500/8540
561 * DMA controller 561 * DMA controller
562 */ 562 */
563 struct d40_base { 563 struct d40_base {
564 spinlock_t interrupt_lock; 564 spinlock_t interrupt_lock;
565 spinlock_t execmd_lock; 565 spinlock_t execmd_lock;
566 struct device *dev; 566 struct device *dev;
567 void __iomem *virtbase; 567 void __iomem *virtbase;
568 u8 rev:4; 568 u8 rev:4;
569 struct clk *clk; 569 struct clk *clk;
570 phys_addr_t phy_start; 570 phys_addr_t phy_start;
571 resource_size_t phy_size; 571 resource_size_t phy_size;
572 int irq; 572 int irq;
573 int num_memcpy_chans; 573 int num_memcpy_chans;
574 int num_phy_chans; 574 int num_phy_chans;
575 int num_log_chans; 575 int num_log_chans;
576 struct device_dma_parameters dma_parms; 576 struct device_dma_parameters dma_parms;
577 struct dma_device dma_both; 577 struct dma_device dma_both;
578 struct dma_device dma_slave; 578 struct dma_device dma_slave;
579 struct dma_device dma_memcpy; 579 struct dma_device dma_memcpy;
580 struct d40_chan *phy_chans; 580 struct d40_chan *phy_chans;
581 struct d40_chan *log_chans; 581 struct d40_chan *log_chans;
582 struct d40_chan **lookup_log_chans; 582 struct d40_chan **lookup_log_chans;
583 struct d40_chan **lookup_phy_chans; 583 struct d40_chan **lookup_phy_chans;
584 struct stedma40_platform_data *plat_data; 584 struct stedma40_platform_data *plat_data;
585 struct regulator *lcpa_regulator; 585 struct regulator *lcpa_regulator;
586 /* Physical half channels */ 586 /* Physical half channels */
587 struct d40_phy_res *phy_res; 587 struct d40_phy_res *phy_res;
588 struct d40_lcla_pool lcla_pool; 588 struct d40_lcla_pool lcla_pool;
589 void *lcpa_base; 589 void *lcpa_base;
590 dma_addr_t phy_lcpa; 590 dma_addr_t phy_lcpa;
591 resource_size_t lcpa_size; 591 resource_size_t lcpa_size;
592 struct kmem_cache *desc_slab; 592 struct kmem_cache *desc_slab;
593 u32 reg_val_backup[BACKUP_REGS_SZ]; 593 u32 reg_val_backup[BACKUP_REGS_SZ];
594 u32 reg_val_backup_v4[BACKUP_REGS_SZ_MAX]; 594 u32 reg_val_backup_v4[BACKUP_REGS_SZ_MAX];
595 u32 *reg_val_backup_chan; 595 u32 *reg_val_backup_chan;
596 u16 gcc_pwr_off_mask; 596 u16 gcc_pwr_off_mask;
597 bool initialized; 597 bool initialized;
598 struct d40_gen_dmac gen_dmac; 598 struct d40_gen_dmac gen_dmac;
599 }; 599 };
600 600
601 static struct device *chan2dev(struct d40_chan *d40c) 601 static struct device *chan2dev(struct d40_chan *d40c)
602 { 602 {
603 return &d40c->chan.dev->device; 603 return &d40c->chan.dev->device;
604 } 604 }
605 605
606 static bool chan_is_physical(struct d40_chan *chan) 606 static bool chan_is_physical(struct d40_chan *chan)
607 { 607 {
608 return chan->log_num == D40_PHY_CHAN; 608 return chan->log_num == D40_PHY_CHAN;
609 } 609 }
610 610
611 static bool chan_is_logical(struct d40_chan *chan) 611 static bool chan_is_logical(struct d40_chan *chan)
612 { 612 {
613 return !chan_is_physical(chan); 613 return !chan_is_physical(chan);
614 } 614 }
615 615
616 static void __iomem *chan_base(struct d40_chan *chan) 616 static void __iomem *chan_base(struct d40_chan *chan)
617 { 617 {
618 return chan->base->virtbase + D40_DREG_PCBASE + 618 return chan->base->virtbase + D40_DREG_PCBASE +
619 chan->phy_chan->num * D40_DREG_PCDELTA; 619 chan->phy_chan->num * D40_DREG_PCDELTA;
620 } 620 }
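/*
 * Editorial sketch (not driver code): how chan_base() above derives a physical
 * channel's register block address from the ioremapped base. The PCBASE,
 * PCDELTA and virtbase values below are placeholder assumptions chosen only to
 * illustrate the arithmetic, not the real D40_DREG_* constants.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uintptr_t virtbase = 0x80110000; /* assumed ioremapped base */
	const uintptr_t pcbase   = 0x400;      /* assumed D40_DREG_PCBASE */
	const uintptr_t pcdelta  = 0x200;      /* assumed D40_DREG_PCDELTA */
	int num = 3;                           /* physical channel number */

	/* Same arithmetic as chan_base(): virtbase + PCBASE + num * PCDELTA */
	printf("chan %d regs at %#lx\n", num,
	       (unsigned long)(virtbase + pcbase + num * pcdelta));
	return 0;
}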
621 621
622 #define d40_err(dev, format, arg...) \ 622 #define d40_err(dev, format, arg...) \
623 dev_err(dev, "[%s] " format, __func__, ## arg) 623 dev_err(dev, "[%s] " format, __func__, ## arg)
624 624
625 #define chan_err(d40c, format, arg...) \ 625 #define chan_err(d40c, format, arg...) \
626 d40_err(chan2dev(d40c), format, ## arg) 626 d40_err(chan2dev(d40c), format, ## arg)
627 627
628 static int d40_pool_lli_alloc(struct d40_chan *d40c, struct d40_desc *d40d, 628 static int d40_pool_lli_alloc(struct d40_chan *d40c, struct d40_desc *d40d,
629 int lli_len) 629 int lli_len)
630 { 630 {
631 bool is_log = chan_is_logical(d40c); 631 bool is_log = chan_is_logical(d40c);
632 u32 align; 632 u32 align;
633 void *base; 633 void *base;
634 634
635 if (is_log) 635 if (is_log)
636 align = sizeof(struct d40_log_lli); 636 align = sizeof(struct d40_log_lli);
637 else 637 else
638 align = sizeof(struct d40_phy_lli); 638 align = sizeof(struct d40_phy_lli);
639 639
640 if (lli_len == 1) { 640 if (lli_len == 1) {
641 base = d40d->lli_pool.pre_alloc_lli; 641 base = d40d->lli_pool.pre_alloc_lli;
642 d40d->lli_pool.size = sizeof(d40d->lli_pool.pre_alloc_lli); 642 d40d->lli_pool.size = sizeof(d40d->lli_pool.pre_alloc_lli);
643 d40d->lli_pool.base = NULL; 643 d40d->lli_pool.base = NULL;
644 } else { 644 } else {
645 d40d->lli_pool.size = lli_len * 2 * align; 645 d40d->lli_pool.size = lli_len * 2 * align;
646 646
647 base = kmalloc(d40d->lli_pool.size + align, GFP_NOWAIT); 647 base = kmalloc(d40d->lli_pool.size + align, GFP_NOWAIT);
648 d40d->lli_pool.base = base; 648 d40d->lli_pool.base = base;
649 649
650 if (d40d->lli_pool.base == NULL) 650 if (d40d->lli_pool.base == NULL)
651 return -ENOMEM; 651 return -ENOMEM;
652 } 652 }
653 653
654 if (is_log) { 654 if (is_log) {
655 d40d->lli_log.src = PTR_ALIGN(base, align); 655 d40d->lli_log.src = PTR_ALIGN(base, align);
656 d40d->lli_log.dst = d40d->lli_log.src + lli_len; 656 d40d->lli_log.dst = d40d->lli_log.src + lli_len;
657 657
658 d40d->lli_pool.dma_addr = 0; 658 d40d->lli_pool.dma_addr = 0;
659 } else { 659 } else {
660 d40d->lli_phy.src = PTR_ALIGN(base, align); 660 d40d->lli_phy.src = PTR_ALIGN(base, align);
661 d40d->lli_phy.dst = d40d->lli_phy.src + lli_len; 661 d40d->lli_phy.dst = d40d->lli_phy.src + lli_len;
662 662
663 d40d->lli_pool.dma_addr = dma_map_single(d40c->base->dev, 663 d40d->lli_pool.dma_addr = dma_map_single(d40c->base->dev,
664 d40d->lli_phy.src, 664 d40d->lli_phy.src,
665 d40d->lli_pool.size, 665 d40d->lli_pool.size,
666 DMA_TO_DEVICE); 666 DMA_TO_DEVICE);
667 667
668 if (dma_mapping_error(d40c->base->dev, 668 if (dma_mapping_error(d40c->base->dev,
669 d40d->lli_pool.dma_addr)) { 669 d40d->lli_pool.dma_addr)) {
670 kfree(d40d->lli_pool.base); 670 kfree(d40d->lli_pool.base);
671 d40d->lli_pool.base = NULL; 671 d40d->lli_pool.base = NULL;
672 d40d->lli_pool.dma_addr = 0; 672 d40d->lli_pool.dma_addr = 0;
673 return -ENOMEM; 673 return -ENOMEM;
674 } 674 }
675 } 675 }
676 676
677 return 0; 677 return 0;
678 } 678 }
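/*
 * Editorial sketch (assumptions labelled, not driver code): the sizing rule
 * used by d40_pool_lli_alloc() above. A single-link descriptor reuses the
 * pre-allocated area; longer chains get lli_len * 2 * align bytes (src and dst
 * halves), and the driver kmalloc()s one extra 'align' of slack so PTR_ALIGN()
 * can round the base up. The sample values below are illustrative only.
 */
#include <stddef.h>
#include <stdio.h>

static size_t d40_pool_size(int lli_len, size_t align, size_t prealloc_size)
{
	if (lli_len == 1)
		return prealloc_size;          /* use pre_alloc_lli as-is */
	return (size_t)lli_len * 2 * align;    /* driver kmalloc()s this + align */
}

int main(void)
{
	/* 'align' stands in for sizeof(struct d40_log_lli); value assumed. */
	printf("pool size: %zu bytes\n", d40_pool_size(4, 8, 64));
	return 0;
}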
679 679
680 static void d40_pool_lli_free(struct d40_chan *d40c, struct d40_desc *d40d) 680 static void d40_pool_lli_free(struct d40_chan *d40c, struct d40_desc *d40d)
681 { 681 {
682 if (d40d->lli_pool.dma_addr) 682 if (d40d->lli_pool.dma_addr)
683 dma_unmap_single(d40c->base->dev, d40d->lli_pool.dma_addr, 683 dma_unmap_single(d40c->base->dev, d40d->lli_pool.dma_addr,
684 d40d->lli_pool.size, DMA_TO_DEVICE); 684 d40d->lli_pool.size, DMA_TO_DEVICE);
685 685
686 kfree(d40d->lli_pool.base); 686 kfree(d40d->lli_pool.base);
687 d40d->lli_pool.base = NULL; 687 d40d->lli_pool.base = NULL;
688 d40d->lli_pool.size = 0; 688 d40d->lli_pool.size = 0;
689 d40d->lli_log.src = NULL; 689 d40d->lli_log.src = NULL;
690 d40d->lli_log.dst = NULL; 690 d40d->lli_log.dst = NULL;
691 d40d->lli_phy.src = NULL; 691 d40d->lli_phy.src = NULL;
692 d40d->lli_phy.dst = NULL; 692 d40d->lli_phy.dst = NULL;
693 } 693 }
694 694
695 static int d40_lcla_alloc_one(struct d40_chan *d40c, 695 static int d40_lcla_alloc_one(struct d40_chan *d40c,
696 struct d40_desc *d40d) 696 struct d40_desc *d40d)
697 { 697 {
698 unsigned long flags; 698 unsigned long flags;
699 int i; 699 int i;
700 int ret = -EINVAL; 700 int ret = -EINVAL;
701 701
702 spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags); 702 spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);
703 703
704 /* 704 /*
705 * Allocate both src and dst at the same time, therefore the halves 705 * Allocate both src and dst at the same time, therefore the halves
706 * start on 1, since 0 can't be used as it is the end marker. 706 * start on 1, since 0 can't be used as it is the end marker.
705 * Allocate both src and dst at the same time, therefore the halves 705 * Allocate both src and dst at the same time, therefore the halves
707 */ 707 */
708 for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) { 708 for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) {
709 int idx = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP + i; 709 int idx = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP + i;
710 710
711 if (!d40c->base->lcla_pool.alloc_map[idx]) { 711 if (!d40c->base->lcla_pool.alloc_map[idx]) {
712 d40c->base->lcla_pool.alloc_map[idx] = d40d; 712 d40c->base->lcla_pool.alloc_map[idx] = d40d;
713 d40d->lcla_alloc++; 713 d40d->lcla_alloc++;
714 ret = i; 714 ret = i;
715 break; 715 break;
716 } 716 }
717 } 717 }
718 718
719 spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags); 719 spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);
720 720
721 return ret; 721 return ret;
722 } 722 }
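/*
 * Editorial sketch: the slot indexing used by d40_lcla_alloc_one() above.
 * Each physical channel owns D40_LCLA_LINK_PER_EVENT_GRP consecutive entries
 * in alloc_map, src/dst are claimed as a pair, and slot 0 is skipped because
 * zero doubles as the end-of-chain marker. The constant's value here (128) is
 * an assumption for illustration only.
 */
#include <stdio.h>

#define LINKS_PER_GRP 128 /* assumed stand-in for D40_LCLA_LINK_PER_EVENT_GRP */

int main(void)
{
	int phy_num = 2;
	int first = phy_num * LINKS_PER_GRP + 1;
	int last  = phy_num * LINKS_PER_GRP + LINKS_PER_GRP / 2 - 1;

	/* The allocator scans this range for the first free src/dst pair. */
	printf("chan %d scans alloc_map[%d..%d]\n", phy_num, first, last);
	return 0;
}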
723 723
724 static int d40_lcla_free_all(struct d40_chan *d40c, 724 static int d40_lcla_free_all(struct d40_chan *d40c,
725 struct d40_desc *d40d) 725 struct d40_desc *d40d)
726 { 726 {
727 unsigned long flags; 727 unsigned long flags;
728 int i; 728 int i;
729 int ret = -EINVAL; 729 int ret = -EINVAL;
730 730
731 if (chan_is_physical(d40c)) 731 if (chan_is_physical(d40c))
732 return 0; 732 return 0;
733 733
734 spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags); 734 spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);
735 735
736 for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) { 736 for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) {
737 int idx = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP + i; 737 int idx = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP + i;
738 738
739 if (d40c->base->lcla_pool.alloc_map[idx] == d40d) { 739 if (d40c->base->lcla_pool.alloc_map[idx] == d40d) {
740 d40c->base->lcla_pool.alloc_map[idx] = NULL; 740 d40c->base->lcla_pool.alloc_map[idx] = NULL;
741 d40d->lcla_alloc--; 741 d40d->lcla_alloc--;
742 if (d40d->lcla_alloc == 0) { 742 if (d40d->lcla_alloc == 0) {
743 ret = 0; 743 ret = 0;
744 break; 744 break;
745 } 745 }
746 } 746 }
747 } 747 }
748 748
749 spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags); 749 spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);
750 750
751 return ret; 751 return ret;
752 752
753 } 753 }
754 754
755 static void d40_desc_remove(struct d40_desc *d40d) 755 static void d40_desc_remove(struct d40_desc *d40d)
756 { 756 {
757 list_del(&d40d->node); 757 list_del(&d40d->node);
758 } 758 }
759 759
760 static struct d40_desc *d40_desc_get(struct d40_chan *d40c) 760 static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
761 { 761 {
762 struct d40_desc *desc = NULL; 762 struct d40_desc *desc = NULL;
763 763
764 if (!list_empty(&d40c->client)) { 764 if (!list_empty(&d40c->client)) {
765 struct d40_desc *d; 765 struct d40_desc *d;
766 struct d40_desc *_d; 766 struct d40_desc *_d;
767 767
768 list_for_each_entry_safe(d, _d, &d40c->client, node) { 768 list_for_each_entry_safe(d, _d, &d40c->client, node) {
769 if (async_tx_test_ack(&d->txd)) { 769 if (async_tx_test_ack(&d->txd)) {
770 d40_desc_remove(d); 770 d40_desc_remove(d);
771 desc = d; 771 desc = d;
772 memset(desc, 0, sizeof(*desc)); 772 memset(desc, 0, sizeof(*desc));
773 break; 773 break;
774 } 774 }
775 } 775 }
776 } 776 }
777 777
778 if (!desc) 778 if (!desc)
779 desc = kmem_cache_zalloc(d40c->base->desc_slab, GFP_NOWAIT); 779 desc = kmem_cache_zalloc(d40c->base->desc_slab, GFP_NOWAIT);
780 780
781 if (desc) 781 if (desc)
782 INIT_LIST_HEAD(&desc->node); 782 INIT_LIST_HEAD(&desc->node);
783 783
784 return desc; 784 return desc;
785 } 785 }
786 786
787 static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d) 787 static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d)
788 { 788 {
789 789
790 d40_pool_lli_free(d40c, d40d); 790 d40_pool_lli_free(d40c, d40d);
791 d40_lcla_free_all(d40c, d40d); 791 d40_lcla_free_all(d40c, d40d);
792 kmem_cache_free(d40c->base->desc_slab, d40d); 792 kmem_cache_free(d40c->base->desc_slab, d40d);
793 } 793 }
794 794
795 static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc) 795 static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc)
796 { 796 {
797 list_add_tail(&desc->node, &d40c->active); 797 list_add_tail(&desc->node, &d40c->active);
798 } 798 }
799 799
800 static void d40_phy_lli_load(struct d40_chan *chan, struct d40_desc *desc) 800 static void d40_phy_lli_load(struct d40_chan *chan, struct d40_desc *desc)
801 { 801 {
802 struct d40_phy_lli *lli_dst = desc->lli_phy.dst; 802 struct d40_phy_lli *lli_dst = desc->lli_phy.dst;
803 struct d40_phy_lli *lli_src = desc->lli_phy.src; 803 struct d40_phy_lli *lli_src = desc->lli_phy.src;
804 void __iomem *base = chan_base(chan); 804 void __iomem *base = chan_base(chan);
805 805
806 writel(lli_src->reg_cfg, base + D40_CHAN_REG_SSCFG); 806 writel(lli_src->reg_cfg, base + D40_CHAN_REG_SSCFG);
807 writel(lli_src->reg_elt, base + D40_CHAN_REG_SSELT); 807 writel(lli_src->reg_elt, base + D40_CHAN_REG_SSELT);
808 writel(lli_src->reg_ptr, base + D40_CHAN_REG_SSPTR); 808 writel(lli_src->reg_ptr, base + D40_CHAN_REG_SSPTR);
809 writel(lli_src->reg_lnk, base + D40_CHAN_REG_SSLNK); 809 writel(lli_src->reg_lnk, base + D40_CHAN_REG_SSLNK);
810 810
811 writel(lli_dst->reg_cfg, base + D40_CHAN_REG_SDCFG); 811 writel(lli_dst->reg_cfg, base + D40_CHAN_REG_SDCFG);
812 writel(lli_dst->reg_elt, base + D40_CHAN_REG_SDELT); 812 writel(lli_dst->reg_elt, base + D40_CHAN_REG_SDELT);
813 writel(lli_dst->reg_ptr, base + D40_CHAN_REG_SDPTR); 813 writel(lli_dst->reg_ptr, base + D40_CHAN_REG_SDPTR);
814 writel(lli_dst->reg_lnk, base + D40_CHAN_REG_SDLNK); 814 writel(lli_dst->reg_lnk, base + D40_CHAN_REG_SDLNK);
815 } 815 }
816 816
817 static void d40_desc_done(struct d40_chan *d40c, struct d40_desc *desc) 817 static void d40_desc_done(struct d40_chan *d40c, struct d40_desc *desc)
818 { 818 {
819 list_add_tail(&desc->node, &d40c->done); 819 list_add_tail(&desc->node, &d40c->done);
820 } 820 }
821 821
822 static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc) 822 static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc)
823 { 823 {
824 struct d40_lcla_pool *pool = &chan->base->lcla_pool; 824 struct d40_lcla_pool *pool = &chan->base->lcla_pool;
825 struct d40_log_lli_bidir *lli = &desc->lli_log; 825 struct d40_log_lli_bidir *lli = &desc->lli_log;
826 int lli_current = desc->lli_current; 826 int lli_current = desc->lli_current;
827 int lli_len = desc->lli_len; 827 int lli_len = desc->lli_len;
828 bool cyclic = desc->cyclic; 828 bool cyclic = desc->cyclic;
829 int curr_lcla = -EINVAL; 829 int curr_lcla = -EINVAL;
830 int first_lcla = 0; 830 int first_lcla = 0;
831 bool use_esram_lcla = chan->base->plat_data->use_esram_lcla; 831 bool use_esram_lcla = chan->base->plat_data->use_esram_lcla;
832 bool linkback; 832 bool linkback;
833 833
834 /* 834 /*
835 * We may have partially running cyclic transfers, in case we didn't get 835 * We may have partially running cyclic transfers, in case we didn't get
836 * enough LCLA entries. 836 * enough LCLA entries.
837 */ 837 */
838 linkback = cyclic && lli_current == 0; 838 linkback = cyclic && lli_current == 0;
839 839
840 /* 840 /*
841 * For linkback, we need one LCLA even with only one link, because we 841 * For linkback, we need one LCLA even with only one link, because we
842 * can't link back to the one in LCPA space 842 * can't link back to the one in LCPA space
843 */ 843 */
844 if (linkback || (lli_len - lli_current > 1)) { 844 if (linkback || (lli_len - lli_current > 1)) {
845 /* 845 /*
846 * If the channel is expected to use only soft_lli, don't 846 * If the channel is expected to use only soft_lli, don't
847 * allocate an LCLA. This is to avoid a HW issue that exists 847 * allocate an LCLA. This is to avoid a HW issue that exists
848 * in some controllers during a peripheral-to-memory transfer 848 * in some controllers during a peripheral-to-memory transfer
849 * that uses linked lists. 849 * that uses linked lists.
850 */ 850 */
851 if (!(chan->phy_chan->use_soft_lli && 851 if (!(chan->phy_chan->use_soft_lli &&
852 chan->dma_cfg.dir == DMA_DEV_TO_MEM)) 852 chan->dma_cfg.dir == DMA_DEV_TO_MEM))
853 curr_lcla = d40_lcla_alloc_one(chan, desc); 853 curr_lcla = d40_lcla_alloc_one(chan, desc);
854 854
855 first_lcla = curr_lcla; 855 first_lcla = curr_lcla;
856 } 856 }
857 857
858 /* 858 /*
859 * For linkback, we normally load the LCPA in the loop since we need to 859 * For linkback, we normally load the LCPA in the loop since we need to
860 * link it to the second LCLA and not the first. However, if we 860 * link it to the second LCLA and not the first. However, if we
861 * couldn't even get a first LCLA, then we have to run in LCPA and 861 * couldn't even get a first LCLA, then we have to run in LCPA and
862 * reload manually. 862 * reload manually.
863 */ 863 */
864 if (!linkback || curr_lcla == -EINVAL) { 864 if (!linkback || curr_lcla == -EINVAL) {
865 unsigned int flags = 0; 865 unsigned int flags = 0;
866 866
867 if (curr_lcla == -EINVAL) 867 if (curr_lcla == -EINVAL)
868 flags |= LLI_TERM_INT; 868 flags |= LLI_TERM_INT;
869 869
870 d40_log_lli_lcpa_write(chan->lcpa, 870 d40_log_lli_lcpa_write(chan->lcpa,
871 &lli->dst[lli_current], 871 &lli->dst[lli_current],
872 &lli->src[lli_current], 872 &lli->src[lli_current],
873 curr_lcla, 873 curr_lcla,
874 flags); 874 flags);
875 lli_current++; 875 lli_current++;
876 } 876 }
877 877
878 if (curr_lcla < 0) 878 if (curr_lcla < 0)
879 goto out; 879 goto out;
880 880
881 for (; lli_current < lli_len; lli_current++) { 881 for (; lli_current < lli_len; lli_current++) {
882 unsigned int lcla_offset = chan->phy_chan->num * 1024 + 882 unsigned int lcla_offset = chan->phy_chan->num * 1024 +
883 8 * curr_lcla * 2; 883 8 * curr_lcla * 2;
884 struct d40_log_lli *lcla = pool->base + lcla_offset; 884 struct d40_log_lli *lcla = pool->base + lcla_offset;
885 unsigned int flags = 0; 885 unsigned int flags = 0;
886 int next_lcla; 886 int next_lcla;
887 887
888 if (lli_current + 1 < lli_len) 888 if (lli_current + 1 < lli_len)
889 next_lcla = d40_lcla_alloc_one(chan, desc); 889 next_lcla = d40_lcla_alloc_one(chan, desc);
890 else 890 else
891 next_lcla = linkback ? first_lcla : -EINVAL; 891 next_lcla = linkback ? first_lcla : -EINVAL;
892 892
893 if (cyclic || next_lcla == -EINVAL) 893 if (cyclic || next_lcla == -EINVAL)
894 flags |= LLI_TERM_INT; 894 flags |= LLI_TERM_INT;
895 895
896 if (linkback && curr_lcla == first_lcla) { 896 if (linkback && curr_lcla == first_lcla) {
897 /* First link goes in both LCPA and LCLA */ 897 /* First link goes in both LCPA and LCLA */
898 d40_log_lli_lcpa_write(chan->lcpa, 898 d40_log_lli_lcpa_write(chan->lcpa,
899 &lli->dst[lli_current], 899 &lli->dst[lli_current],
900 &lli->src[lli_current], 900 &lli->src[lli_current],
901 next_lcla, flags); 901 next_lcla, flags);
902 } 902 }
903 903
904 /* 904 /*
905 * One unused LCLA in the cyclic case if the very first 905 * One unused LCLA in the cyclic case if the very first
906 * next_lcla fails... 906 * next_lcla fails...
907 */ 907 */
908 d40_log_lli_lcla_write(lcla, 908 d40_log_lli_lcla_write(lcla,
909 &lli->dst[lli_current], 909 &lli->dst[lli_current],
910 &lli->src[lli_current], 910 &lli->src[lli_current],
911 next_lcla, flags); 911 next_lcla, flags);
912 912
913 /* 913 /*
914 * Cache maintenance is not needed if lcla is 914 * Cache maintenance is not needed if lcla is
915 * mapped in esram 915 * mapped in esram
916 */ 916 */
917 if (!use_esram_lcla) { 917 if (!use_esram_lcla) {
918 dma_sync_single_range_for_device(chan->base->dev, 918 dma_sync_single_range_for_device(chan->base->dev,
919 pool->dma_addr, lcla_offset, 919 pool->dma_addr, lcla_offset,
920 2 * sizeof(struct d40_log_lli), 920 2 * sizeof(struct d40_log_lli),
921 DMA_TO_DEVICE); 921 DMA_TO_DEVICE);
922 } 922 }
923 curr_lcla = next_lcla; 923 curr_lcla = next_lcla;
924 924
925 if (curr_lcla == -EINVAL || curr_lcla == first_lcla) { 925 if (curr_lcla == -EINVAL || curr_lcla == first_lcla) {
926 lli_current++; 926 lli_current++;
927 break; 927 break;
928 } 928 }
929 } 929 }
930 930
931 out: 931 out:
932 desc->lli_current = lli_current; 932 desc->lli_current = lli_current;
933 } 933 }
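/*
 * Editorial sketch: the LCLA offset arithmetic in d40_log_lli_to_lcxa() above.
 * Each physical channel gets a 1024-byte slice of the LCLA pool, and each
 * allocated link index addresses a src/dst pair of logical LLI entries inside
 * that slice (the "8 * curr_lcla * 2" term, i.e. two 8-byte entries). This
 * simply restates that arithmetic for a couple of sample inputs.
 */
#include <stdio.h>

static unsigned lcla_offset(int phy_num, int curr_lcla)
{
	return phy_num * 1024 + 8 * curr_lcla * 2;	/* as in the driver */
}

int main(void)
{
	printf("chan 0, link 1 -> offset %u\n", lcla_offset(0, 1));
	printf("chan 3, link 2 -> offset %u\n", lcla_offset(3, 2));
	return 0;
}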
934 934
935 static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d) 935 static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d)
936 { 936 {
937 if (chan_is_physical(d40c)) { 937 if (chan_is_physical(d40c)) {
938 d40_phy_lli_load(d40c, d40d); 938 d40_phy_lli_load(d40c, d40d);
939 d40d->lli_current = d40d->lli_len; 939 d40d->lli_current = d40d->lli_len;
940 } else 940 } else
941 d40_log_lli_to_lcxa(d40c, d40d); 941 d40_log_lli_to_lcxa(d40c, d40d);
942 } 942 }
943 943
944 static struct d40_desc *d40_first_active_get(struct d40_chan *d40c) 944 static struct d40_desc *d40_first_active_get(struct d40_chan *d40c)
945 { 945 {
946 struct d40_desc *d; 946 struct d40_desc *d;
947 947
948 if (list_empty(&d40c->active)) 948 if (list_empty(&d40c->active))
949 return NULL; 949 return NULL;
950 950
951 d = list_first_entry(&d40c->active, 951 d = list_first_entry(&d40c->active,
952 struct d40_desc, 952 struct d40_desc,
953 node); 953 node);
954 return d; 954 return d;
955 } 955 }
956 956
957 /* remove desc from current queue and add it to the pending_queue */ 957 /* remove desc from current queue and add it to the pending_queue */
958 static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc) 958 static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc)
959 { 959 {
960 d40_desc_remove(desc); 960 d40_desc_remove(desc);
961 desc->is_in_client_list = false; 961 desc->is_in_client_list = false;
962 list_add_tail(&desc->node, &d40c->pending_queue); 962 list_add_tail(&desc->node, &d40c->pending_queue);
963 } 963 }
964 964
965 static struct d40_desc *d40_first_pending(struct d40_chan *d40c) 965 static struct d40_desc *d40_first_pending(struct d40_chan *d40c)
966 { 966 {
967 struct d40_desc *d; 967 struct d40_desc *d;
968 968
969 if (list_empty(&d40c->pending_queue)) 969 if (list_empty(&d40c->pending_queue))
970 return NULL; 970 return NULL;
971 971
972 d = list_first_entry(&d40c->pending_queue, 972 d = list_first_entry(&d40c->pending_queue,
973 struct d40_desc, 973 struct d40_desc,
974 node); 974 node);
975 return d; 975 return d;
976 } 976 }
977 977
978 static struct d40_desc *d40_first_queued(struct d40_chan *d40c) 978 static struct d40_desc *d40_first_queued(struct d40_chan *d40c)
979 { 979 {
980 struct d40_desc *d; 980 struct d40_desc *d;
981 981
982 if (list_empty(&d40c->queue)) 982 if (list_empty(&d40c->queue))
983 return NULL; 983 return NULL;
984 984
985 d = list_first_entry(&d40c->queue, 985 d = list_first_entry(&d40c->queue,
986 struct d40_desc, 986 struct d40_desc,
987 node); 987 node);
988 return d; 988 return d;
989 } 989 }
990 990
991 static struct d40_desc *d40_first_done(struct d40_chan *d40c) 991 static struct d40_desc *d40_first_done(struct d40_chan *d40c)
992 { 992 {
993 if (list_empty(&d40c->done)) 993 if (list_empty(&d40c->done))
994 return NULL; 994 return NULL;
995 995
996 return list_first_entry(&d40c->done, struct d40_desc, node); 996 return list_first_entry(&d40c->done, struct d40_desc, node);
997 } 997 }
998 998
999 static int d40_psize_2_burst_size(bool is_log, int psize) 999 static int d40_psize_2_burst_size(bool is_log, int psize)
1000 { 1000 {
1001 if (is_log) { 1001 if (is_log) {
1002 if (psize == STEDMA40_PSIZE_LOG_1) 1002 if (psize == STEDMA40_PSIZE_LOG_1)
1003 return 1; 1003 return 1;
1004 } else { 1004 } else {
1005 if (psize == STEDMA40_PSIZE_PHY_1) 1005 if (psize == STEDMA40_PSIZE_PHY_1)
1006 return 1; 1006 return 1;
1007 } 1007 }
1008 1008
1009 return 2 << psize; 1009 return 2 << psize;
1010 } 1010 }
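/*
 * Editorial sketch: the burst-size mapping implemented by
 * d40_psize_2_burst_size() above. Any PSIZE encoding other than the *_1 case
 * translates to a burst of 2 << psize elements. The assumption that the *_1
 * encoding equals 0 is for illustration only; the real STEDMA40_PSIZE_*
 * values are defined elsewhere in the driver headers.
 */
#include <stdio.h>

static int burst_size(int psize, int psize_one_value)
{
	/* Mirrors the driver: the *_1 encoding means a burst of one element. */
	if (psize == psize_one_value)
		return 1;
	return 2 << psize;
}

int main(void)
{
	for (int p = 0; p <= 3; p++)
		printf("psize %d -> burst %d\n", p, burst_size(p, 0));
	return 0;
}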
1011 1011
1012 /* 1012 /*
1013 * The DMA only supports transmitting segments up to 1013 * The DMA only supports transmitting segments up to
1014 * STEDMA40_MAX_SEG_SIZE * data_width, where data_width is given in bytes. 1014 * STEDMA40_MAX_SEG_SIZE * data_width, where data_width is given in bytes.
1015 * 1015 *
1016 * Calculate the total number of dma elements required to send the entire sg list. 1016 * Calculate the total number of dma elements required to send the entire sg list.
1017 */ 1017 */
1018 static int d40_size_2_dmalen(int size, u32 data_width1, u32 data_width2) 1018 static int d40_size_2_dmalen(int size, u32 data_width1, u32 data_width2)
1019 { 1019 {
1020 int dmalen; 1020 int dmalen;
1021 u32 max_w = max(data_width1, data_width2); 1021 u32 max_w = max(data_width1, data_width2);
1022 u32 min_w = min(data_width1, data_width2); 1022 u32 min_w = min(data_width1, data_width2);
1023 u32 seg_max = ALIGN(STEDMA40_MAX_SEG_SIZE * min_w, max_w); 1023 u32 seg_max = ALIGN(STEDMA40_MAX_SEG_SIZE * min_w, max_w);
1024 1024
1025 if (seg_max > STEDMA40_MAX_SEG_SIZE) 1025 if (seg_max > STEDMA40_MAX_SEG_SIZE)
1026 seg_max -= max_w; 1026 seg_max -= max_w;
1027 1027
1028 if (!IS_ALIGNED(size, max_w)) 1028 if (!IS_ALIGNED(size, max_w))
1029 return -EINVAL; 1029 return -EINVAL;
1030 1030
1031 if (size <= seg_max) 1031 if (size <= seg_max)
1032 dmalen = 1; 1032 dmalen = 1;
1033 else { 1033 else {
1034 dmalen = size / seg_max; 1034 dmalen = size / seg_max;
1035 if (dmalen * seg_max < size) 1035 if (dmalen * seg_max < size)
1036 dmalen++; 1036 dmalen++;
1037 } 1037 }
1038 return dmalen; 1038 return dmalen;
1039 } 1039 }
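/*
 * Editorial sketch: the tail of d40_size_2_dmalen() above is a ceiling
 * division of 'size' by seg_max, i.e. how many LLI elements are needed to
 * cover the whole buffer. The seg_max value used here (4092) is an assumed
 * example, not the real STEDMA40_MAX_SEG_SIZE-derived limit.
 */
#include <stdio.h>

static int ceil_div(int size, int seg_max)
{
	int dmalen = size / seg_max;

	if (dmalen * seg_max < size)	/* same rounding as the driver */
		dmalen++;
	return dmalen;
}

int main(void)
{
	/* e.g. 10000 bytes with an assumed seg_max of 4092 -> 3 elements */
	printf("%d\n", ceil_div(10000, 4092));
	return 0;
}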
1040 1040
1041 static int d40_sg_2_dmalen(struct scatterlist *sgl, int sg_len, 1041 static int d40_sg_2_dmalen(struct scatterlist *sgl, int sg_len,
1042 u32 data_width1, u32 data_width2) 1042 u32 data_width1, u32 data_width2)
1043 { 1043 {
1044 struct scatterlist *sg; 1044 struct scatterlist *sg;
1045 int i; 1045 int i;
1046 int len = 0; 1046 int len = 0;
1047 int ret; 1047 int ret;
1048 1048
1049 for_each_sg(sgl, sg, sg_len, i) { 1049 for_each_sg(sgl, sg, sg_len, i) {
1050 ret = d40_size_2_dmalen(sg_dma_len(sg), 1050 ret = d40_size_2_dmalen(sg_dma_len(sg),
1051 data_width1, data_width2); 1051 data_width1, data_width2);
1052 if (ret < 0) 1052 if (ret < 0)
1053 return ret; 1053 return ret;
1054 len += ret; 1054 len += ret;
1055 } 1055 }
1056 return len; 1056 return len;
1057 } 1057 }
1058 1058
1059 1059
1060 #ifdef CONFIG_PM 1060 #ifdef CONFIG_PM
1061 static void dma40_backup(void __iomem *baseaddr, u32 *backup, 1061 static void dma40_backup(void __iomem *baseaddr, u32 *backup,
1062 u32 *regaddr, int num, bool save) 1062 u32 *regaddr, int num, bool save)
1063 { 1063 {
1064 int i; 1064 int i;
1065 1065
1066 for (i = 0; i < num; i++) { 1066 for (i = 0; i < num; i++) {
1067 void __iomem *addr = baseaddr + regaddr[i]; 1067 void __iomem *addr = baseaddr + regaddr[i];
1068 1068
1069 if (save) 1069 if (save)
1070 backup[i] = readl_relaxed(addr); 1070 backup[i] = readl_relaxed(addr);
1071 else 1071 else
1072 writel_relaxed(backup[i], addr); 1072 writel_relaxed(backup[i], addr);
1073 } 1073 }
1074 } 1074 }
1075 1075
1076 static void d40_save_restore_registers(struct d40_base *base, bool save) 1076 static void d40_save_restore_registers(struct d40_base *base, bool save)
1077 { 1077 {
1078 int i; 1078 int i;
1079 1079
1080 /* Save/Restore channel specific registers */ 1080 /* Save/Restore channel specific registers */
1081 for (i = 0; i < base->num_phy_chans; i++) { 1081 for (i = 0; i < base->num_phy_chans; i++) {
1082 void __iomem *addr; 1082 void __iomem *addr;
1083 int idx; 1083 int idx;
1084 1084
1085 if (base->phy_res[i].reserved) 1085 if (base->phy_res[i].reserved)
1086 continue; 1086 continue;
1087 1087
1088 addr = base->virtbase + D40_DREG_PCBASE + i * D40_DREG_PCDELTA; 1088 addr = base->virtbase + D40_DREG_PCBASE + i * D40_DREG_PCDELTA;
1089 idx = i * ARRAY_SIZE(d40_backup_regs_chan); 1089 idx = i * ARRAY_SIZE(d40_backup_regs_chan);
1090 1090
1091 dma40_backup(addr, &base->reg_val_backup_chan[idx], 1091 dma40_backup(addr, &base->reg_val_backup_chan[idx],
1092 d40_backup_regs_chan, 1092 d40_backup_regs_chan,
1093 ARRAY_SIZE(d40_backup_regs_chan), 1093 ARRAY_SIZE(d40_backup_regs_chan),
1094 save); 1094 save);
1095 } 1095 }
1096 1096
1097 /* Save/Restore global registers */ 1097 /* Save/Restore global registers */
1098 dma40_backup(base->virtbase, base->reg_val_backup, 1098 dma40_backup(base->virtbase, base->reg_val_backup,
1099 d40_backup_regs, ARRAY_SIZE(d40_backup_regs), 1099 d40_backup_regs, ARRAY_SIZE(d40_backup_regs),
1100 save); 1100 save);
1101 1101
1102 /* Save/Restore registers only existing on dma40 v3 and later */ 1102 /* Save/Restore registers only existing on dma40 v3 and later */
1103 if (base->gen_dmac.backup) 1103 if (base->gen_dmac.backup)
1104 dma40_backup(base->virtbase, base->reg_val_backup_v4, 1104 dma40_backup(base->virtbase, base->reg_val_backup_v4,
1105 base->gen_dmac.backup, 1105 base->gen_dmac.backup,
1106 base->gen_dmac.backup_size, 1106 base->gen_dmac.backup_size,
1107 save); 1107 save);
1108 } 1108 }
1109 #else 1109 #else
1110 static void d40_save_restore_registers(struct d40_base *base, bool save) 1110 static void d40_save_restore_registers(struct d40_base *base, bool save)
1111 { 1111 {
1112 } 1112 }
1113 #endif 1113 #endif
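/*
 * Editorial sketch: the symmetric save/restore pattern used by dma40_backup()
 * above. One boolean selects the direction, so the suspend and resume paths
 * can share a single register list. Plain memory and array indices stand in
 * for MMIO and register byte offsets here; this is an illustration only.
 */
#include <stdint.h>
#include <stdio.h>

static void backup(volatile uint32_t *regs, uint32_t *store,
		   const int *idx, int num, int save)
{
	for (int i = 0; i < num; i++) {
		if (save)
			store[i] = regs[idx[i]];	/* readl_relaxed() in the driver */
		else
			regs[idx[i]] = store[i];	/* writel_relaxed() in the driver */
	}
}

int main(void)
{
	uint32_t fake_regs[8] = { 0x11, 0, 0x33 };
	uint32_t store[2];
	const int idx[2] = { 0, 2 };		/* stand-ins for register offsets */

	backup(fake_regs, store, idx, 2, 1);	/* save before power off */
	fake_regs[0] = fake_regs[2] = 0;	/* pretend the block lost state */
	backup(fake_regs, store, idx, 2, 0);	/* restore on power up */
	printf("%#x %#x\n", (unsigned)fake_regs[0], (unsigned)fake_regs[2]);
	return 0;
}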
1114 1114
1115 static int __d40_execute_command_phy(struct d40_chan *d40c, 1115 static int __d40_execute_command_phy(struct d40_chan *d40c,
1116 enum d40_command command) 1116 enum d40_command command)
1117 { 1117 {
1118 u32 status; 1118 u32 status;
1119 int i; 1119 int i;
1120 void __iomem *active_reg; 1120 void __iomem *active_reg;
1121 int ret = 0; 1121 int ret = 0;
1122 unsigned long flags; 1122 unsigned long flags;
1123 u32 wmask; 1123 u32 wmask;
1124 1124
1125 if (command == D40_DMA_STOP) { 1125 if (command == D40_DMA_STOP) {
1126 ret = __d40_execute_command_phy(d40c, D40_DMA_SUSPEND_REQ); 1126 ret = __d40_execute_command_phy(d40c, D40_DMA_SUSPEND_REQ);
1127 if (ret) 1127 if (ret)
1128 return ret; 1128 return ret;
1129 } 1129 }
1130 1130
1131 spin_lock_irqsave(&d40c->base->execmd_lock, flags); 1131 spin_lock_irqsave(&d40c->base->execmd_lock, flags);
1132 1132
1133 if (d40c->phy_chan->num % 2 == 0) 1133 if (d40c->phy_chan->num % 2 == 0)
1134 active_reg = d40c->base->virtbase + D40_DREG_ACTIVE; 1134 active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
1135 else 1135 else
1136 active_reg = d40c->base->virtbase + D40_DREG_ACTIVO; 1136 active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;
1137 1137
1138 if (command == D40_DMA_SUSPEND_REQ) { 1138 if (command == D40_DMA_SUSPEND_REQ) {
1139 status = (readl(active_reg) & 1139 status = (readl(active_reg) &
1140 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >> 1140 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
1141 D40_CHAN_POS(d40c->phy_chan->num); 1141 D40_CHAN_POS(d40c->phy_chan->num);
1142 1142
1143 if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP) 1143 if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
1144 goto done; 1144 goto done;
1145 } 1145 }
1146 1146
1147 wmask = 0xffffffff & ~(D40_CHAN_POS_MASK(d40c->phy_chan->num)); 1147 wmask = 0xffffffff & ~(D40_CHAN_POS_MASK(d40c->phy_chan->num));
1148 writel(wmask | (command << D40_CHAN_POS(d40c->phy_chan->num)), 1148 writel(wmask | (command << D40_CHAN_POS(d40c->phy_chan->num)),
1149 active_reg); 1149 active_reg);
1150 1150
1151 if (command == D40_DMA_SUSPEND_REQ) { 1151 if (command == D40_DMA_SUSPEND_REQ) {
1152 1152
1153 for (i = 0 ; i < D40_SUSPEND_MAX_IT; i++) { 1153 for (i = 0 ; i < D40_SUSPEND_MAX_IT; i++) {
1154 status = (readl(active_reg) & 1154 status = (readl(active_reg) &
1155 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >> 1155 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
1156 D40_CHAN_POS(d40c->phy_chan->num); 1156 D40_CHAN_POS(d40c->phy_chan->num);
1157 1157
1158 cpu_relax(); 1158 cpu_relax();
1159 /* 1159 /*
1160 * Reduce the number of bus accesses while 1160 * Reduce the number of bus accesses while
1161 * waiting for the DMA to suspend. 1161 * waiting for the DMA to suspend.
1162 */ 1162 */
1163 udelay(3); 1163 udelay(3);
1164 1164
1165 if (status == D40_DMA_STOP || 1165 if (status == D40_DMA_STOP ||
1166 status == D40_DMA_SUSPENDED) 1166 status == D40_DMA_SUSPENDED)
1167 break; 1167 break;
1168 } 1168 }
1169 1169
1170 if (i == D40_SUSPEND_MAX_IT) { 1170 if (i == D40_SUSPEND_MAX_IT) {
1171 chan_err(d40c, 1171 chan_err(d40c,
1172 "unable to suspend the chl %d (log: %d) status %x\n", 1172 "unable to suspend the chl %d (log: %d) status %x\n",
1173 d40c->phy_chan->num, d40c->log_num, 1173 d40c->phy_chan->num, d40c->log_num,
1174 status); 1174 status);
1175 dump_stack(); 1175 dump_stack();
1176 ret = -EBUSY; 1176 ret = -EBUSY;
1177 } 1177 }
1178 1178
1179 } 1179 }
1180 done: 1180 done:
1181 spin_unlock_irqrestore(&d40c->base->execmd_lock, flags); 1181 spin_unlock_irqrestore(&d40c->base->execmd_lock, flags);
1182 return ret; 1182 return ret;
1183 } 1183 }
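/*
 * Editorial sketch: the per-channel field arithmetic used by
 * __d40_execute_command_phy() above. Each physical channel has its own slot
 * in the ACTIVE/ACTIVO register; the driver writes the command shifted into
 * that slot (with every other slot set to all-ones, mirroring the wmask in
 * the code) and reads status back by masking and shifting the same slot. The
 * 2-bit layout below is an assumption for illustration, not the real
 * D40_CHAN_POS definition.
 */
#include <stdint.h>
#include <stdio.h>

#define CHAN_POS(n)      (((n) % 16) * 2)		/* assumed layout */
#define CHAN_POS_MASK(n) (0x3u << CHAN_POS(n))

int main(void)
{
	int num = 5;
	uint32_t cmd = 0x1;	/* command encoding, value assumed */

	/* write pattern: wmask | (command << D40_CHAN_POS(num)) */
	uint32_t wmask = 0xffffffffu & ~CHAN_POS_MASK(num);
	uint32_t reg = wmask | (cmd << CHAN_POS(num));

	/* read pattern: (readl(active_reg) & POS_MASK(num)) >> POS(num) */
	printf("status %#x\n",
	       (unsigned)((reg & CHAN_POS_MASK(num)) >> CHAN_POS(num)));
	return 0;
}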
1184 1184
1185 static void d40_term_all(struct d40_chan *d40c) 1185 static void d40_term_all(struct d40_chan *d40c)
1186 { 1186 {
1187 struct d40_desc *d40d; 1187 struct d40_desc *d40d;
1188 struct d40_desc *_d; 1188 struct d40_desc *_d;
1189 1189
1190 /* Release completed descriptors */ 1190 /* Release completed descriptors */
1191 while ((d40d = d40_first_done(d40c))) { 1191 while ((d40d = d40_first_done(d40c))) {
1192 d40_desc_remove(d40d); 1192 d40_desc_remove(d40d);
1193 d40_desc_free(d40c, d40d); 1193 d40_desc_free(d40c, d40d);
1194 } 1194 }
1195 1195
1196 /* Release active descriptors */ 1196 /* Release active descriptors */
1197 while ((d40d = d40_first_active_get(d40c))) { 1197 while ((d40d = d40_first_active_get(d40c))) {
1198 d40_desc_remove(d40d); 1198 d40_desc_remove(d40d);
1199 d40_desc_free(d40c, d40d); 1199 d40_desc_free(d40c, d40d);
1200 } 1200 }
1201 1201
1202 /* Release queued descriptors waiting for transfer */ 1202 /* Release queued descriptors waiting for transfer */
1203 while ((d40d = d40_first_queued(d40c))) { 1203 while ((d40d = d40_first_queued(d40c))) {
1204 d40_desc_remove(d40d); 1204 d40_desc_remove(d40d);
1205 d40_desc_free(d40c, d40d); 1205 d40_desc_free(d40c, d40d);
1206 } 1206 }
1207 1207
1208 /* Release pending descriptors */ 1208 /* Release pending descriptors */
1209 while ((d40d = d40_first_pending(d40c))) { 1209 while ((d40d = d40_first_pending(d40c))) {
1210 d40_desc_remove(d40d); 1210 d40_desc_remove(d40d);
1211 d40_desc_free(d40c, d40d); 1211 d40_desc_free(d40c, d40d);
1212 } 1212 }
1213 1213
1214 /* Release client owned descriptors */ 1214 /* Release client owned descriptors */
1215 if (!list_empty(&d40c->client)) 1215 if (!list_empty(&d40c->client))
1216 list_for_each_entry_safe(d40d, _d, &d40c->client, node) { 1216 list_for_each_entry_safe(d40d, _d, &d40c->client, node) {
1217 d40_desc_remove(d40d); 1217 d40_desc_remove(d40d);
1218 d40_desc_free(d40c, d40d); 1218 d40_desc_free(d40c, d40d);
1219 } 1219 }
1220 1220
1221 /* Release descriptors in prepare queue */ 1221 /* Release descriptors in prepare queue */
1222 if (!list_empty(&d40c->prepare_queue)) 1222 if (!list_empty(&d40c->prepare_queue))
1223 list_for_each_entry_safe(d40d, _d, 1223 list_for_each_entry_safe(d40d, _d,
1224 &d40c->prepare_queue, node) { 1224 &d40c->prepare_queue, node) {
1225 d40_desc_remove(d40d); 1225 d40_desc_remove(d40d);
1226 d40_desc_free(d40c, d40d); 1226 d40_desc_free(d40c, d40d);
1227 } 1227 }
1228 1228
1229 d40c->pending_tx = 0; 1229 d40c->pending_tx = 0;
1230 } 1230 }
1231 1231
1232 static void __d40_config_set_event(struct d40_chan *d40c, 1232 static void __d40_config_set_event(struct d40_chan *d40c,
1233 enum d40_events event_type, u32 event, 1233 enum d40_events event_type, u32 event,
1234 int reg) 1234 int reg)
1235 { 1235 {
1236 void __iomem *addr = chan_base(d40c) + reg; 1236 void __iomem *addr = chan_base(d40c) + reg;
1237 int tries; 1237 int tries;
1238 u32 status; 1238 u32 status;
1239 1239
1240 switch (event_type) { 1240 switch (event_type) {
1241 1241
1242 case D40_DEACTIVATE_EVENTLINE: 1242 case D40_DEACTIVATE_EVENTLINE:
1243 1243
1244 writel((D40_DEACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event)) 1244 writel((D40_DEACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event))
1245 | ~D40_EVENTLINE_MASK(event), addr); 1245 | ~D40_EVENTLINE_MASK(event), addr);
1246 break; 1246 break;
1247 1247
1248 case D40_SUSPEND_REQ_EVENTLINE: 1248 case D40_SUSPEND_REQ_EVENTLINE:
1249 status = (readl(addr) & D40_EVENTLINE_MASK(event)) >> 1249 status = (readl(addr) & D40_EVENTLINE_MASK(event)) >>
1250 D40_EVENTLINE_POS(event); 1250 D40_EVENTLINE_POS(event);
1251 1251
1252 if (status == D40_DEACTIVATE_EVENTLINE || 1252 if (status == D40_DEACTIVATE_EVENTLINE ||
1253 status == D40_SUSPEND_REQ_EVENTLINE) 1253 status == D40_SUSPEND_REQ_EVENTLINE)
1254 break; 1254 break;
1255 1255
1256 writel((D40_SUSPEND_REQ_EVENTLINE << D40_EVENTLINE_POS(event)) 1256 writel((D40_SUSPEND_REQ_EVENTLINE << D40_EVENTLINE_POS(event))
1257 | ~D40_EVENTLINE_MASK(event), addr); 1257 | ~D40_EVENTLINE_MASK(event), addr);
1258 1258
1259 for (tries = 0 ; tries < D40_SUSPEND_MAX_IT; tries++) { 1259 for (tries = 0 ; tries < D40_SUSPEND_MAX_IT; tries++) {
1260 1260
1261 status = (readl(addr) & D40_EVENTLINE_MASK(event)) >> 1261 status = (readl(addr) & D40_EVENTLINE_MASK(event)) >>
1262 D40_EVENTLINE_POS(event); 1262 D40_EVENTLINE_POS(event);
1263 1263
1264 cpu_relax(); 1264 cpu_relax();
1265 /* 1265 /*
1266 * Reduce the number of bus accesses while 1266 * Reduce the number of bus accesses while
1267 * waiting for the DMA to suspend. 1267 * waiting for the DMA to suspend.
1268 */ 1268 */
1269 udelay(3); 1269 udelay(3);
1270 1270
1271 if (status == D40_DEACTIVATE_EVENTLINE) 1271 if (status == D40_DEACTIVATE_EVENTLINE)
1272 break; 1272 break;
1273 } 1273 }
1274 1274
1275 if (tries == D40_SUSPEND_MAX_IT) { 1275 if (tries == D40_SUSPEND_MAX_IT) {
1276 chan_err(d40c, 1276 chan_err(d40c,
1277 "unable to stop the event_line chl %d (log: %d)" 1277 "unable to stop the event_line chl %d (log: %d)"
1278 "status %x\n", d40c->phy_chan->num, 1278 "status %x\n", d40c->phy_chan->num,
1279 d40c->log_num, status); 1279 d40c->log_num, status);
1280 } 1280 }
1281 break; 1281 break;
1282 1282
1283 case D40_ACTIVATE_EVENTLINE: 1283 case D40_ACTIVATE_EVENTLINE:
1284 /* 1284 /*
1285 * The hardware sometimes doesn't register the enable when src and dst 1285 * The hardware sometimes doesn't register the enable when src and dst
1286 * event lines are active on the same logical channel. Retry to ensure 1286 * event lines are active on the same logical channel. Retry to ensure
1287 * it does. Usually only one retry is sufficient. 1287 * it does. Usually only one retry is sufficient.
1288 */ 1288 */
1289 tries = 100; 1289 tries = 100;
1290 while (--tries) { 1290 while (--tries) {
1291 writel((D40_ACTIVATE_EVENTLINE << 1291 writel((D40_ACTIVATE_EVENTLINE <<
1292 D40_EVENTLINE_POS(event)) | 1292 D40_EVENTLINE_POS(event)) |
1293 ~D40_EVENTLINE_MASK(event), addr); 1293 ~D40_EVENTLINE_MASK(event), addr);
1294 1294
1295 if (readl(addr) & D40_EVENTLINE_MASK(event)) 1295 if (readl(addr) & D40_EVENTLINE_MASK(event))
1296 break; 1296 break;
1297 } 1297 }
1298 1298
1299 if (tries != 99) 1299 if (tries != 99)
1300 dev_dbg(chan2dev(d40c), 1300 dev_dbg(chan2dev(d40c),
1301 "[%s] workaround enable S%cLNK (%d tries)\n", 1301 "[%s] workaround enable S%cLNK (%d tries)\n",
1302 __func__, reg == D40_CHAN_REG_SSLNK ? 'S' : 'D', 1302 __func__, reg == D40_CHAN_REG_SSLNK ? 'S' : 'D',
1303 100 - tries); 1303 100 - tries);
1304 1304
1305 WARN_ON(!tries); 1305 WARN_ON(!tries);
1306 break; 1306 break;
1307 1307
1308 case D40_ROUND_EVENTLINE: 1308 case D40_ROUND_EVENTLINE:
1309 BUG(); 1309 BUG();
1310 break; 1310 break;
1311 1311
1312 } 1312 }
1313 } 1313 }
1314 1314
1315 static void d40_config_set_event(struct d40_chan *d40c, 1315 static void d40_config_set_event(struct d40_chan *d40c,
1316 enum d40_events event_type) 1316 enum d40_events event_type)
1317 { 1317 {
1318 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dev_type); 1318 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dev_type);
1319 1319
1320 /* Enable event line connected to device (or memcpy) */ 1320 /* Enable event line connected to device (or memcpy) */
1321 if ((d40c->dma_cfg.dir == DMA_DEV_TO_MEM) || 1321 if ((d40c->dma_cfg.dir == DMA_DEV_TO_MEM) ||
1322 (d40c->dma_cfg.dir == DMA_DEV_TO_DEV)) 1322 (d40c->dma_cfg.dir == DMA_DEV_TO_DEV))
1323 __d40_config_set_event(d40c, event_type, event, 1323 __d40_config_set_event(d40c, event_type, event,
1324 D40_CHAN_REG_SSLNK); 1324 D40_CHAN_REG_SSLNK);
1325 1325
1326 if (d40c->dma_cfg.dir != DMA_DEV_TO_MEM) 1326 if (d40c->dma_cfg.dir != DMA_DEV_TO_MEM)
1327 __d40_config_set_event(d40c, event_type, event, 1327 __d40_config_set_event(d40c, event_type, event,
1328 D40_CHAN_REG_SDLNK); 1328 D40_CHAN_REG_SDLNK);
1329 } 1329 }
1330 1330
1331 static u32 d40_chan_has_events(struct d40_chan *d40c) 1331 static u32 d40_chan_has_events(struct d40_chan *d40c)
1332 { 1332 {
1333 void __iomem *chanbase = chan_base(d40c); 1333 void __iomem *chanbase = chan_base(d40c);
1334 u32 val; 1334 u32 val;
1335 1335
1336 val = readl(chanbase + D40_CHAN_REG_SSLNK); 1336 val = readl(chanbase + D40_CHAN_REG_SSLNK);
1337 val |= readl(chanbase + D40_CHAN_REG_SDLNK); 1337 val |= readl(chanbase + D40_CHAN_REG_SDLNK);
1338 1338
1339 return val; 1339 return val;
1340 } 1340 }
1341 1341
1342 static int 1342 static int
1343 __d40_execute_command_log(struct d40_chan *d40c, enum d40_command command) 1343 __d40_execute_command_log(struct d40_chan *d40c, enum d40_command command)
1344 { 1344 {
1345 unsigned long flags; 1345 unsigned long flags;
1346 int ret = 0; 1346 int ret = 0;
1347 u32 active_status; 1347 u32 active_status;
1348 void __iomem *active_reg; 1348 void __iomem *active_reg;
1349 1349
1350 if (d40c->phy_chan->num % 2 == 0) 1350 if (d40c->phy_chan->num % 2 == 0)
1351 active_reg = d40c->base->virtbase + D40_DREG_ACTIVE; 1351 active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
1352 else 1352 else
1353 active_reg = d40c->base->virtbase + D40_DREG_ACTIVO; 1353 active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;
1354 1354
1355 1355
1356 spin_lock_irqsave(&d40c->phy_chan->lock, flags); 1356 spin_lock_irqsave(&d40c->phy_chan->lock, flags);
1357 1357
1358 switch (command) { 1358 switch (command) {
1359 case D40_DMA_STOP: 1359 case D40_DMA_STOP:
1360 case D40_DMA_SUSPEND_REQ: 1360 case D40_DMA_SUSPEND_REQ:
1361 1361
1362 active_status = (readl(active_reg) & 1362 active_status = (readl(active_reg) &
1363 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >> 1363 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
1364 D40_CHAN_POS(d40c->phy_chan->num); 1364 D40_CHAN_POS(d40c->phy_chan->num);
1365 1365
1366 if (active_status == D40_DMA_RUN) 1366 if (active_status == D40_DMA_RUN)
1367 d40_config_set_event(d40c, D40_SUSPEND_REQ_EVENTLINE); 1367 d40_config_set_event(d40c, D40_SUSPEND_REQ_EVENTLINE);
1368 else 1368 else
1369 d40_config_set_event(d40c, D40_DEACTIVATE_EVENTLINE); 1369 d40_config_set_event(d40c, D40_DEACTIVATE_EVENTLINE);
1370 1370
1371 if (!d40_chan_has_events(d40c) && (command == D40_DMA_STOP)) 1371 if (!d40_chan_has_events(d40c) && (command == D40_DMA_STOP))
1372 ret = __d40_execute_command_phy(d40c, command); 1372 ret = __d40_execute_command_phy(d40c, command);
1373 1373
1374 break; 1374 break;
1375 1375
1376 case D40_DMA_RUN: 1376 case D40_DMA_RUN:
1377 1377
1378 d40_config_set_event(d40c, D40_ACTIVATE_EVENTLINE); 1378 d40_config_set_event(d40c, D40_ACTIVATE_EVENTLINE);
1379 ret = __d40_execute_command_phy(d40c, command); 1379 ret = __d40_execute_command_phy(d40c, command);
1380 break; 1380 break;
1381 1381
1382 case D40_DMA_SUSPENDED: 1382 case D40_DMA_SUSPENDED:
1383 BUG(); 1383 BUG();
1384 break; 1384 break;
1385 } 1385 }
1386 1386
1387 spin_unlock_irqrestore(&d40c->phy_chan->lock, flags); 1387 spin_unlock_irqrestore(&d40c->phy_chan->lock, flags);
1388 return ret; 1388 return ret;
1389 } 1389 }
1390 1390
1391 static int d40_channel_execute_command(struct d40_chan *d40c, 1391 static int d40_channel_execute_command(struct d40_chan *d40c,
1392 enum d40_command command) 1392 enum d40_command command)
1393 { 1393 {
1394 if (chan_is_logical(d40c)) 1394 if (chan_is_logical(d40c))
1395 return __d40_execute_command_log(d40c, command); 1395 return __d40_execute_command_log(d40c, command);
1396 else 1396 else
1397 return __d40_execute_command_phy(d40c, command); 1397 return __d40_execute_command_phy(d40c, command);
1398 } 1398 }
1399 1399
1400 static u32 d40_get_prmo(struct d40_chan *d40c) 1400 static u32 d40_get_prmo(struct d40_chan *d40c)
1401 { 1401 {
1402 static const unsigned int phy_map[] = { 1402 static const unsigned int phy_map[] = {
1403 [STEDMA40_PCHAN_BASIC_MODE] 1403 [STEDMA40_PCHAN_BASIC_MODE]
1404 = D40_DREG_PRMO_PCHAN_BASIC, 1404 = D40_DREG_PRMO_PCHAN_BASIC,
1405 [STEDMA40_PCHAN_MODULO_MODE] 1405 [STEDMA40_PCHAN_MODULO_MODE]
1406 = D40_DREG_PRMO_PCHAN_MODULO, 1406 = D40_DREG_PRMO_PCHAN_MODULO,
1407 [STEDMA40_PCHAN_DOUBLE_DST_MODE] 1407 [STEDMA40_PCHAN_DOUBLE_DST_MODE]
1408 = D40_DREG_PRMO_PCHAN_DOUBLE_DST, 1408 = D40_DREG_PRMO_PCHAN_DOUBLE_DST,
1409 }; 1409 };
1410 static const unsigned int log_map[] = { 1410 static const unsigned int log_map[] = {
1411 [STEDMA40_LCHAN_SRC_PHY_DST_LOG] 1411 [STEDMA40_LCHAN_SRC_PHY_DST_LOG]
1412 = D40_DREG_PRMO_LCHAN_SRC_PHY_DST_LOG, 1412 = D40_DREG_PRMO_LCHAN_SRC_PHY_DST_LOG,
1413 [STEDMA40_LCHAN_SRC_LOG_DST_PHY] 1413 [STEDMA40_LCHAN_SRC_LOG_DST_PHY]
1414 = D40_DREG_PRMO_LCHAN_SRC_LOG_DST_PHY, 1414 = D40_DREG_PRMO_LCHAN_SRC_LOG_DST_PHY,
1415 [STEDMA40_LCHAN_SRC_LOG_DST_LOG] 1415 [STEDMA40_LCHAN_SRC_LOG_DST_LOG]
1416 = D40_DREG_PRMO_LCHAN_SRC_LOG_DST_LOG, 1416 = D40_DREG_PRMO_LCHAN_SRC_LOG_DST_LOG,
1417 }; 1417 };
1418 1418
1419 if (chan_is_physical(d40c)) 1419 if (chan_is_physical(d40c))
1420 return phy_map[d40c->dma_cfg.mode_opt]; 1420 return phy_map[d40c->dma_cfg.mode_opt];
1421 else 1421 else
1422 return log_map[d40c->dma_cfg.mode_opt]; 1422 return log_map[d40c->dma_cfg.mode_opt];
1423 } 1423 }
1424 1424
1425 static void d40_config_write(struct d40_chan *d40c) 1425 static void d40_config_write(struct d40_chan *d40c)
1426 { 1426 {
1427 u32 addr_base; 1427 u32 addr_base;
1428 u32 var; 1428 u32 var;
1429 1429
1430 /* Odd addresses are even addresses + 4 */ 1430 /* Odd addresses are even addresses + 4 */
1431 addr_base = (d40c->phy_chan->num % 2) * 4; 1431 addr_base = (d40c->phy_chan->num % 2) * 4;
1432 /* Setup channel mode to logical or physical */ 1432 /* Setup channel mode to logical or physical */
1433 var = ((u32)(chan_is_logical(d40c)) + 1) << 1433 var = ((u32)(chan_is_logical(d40c)) + 1) <<
1434 D40_CHAN_POS(d40c->phy_chan->num); 1434 D40_CHAN_POS(d40c->phy_chan->num);
1435 writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base); 1435 writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base);
1436 1436
1437 /* Setup operational mode option register */ 1437 /* Setup operational mode option register */
1438 var = d40_get_prmo(d40c) << D40_CHAN_POS(d40c->phy_chan->num); 1438 var = d40_get_prmo(d40c) << D40_CHAN_POS(d40c->phy_chan->num);
1439 1439
1440 writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base); 1440 writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base);
1441 1441
1442 if (chan_is_logical(d40c)) { 1442 if (chan_is_logical(d40c)) {
1443 int lidx = (d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) 1443 int lidx = (d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS)
1444 & D40_SREG_ELEM_LOG_LIDX_MASK; 1444 & D40_SREG_ELEM_LOG_LIDX_MASK;
1445 void __iomem *chanbase = chan_base(d40c); 1445 void __iomem *chanbase = chan_base(d40c);
1446 1446
1447 /* Set default config for CFG reg */ 1447 /* Set default config for CFG reg */
1448 writel(d40c->src_def_cfg, chanbase + D40_CHAN_REG_SSCFG); 1448 writel(d40c->src_def_cfg, chanbase + D40_CHAN_REG_SSCFG);
1449 writel(d40c->dst_def_cfg, chanbase + D40_CHAN_REG_SDCFG); 1449 writel(d40c->dst_def_cfg, chanbase + D40_CHAN_REG_SDCFG);
1450 1450
1451 /* Set LIDX for lcla */ 1451 /* Set LIDX for lcla */
1452 writel(lidx, chanbase + D40_CHAN_REG_SSELT); 1452 writel(lidx, chanbase + D40_CHAN_REG_SSELT);
1453 writel(lidx, chanbase + D40_CHAN_REG_SDELT); 1453 writel(lidx, chanbase + D40_CHAN_REG_SDELT);
1454 1454
1455 /* Clear LNK which will be used by d40_chan_has_events() */ 1455 /* Clear LNK which will be used by d40_chan_has_events() */
1456 writel(0, chanbase + D40_CHAN_REG_SSLNK); 1456 writel(0, chanbase + D40_CHAN_REG_SSLNK);
1457 writel(0, chanbase + D40_CHAN_REG_SDLNK); 1457 writel(0, chanbase + D40_CHAN_REG_SDLNK);
1458 } 1458 }
1459 } 1459 }
1460 1460
1461 static u32 d40_residue(struct d40_chan *d40c) 1461 static u32 d40_residue(struct d40_chan *d40c)
1462 { 1462 {
1463 u32 num_elt; 1463 u32 num_elt;
1464 1464
1465 if (chan_is_logical(d40c)) 1465 if (chan_is_logical(d40c))
1466 num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK) 1466 num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK)
1467 >> D40_MEM_LCSP2_ECNT_POS; 1467 >> D40_MEM_LCSP2_ECNT_POS;
1468 else { 1468 else {
1469 u32 val = readl(chan_base(d40c) + D40_CHAN_REG_SDELT); 1469 u32 val = readl(chan_base(d40c) + D40_CHAN_REG_SDELT);
1470 num_elt = (val & D40_SREG_ELEM_PHY_ECNT_MASK) 1470 num_elt = (val & D40_SREG_ELEM_PHY_ECNT_MASK)
1471 >> D40_SREG_ELEM_PHY_ECNT_POS; 1471 >> D40_SREG_ELEM_PHY_ECNT_POS;
1472 } 1472 }
1473 1473
1474 return num_elt * d40c->dma_cfg.dst_info.data_width; 1474 return num_elt * d40c->dma_cfg.dst_info.data_width;
1475 } 1475 }
1476 1476
1477 static bool d40_tx_is_linked(struct d40_chan *d40c) 1477 static bool d40_tx_is_linked(struct d40_chan *d40c)
1478 { 1478 {
1479 bool is_link; 1479 bool is_link;
1480 1480
1481 if (chan_is_logical(d40c)) 1481 if (chan_is_logical(d40c))
1482 is_link = readl(&d40c->lcpa->lcsp3) & D40_MEM_LCSP3_DLOS_MASK; 1482 is_link = readl(&d40c->lcpa->lcsp3) & D40_MEM_LCSP3_DLOS_MASK;
1483 else 1483 else
1484 is_link = readl(chan_base(d40c) + D40_CHAN_REG_SDLNK) 1484 is_link = readl(chan_base(d40c) + D40_CHAN_REG_SDLNK)
1485 & D40_SREG_LNK_PHYS_LNK_MASK; 1485 & D40_SREG_LNK_PHYS_LNK_MASK;
1486 1486
1487 return is_link; 1487 return is_link;
1488 } 1488 }
1489 1489
1490 static int d40_pause(struct d40_chan *d40c) 1490 static int d40_pause(struct d40_chan *d40c)
1491 { 1491 {
1492 int res = 0; 1492 int res = 0;
1493 unsigned long flags; 1493 unsigned long flags;
1494 1494
1495 if (!d40c->busy) 1495 if (!d40c->busy)
1496 return 0; 1496 return 0;
1497 1497
1498 pm_runtime_get_sync(d40c->base->dev); 1498 pm_runtime_get_sync(d40c->base->dev);
1499 spin_lock_irqsave(&d40c->lock, flags); 1499 spin_lock_irqsave(&d40c->lock, flags);
1500 1500
1501 res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ); 1501 res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
1502 1502
1503 pm_runtime_mark_last_busy(d40c->base->dev); 1503 pm_runtime_mark_last_busy(d40c->base->dev);
1504 pm_runtime_put_autosuspend(d40c->base->dev); 1504 pm_runtime_put_autosuspend(d40c->base->dev);
1505 spin_unlock_irqrestore(&d40c->lock, flags); 1505 spin_unlock_irqrestore(&d40c->lock, flags);
1506 return res; 1506 return res;
1507 } 1507 }
1508 1508
1509 static int d40_resume(struct d40_chan *d40c) 1509 static int d40_resume(struct d40_chan *d40c)
1510 { 1510 {
1511 int res = 0; 1511 int res = 0;
1512 unsigned long flags; 1512 unsigned long flags;
1513 1513
1514 if (!d40c->busy) 1514 if (!d40c->busy)
1515 return 0; 1515 return 0;
1516 1516
1517 spin_lock_irqsave(&d40c->lock, flags); 1517 spin_lock_irqsave(&d40c->lock, flags);
1518 pm_runtime_get_sync(d40c->base->dev); 1518 pm_runtime_get_sync(d40c->base->dev);
1519 1519
1520 /* If there are bytes left to transfer or a linked tx, resume the job */ 1520 /* If there are bytes left to transfer or a linked tx, resume the job */
1521 if (d40_residue(d40c) || d40_tx_is_linked(d40c)) 1521 if (d40_residue(d40c) || d40_tx_is_linked(d40c))
1522 res = d40_channel_execute_command(d40c, D40_DMA_RUN); 1522 res = d40_channel_execute_command(d40c, D40_DMA_RUN);
1523 1523
1524 pm_runtime_mark_last_busy(d40c->base->dev); 1524 pm_runtime_mark_last_busy(d40c->base->dev);
1525 pm_runtime_put_autosuspend(d40c->base->dev); 1525 pm_runtime_put_autosuspend(d40c->base->dev);
1526 spin_unlock_irqrestore(&d40c->lock, flags); 1526 spin_unlock_irqrestore(&d40c->lock, flags);
1527 return res; 1527 return res;
1528 } 1528 }
1529 1529
1530 static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx) 1530 static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
1531 { 1531 {
1532 struct d40_chan *d40c = container_of(tx->chan, 1532 struct d40_chan *d40c = container_of(tx->chan,
1533 struct d40_chan, 1533 struct d40_chan,
1534 chan); 1534 chan);
1535 struct d40_desc *d40d = container_of(tx, struct d40_desc, txd); 1535 struct d40_desc *d40d = container_of(tx, struct d40_desc, txd);
1536 unsigned long flags; 1536 unsigned long flags;
1537 dma_cookie_t cookie; 1537 dma_cookie_t cookie;
1538 1538
1539 spin_lock_irqsave(&d40c->lock, flags); 1539 spin_lock_irqsave(&d40c->lock, flags);
1540 cookie = dma_cookie_assign(tx); 1540 cookie = dma_cookie_assign(tx);
1541 d40_desc_queue(d40c, d40d); 1541 d40_desc_queue(d40c, d40d);
1542 spin_unlock_irqrestore(&d40c->lock, flags); 1542 spin_unlock_irqrestore(&d40c->lock, flags);
1543 1543
1544 return cookie; 1544 return cookie;
1545 } 1545 }
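	/*
	 * Editor's note: illustrative sketch, not part of this commit. A
	 * dmaengine client does not call d40_tx_submit() directly; it reaches
	 * it through the generic helpers after preparing a descriptor on this
	 * channel, e.g.:
	 *
	 *	cookie = dmaengine_submit(desc);
	 *	if (dma_submit_error(cookie))
	 *		return -EIO;
	 *	dma_async_issue_pending(chan);
	 *
	 * dmaengine_submit() ends up here via desc->tx_submit.
	 */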
1546 1546
1547 static int d40_start(struct d40_chan *d40c) 1547 static int d40_start(struct d40_chan *d40c)
1548 { 1548 {
1549 return d40_channel_execute_command(d40c, D40_DMA_RUN); 1549 return d40_channel_execute_command(d40c, D40_DMA_RUN);
1550 } 1550 }
1551 1551
1552 static struct d40_desc *d40_queue_start(struct d40_chan *d40c) 1552 static struct d40_desc *d40_queue_start(struct d40_chan *d40c)
1553 { 1553 {
1554 struct d40_desc *d40d; 1554 struct d40_desc *d40d;
1555 int err; 1555 int err;
1556 1556
1557 /* Start queued jobs, if any */ 1557 /* Start queued jobs, if any */
1558 d40d = d40_first_queued(d40c); 1558 d40d = d40_first_queued(d40c);
1559 1559
1560 if (d40d != NULL) { 1560 if (d40d != NULL) {
1561 if (!d40c->busy) { 1561 if (!d40c->busy) {
1562 d40c->busy = true; 1562 d40c->busy = true;
1563 pm_runtime_get_sync(d40c->base->dev); 1563 pm_runtime_get_sync(d40c->base->dev);
1564 } 1564 }
1565 1565
1566 /* Remove from queue */ 1566 /* Remove from queue */
1567 d40_desc_remove(d40d); 1567 d40_desc_remove(d40d);
1568 1568
1569 /* Add to active queue */ 1569 /* Add to active queue */
1570 d40_desc_submit(d40c, d40d); 1570 d40_desc_submit(d40c, d40d);
1571 1571
1572 /* Initiate DMA job */ 1572 /* Initiate DMA job */
1573 d40_desc_load(d40c, d40d); 1573 d40_desc_load(d40c, d40d);
1574 1574
1575 /* Start dma job */ 1575 /* Start dma job */
1576 err = d40_start(d40c); 1576 err = d40_start(d40c);
1577 1577
1578 if (err) 1578 if (err)
1579 return NULL; 1579 return NULL;
1580 } 1580 }
1581 1581
1582 return d40d; 1582 return d40d;
1583 } 1583 }
1584 1584
1585 /* called from interrupt context */ 1585 /* called from interrupt context */
1586 static void dma_tc_handle(struct d40_chan *d40c) 1586 static void dma_tc_handle(struct d40_chan *d40c)
1587 { 1587 {
1588 struct d40_desc *d40d; 1588 struct d40_desc *d40d;
1589 1589
1590 /* Get first active entry from list */ 1590 /* Get first active entry from list */
1591 d40d = d40_first_active_get(d40c); 1591 d40d = d40_first_active_get(d40c);
1592 1592
1593 if (d40d == NULL) 1593 if (d40d == NULL)
1594 return; 1594 return;
1595 1595
1596 if (d40d->cyclic) { 1596 if (d40d->cyclic) {
1597 /* 1597 /*
1598 * If this was a partially loaded list, we need to reload 1598 * If this was a partially loaded list, we need to reload
1599 * it, but only when the list is completed. We need to check 1599 * it, but only when the list is completed. We need to check
1600 * for done because the interrupt will hit for every link, and 1600 * for done because the interrupt will hit for every link, and
1601 * not just the last one. 1601 * not just the last one.
1602 */ 1602 */
1603 if (d40d->lli_current < d40d->lli_len 1603 if (d40d->lli_current < d40d->lli_len
1604 && !d40_tx_is_linked(d40c) 1604 && !d40_tx_is_linked(d40c)
1605 && !d40_residue(d40c)) { 1605 && !d40_residue(d40c)) {
1606 d40_lcla_free_all(d40c, d40d); 1606 d40_lcla_free_all(d40c, d40d);
1607 d40_desc_load(d40c, d40d); 1607 d40_desc_load(d40c, d40d);
1608 (void) d40_start(d40c); 1608 (void) d40_start(d40c);
1609 1609
1610 if (d40d->lli_current == d40d->lli_len) 1610 if (d40d->lli_current == d40d->lli_len)
1611 d40d->lli_current = 0; 1611 d40d->lli_current = 0;
1612 } 1612 }
1613 } else { 1613 } else {
1614 d40_lcla_free_all(d40c, d40d); 1614 d40_lcla_free_all(d40c, d40d);
1615 1615
1616 if (d40d->lli_current < d40d->lli_len) { 1616 if (d40d->lli_current < d40d->lli_len) {
1617 d40_desc_load(d40c, d40d); 1617 d40_desc_load(d40c, d40d);
1618 /* Start dma job */ 1618 /* Start dma job */
1619 (void) d40_start(d40c); 1619 (void) d40_start(d40c);
1620 return; 1620 return;
1621 } 1621 }
1622 1622
1623 if (d40_queue_start(d40c) == NULL) { 1623 if (d40_queue_start(d40c) == NULL) {
1624 d40c->busy = false; 1624 d40c->busy = false;
1625 1625
1626 pm_runtime_mark_last_busy(d40c->base->dev); 1626 pm_runtime_mark_last_busy(d40c->base->dev);
1627 pm_runtime_put_autosuspend(d40c->base->dev); 1627 pm_runtime_put_autosuspend(d40c->base->dev);
1628 } 1628 }
1629 1629
1630 d40_desc_remove(d40d); 1630 d40_desc_remove(d40d);
1631 d40_desc_done(d40c, d40d); 1631 d40_desc_done(d40c, d40d);
1632 } 1632 }
1633 1633
1634 d40c->pending_tx++; 1634 d40c->pending_tx++;
1635 tasklet_schedule(&d40c->tasklet); 1635 tasklet_schedule(&d40c->tasklet);
1636 1636
1637 } 1637 }
1638 1638
1639 static void dma_tasklet(unsigned long data) 1639 static void dma_tasklet(unsigned long data)
1640 { 1640 {
1641 struct d40_chan *d40c = (struct d40_chan *) data; 1641 struct d40_chan *d40c = (struct d40_chan *) data;
1642 struct d40_desc *d40d; 1642 struct d40_desc *d40d;
1643 unsigned long flags; 1643 unsigned long flags;
1644 dma_async_tx_callback callback; 1644 dma_async_tx_callback callback;
1645 void *callback_param; 1645 void *callback_param;
1646 1646
1647 spin_lock_irqsave(&d40c->lock, flags); 1647 spin_lock_irqsave(&d40c->lock, flags);
1648 1648
1649 /* Get first entry from the done list */ 1649 /* Get first entry from the done list */
1650 d40d = d40_first_done(d40c); 1650 d40d = d40_first_done(d40c);
1651 if (d40d == NULL) { 1651 if (d40d == NULL) {
1652 /* Check if we have reached here for a cyclic job */ 1652 /* Check if we have reached here for a cyclic job */
1653 d40d = d40_first_active_get(d40c); 1653 d40d = d40_first_active_get(d40c);
1654 if (d40d == NULL || !d40d->cyclic) 1654 if (d40d == NULL || !d40d->cyclic)
1655 goto err; 1655 goto err;
1656 } 1656 }
1657 1657
1658 if (!d40d->cyclic) 1658 if (!d40d->cyclic)
1659 dma_cookie_complete(&d40d->txd); 1659 dma_cookie_complete(&d40d->txd);
1660 1660
1661 /* 1661 /*
1662 * When terminating a channel, pending_tx is set to zero. 1662 * When terminating a channel, pending_tx is set to zero.
1663 * This prevents any finished active jobs from returning to the client. 1663 * This prevents any finished active jobs from returning to the client.
1664 */ 1664 */
1665 if (d40c->pending_tx == 0) { 1665 if (d40c->pending_tx == 0) {
1666 spin_unlock_irqrestore(&d40c->lock, flags); 1666 spin_unlock_irqrestore(&d40c->lock, flags);
1667 return; 1667 return;
1668 } 1668 }
1669 1669
1670 /* Callback to client */ 1670 /* Callback to client */
1671 callback = d40d->txd.callback; 1671 callback = d40d->txd.callback;
1672 callback_param = d40d->txd.callback_param; 1672 callback_param = d40d->txd.callback_param;
1673 1673
1674 if (!d40d->cyclic) { 1674 if (!d40d->cyclic) {
1675 if (async_tx_test_ack(&d40d->txd)) { 1675 if (async_tx_test_ack(&d40d->txd)) {
1676 d40_desc_remove(d40d); 1676 d40_desc_remove(d40d);
1677 d40_desc_free(d40c, d40d); 1677 d40_desc_free(d40c, d40d);
1678 } else if (!d40d->is_in_client_list) { 1678 } else if (!d40d->is_in_client_list) {
1679 d40_desc_remove(d40d); 1679 d40_desc_remove(d40d);
1680 d40_lcla_free_all(d40c, d40d); 1680 d40_lcla_free_all(d40c, d40d);
1681 list_add_tail(&d40d->node, &d40c->client); 1681 list_add_tail(&d40d->node, &d40c->client);
1682 d40d->is_in_client_list = true; 1682 d40d->is_in_client_list = true;
1683 } 1683 }
1684 } 1684 }
1685 1685
1686 d40c->pending_tx--; 1686 d40c->pending_tx--;
1687 1687
1688 if (d40c->pending_tx) 1688 if (d40c->pending_tx)
1689 tasklet_schedule(&d40c->tasklet); 1689 tasklet_schedule(&d40c->tasklet);
1690 1690
1691 spin_unlock_irqrestore(&d40c->lock, flags); 1691 spin_unlock_irqrestore(&d40c->lock, flags);
1692 1692
1693 if (callback && (d40d->txd.flags & DMA_PREP_INTERRUPT)) 1693 if (callback && (d40d->txd.flags & DMA_PREP_INTERRUPT))
1694 callback(callback_param); 1694 callback(callback_param);
1695 1695
1696 return; 1696 return;
1697 1697
1698 err: 1698 err:
1699 /* Rescue maneuver if receiving double interrupts */ 1699 /* Rescue maneuver if receiving double interrupts */
1700 if (d40c->pending_tx > 0) 1700 if (d40c->pending_tx > 0)
1701 d40c->pending_tx--; 1701 d40c->pending_tx--;
1702 spin_unlock_irqrestore(&d40c->lock, flags); 1702 spin_unlock_irqrestore(&d40c->lock, flags);
1703 } 1703 }
1704 1704
1705 static irqreturn_t d40_handle_interrupt(int irq, void *data) 1705 static irqreturn_t d40_handle_interrupt(int irq, void *data)
1706 { 1706 {
1707 int i; 1707 int i;
1708 u32 idx; 1708 u32 idx;
1709 u32 row; 1709 u32 row;
1710 long chan = -1; 1710 long chan = -1;
1711 struct d40_chan *d40c; 1711 struct d40_chan *d40c;
1712 unsigned long flags; 1712 unsigned long flags;
1713 struct d40_base *base = data; 1713 struct d40_base *base = data;
1714 u32 regs[base->gen_dmac.il_size]; 1714 u32 regs[base->gen_dmac.il_size];
1715 struct d40_interrupt_lookup *il = base->gen_dmac.il; 1715 struct d40_interrupt_lookup *il = base->gen_dmac.il;
1716 u32 il_size = base->gen_dmac.il_size; 1716 u32 il_size = base->gen_dmac.il_size;
1717 1717
1718 spin_lock_irqsave(&base->interrupt_lock, flags); 1718 spin_lock_irqsave(&base->interrupt_lock, flags);
1719 1719
1720 /* Read interrupt status of both logical and physical channels */ 1720 /* Read interrupt status of both logical and physical channels */
1721 for (i = 0; i < il_size; i++) 1721 for (i = 0; i < il_size; i++)
1722 regs[i] = readl(base->virtbase + il[i].src); 1722 regs[i] = readl(base->virtbase + il[i].src);
1723 1723
1724 for (;;) { 1724 for (;;) {
1725 1725
1726 chan = find_next_bit((unsigned long *)regs, 1726 chan = find_next_bit((unsigned long *)regs,
1727 BITS_PER_LONG * il_size, chan + 1); 1727 BITS_PER_LONG * il_size, chan + 1);
1728 1728
1729 /* No more set bits found? */ 1729 /* No more set bits found? */
1730 if (chan == BITS_PER_LONG * il_size) 1730 if (chan == BITS_PER_LONG * il_size)
1731 break; 1731 break;
1732 1732
1733 row = chan / BITS_PER_LONG; 1733 row = chan / BITS_PER_LONG;
1734 idx = chan & (BITS_PER_LONG - 1); 1734 idx = chan & (BITS_PER_LONG - 1);
1735 1735
1736 if (il[row].offset == D40_PHY_CHAN) 1736 if (il[row].offset == D40_PHY_CHAN)
1737 d40c = base->lookup_phy_chans[idx]; 1737 d40c = base->lookup_phy_chans[idx];
1738 else 1738 else
1739 d40c = base->lookup_log_chans[il[row].offset + idx]; 1739 d40c = base->lookup_log_chans[il[row].offset + idx];
1740 1740
1741 if (!d40c) { 1741 if (!d40c) {
1742 /* 1742 /*
1743 * No error because this can happen if something else 1743 * No error because this can happen if something else
1744 * in the system is using the channel. 1744 * in the system is using the channel.
1745 */ 1745 */
1746 continue; 1746 continue;
1747 } 1747 }
1748 1748
1749 /* ACK interrupt */ 1749 /* ACK interrupt */
1750 writel(BIT(idx), base->virtbase + il[row].clr); 1750 writel(BIT(idx), base->virtbase + il[row].clr);
1751 1751
1752 spin_lock(&d40c->lock); 1752 spin_lock(&d40c->lock);
1753 1753
1754 if (!il[row].is_error) 1754 if (!il[row].is_error)
1755 dma_tc_handle(d40c); 1755 dma_tc_handle(d40c);
1756 else 1756 else
1757 d40_err(base->dev, "IRQ chan: %ld offset %d idx %d\n", 1757 d40_err(base->dev, "IRQ chan: %ld offset %d idx %d\n",
1758 chan, il[row].offset, idx); 1758 chan, il[row].offset, idx);
1759 1759
1760 spin_unlock(&d40c->lock); 1760 spin_unlock(&d40c->lock);
1761 } 1761 }
1762 1762
1763 spin_unlock_irqrestore(&base->interrupt_lock, flags); 1763 spin_unlock_irqrestore(&base->interrupt_lock, flags);
1764 1764
1765 return IRQ_HANDLED; 1765 return IRQ_HANDLED;
1766 } 1766 }
1767 1767
1768 static int d40_validate_conf(struct d40_chan *d40c, 1768 static int d40_validate_conf(struct d40_chan *d40c,
1769 struct stedma40_chan_cfg *conf) 1769 struct stedma40_chan_cfg *conf)
1770 { 1770 {
1771 int res = 0; 1771 int res = 0;
1772 bool is_log = conf->mode == STEDMA40_MODE_LOGICAL; 1772 bool is_log = conf->mode == STEDMA40_MODE_LOGICAL;
1773 1773
1774 if (!conf->dir) { 1774 if (!conf->dir) {
1775 chan_err(d40c, "Invalid direction.\n"); 1775 chan_err(d40c, "Invalid direction.\n");
1776 res = -EINVAL; 1776 res = -EINVAL;
1777 } 1777 }
1778 1778
1779 if ((is_log && conf->dev_type > d40c->base->num_log_chans) || 1779 if ((is_log && conf->dev_type > d40c->base->num_log_chans) ||
1780 (!is_log && conf->dev_type > d40c->base->num_phy_chans) || 1780 (!is_log && conf->dev_type > d40c->base->num_phy_chans) ||
1781 (conf->dev_type < 0)) { 1781 (conf->dev_type < 0)) {
1782 chan_err(d40c, "Invalid device type (%d)\n", conf->dev_type); 1782 chan_err(d40c, "Invalid device type (%d)\n", conf->dev_type);
1783 res = -EINVAL; 1783 res = -EINVAL;
1784 } 1784 }
1785 1785
1786 if (conf->dir == DMA_DEV_TO_DEV) { 1786 if (conf->dir == DMA_DEV_TO_DEV) {
1787 /* 1787 /*
1788 * The DMAC HW supports it. Support will be added to this 1788 * The DMAC HW supports it. Support will be added to this
1789 * driver if any dma client requires it. 1789 * driver if any dma client requires it.
1790 */ 1790 */
1791 chan_err(d40c, "periph to periph not supported\n"); 1791 chan_err(d40c, "periph to periph not supported\n");
1792 res = -EINVAL; 1792 res = -EINVAL;
1793 } 1793 }
1794 1794
1795 if (d40_psize_2_burst_size(is_log, conf->src_info.psize) * 1795 if (d40_psize_2_burst_size(is_log, conf->src_info.psize) *
1796 conf->src_info.data_width != 1796 conf->src_info.data_width !=
1797 d40_psize_2_burst_size(is_log, conf->dst_info.psize) * 1797 d40_psize_2_burst_size(is_log, conf->dst_info.psize) *
1798 conf->dst_info.data_width) { 1798 conf->dst_info.data_width) {
1799 /* 1799 /*
1800 * The DMAC hardware only supports 1800 * The DMAC hardware only supports
1801 * src (burst x width) == dst (burst x width) 1801 * src (burst x width) == dst (burst x width)
1802 */ 1802 */
1803 1803
1804 chan_err(d40c, "src (burst x width) != dst (burst x width)\n"); 1804 chan_err(d40c, "src (burst x width) != dst (burst x width)\n");
1805 res = -EINVAL; 1805 res = -EINVAL;
1806 } 1806 }
1807 1807
1808 return res; 1808 return res;
1809 } 1809 }
1810 1810
1811 static bool d40_alloc_mask_set(struct d40_phy_res *phy, 1811 static bool d40_alloc_mask_set(struct d40_phy_res *phy,
1812 bool is_src, int log_event_line, bool is_log, 1812 bool is_src, int log_event_line, bool is_log,
1813 bool *first_user) 1813 bool *first_user)
1814 { 1814 {
1815 unsigned long flags; 1815 unsigned long flags;
1816 spin_lock_irqsave(&phy->lock, flags); 1816 spin_lock_irqsave(&phy->lock, flags);
1817 1817
1818 *first_user = ((phy->allocated_src | phy->allocated_dst) 1818 *first_user = ((phy->allocated_src | phy->allocated_dst)
1819 == D40_ALLOC_FREE); 1819 == D40_ALLOC_FREE);
1820 1820
1821 if (!is_log) { 1821 if (!is_log) {
1822 /* Physical interrupts are masked per full physical channel */ 1822 /* Physical interrupts are masked per full physical channel */
1823 if (phy->allocated_src == D40_ALLOC_FREE && 1823 if (phy->allocated_src == D40_ALLOC_FREE &&
1824 phy->allocated_dst == D40_ALLOC_FREE) { 1824 phy->allocated_dst == D40_ALLOC_FREE) {
1825 phy->allocated_dst = D40_ALLOC_PHY; 1825 phy->allocated_dst = D40_ALLOC_PHY;
1826 phy->allocated_src = D40_ALLOC_PHY; 1826 phy->allocated_src = D40_ALLOC_PHY;
1827 goto found; 1827 goto found;
1828 } else 1828 } else
1829 goto not_found; 1829 goto not_found;
1830 } 1830 }
1831 1831
1832 /* Logical channel */ 1832 /* Logical channel */
1833 if (is_src) { 1833 if (is_src) {
1834 if (phy->allocated_src == D40_ALLOC_PHY) 1834 if (phy->allocated_src == D40_ALLOC_PHY)
1835 goto not_found; 1835 goto not_found;
1836 1836
1837 if (phy->allocated_src == D40_ALLOC_FREE) 1837 if (phy->allocated_src == D40_ALLOC_FREE)
1838 phy->allocated_src = D40_ALLOC_LOG_FREE; 1838 phy->allocated_src = D40_ALLOC_LOG_FREE;
1839 1839
1840 if (!(phy->allocated_src & BIT(log_event_line))) { 1840 if (!(phy->allocated_src & BIT(log_event_line))) {
1841 phy->allocated_src |= BIT(log_event_line); 1841 phy->allocated_src |= BIT(log_event_line);
1842 goto found; 1842 goto found;
1843 } else 1843 } else
1844 goto not_found; 1844 goto not_found;
1845 } else { 1845 } else {
1846 if (phy->allocated_dst == D40_ALLOC_PHY) 1846 if (phy->allocated_dst == D40_ALLOC_PHY)
1847 goto not_found; 1847 goto not_found;
1848 1848
1849 if (phy->allocated_dst == D40_ALLOC_FREE) 1849 if (phy->allocated_dst == D40_ALLOC_FREE)
1850 phy->allocated_dst = D40_ALLOC_LOG_FREE; 1850 phy->allocated_dst = D40_ALLOC_LOG_FREE;
1851 1851
1852 if (!(phy->allocated_dst & BIT(log_event_line))) { 1852 if (!(phy->allocated_dst & BIT(log_event_line))) {
1853 phy->allocated_dst |= BIT(log_event_line); 1853 phy->allocated_dst |= BIT(log_event_line);
1854 goto found; 1854 goto found;
1855 } else 1855 } else
1856 goto not_found; 1856 goto not_found;
1857 } 1857 }
1858 1858
1859 not_found: 1859 not_found:
1860 spin_unlock_irqrestore(&phy->lock, flags); 1860 spin_unlock_irqrestore(&phy->lock, flags);
1861 return false; 1861 return false;
1862 found: 1862 found:
1863 spin_unlock_irqrestore(&phy->lock, flags); 1863 spin_unlock_irqrestore(&phy->lock, flags);
1864 return true; 1864 return true;
1865 } 1865 }
1866 1866
1867 static bool d40_alloc_mask_free(struct d40_phy_res *phy, bool is_src, 1867 static bool d40_alloc_mask_free(struct d40_phy_res *phy, bool is_src,
1868 int log_event_line) 1868 int log_event_line)
1869 { 1869 {
1870 unsigned long flags; 1870 unsigned long flags;
1871 bool is_free = false; 1871 bool is_free = false;
1872 1872
1873 spin_lock_irqsave(&phy->lock, flags); 1873 spin_lock_irqsave(&phy->lock, flags);
1874 if (!log_event_line) { 1874 if (!log_event_line) {
1875 phy->allocated_dst = D40_ALLOC_FREE; 1875 phy->allocated_dst = D40_ALLOC_FREE;
1876 phy->allocated_src = D40_ALLOC_FREE; 1876 phy->allocated_src = D40_ALLOC_FREE;
1877 is_free = true; 1877 is_free = true;
1878 goto out; 1878 goto out;
1879 } 1879 }
1880 1880
1881 /* Logical channel */ 1881 /* Logical channel */
1882 if (is_src) { 1882 if (is_src) {
1883 phy->allocated_src &= ~BIT(log_event_line); 1883 phy->allocated_src &= ~BIT(log_event_line);
1884 if (phy->allocated_src == D40_ALLOC_LOG_FREE) 1884 if (phy->allocated_src == D40_ALLOC_LOG_FREE)
1885 phy->allocated_src = D40_ALLOC_FREE; 1885 phy->allocated_src = D40_ALLOC_FREE;
1886 } else { 1886 } else {
1887 phy->allocated_dst &= ~BIT(log_event_line); 1887 phy->allocated_dst &= ~BIT(log_event_line);
1888 if (phy->allocated_dst == D40_ALLOC_LOG_FREE) 1888 if (phy->allocated_dst == D40_ALLOC_LOG_FREE)
1889 phy->allocated_dst = D40_ALLOC_FREE; 1889 phy->allocated_dst = D40_ALLOC_FREE;
1890 } 1890 }
1891 1891
1892 is_free = ((phy->allocated_src | phy->allocated_dst) == 1892 is_free = ((phy->allocated_src | phy->allocated_dst) ==
1893 D40_ALLOC_FREE); 1893 D40_ALLOC_FREE);
1894 1894
1895 out: 1895 out:
1896 spin_unlock_irqrestore(&phy->lock, flags); 1896 spin_unlock_irqrestore(&phy->lock, flags);
1897 1897
1898 return is_free; 1898 return is_free;
1899 } 1899 }
1900 1900
1901 static int d40_allocate_channel(struct d40_chan *d40c, bool *first_phy_user) 1901 static int d40_allocate_channel(struct d40_chan *d40c, bool *first_phy_user)
1902 { 1902 {
1903 int dev_type = d40c->dma_cfg.dev_type; 1903 int dev_type = d40c->dma_cfg.dev_type;
1904 int event_group; 1904 int event_group;
1905 int event_line; 1905 int event_line;
1906 struct d40_phy_res *phys; 1906 struct d40_phy_res *phys;
1907 int i; 1907 int i;
1908 int j; 1908 int j;
1909 int log_num; 1909 int log_num;
1910 int num_phy_chans; 1910 int num_phy_chans;
1911 bool is_src; 1911 bool is_src;
1912 bool is_log = d40c->dma_cfg.mode == STEDMA40_MODE_LOGICAL; 1912 bool is_log = d40c->dma_cfg.mode == STEDMA40_MODE_LOGICAL;
1913 1913
1914 phys = d40c->base->phy_res; 1914 phys = d40c->base->phy_res;
1915 num_phy_chans = d40c->base->num_phy_chans; 1915 num_phy_chans = d40c->base->num_phy_chans;
1916 1916
1917 if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM) { 1917 if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM) {
1918 log_num = 2 * dev_type; 1918 log_num = 2 * dev_type;
1919 is_src = true; 1919 is_src = true;
1920 } else if (d40c->dma_cfg.dir == DMA_MEM_TO_DEV || 1920 } else if (d40c->dma_cfg.dir == DMA_MEM_TO_DEV ||
1921 d40c->dma_cfg.dir == DMA_MEM_TO_MEM) { 1921 d40c->dma_cfg.dir == DMA_MEM_TO_MEM) {
1922 /* dst event lines are used for logical memcpy */ 1922 /* dst event lines are used for logical memcpy */
1923 log_num = 2 * dev_type + 1; 1923 log_num = 2 * dev_type + 1;
1924 is_src = false; 1924 is_src = false;
1925 } else 1925 } else
1926 return -EINVAL; 1926 return -EINVAL;
1927 1927
1928 event_group = D40_TYPE_TO_GROUP(dev_type); 1928 event_group = D40_TYPE_TO_GROUP(dev_type);
1929 event_line = D40_TYPE_TO_EVENT(dev_type); 1929 event_line = D40_TYPE_TO_EVENT(dev_type);
1930 1930
1931 if (!is_log) { 1931 if (!is_log) {
1932 if (d40c->dma_cfg.dir == DMA_MEM_TO_MEM) { 1932 if (d40c->dma_cfg.dir == DMA_MEM_TO_MEM) {
1933 /* Find physical half channel */ 1933 /* Find physical half channel */
1934 if (d40c->dma_cfg.use_fixed_channel) { 1934 if (d40c->dma_cfg.use_fixed_channel) {
1935 i = d40c->dma_cfg.phy_channel; 1935 i = d40c->dma_cfg.phy_channel;
1936 if (d40_alloc_mask_set(&phys[i], is_src, 1936 if (d40_alloc_mask_set(&phys[i], is_src,
1937 0, is_log, 1937 0, is_log,
1938 first_phy_user)) 1938 first_phy_user))
1939 goto found_phy; 1939 goto found_phy;
1940 } else { 1940 } else {
1941 for (i = 0; i < num_phy_chans; i++) { 1941 for (i = 0; i < num_phy_chans; i++) {
1942 if (d40_alloc_mask_set(&phys[i], is_src, 1942 if (d40_alloc_mask_set(&phys[i], is_src,
1943 0, is_log, 1943 0, is_log,
1944 first_phy_user)) 1944 first_phy_user))
1945 goto found_phy; 1945 goto found_phy;
1946 } 1946 }
1947 } 1947 }
1948 } else 1948 } else
1949 for (j = 0; j < d40c->base->num_phy_chans; j += 8) { 1949 for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
1950 int phy_num = j + event_group * 2; 1950 int phy_num = j + event_group * 2;
1951 for (i = phy_num; i < phy_num + 2; i++) { 1951 for (i = phy_num; i < phy_num + 2; i++) {
1952 if (d40_alloc_mask_set(&phys[i], 1952 if (d40_alloc_mask_set(&phys[i],
1953 is_src, 1953 is_src,
1954 0, 1954 0,
1955 is_log, 1955 is_log,
1956 first_phy_user)) 1956 first_phy_user))
1957 goto found_phy; 1957 goto found_phy;
1958 } 1958 }
1959 } 1959 }
1960 return -EINVAL; 1960 return -EINVAL;
1961 found_phy: 1961 found_phy:
1962 d40c->phy_chan = &phys[i]; 1962 d40c->phy_chan = &phys[i];
1963 d40c->log_num = D40_PHY_CHAN; 1963 d40c->log_num = D40_PHY_CHAN;
1964 goto out; 1964 goto out;
1965 } 1965 }
1966 if (dev_type == -1) 1966 if (dev_type == -1)
1967 return -EINVAL; 1967 return -EINVAL;
1968 1968
1969 /* Find logical channel */ 1969 /* Find logical channel */
1970 for (j = 0; j < d40c->base->num_phy_chans; j += 8) { 1970 for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
1971 int phy_num = j + event_group * 2; 1971 int phy_num = j + event_group * 2;
1972 1972
1973 if (d40c->dma_cfg.use_fixed_channel) { 1973 if (d40c->dma_cfg.use_fixed_channel) {
1974 i = d40c->dma_cfg.phy_channel; 1974 i = d40c->dma_cfg.phy_channel;
1975 1975
1976 if ((i != phy_num) && (i != phy_num + 1)) { 1976 if ((i != phy_num) && (i != phy_num + 1)) {
1977 dev_err(chan2dev(d40c), 1977 dev_err(chan2dev(d40c),
1978 "invalid fixed phy channel %d\n", i); 1978 "invalid fixed phy channel %d\n", i);
1979 return -EINVAL; 1979 return -EINVAL;
1980 } 1980 }
1981 1981
1982 if (d40_alloc_mask_set(&phys[i], is_src, event_line, 1982 if (d40_alloc_mask_set(&phys[i], is_src, event_line,
1983 is_log, first_phy_user)) 1983 is_log, first_phy_user))
1984 goto found_log; 1984 goto found_log;
1985 1985
1986 dev_err(chan2dev(d40c), 1986 dev_err(chan2dev(d40c),
1987 "could not allocate fixed phy channel %d\n", i); 1987 "could not allocate fixed phy channel %d\n", i);
1988 return -EINVAL; 1988 return -EINVAL;
1989 } 1989 }
1990 1990
1991 /* 1991 /*
1992 * Spread logical channels across all available physical channels 1992 * Spread logical channels across all available physical channels
1993 * rather than packing every logical channel onto the first 1993 * rather than packing every logical channel onto the first
1994 * available phy channel. 1994 * available phy channel.
1995 */ 1995 */
1996 if (is_src) { 1996 if (is_src) {
1997 for (i = phy_num; i < phy_num + 2; i++) { 1997 for (i = phy_num; i < phy_num + 2; i++) {
1998 if (d40_alloc_mask_set(&phys[i], is_src, 1998 if (d40_alloc_mask_set(&phys[i], is_src,
1999 event_line, is_log, 1999 event_line, is_log,
2000 first_phy_user)) 2000 first_phy_user))
2001 goto found_log; 2001 goto found_log;
2002 } 2002 }
2003 } else { 2003 } else {
2004 for (i = phy_num + 1; i >= phy_num; i--) { 2004 for (i = phy_num + 1; i >= phy_num; i--) {
2005 if (d40_alloc_mask_set(&phys[i], is_src, 2005 if (d40_alloc_mask_set(&phys[i], is_src,
2006 event_line, is_log, 2006 event_line, is_log,
2007 first_phy_user)) 2007 first_phy_user))
2008 goto found_log; 2008 goto found_log;
2009 } 2009 }
2010 } 2010 }
2011 } 2011 }
2012 return -EINVAL; 2012 return -EINVAL;
2013 2013
2014 found_log: 2014 found_log:
2015 d40c->phy_chan = &phys[i]; 2015 d40c->phy_chan = &phys[i];
2016 d40c->log_num = log_num; 2016 d40c->log_num = log_num;
2017 out: 2017 out:
2018 2018
2019 if (is_log) 2019 if (is_log)
2020 d40c->base->lookup_log_chans[d40c->log_num] = d40c; 2020 d40c->base->lookup_log_chans[d40c->log_num] = d40c;
2021 else 2021 else
2022 d40c->base->lookup_phy_chans[d40c->phy_chan->num] = d40c; 2022 d40c->base->lookup_phy_chans[d40c->phy_chan->num] = d40c;
2023 2023
2024 return 0; 2024 return 0;
2025 2025
2026 } 2026 }
2027 2027
2028 static int d40_config_memcpy(struct d40_chan *d40c) 2028 static int d40_config_memcpy(struct d40_chan *d40c)
2029 { 2029 {
2030 dma_cap_mask_t cap = d40c->chan.device->cap_mask; 2030 dma_cap_mask_t cap = d40c->chan.device->cap_mask;
2031 2031
2032 if (dma_has_cap(DMA_MEMCPY, cap) && !dma_has_cap(DMA_SLAVE, cap)) { 2032 if (dma_has_cap(DMA_MEMCPY, cap) && !dma_has_cap(DMA_SLAVE, cap)) {
2033 d40c->dma_cfg = dma40_memcpy_conf_log; 2033 d40c->dma_cfg = dma40_memcpy_conf_log;
2034 d40c->dma_cfg.dev_type = dma40_memcpy_channels[d40c->chan.chan_id]; 2034 d40c->dma_cfg.dev_type = dma40_memcpy_channels[d40c->chan.chan_id];
2035 2035
2036 d40_log_cfg(&d40c->dma_cfg, 2036 d40_log_cfg(&d40c->dma_cfg,
2037 &d40c->log_def.lcsp1, &d40c->log_def.lcsp3); 2037 &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
2038 2038
2039 } else if (dma_has_cap(DMA_MEMCPY, cap) && 2039 } else if (dma_has_cap(DMA_MEMCPY, cap) &&
2040 dma_has_cap(DMA_SLAVE, cap)) { 2040 dma_has_cap(DMA_SLAVE, cap)) {
2041 d40c->dma_cfg = dma40_memcpy_conf_phy; 2041 d40c->dma_cfg = dma40_memcpy_conf_phy;
2042 2042
2043 /* Generate interrupt at end of transfer or relink. */ 2043 /* Generate interrupt at end of transfer or relink. */
2044 d40c->dst_def_cfg |= BIT(D40_SREG_CFG_TIM_POS); 2044 d40c->dst_def_cfg |= BIT(D40_SREG_CFG_TIM_POS);
2045 2045
2046 /* Generate interrupt on error. */ 2046 /* Generate interrupt on error. */
2047 d40c->src_def_cfg |= BIT(D40_SREG_CFG_EIM_POS); 2047 d40c->src_def_cfg |= BIT(D40_SREG_CFG_EIM_POS);
2048 d40c->dst_def_cfg |= BIT(D40_SREG_CFG_EIM_POS); 2048 d40c->dst_def_cfg |= BIT(D40_SREG_CFG_EIM_POS);
2049 2049
2050 } else { 2050 } else {
2051 chan_err(d40c, "No memcpy\n"); 2051 chan_err(d40c, "No memcpy\n");
2052 return -EINVAL; 2052 return -EINVAL;
2053 } 2053 }
2054 2054
2055 return 0; 2055 return 0;
2056 } 2056 }
2057 2057
2058 static int d40_free_dma(struct d40_chan *d40c) 2058 static int d40_free_dma(struct d40_chan *d40c)
2059 { 2059 {
2060 2060
2061 int res = 0; 2061 int res = 0;
2062 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dev_type); 2062 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dev_type);
2063 struct d40_phy_res *phy = d40c->phy_chan; 2063 struct d40_phy_res *phy = d40c->phy_chan;
2064 bool is_src; 2064 bool is_src;
2065 2065
2066 /* Terminate all queued and active transfers */ 2066 /* Terminate all queued and active transfers */
2067 d40_term_all(d40c); 2067 d40_term_all(d40c);
2068 2068
2069 if (phy == NULL) { 2069 if (phy == NULL) {
2070 chan_err(d40c, "phy == null\n"); 2070 chan_err(d40c, "phy == null\n");
2071 return -EINVAL; 2071 return -EINVAL;
2072 } 2072 }
2073 2073
2074 if (phy->allocated_src == D40_ALLOC_FREE && 2074 if (phy->allocated_src == D40_ALLOC_FREE &&
2075 phy->allocated_dst == D40_ALLOC_FREE) { 2075 phy->allocated_dst == D40_ALLOC_FREE) {
2076 chan_err(d40c, "channel already free\n"); 2076 chan_err(d40c, "channel already free\n");
2077 return -EINVAL; 2077 return -EINVAL;
2078 } 2078 }
2079 2079
2080 if (d40c->dma_cfg.dir == DMA_MEM_TO_DEV || 2080 if (d40c->dma_cfg.dir == DMA_MEM_TO_DEV ||
2081 d40c->dma_cfg.dir == DMA_MEM_TO_MEM) 2081 d40c->dma_cfg.dir == DMA_MEM_TO_MEM)
2082 is_src = false; 2082 is_src = false;
2083 else if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM) 2083 else if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM)
2084 is_src = true; 2084 is_src = true;
2085 else { 2085 else {
2086 chan_err(d40c, "Unknown direction\n"); 2086 chan_err(d40c, "Unknown direction\n");
2087 return -EINVAL; 2087 return -EINVAL;
2088 } 2088 }
2089 2089
2090 pm_runtime_get_sync(d40c->base->dev); 2090 pm_runtime_get_sync(d40c->base->dev);
2091 res = d40_channel_execute_command(d40c, D40_DMA_STOP); 2091 res = d40_channel_execute_command(d40c, D40_DMA_STOP);
2092 if (res) { 2092 if (res) {
2093 chan_err(d40c, "stop failed\n"); 2093 chan_err(d40c, "stop failed\n");
2094 goto out; 2094 goto out;
2095 } 2095 }
2096 2096
2097 d40_alloc_mask_free(phy, is_src, chan_is_logical(d40c) ? event : 0); 2097 d40_alloc_mask_free(phy, is_src, chan_is_logical(d40c) ? event : 0);
2098 2098
2099 if (chan_is_logical(d40c)) 2099 if (chan_is_logical(d40c))
2100 d40c->base->lookup_log_chans[d40c->log_num] = NULL; 2100 d40c->base->lookup_log_chans[d40c->log_num] = NULL;
2101 else 2101 else
2102 d40c->base->lookup_phy_chans[phy->num] = NULL; 2102 d40c->base->lookup_phy_chans[phy->num] = NULL;
2103 2103
2104 if (d40c->busy) { 2104 if (d40c->busy) {
2105 pm_runtime_mark_last_busy(d40c->base->dev); 2105 pm_runtime_mark_last_busy(d40c->base->dev);
2106 pm_runtime_put_autosuspend(d40c->base->dev); 2106 pm_runtime_put_autosuspend(d40c->base->dev);
2107 } 2107 }
2108 2108
2109 d40c->busy = false; 2109 d40c->busy = false;
2110 d40c->phy_chan = NULL; 2110 d40c->phy_chan = NULL;
2111 d40c->configured = false; 2111 d40c->configured = false;
2112 out: 2112 out:
2113 2113
2114 pm_runtime_mark_last_busy(d40c->base->dev); 2114 pm_runtime_mark_last_busy(d40c->base->dev);
2115 pm_runtime_put_autosuspend(d40c->base->dev); 2115 pm_runtime_put_autosuspend(d40c->base->dev);
2116 return res; 2116 return res;
2117 } 2117 }
2118 2118
2119 static bool d40_is_paused(struct d40_chan *d40c) 2119 static bool d40_is_paused(struct d40_chan *d40c)
2120 { 2120 {
2121 void __iomem *chanbase = chan_base(d40c); 2121 void __iomem *chanbase = chan_base(d40c);
2122 bool is_paused = false; 2122 bool is_paused = false;
2123 unsigned long flags; 2123 unsigned long flags;
2124 void __iomem *active_reg; 2124 void __iomem *active_reg;
2125 u32 status; 2125 u32 status;
2126 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dev_type); 2126 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dev_type);
2127 2127
2128 spin_lock_irqsave(&d40c->lock, flags); 2128 spin_lock_irqsave(&d40c->lock, flags);
2129 2129
2130 if (chan_is_physical(d40c)) { 2130 if (chan_is_physical(d40c)) {
2131 if (d40c->phy_chan->num % 2 == 0) 2131 if (d40c->phy_chan->num % 2 == 0)
2132 active_reg = d40c->base->virtbase + D40_DREG_ACTIVE; 2132 active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
2133 else 2133 else
2134 active_reg = d40c->base->virtbase + D40_DREG_ACTIVO; 2134 active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;
2135 2135
2136 status = (readl(active_reg) & 2136 status = (readl(active_reg) &
2137 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >> 2137 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
2138 D40_CHAN_POS(d40c->phy_chan->num); 2138 D40_CHAN_POS(d40c->phy_chan->num);
2139 if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP) 2139 if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
2140 is_paused = true; 2140 is_paused = true;
2141 2141
2142 goto _exit; 2142 goto _exit;
2143 } 2143 }
2144 2144
2145 if (d40c->dma_cfg.dir == DMA_MEM_TO_DEV || 2145 if (d40c->dma_cfg.dir == DMA_MEM_TO_DEV ||
2146 d40c->dma_cfg.dir == DMA_MEM_TO_MEM) { 2146 d40c->dma_cfg.dir == DMA_MEM_TO_MEM) {
2147 status = readl(chanbase + D40_CHAN_REG_SDLNK); 2147 status = readl(chanbase + D40_CHAN_REG_SDLNK);
2148 } else if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM) { 2148 } else if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM) {
2149 status = readl(chanbase + D40_CHAN_REG_SSLNK); 2149 status = readl(chanbase + D40_CHAN_REG_SSLNK);
2150 } else { 2150 } else {
2151 chan_err(d40c, "Unknown direction\n"); 2151 chan_err(d40c, "Unknown direction\n");
2152 goto _exit; 2152 goto _exit;
2153 } 2153 }
2154 2154
2155 status = (status & D40_EVENTLINE_MASK(event)) >> 2155 status = (status & D40_EVENTLINE_MASK(event)) >>
2156 D40_EVENTLINE_POS(event); 2156 D40_EVENTLINE_POS(event);
2157 2157
2158 if (status != D40_DMA_RUN) 2158 if (status != D40_DMA_RUN)
2159 is_paused = true; 2159 is_paused = true;
2160 _exit: 2160 _exit:
2161 spin_unlock_irqrestore(&d40c->lock, flags); 2161 spin_unlock_irqrestore(&d40c->lock, flags);
2162 return is_paused; 2162 return is_paused;
2163 2163
2164 } 2164 }
2165 2165
2166 static u32 stedma40_residue(struct dma_chan *chan) 2166 static u32 stedma40_residue(struct dma_chan *chan)
2167 { 2167 {
2168 struct d40_chan *d40c = 2168 struct d40_chan *d40c =
2169 container_of(chan, struct d40_chan, chan); 2169 container_of(chan, struct d40_chan, chan);
2170 u32 bytes_left; 2170 u32 bytes_left;
2171 unsigned long flags; 2171 unsigned long flags;
2172 2172
2173 spin_lock_irqsave(&d40c->lock, flags); 2173 spin_lock_irqsave(&d40c->lock, flags);
2174 bytes_left = d40_residue(d40c); 2174 bytes_left = d40_residue(d40c);
2175 spin_unlock_irqrestore(&d40c->lock, flags); 2175 spin_unlock_irqrestore(&d40c->lock, flags);
2176 2176
2177 return bytes_left; 2177 return bytes_left;
2178 } 2178 }
2179 2179
2180 static int 2180 static int
2181 d40_prep_sg_log(struct d40_chan *chan, struct d40_desc *desc, 2181 d40_prep_sg_log(struct d40_chan *chan, struct d40_desc *desc,
2182 struct scatterlist *sg_src, struct scatterlist *sg_dst, 2182 struct scatterlist *sg_src, struct scatterlist *sg_dst,
2183 unsigned int sg_len, dma_addr_t src_dev_addr, 2183 unsigned int sg_len, dma_addr_t src_dev_addr,
2184 dma_addr_t dst_dev_addr) 2184 dma_addr_t dst_dev_addr)
2185 { 2185 {
2186 struct stedma40_chan_cfg *cfg = &chan->dma_cfg; 2186 struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
2187 struct stedma40_half_channel_info *src_info = &cfg->src_info; 2187 struct stedma40_half_channel_info *src_info = &cfg->src_info;
2188 struct stedma40_half_channel_info *dst_info = &cfg->dst_info; 2188 struct stedma40_half_channel_info *dst_info = &cfg->dst_info;
2189 int ret; 2189 int ret;
2190 2190
2191 ret = d40_log_sg_to_lli(sg_src, sg_len, 2191 ret = d40_log_sg_to_lli(sg_src, sg_len,
2192 src_dev_addr, 2192 src_dev_addr,
2193 desc->lli_log.src, 2193 desc->lli_log.src,
2194 chan->log_def.lcsp1, 2194 chan->log_def.lcsp1,
2195 src_info->data_width, 2195 src_info->data_width,
2196 dst_info->data_width); 2196 dst_info->data_width);
2197 2197
2198 ret = d40_log_sg_to_lli(sg_dst, sg_len, 2198 ret = d40_log_sg_to_lli(sg_dst, sg_len,
2199 dst_dev_addr, 2199 dst_dev_addr,
2200 desc->lli_log.dst, 2200 desc->lli_log.dst,
2201 chan->log_def.lcsp3, 2201 chan->log_def.lcsp3,
2202 dst_info->data_width, 2202 dst_info->data_width,
2203 src_info->data_width); 2203 src_info->data_width);
2204 2204
2205 return ret < 0 ? ret : 0; 2205 return ret < 0 ? ret : 0;
2206 } 2206 }
2207 2207
2208 static int 2208 static int
2209 d40_prep_sg_phy(struct d40_chan *chan, struct d40_desc *desc, 2209 d40_prep_sg_phy(struct d40_chan *chan, struct d40_desc *desc,
2210 struct scatterlist *sg_src, struct scatterlist *sg_dst, 2210 struct scatterlist *sg_src, struct scatterlist *sg_dst,
2211 unsigned int sg_len, dma_addr_t src_dev_addr, 2211 unsigned int sg_len, dma_addr_t src_dev_addr,
2212 dma_addr_t dst_dev_addr) 2212 dma_addr_t dst_dev_addr)
2213 { 2213 {
2214 struct stedma40_chan_cfg *cfg = &chan->dma_cfg; 2214 struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
2215 struct stedma40_half_channel_info *src_info = &cfg->src_info; 2215 struct stedma40_half_channel_info *src_info = &cfg->src_info;
2216 struct stedma40_half_channel_info *dst_info = &cfg->dst_info; 2216 struct stedma40_half_channel_info *dst_info = &cfg->dst_info;
2217 unsigned long flags = 0; 2217 unsigned long flags = 0;
2218 int ret; 2218 int ret;
2219 2219
2220 if (desc->cyclic) 2220 if (desc->cyclic)
2221 flags |= LLI_CYCLIC | LLI_TERM_INT; 2221 flags |= LLI_CYCLIC | LLI_TERM_INT;
2222 2222
2223 ret = d40_phy_sg_to_lli(sg_src, sg_len, src_dev_addr, 2223 ret = d40_phy_sg_to_lli(sg_src, sg_len, src_dev_addr,
2224 desc->lli_phy.src, 2224 desc->lli_phy.src,
2225 virt_to_phys(desc->lli_phy.src), 2225 virt_to_phys(desc->lli_phy.src),
2226 chan->src_def_cfg, 2226 chan->src_def_cfg,
2227 src_info, dst_info, flags); 2227 src_info, dst_info, flags);
2228 2228
2229 ret = d40_phy_sg_to_lli(sg_dst, sg_len, dst_dev_addr, 2229 ret = d40_phy_sg_to_lli(sg_dst, sg_len, dst_dev_addr,
2230 desc->lli_phy.dst, 2230 desc->lli_phy.dst,
2231 virt_to_phys(desc->lli_phy.dst), 2231 virt_to_phys(desc->lli_phy.dst),
2232 chan->dst_def_cfg, 2232 chan->dst_def_cfg,
2233 dst_info, src_info, flags); 2233 dst_info, src_info, flags);
2234 2234
2235 dma_sync_single_for_device(chan->base->dev, desc->lli_pool.dma_addr, 2235 dma_sync_single_for_device(chan->base->dev, desc->lli_pool.dma_addr,
2236 desc->lli_pool.size, DMA_TO_DEVICE); 2236 desc->lli_pool.size, DMA_TO_DEVICE);
2237 2237
2238 return ret < 0 ? ret : 0; 2238 return ret < 0 ? ret : 0;
2239 } 2239 }
2240 2240
2241 static struct d40_desc * 2241 static struct d40_desc *
2242 d40_prep_desc(struct d40_chan *chan, struct scatterlist *sg, 2242 d40_prep_desc(struct d40_chan *chan, struct scatterlist *sg,
2243 unsigned int sg_len, unsigned long dma_flags) 2243 unsigned int sg_len, unsigned long dma_flags)
2244 { 2244 {
2245 struct stedma40_chan_cfg *cfg = &chan->dma_cfg; 2245 struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
2246 struct d40_desc *desc; 2246 struct d40_desc *desc;
2247 int ret; 2247 int ret;
2248 2248
2249 desc = d40_desc_get(chan); 2249 desc = d40_desc_get(chan);
2250 if (!desc) 2250 if (!desc)
2251 return NULL; 2251 return NULL;
2252 2252
2253 desc->lli_len = d40_sg_2_dmalen(sg, sg_len, cfg->src_info.data_width, 2253 desc->lli_len = d40_sg_2_dmalen(sg, sg_len, cfg->src_info.data_width,
2254 cfg->dst_info.data_width); 2254 cfg->dst_info.data_width);
2255 if (desc->lli_len < 0) { 2255 if (desc->lli_len < 0) {
2256 chan_err(chan, "Unaligned size\n"); 2256 chan_err(chan, "Unaligned size\n");
2257 goto err; 2257 goto err;
2258 } 2258 }
2259 2259
2260 ret = d40_pool_lli_alloc(chan, desc, desc->lli_len); 2260 ret = d40_pool_lli_alloc(chan, desc, desc->lli_len);
2261 if (ret < 0) { 2261 if (ret < 0) {
2262 chan_err(chan, "Could not allocate lli\n"); 2262 chan_err(chan, "Could not allocate lli\n");
2263 goto err; 2263 goto err;
2264 } 2264 }
2265 2265
2266 desc->lli_current = 0; 2266 desc->lli_current = 0;
2267 desc->txd.flags = dma_flags; 2267 desc->txd.flags = dma_flags;
2268 desc->txd.tx_submit = d40_tx_submit; 2268 desc->txd.tx_submit = d40_tx_submit;
2269 2269
2270 dma_async_tx_descriptor_init(&desc->txd, &chan->chan); 2270 dma_async_tx_descriptor_init(&desc->txd, &chan->chan);
2271 2271
2272 return desc; 2272 return desc;
2273 2273
2274 err: 2274 err:
2275 d40_desc_free(chan, desc); 2275 d40_desc_free(chan, desc);
2276 return NULL; 2276 return NULL;
2277 } 2277 }
2278 2278
2279 static struct dma_async_tx_descriptor * 2279 static struct dma_async_tx_descriptor *
2280 d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src, 2280 d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src,
2281 struct scatterlist *sg_dst, unsigned int sg_len, 2281 struct scatterlist *sg_dst, unsigned int sg_len,
2282 enum dma_transfer_direction direction, unsigned long dma_flags) 2282 enum dma_transfer_direction direction, unsigned long dma_flags)
2283 { 2283 {
2284 struct d40_chan *chan = container_of(dchan, struct d40_chan, chan); 2284 struct d40_chan *chan = container_of(dchan, struct d40_chan, chan);
2285 dma_addr_t src_dev_addr = 0; 2285 dma_addr_t src_dev_addr = 0;
2286 dma_addr_t dst_dev_addr = 0; 2286 dma_addr_t dst_dev_addr = 0;
2287 struct d40_desc *desc; 2287 struct d40_desc *desc;
2288 unsigned long flags; 2288 unsigned long flags;
2289 int ret; 2289 int ret;
2290 2290
2291 if (!chan->phy_chan) { 2291 if (!chan->phy_chan) {
2292 chan_err(chan, "Cannot prepare unallocated channel\n"); 2292 chan_err(chan, "Cannot prepare unallocated channel\n");
2293 return NULL; 2293 return NULL;
2294 } 2294 }
2295 2295
2296 spin_lock_irqsave(&chan->lock, flags); 2296 spin_lock_irqsave(&chan->lock, flags);
2297 2297
2298 desc = d40_prep_desc(chan, sg_src, sg_len, dma_flags); 2298 desc = d40_prep_desc(chan, sg_src, sg_len, dma_flags);
2299 if (desc == NULL) 2299 if (desc == NULL)
2300 goto err; 2300 goto err;
2301 2301
2302 if (sg_next(&sg_src[sg_len - 1]) == sg_src) 2302 if (sg_next(&sg_src[sg_len - 1]) == sg_src)
2303 desc->cyclic = true; 2303 desc->cyclic = true;
2304 2304
2305 if (direction == DMA_DEV_TO_MEM) 2305 if (direction == DMA_DEV_TO_MEM)
2306 src_dev_addr = chan->runtime_addr; 2306 src_dev_addr = chan->runtime_addr;
2307 else if (direction == DMA_MEM_TO_DEV) 2307 else if (direction == DMA_MEM_TO_DEV)
2308 dst_dev_addr = chan->runtime_addr; 2308 dst_dev_addr = chan->runtime_addr;
2309 2309
2310 if (chan_is_logical(chan)) 2310 if (chan_is_logical(chan))
2311 ret = d40_prep_sg_log(chan, desc, sg_src, sg_dst, 2311 ret = d40_prep_sg_log(chan, desc, sg_src, sg_dst,
2312 sg_len, src_dev_addr, dst_dev_addr); 2312 sg_len, src_dev_addr, dst_dev_addr);
2313 else 2313 else
2314 ret = d40_prep_sg_phy(chan, desc, sg_src, sg_dst, 2314 ret = d40_prep_sg_phy(chan, desc, sg_src, sg_dst,
2315 sg_len, src_dev_addr, dst_dev_addr); 2315 sg_len, src_dev_addr, dst_dev_addr);
2316 2316
2317 if (ret) { 2317 if (ret) {
2318 chan_err(chan, "Failed to prepare %s sg job: %d\n", 2318 chan_err(chan, "Failed to prepare %s sg job: %d\n",
2319 chan_is_logical(chan) ? "log" : "phy", ret); 2319 chan_is_logical(chan) ? "log" : "phy", ret);
2320 goto err; 2320 goto err;
2321 } 2321 }
2322 2322
2323 /* 2323 /*
2324 * add descriptor to the prepare queue in order to be able 2324 * add descriptor to the prepare queue in order to be able
2325 * to free them later in terminate_all 2325 * to free them later in terminate_all
2326 */ 2326 */
2327 list_add_tail(&desc->node, &chan->prepare_queue); 2327 list_add_tail(&desc->node, &chan->prepare_queue);
2328 2328
2329 spin_unlock_irqrestore(&chan->lock, flags); 2329 spin_unlock_irqrestore(&chan->lock, flags);
2330 2330
2331 return &desc->txd; 2331 return &desc->txd;
2332 2332
2333 err: 2333 err:
2334 if (desc) 2334 if (desc)
2335 d40_desc_free(chan, desc); 2335 d40_desc_free(chan, desc);
2336 spin_unlock_irqrestore(&chan->lock, flags); 2336 spin_unlock_irqrestore(&chan->lock, flags);
2337 return NULL; 2337 return NULL;
2338 } 2338 }
2339 2339
2340 bool stedma40_filter(struct dma_chan *chan, void *data) 2340 bool stedma40_filter(struct dma_chan *chan, void *data)
2341 { 2341 {
2342 struct stedma40_chan_cfg *info = data; 2342 struct stedma40_chan_cfg *info = data;
2343 struct d40_chan *d40c = 2343 struct d40_chan *d40c =
2344 container_of(chan, struct d40_chan, chan); 2344 container_of(chan, struct d40_chan, chan);
2345 int err; 2345 int err;
2346 2346
2347 if (data) { 2347 if (data) {
2348 err = d40_validate_conf(d40c, info); 2348 err = d40_validate_conf(d40c, info);
2349 if (!err) 2349 if (!err)
2350 d40c->dma_cfg = *info; 2350 d40c->dma_cfg = *info;
2351 } else 2351 } else
2352 err = d40_config_memcpy(d40c); 2352 err = d40_config_memcpy(d40c);
2353 2353
2354 if (!err) 2354 if (!err)
2355 d40c->configured = true; 2355 d40c->configured = true;
2356 2356
2357 return err == 0; 2357 return err == 0;
2358 } 2358 }
2359 EXPORT_SYMBOL(stedma40_filter); 2359 EXPORT_SYMBOL(stedma40_filter);
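/*
 * Editor's note: illustrative sketch, not part of this commit. A non-DT
 * client could request a DMA40 channel through stedma40_filter() roughly
 * like this (the cfg values shown are hypothetical):
 *
 *	struct stedma40_chan_cfg cfg = {
 *		.dir = DMA_DEV_TO_MEM,
 *		.dev_type = MY_DEV_TYPE,	hypothetical event line
 *		.mode = STEDMA40_MODE_LOGICAL,
 *	};
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, stedma40_filter, &cfg);
 */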
2360 2360
2361 static void __d40_set_prio_rt(struct d40_chan *d40c, int dev_type, bool src) 2361 static void __d40_set_prio_rt(struct d40_chan *d40c, int dev_type, bool src)
2362 { 2362 {
2363 bool realtime = d40c->dma_cfg.realtime; 2363 bool realtime = d40c->dma_cfg.realtime;
2364 bool highprio = d40c->dma_cfg.high_priority; 2364 bool highprio = d40c->dma_cfg.high_priority;
2365 u32 rtreg; 2365 u32 rtreg;
2366 u32 event = D40_TYPE_TO_EVENT(dev_type); 2366 u32 event = D40_TYPE_TO_EVENT(dev_type);
2367 u32 group = D40_TYPE_TO_GROUP(dev_type); 2367 u32 group = D40_TYPE_TO_GROUP(dev_type);
2368 u32 bit = BIT(event); 2368 u32 bit = BIT(event);
2369 u32 prioreg; 2369 u32 prioreg;
2370 struct d40_gen_dmac *dmac = &d40c->base->gen_dmac; 2370 struct d40_gen_dmac *dmac = &d40c->base->gen_dmac;
2371 2371
2372 rtreg = realtime ? dmac->realtime_en : dmac->realtime_clear; 2372 rtreg = realtime ? dmac->realtime_en : dmac->realtime_clear;
2373 /* 2373 /*
2374 * Due to a hardware bug, in some cases a logical channel triggered by 2374 * Due to a hardware bug, in some cases a logical channel triggered by
2375 * a high priority destination event line can generate extra packet 2375 * a high priority destination event line can generate extra packet
2376 * transactions. 2376 * transactions.
2377 * 2377 *
2378 * The workaround is to not set the high priority level for the 2378 * The workaround is to not set the high priority level for the
2379 * destination event lines that trigger logical channels. 2379 * destination event lines that trigger logical channels.
2380 */ 2380 */
2381 if (!src && chan_is_logical(d40c)) 2381 if (!src && chan_is_logical(d40c))
2382 highprio = false; 2382 highprio = false;
2383 2383
2384 prioreg = highprio ? dmac->high_prio_en : dmac->high_prio_clear; 2384 prioreg = highprio ? dmac->high_prio_en : dmac->high_prio_clear;
2385 2385
2386 /* Destination event lines are stored in the upper halfword */ 2386 /* Destination event lines are stored in the upper halfword */
2387 if (!src) 2387 if (!src)
2388 bit <<= 16; 2388 bit <<= 16;
2389 2389
2390 writel(bit, d40c->base->virtbase + prioreg + group * 4); 2390 writel(bit, d40c->base->virtbase + prioreg + group * 4);
2391 writel(bit, d40c->base->virtbase + rtreg + group * 4); 2391 writel(bit, d40c->base->virtbase + rtreg + group * 4);
2392 } 2392 }
2393 2393
2394 static void d40_set_prio_realtime(struct d40_chan *d40c) 2394 static void d40_set_prio_realtime(struct d40_chan *d40c)
2395 { 2395 {
2396 if (d40c->base->rev < 3) 2396 if (d40c->base->rev < 3)
2397 return; 2397 return;
2398 2398
2399 if ((d40c->dma_cfg.dir == DMA_DEV_TO_MEM) || 2399 if ((d40c->dma_cfg.dir == DMA_DEV_TO_MEM) ||
2400 (d40c->dma_cfg.dir == DMA_DEV_TO_DEV)) 2400 (d40c->dma_cfg.dir == DMA_DEV_TO_DEV))
2401 __d40_set_prio_rt(d40c, d40c->dma_cfg.dev_type, true); 2401 __d40_set_prio_rt(d40c, d40c->dma_cfg.dev_type, true);
2402 2402
2403 if ((d40c->dma_cfg.dir == DMA_MEM_TO_DEV) || 2403 if ((d40c->dma_cfg.dir == DMA_MEM_TO_DEV) ||
2404 (d40c->dma_cfg.dir == DMA_DEV_TO_DEV)) 2404 (d40c->dma_cfg.dir == DMA_DEV_TO_DEV))
2405 __d40_set_prio_rt(d40c, d40c->dma_cfg.dev_type, false); 2405 __d40_set_prio_rt(d40c, d40c->dma_cfg.dev_type, false);
2406 } 2406 }
2407 2407
2408 #define D40_DT_FLAGS_MODE(flags) ((flags >> 0) & 0x1) 2408 #define D40_DT_FLAGS_MODE(flags) ((flags >> 0) & 0x1)
2409 #define D40_DT_FLAGS_DIR(flags) ((flags >> 1) & 0x1) 2409 #define D40_DT_FLAGS_DIR(flags) ((flags >> 1) & 0x1)
2410 #define D40_DT_FLAGS_BIG_ENDIAN(flags) ((flags >> 2) & 0x1) 2410 #define D40_DT_FLAGS_BIG_ENDIAN(flags) ((flags >> 2) & 0x1)
2411 #define D40_DT_FLAGS_FIXED_CHAN(flags) ((flags >> 3) & 0x1) 2411 #define D40_DT_FLAGS_FIXED_CHAN(flags) ((flags >> 3) & 0x1)
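/*
 * Editor's note: added for clarity. As decoded by d40_xlate() below, the DT
 * dma-spec cells are <dev_type phy_channel flags>, with the flags bits laid
 * out as: bit 0 = mode (0: logical, 1: physical), bit 1 = direction
 * (0: mem-to-dev, 1: dev-to-mem), bit 2 = big endian, bit 3 = use the fixed
 * physical channel given in the second cell. For example, flags = 0x2
 * selects a logical, little-endian, dev-to-mem channel on any free physical
 * channel.
 */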
2412 2412
2413 static struct dma_chan *d40_xlate(struct of_phandle_args *dma_spec, 2413 static struct dma_chan *d40_xlate(struct of_phandle_args *dma_spec,
2414 struct of_dma *ofdma) 2414 struct of_dma *ofdma)
2415 { 2415 {
2416 struct stedma40_chan_cfg cfg; 2416 struct stedma40_chan_cfg cfg;
2417 dma_cap_mask_t cap; 2417 dma_cap_mask_t cap;
2418 u32 flags; 2418 u32 flags;
2419 2419
2420 memset(&cfg, 0, sizeof(struct stedma40_chan_cfg)); 2420 memset(&cfg, 0, sizeof(struct stedma40_chan_cfg));
2421 2421
2422 dma_cap_zero(cap); 2422 dma_cap_zero(cap);
2423 dma_cap_set(DMA_SLAVE, cap); 2423 dma_cap_set(DMA_SLAVE, cap);
2424 2424
2425 cfg.dev_type = dma_spec->args[0]; 2425 cfg.dev_type = dma_spec->args[0];
2426 flags = dma_spec->args[2]; 2426 flags = dma_spec->args[2];
2427 2427
2428 switch (D40_DT_FLAGS_MODE(flags)) { 2428 switch (D40_DT_FLAGS_MODE(flags)) {
2429 case 0: cfg.mode = STEDMA40_MODE_LOGICAL; break; 2429 case 0: cfg.mode = STEDMA40_MODE_LOGICAL; break;
2430 case 1: cfg.mode = STEDMA40_MODE_PHYSICAL; break; 2430 case 1: cfg.mode = STEDMA40_MODE_PHYSICAL; break;
2431 } 2431 }
2432 2432
2433 switch (D40_DT_FLAGS_DIR(flags)) { 2433 switch (D40_DT_FLAGS_DIR(flags)) {
2434 case 0: 2434 case 0:
2435 cfg.dir = DMA_MEM_TO_DEV; 2435 cfg.dir = DMA_MEM_TO_DEV;
2436 cfg.dst_info.big_endian = D40_DT_FLAGS_BIG_ENDIAN(flags); 2436 cfg.dst_info.big_endian = D40_DT_FLAGS_BIG_ENDIAN(flags);
2437 break; 2437 break;
2438 case 1: 2438 case 1:
2439 cfg.dir = DMA_DEV_TO_MEM; 2439 cfg.dir = DMA_DEV_TO_MEM;
2440 cfg.src_info.big_endian = D40_DT_FLAGS_BIG_ENDIAN(flags); 2440 cfg.src_info.big_endian = D40_DT_FLAGS_BIG_ENDIAN(flags);
2441 break; 2441 break;
2442 } 2442 }
2443 2443
2444 if (D40_DT_FLAGS_FIXED_CHAN(flags)) { 2444 if (D40_DT_FLAGS_FIXED_CHAN(flags)) {
2445 cfg.phy_channel = dma_spec->args[1]; 2445 cfg.phy_channel = dma_spec->args[1];
2446 cfg.use_fixed_channel = true; 2446 cfg.use_fixed_channel = true;
2447 } 2447 }
2448 2448
2449 return dma_request_channel(cap, stedma40_filter, &cfg); 2449 return dma_request_channel(cap, stedma40_filter, &cfg);
2450 } 2450 }
2451 2451
2452 /* DMA ENGINE functions */ 2452 /* DMA ENGINE functions */
2453 static int d40_alloc_chan_resources(struct dma_chan *chan) 2453 static int d40_alloc_chan_resources(struct dma_chan *chan)
2454 { 2454 {
2455 int err; 2455 int err;
2456 unsigned long flags; 2456 unsigned long flags;
2457 struct d40_chan *d40c = 2457 struct d40_chan *d40c =
2458 container_of(chan, struct d40_chan, chan); 2458 container_of(chan, struct d40_chan, chan);
2459 bool is_free_phy; 2459 bool is_free_phy;
2460 spin_lock_irqsave(&d40c->lock, flags); 2460 spin_lock_irqsave(&d40c->lock, flags);
2461 2461
2462 dma_cookie_init(chan); 2462 dma_cookie_init(chan);
2463 2463
2464 /* If no dma configuration is set use default configuration (memcpy) */ 2464 /* If no dma configuration is set use default configuration (memcpy) */
2465 if (!d40c->configured) { 2465 if (!d40c->configured) {
2466 err = d40_config_memcpy(d40c); 2466 err = d40_config_memcpy(d40c);
2467 if (err) { 2467 if (err) {
2468 chan_err(d40c, "Failed to configure memcpy channel\n"); 2468 chan_err(d40c, "Failed to configure memcpy channel\n");
2469 goto fail; 2469 goto fail;
2470 } 2470 }
2471 } 2471 }
2472 2472
2473 err = d40_allocate_channel(d40c, &is_free_phy); 2473 err = d40_allocate_channel(d40c, &is_free_phy);
2474 if (err) { 2474 if (err) {
2475 chan_err(d40c, "Failed to allocate channel\n"); 2475 chan_err(d40c, "Failed to allocate channel\n");
2476 d40c->configured = false; 2476 d40c->configured = false;
2477 goto fail; 2477 goto fail;
2478 } 2478 }
2479 2479
2480 pm_runtime_get_sync(d40c->base->dev); 2480 pm_runtime_get_sync(d40c->base->dev);
2481 2481
2482 d40_set_prio_realtime(d40c); 2482 d40_set_prio_realtime(d40c);
2483 2483
2484 if (chan_is_logical(d40c)) { 2484 if (chan_is_logical(d40c)) {
2485 if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM) 2485 if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM)
2486 d40c->lcpa = d40c->base->lcpa_base + 2486 d40c->lcpa = d40c->base->lcpa_base +
2487 d40c->dma_cfg.dev_type * D40_LCPA_CHAN_SIZE; 2487 d40c->dma_cfg.dev_type * D40_LCPA_CHAN_SIZE;
2488 else 2488 else
2489 d40c->lcpa = d40c->base->lcpa_base + 2489 d40c->lcpa = d40c->base->lcpa_base +
2490 d40c->dma_cfg.dev_type * 2490 d40c->dma_cfg.dev_type *
2491 D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA; 2491 D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA;
2492 2492
2493 /* Unmask the Global Interrupt Mask. */ 2493 /* Unmask the Global Interrupt Mask. */
2494 d40c->src_def_cfg |= BIT(D40_SREG_CFG_LOG_GIM_POS); 2494 d40c->src_def_cfg |= BIT(D40_SREG_CFG_LOG_GIM_POS);
2495 d40c->dst_def_cfg |= BIT(D40_SREG_CFG_LOG_GIM_POS); 2495 d40c->dst_def_cfg |= BIT(D40_SREG_CFG_LOG_GIM_POS);
2496 } 2496 }
2497 2497
2498 dev_dbg(chan2dev(d40c), "allocated %s channel (phy %d%s)\n", 2498 dev_dbg(chan2dev(d40c), "allocated %s channel (phy %d%s)\n",
2499 chan_is_logical(d40c) ? "logical" : "physical", 2499 chan_is_logical(d40c) ? "logical" : "physical",
2500 d40c->phy_chan->num, 2500 d40c->phy_chan->num,
2501 d40c->dma_cfg.use_fixed_channel ? ", fixed" : ""); 2501 d40c->dma_cfg.use_fixed_channel ? ", fixed" : "");
2502 2502
2503 2503
2504 /* 2504 /*
2505 * Only write channel configuration to the DMA if the physical 2505 * Only write channel configuration to the DMA if the physical
2506 * resource is free. In case of multiple logical channels 2506 * resource is free. In case of multiple logical channels
2507 * on the same physical resource, only the first write is necessary. 2507 * on the same physical resource, only the first write is necessary.
2508 */ 2508 */
2509 if (is_free_phy) 2509 if (is_free_phy)
2510 d40_config_write(d40c); 2510 d40_config_write(d40c);
2511 fail: 2511 fail:
2512 pm_runtime_mark_last_busy(d40c->base->dev); 2512 pm_runtime_mark_last_busy(d40c->base->dev);
2513 pm_runtime_put_autosuspend(d40c->base->dev); 2513 pm_runtime_put_autosuspend(d40c->base->dev);
2514 spin_unlock_irqrestore(&d40c->lock, flags); 2514 spin_unlock_irqrestore(&d40c->lock, flags);
2515 return err; 2515 return err;
2516 } 2516 }
2517 2517
2518 static void d40_free_chan_resources(struct dma_chan *chan) 2518 static void d40_free_chan_resources(struct dma_chan *chan)
2519 { 2519 {
2520 struct d40_chan *d40c = 2520 struct d40_chan *d40c =
2521 container_of(chan, struct d40_chan, chan); 2521 container_of(chan, struct d40_chan, chan);
2522 int err; 2522 int err;
2523 unsigned long flags; 2523 unsigned long flags;
2524 2524
2525 if (d40c->phy_chan == NULL) { 2525 if (d40c->phy_chan == NULL) {
2526 chan_err(d40c, "Cannot free unallocated channel\n"); 2526 chan_err(d40c, "Cannot free unallocated channel\n");
2527 return; 2527 return;
2528 } 2528 }
2529 2529
2530 spin_lock_irqsave(&d40c->lock, flags); 2530 spin_lock_irqsave(&d40c->lock, flags);
2531 2531
2532 err = d40_free_dma(d40c); 2532 err = d40_free_dma(d40c);
2533 2533
2534 if (err) 2534 if (err)
2535 chan_err(d40c, "Failed to free channel\n"); 2535 chan_err(d40c, "Failed to free channel\n");
2536 spin_unlock_irqrestore(&d40c->lock, flags); 2536 spin_unlock_irqrestore(&d40c->lock, flags);
2537 } 2537 }
2538 2538
2539 static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan, 2539 static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
2540 dma_addr_t dst, 2540 dma_addr_t dst,
2541 dma_addr_t src, 2541 dma_addr_t src,
2542 size_t size, 2542 size_t size,
2543 unsigned long dma_flags) 2543 unsigned long dma_flags)
2544 { 2544 {
2545 struct scatterlist dst_sg; 2545 struct scatterlist dst_sg;
2546 struct scatterlist src_sg; 2546 struct scatterlist src_sg;
2547 2547
2548 sg_init_table(&dst_sg, 1); 2548 sg_init_table(&dst_sg, 1);
2549 sg_init_table(&src_sg, 1); 2549 sg_init_table(&src_sg, 1);
2550 2550
2551 sg_dma_address(&dst_sg) = dst; 2551 sg_dma_address(&dst_sg) = dst;
2552 sg_dma_address(&src_sg) = src; 2552 sg_dma_address(&src_sg) = src;
2553 2553
2554 sg_dma_len(&dst_sg) = size; 2554 sg_dma_len(&dst_sg) = size;
2555 sg_dma_len(&src_sg) = size; 2555 sg_dma_len(&src_sg) = size;
2556 2556
2557 return d40_prep_sg(chan, &src_sg, &dst_sg, 1, DMA_NONE, dma_flags); 2557 return d40_prep_sg(chan, &src_sg, &dst_sg, 1, DMA_NONE, dma_flags);
2558 } 2558 }
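/*
 * Illustrative only: a minimal sketch of a dmaengine client driving the
 * memcpy path above. The buffers are assumed to be DMA-mapped already and
 * error handling is reduced to the bare minimum.
 */
static dma_cookie_t example_issue_memcpy(struct dma_chan *chan,
					 dma_addr_t dst, dma_addr_t src,
					 size_t len)
{
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	/* Ends up in d40_prep_memcpy() when chan belongs to this driver. */
	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
						   DMA_PREP_INTERRUPT);
	if (!tx)
		return -EBUSY;

	cookie = dmaengine_submit(tx);
	dma_async_issue_pending(chan);

	return cookie;
}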
2559 2559
2560 static struct dma_async_tx_descriptor * 2560 static struct dma_async_tx_descriptor *
2561 d40_prep_memcpy_sg(struct dma_chan *chan, 2561 d40_prep_memcpy_sg(struct dma_chan *chan,
2562 struct scatterlist *dst_sg, unsigned int dst_nents, 2562 struct scatterlist *dst_sg, unsigned int dst_nents,
2563 struct scatterlist *src_sg, unsigned int src_nents, 2563 struct scatterlist *src_sg, unsigned int src_nents,
2564 unsigned long dma_flags) 2564 unsigned long dma_flags)
2565 { 2565 {
2566 if (dst_nents != src_nents) 2566 if (dst_nents != src_nents)
2567 return NULL; 2567 return NULL;
2568 2568
2569 return d40_prep_sg(chan, src_sg, dst_sg, src_nents, DMA_NONE, dma_flags); 2569 return d40_prep_sg(chan, src_sg, dst_sg, src_nents, DMA_NONE, dma_flags);
2570 } 2570 }
2571 2571
2572 static struct dma_async_tx_descriptor * 2572 static struct dma_async_tx_descriptor *
2573 d40_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, 2573 d40_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
2574 unsigned int sg_len, enum dma_transfer_direction direction, 2574 unsigned int sg_len, enum dma_transfer_direction direction,
2575 unsigned long dma_flags, void *context) 2575 unsigned long dma_flags, void *context)
2576 { 2576 {
2577 if (!is_slave_direction(direction)) 2577 if (!is_slave_direction(direction))
2578 return NULL; 2578 return NULL;
2579 2579
2580 return d40_prep_sg(chan, sgl, sgl, sg_len, direction, dma_flags); 2580 return d40_prep_sg(chan, sgl, sgl, sg_len, direction, dma_flags);
2581 } 2581 }
2582 2582
2583 static struct dma_async_tx_descriptor * 2583 static struct dma_async_tx_descriptor *
2584 dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr, 2584 dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
2585 size_t buf_len, size_t period_len, 2585 size_t buf_len, size_t period_len,
2586 enum dma_transfer_direction direction, unsigned long flags, 2586 enum dma_transfer_direction direction, unsigned long flags,
2587 void *context) 2587 void *context)
2588 { 2588 {
2589 unsigned int periods = buf_len / period_len; 2589 unsigned int periods = buf_len / period_len;
2590 struct dma_async_tx_descriptor *txd; 2590 struct dma_async_tx_descriptor *txd;
2591 struct scatterlist *sg; 2591 struct scatterlist *sg;
2592 int i; 2592 int i;
2593 2593
2594 sg = kcalloc(periods + 1, sizeof(struct scatterlist), GFP_NOWAIT); 2594 sg = kcalloc(periods + 1, sizeof(struct scatterlist), GFP_NOWAIT);
2595 if (!sg) 2595 if (!sg)
2596 return NULL; 2596 return NULL;
2597 2597
2598 for (i = 0; i < periods; i++) { 2598 for (i = 0; i < periods; i++) {
2599 sg_dma_address(&sg[i]) = dma_addr; 2599 sg_dma_address(&sg[i]) = dma_addr;
2600 sg_dma_len(&sg[i]) = period_len; 2600 sg_dma_len(&sg[i]) = period_len;
2601 dma_addr += period_len; 2601 dma_addr += period_len;
2602 } 2602 }
2603 2603
2604 sg[periods].offset = 0; 2604 sg[periods].offset = 0;
2605 sg_dma_len(&sg[periods]) = 0; 2605 sg_dma_len(&sg[periods]) = 0;
2606 sg[periods].page_link = 2606 sg[periods].page_link =
2607 ((unsigned long)sg | 0x01) & ~0x02; 2607 ((unsigned long)sg | 0x01) & ~0x02;
2608 2608
2609 txd = d40_prep_sg(chan, sg, sg, periods, direction, 2609 txd = d40_prep_sg(chan, sg, sg, periods, direction,
2610 DMA_PREP_INTERRUPT); 2610 DMA_PREP_INTERRUPT);
2611 2611
2612 kfree(sg); 2612 kfree(sg);
2613 2613
2614 return txd; 2614 return txd;
2615 } 2615 }
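/*
 * Illustrative only: a sketch of setting up a cyclic transfer (e.g. an audio
 * ring buffer) against the helper above through the generic wrapper; buf,
 * buf_len and period_len are placeholders supplied by the client.
 */
static int example_start_cyclic(struct dma_chan *chan, dma_addr_t buf,
				size_t buf_len, size_t period_len)
{
	struct dma_async_tx_descriptor *txd;

	/* Resolves to dma40_prep_dma_cyclic() for a DMA40 channel. */
	txd = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
					DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
	if (!txd)
		return -EINVAL;

	/* txd->callback may be set here to get a notification per period. */
	dmaengine_submit(txd);
	dma_async_issue_pending(chan);

	return 0;
}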
2616 2616
2617 static enum dma_status d40_tx_status(struct dma_chan *chan, 2617 static enum dma_status d40_tx_status(struct dma_chan *chan,
2618 dma_cookie_t cookie, 2618 dma_cookie_t cookie,
2619 struct dma_tx_state *txstate) 2619 struct dma_tx_state *txstate)
2620 { 2620 {
2621 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); 2621 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2622 enum dma_status ret; 2622 enum dma_status ret;
2623 2623
2624 if (d40c->phy_chan == NULL) { 2624 if (d40c->phy_chan == NULL) {
2625 chan_err(d40c, "Cannot read status of unallocated channel\n"); 2625 chan_err(d40c, "Cannot read status of unallocated channel\n");
2626 return -EINVAL; 2626 return -EINVAL;
2627 } 2627 }
2628 2628
2629 ret = dma_cookie_status(chan, cookie, txstate); 2629 ret = dma_cookie_status(chan, cookie, txstate);
2630 if (ret != DMA_SUCCESS) 2630 if (ret != DMA_COMPLETE)
2631 dma_set_residue(txstate, stedma40_residue(chan)); 2631 dma_set_residue(txstate, stedma40_residue(chan));
2632 2632
2633 if (d40_is_paused(d40c)) 2633 if (d40_is_paused(d40c))
2634 ret = DMA_PAUSED; 2634 ret = DMA_PAUSED;
2635 2635
2636 return ret; 2636 return ret;
2637 } 2637 }
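/*
 * Illustrative only: how a client polls the status path above. After the
 * dma_complete rework a finished descriptor reports DMA_COMPLETE (formerly
 * DMA_SUCCESS), while anything still in flight gets a residue filled in.
 */
static bool example_is_done(struct dma_chan *chan, dma_cookie_t cookie)
{
	struct dma_tx_state state;
	enum dma_status status;

	status = dmaengine_tx_status(chan, cookie, &state); /* d40_tx_status() */
	if (status == DMA_COMPLETE)
		return true;

	pr_debug("cookie %d: %u bytes left\n", cookie, state.residue);
	return false;
}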
2638 2638
2639 static void d40_issue_pending(struct dma_chan *chan) 2639 static void d40_issue_pending(struct dma_chan *chan)
2640 { 2640 {
2641 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); 2641 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2642 unsigned long flags; 2642 unsigned long flags;
2643 2643
2644 if (d40c->phy_chan == NULL) { 2644 if (d40c->phy_chan == NULL) {
2645 chan_err(d40c, "Channel is not allocated!\n"); 2645 chan_err(d40c, "Channel is not allocated!\n");
2646 return; 2646 return;
2647 } 2647 }
2648 2648
2649 spin_lock_irqsave(&d40c->lock, flags); 2649 spin_lock_irqsave(&d40c->lock, flags);
2650 2650
2651 list_splice_tail_init(&d40c->pending_queue, &d40c->queue); 2651 list_splice_tail_init(&d40c->pending_queue, &d40c->queue);
2652 2652
2653 /* Busy means that queued jobs are already being processed */ 2653 /* Busy means that queued jobs are already being processed */
2654 if (!d40c->busy) 2654 if (!d40c->busy)
2655 (void) d40_queue_start(d40c); 2655 (void) d40_queue_start(d40c);
2656 2656
2657 spin_unlock_irqrestore(&d40c->lock, flags); 2657 spin_unlock_irqrestore(&d40c->lock, flags);
2658 } 2658 }
2659 2659
2660 static void d40_terminate_all(struct dma_chan *chan) 2660 static void d40_terminate_all(struct dma_chan *chan)
2661 { 2661 {
2662 unsigned long flags; 2662 unsigned long flags;
2663 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); 2663 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2664 int ret; 2664 int ret;
2665 2665
2666 spin_lock_irqsave(&d40c->lock, flags); 2666 spin_lock_irqsave(&d40c->lock, flags);
2667 2667
2668 pm_runtime_get_sync(d40c->base->dev); 2668 pm_runtime_get_sync(d40c->base->dev);
2669 ret = d40_channel_execute_command(d40c, D40_DMA_STOP); 2669 ret = d40_channel_execute_command(d40c, D40_DMA_STOP);
2670 if (ret) 2670 if (ret)
2671 chan_err(d40c, "Failed to stop channel\n"); 2671 chan_err(d40c, "Failed to stop channel\n");
2672 2672
2673 d40_term_all(d40c); 2673 d40_term_all(d40c);
2674 pm_runtime_mark_last_busy(d40c->base->dev); 2674 pm_runtime_mark_last_busy(d40c->base->dev);
2675 pm_runtime_put_autosuspend(d40c->base->dev); 2675 pm_runtime_put_autosuspend(d40c->base->dev);
2676 if (d40c->busy) { 2676 if (d40c->busy) {
2677 pm_runtime_mark_last_busy(d40c->base->dev); 2677 pm_runtime_mark_last_busy(d40c->base->dev);
2678 pm_runtime_put_autosuspend(d40c->base->dev); 2678 pm_runtime_put_autosuspend(d40c->base->dev);
2679 } 2679 }
2680 d40c->busy = false; 2680 d40c->busy = false;
2681 2681
2682 spin_unlock_irqrestore(&d40c->lock, flags); 2682 spin_unlock_irqrestore(&d40c->lock, flags);
2683 } 2683 }
2684 2684
2685 static int 2685 static int
2686 dma40_config_to_halfchannel(struct d40_chan *d40c, 2686 dma40_config_to_halfchannel(struct d40_chan *d40c,
2687 struct stedma40_half_channel_info *info, 2687 struct stedma40_half_channel_info *info,
2688 u32 maxburst) 2688 u32 maxburst)
2689 { 2689 {
2690 int psize; 2690 int psize;
2691 2691
2692 if (chan_is_logical(d40c)) { 2692 if (chan_is_logical(d40c)) {
2693 if (maxburst >= 16) 2693 if (maxburst >= 16)
2694 psize = STEDMA40_PSIZE_LOG_16; 2694 psize = STEDMA40_PSIZE_LOG_16;
2695 else if (maxburst >= 8) 2695 else if (maxburst >= 8)
2696 psize = STEDMA40_PSIZE_LOG_8; 2696 psize = STEDMA40_PSIZE_LOG_8;
2697 else if (maxburst >= 4) 2697 else if (maxburst >= 4)
2698 psize = STEDMA40_PSIZE_LOG_4; 2698 psize = STEDMA40_PSIZE_LOG_4;
2699 else 2699 else
2700 psize = STEDMA40_PSIZE_LOG_1; 2700 psize = STEDMA40_PSIZE_LOG_1;
2701 } else { 2701 } else {
2702 if (maxburst >= 16) 2702 if (maxburst >= 16)
2703 psize = STEDMA40_PSIZE_PHY_16; 2703 psize = STEDMA40_PSIZE_PHY_16;
2704 else if (maxburst >= 8) 2704 else if (maxburst >= 8)
2705 psize = STEDMA40_PSIZE_PHY_8; 2705 psize = STEDMA40_PSIZE_PHY_8;
2706 else if (maxburst >= 4) 2706 else if (maxburst >= 4)
2707 psize = STEDMA40_PSIZE_PHY_4; 2707 psize = STEDMA40_PSIZE_PHY_4;
2708 else 2708 else
2709 psize = STEDMA40_PSIZE_PHY_1; 2709 psize = STEDMA40_PSIZE_PHY_1;
2710 } 2710 }
2711 2711
2712 info->psize = psize; 2712 info->psize = psize;
2713 info->flow_ctrl = STEDMA40_NO_FLOW_CTRL; 2713 info->flow_ctrl = STEDMA40_NO_FLOW_CTRL;
2714 2714
2715 return 0; 2715 return 0;
2716 } 2716 }
2717 2717
2718 /* Runtime reconfiguration extension */ 2718 /* Runtime reconfiguration extension */
2719 static int d40_set_runtime_config(struct dma_chan *chan, 2719 static int d40_set_runtime_config(struct dma_chan *chan,
2720 struct dma_slave_config *config) 2720 struct dma_slave_config *config)
2721 { 2721 {
2722 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); 2722 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2723 struct stedma40_chan_cfg *cfg = &d40c->dma_cfg; 2723 struct stedma40_chan_cfg *cfg = &d40c->dma_cfg;
2724 enum dma_slave_buswidth src_addr_width, dst_addr_width; 2724 enum dma_slave_buswidth src_addr_width, dst_addr_width;
2725 dma_addr_t config_addr; 2725 dma_addr_t config_addr;
2726 u32 src_maxburst, dst_maxburst; 2726 u32 src_maxburst, dst_maxburst;
2727 int ret; 2727 int ret;
2728 2728
2729 src_addr_width = config->src_addr_width; 2729 src_addr_width = config->src_addr_width;
2730 src_maxburst = config->src_maxburst; 2730 src_maxburst = config->src_maxburst;
2731 dst_addr_width = config->dst_addr_width; 2731 dst_addr_width = config->dst_addr_width;
2732 dst_maxburst = config->dst_maxburst; 2732 dst_maxburst = config->dst_maxburst;
2733 2733
2734 if (config->direction == DMA_DEV_TO_MEM) { 2734 if (config->direction == DMA_DEV_TO_MEM) {
2735 config_addr = config->src_addr; 2735 config_addr = config->src_addr;
2736 2736
2737 if (cfg->dir != DMA_DEV_TO_MEM) 2737 if (cfg->dir != DMA_DEV_TO_MEM)
2738 dev_dbg(d40c->base->dev, 2738 dev_dbg(d40c->base->dev,
2739 "channel was not configured for peripheral " 2739 "channel was not configured for peripheral "
2740 "to memory transfer (%d), overriding\n", 2740 "to memory transfer (%d), overriding\n",
2741 cfg->dir); 2741 cfg->dir);
2742 cfg->dir = DMA_DEV_TO_MEM; 2742 cfg->dir = DMA_DEV_TO_MEM;
2743 2743
2744 /* Configure the memory side */ 2744 /* Configure the memory side */
2745 if (dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) 2745 if (dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
2746 dst_addr_width = src_addr_width; 2746 dst_addr_width = src_addr_width;
2747 if (dst_maxburst == 0) 2747 if (dst_maxburst == 0)
2748 dst_maxburst = src_maxburst; 2748 dst_maxburst = src_maxburst;
2749 2749
2750 } else if (config->direction == DMA_MEM_TO_DEV) { 2750 } else if (config->direction == DMA_MEM_TO_DEV) {
2751 config_addr = config->dst_addr; 2751 config_addr = config->dst_addr;
2752 2752
2753 if (cfg->dir != DMA_MEM_TO_DEV) 2753 if (cfg->dir != DMA_MEM_TO_DEV)
2754 dev_dbg(d40c->base->dev, 2754 dev_dbg(d40c->base->dev,
2755 "channel was not configured for memory " 2755 "channel was not configured for memory "
2756 "to peripheral transfer (%d), overriding\n", 2756 "to peripheral transfer (%d), overriding\n",
2757 cfg->dir); 2757 cfg->dir);
2758 cfg->dir = DMA_MEM_TO_DEV; 2758 cfg->dir = DMA_MEM_TO_DEV;
2759 2759
2760 /* Configure the memory side */ 2760 /* Configure the memory side */
2761 if (src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) 2761 if (src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
2762 src_addr_width = dst_addr_width; 2762 src_addr_width = dst_addr_width;
2763 if (src_maxburst == 0) 2763 if (src_maxburst == 0)
2764 src_maxburst = dst_maxburst; 2764 src_maxburst = dst_maxburst;
2765 } else { 2765 } else {
2766 dev_err(d40c->base->dev, 2766 dev_err(d40c->base->dev,
2767 "unrecognized channel direction %d\n", 2767 "unrecognized channel direction %d\n",
2768 config->direction); 2768 config->direction);
2769 return -EINVAL; 2769 return -EINVAL;
2770 } 2770 }
2771 2771
2772 if (config_addr <= 0) { 2772 if (config_addr <= 0) {
2773 dev_err(d40c->base->dev, "no address supplied\n"); 2773 dev_err(d40c->base->dev, "no address supplied\n");
2774 return -EINVAL; 2774 return -EINVAL;
2775 } 2775 }
2776 2776
2777 if (src_maxburst * src_addr_width != dst_maxburst * dst_addr_width) { 2777 if (src_maxburst * src_addr_width != dst_maxburst * dst_addr_width) {
2778 dev_err(d40c->base->dev, 2778 dev_err(d40c->base->dev,
2779 "src/dst width/maxburst mismatch: %d*%d != %d*%d\n", 2779 "src/dst width/maxburst mismatch: %d*%d != %d*%d\n",
2780 src_maxburst, 2780 src_maxburst,
2781 src_addr_width, 2781 src_addr_width,
2782 dst_maxburst, 2782 dst_maxburst,
2783 dst_addr_width); 2783 dst_addr_width);
2784 return -EINVAL; 2784 return -EINVAL;
2785 } 2785 }
2786 2786
2787 if (src_maxburst > 16) { 2787 if (src_maxburst > 16) {
2788 src_maxburst = 16; 2788 src_maxburst = 16;
2789 dst_maxburst = src_maxburst * src_addr_width / dst_addr_width; 2789 dst_maxburst = src_maxburst * src_addr_width / dst_addr_width;
2790 } else if (dst_maxburst > 16) { 2790 } else if (dst_maxburst > 16) {
2791 dst_maxburst = 16; 2791 dst_maxburst = 16;
2792 src_maxburst = dst_maxburst * dst_addr_width / src_addr_width; 2792 src_maxburst = dst_maxburst * dst_addr_width / src_addr_width;
2793 } 2793 }
2794 2794
2795 /* Only valid widths are: 1, 2, 4 and 8. */ 2795 /* Only valid widths are: 1, 2, 4 and 8. */
2796 if (src_addr_width <= DMA_SLAVE_BUSWIDTH_UNDEFINED || 2796 if (src_addr_width <= DMA_SLAVE_BUSWIDTH_UNDEFINED ||
2797 src_addr_width > DMA_SLAVE_BUSWIDTH_8_BYTES || 2797 src_addr_width > DMA_SLAVE_BUSWIDTH_8_BYTES ||
2798 dst_addr_width <= DMA_SLAVE_BUSWIDTH_UNDEFINED || 2798 dst_addr_width <= DMA_SLAVE_BUSWIDTH_UNDEFINED ||
2799 dst_addr_width > DMA_SLAVE_BUSWIDTH_8_BYTES || 2799 dst_addr_width > DMA_SLAVE_BUSWIDTH_8_BYTES ||
2800 !is_power_of_2(src_addr_width) || 2800 !is_power_of_2(src_addr_width) ||
2801 !is_power_of_2(dst_addr_width)) 2801 !is_power_of_2(dst_addr_width))
2802 return -EINVAL; 2802 return -EINVAL;
2803 2803
2804 cfg->src_info.data_width = src_addr_width; 2804 cfg->src_info.data_width = src_addr_width;
2805 cfg->dst_info.data_width = dst_addr_width; 2805 cfg->dst_info.data_width = dst_addr_width;
2806 2806
2807 ret = dma40_config_to_halfchannel(d40c, &cfg->src_info, 2807 ret = dma40_config_to_halfchannel(d40c, &cfg->src_info,
2808 src_maxburst); 2808 src_maxburst);
2809 if (ret) 2809 if (ret)
2810 return ret; 2810 return ret;
2811 2811
2812 ret = dma40_config_to_halfchannel(d40c, &cfg->dst_info, 2812 ret = dma40_config_to_halfchannel(d40c, &cfg->dst_info,
2813 dst_maxburst); 2813 dst_maxburst);
2814 if (ret) 2814 if (ret)
2815 return ret; 2815 return ret;
2816 2816
2817 /* Fill in register values */ 2817 /* Fill in register values */
2818 if (chan_is_logical(d40c)) 2818 if (chan_is_logical(d40c))
2819 d40_log_cfg(cfg, &d40c->log_def.lcsp1, &d40c->log_def.lcsp3); 2819 d40_log_cfg(cfg, &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
2820 else 2820 else
2821 d40_phy_cfg(cfg, &d40c->src_def_cfg, &d40c->dst_def_cfg); 2821 d40_phy_cfg(cfg, &d40c->src_def_cfg, &d40c->dst_def_cfg);
2822 2822
2823 /* These settings will take precedence later */ 2823 /* These settings will take precedence later */
2824 d40c->runtime_addr = config_addr; 2824 d40c->runtime_addr = config_addr;
2825 d40c->runtime_direction = config->direction; 2825 d40c->runtime_direction = config->direction;
2826 dev_dbg(d40c->base->dev, 2826 dev_dbg(d40c->base->dev,
2827 "configured channel %s for %s, data width %d/%d, " 2827 "configured channel %s for %s, data width %d/%d, "
2828 "maxburst %d/%d elements, LE, no flow control\n", 2828 "maxburst %d/%d elements, LE, no flow control\n",
2829 dma_chan_name(chan), 2829 dma_chan_name(chan),
2830 (config->direction == DMA_DEV_TO_MEM) ? "RX" : "TX", 2830 (config->direction == DMA_DEV_TO_MEM) ? "RX" : "TX",
2831 src_addr_width, dst_addr_width, 2831 src_addr_width, dst_addr_width,
2832 src_maxburst, dst_maxburst); 2832 src_maxburst, dst_maxburst);
2833 2833
2834 return 0; 2834 return 0;
2835 } 2835 }
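/*
 * Illustrative only: the parameters consumed above normally arrive from a
 * client through dmaengine_slave_config(); the FIFO address, bus width and
 * burst size below are placeholders.
 */
static int example_configure_rx(struct dma_chan *chan, dma_addr_t fifo_addr)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_DEV_TO_MEM,
		.src_addr	= fifo_addr,
		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst	= 8,
	};

	/* Routed via d40_control(DMA_SLAVE_CONFIG) to the function above. */
	return dmaengine_slave_config(chan, &cfg);
}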
2836 2836
2837 static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, 2837 static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
2838 unsigned long arg) 2838 unsigned long arg)
2839 { 2839 {
2840 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); 2840 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2841 2841
2842 if (d40c->phy_chan == NULL) { 2842 if (d40c->phy_chan == NULL) {
2843 chan_err(d40c, "Channel is not allocated!\n"); 2843 chan_err(d40c, "Channel is not allocated!\n");
2844 return -EINVAL; 2844 return -EINVAL;
2845 } 2845 }
2846 2846
2847 switch (cmd) { 2847 switch (cmd) {
2848 case DMA_TERMINATE_ALL: 2848 case DMA_TERMINATE_ALL:
2849 d40_terminate_all(chan); 2849 d40_terminate_all(chan);
2850 return 0; 2850 return 0;
2851 case DMA_PAUSE: 2851 case DMA_PAUSE:
2852 return d40_pause(d40c); 2852 return d40_pause(d40c);
2853 case DMA_RESUME: 2853 case DMA_RESUME:
2854 return d40_resume(d40c); 2854 return d40_resume(d40c);
2855 case DMA_SLAVE_CONFIG: 2855 case DMA_SLAVE_CONFIG:
2856 return d40_set_runtime_config(chan, 2856 return d40_set_runtime_config(chan,
2857 (struct dma_slave_config *) arg); 2857 (struct dma_slave_config *) arg);
2858 default: 2858 default:
2859 break; 2859 break;
2860 } 2860 }
2861 2861
2862 /* Other commands are unimplemented */ 2862 /* Other commands are unimplemented */
2863 return -ENXIO; 2863 return -ENXIO;
2864 } 2864 }
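/*
 * Illustrative only: the command interface above is normally reached through
 * the generic dmaengine wrappers rather than being called directly.
 */
static void example_pause_resume_stop(struct dma_chan *chan)
{
	dmaengine_pause(chan);         /* d40_control(DMA_PAUSE)         */
	dmaengine_resume(chan);        /* d40_control(DMA_RESUME)        */
	dmaengine_terminate_all(chan); /* d40_control(DMA_TERMINATE_ALL) */
}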
2865 2865
2866 /* Initialization functions */ 2866 /* Initialization functions */
2867 2867
2868 static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma, 2868 static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
2869 struct d40_chan *chans, int offset, 2869 struct d40_chan *chans, int offset,
2870 int num_chans) 2870 int num_chans)
2871 { 2871 {
2872 int i = 0; 2872 int i = 0;
2873 struct d40_chan *d40c; 2873 struct d40_chan *d40c;
2874 2874
2875 INIT_LIST_HEAD(&dma->channels); 2875 INIT_LIST_HEAD(&dma->channels);
2876 2876
2877 for (i = offset; i < offset + num_chans; i++) { 2877 for (i = offset; i < offset + num_chans; i++) {
2878 d40c = &chans[i]; 2878 d40c = &chans[i];
2879 d40c->base = base; 2879 d40c->base = base;
2880 d40c->chan.device = dma; 2880 d40c->chan.device = dma;
2881 2881
2882 spin_lock_init(&d40c->lock); 2882 spin_lock_init(&d40c->lock);
2883 2883
2884 d40c->log_num = D40_PHY_CHAN; 2884 d40c->log_num = D40_PHY_CHAN;
2885 2885
2886 INIT_LIST_HEAD(&d40c->done); 2886 INIT_LIST_HEAD(&d40c->done);
2887 INIT_LIST_HEAD(&d40c->active); 2887 INIT_LIST_HEAD(&d40c->active);
2888 INIT_LIST_HEAD(&d40c->queue); 2888 INIT_LIST_HEAD(&d40c->queue);
2889 INIT_LIST_HEAD(&d40c->pending_queue); 2889 INIT_LIST_HEAD(&d40c->pending_queue);
2890 INIT_LIST_HEAD(&d40c->client); 2890 INIT_LIST_HEAD(&d40c->client);
2891 INIT_LIST_HEAD(&d40c->prepare_queue); 2891 INIT_LIST_HEAD(&d40c->prepare_queue);
2892 2892
2893 tasklet_init(&d40c->tasklet, dma_tasklet, 2893 tasklet_init(&d40c->tasklet, dma_tasklet,
2894 (unsigned long) d40c); 2894 (unsigned long) d40c);
2895 2895
2896 list_add_tail(&d40c->chan.device_node, 2896 list_add_tail(&d40c->chan.device_node,
2897 &dma->channels); 2897 &dma->channels);
2898 } 2898 }
2899 } 2899 }
2900 2900
2901 static void d40_ops_init(struct d40_base *base, struct dma_device *dev) 2901 static void d40_ops_init(struct d40_base *base, struct dma_device *dev)
2902 { 2902 {
2903 if (dma_has_cap(DMA_SLAVE, dev->cap_mask)) 2903 if (dma_has_cap(DMA_SLAVE, dev->cap_mask))
2904 dev->device_prep_slave_sg = d40_prep_slave_sg; 2904 dev->device_prep_slave_sg = d40_prep_slave_sg;
2905 2905
2906 if (dma_has_cap(DMA_MEMCPY, dev->cap_mask)) { 2906 if (dma_has_cap(DMA_MEMCPY, dev->cap_mask)) {
2907 dev->device_prep_dma_memcpy = d40_prep_memcpy; 2907 dev->device_prep_dma_memcpy = d40_prep_memcpy;
2908 2908
2909 /* 2909 /*
2910 * This controller can only access addresses at even 2910 * This controller can only access addresses at even
2911 * 32-bit boundaries, i.e. 2^2. 2911 * 32-bit boundaries, i.e. 2^2.
2912 */ 2912 */
2913 dev->copy_align = 2; 2913 dev->copy_align = 2;
2914 } 2914 }
2915 2915
2916 if (dma_has_cap(DMA_SG, dev->cap_mask)) 2916 if (dma_has_cap(DMA_SG, dev->cap_mask))
2917 dev->device_prep_dma_sg = d40_prep_memcpy_sg; 2917 dev->device_prep_dma_sg = d40_prep_memcpy_sg;
2918 2918
2919 if (dma_has_cap(DMA_CYCLIC, dev->cap_mask)) 2919 if (dma_has_cap(DMA_CYCLIC, dev->cap_mask))
2920 dev->device_prep_dma_cyclic = dma40_prep_dma_cyclic; 2920 dev->device_prep_dma_cyclic = dma40_prep_dma_cyclic;
2921 2921
2922 dev->device_alloc_chan_resources = d40_alloc_chan_resources; 2922 dev->device_alloc_chan_resources = d40_alloc_chan_resources;
2923 dev->device_free_chan_resources = d40_free_chan_resources; 2923 dev->device_free_chan_resources = d40_free_chan_resources;
2924 dev->device_issue_pending = d40_issue_pending; 2924 dev->device_issue_pending = d40_issue_pending;
2925 dev->device_tx_status = d40_tx_status; 2925 dev->device_tx_status = d40_tx_status;
2926 dev->device_control = d40_control; 2926 dev->device_control = d40_control;
2927 dev->dev = base->dev; 2927 dev->dev = base->dev;
2928 } 2928 }
2929 2929
2930 static int __init d40_dmaengine_init(struct d40_base *base, 2930 static int __init d40_dmaengine_init(struct d40_base *base,
2931 int num_reserved_chans) 2931 int num_reserved_chans)
2932 { 2932 {
2933 int err; 2933 int err;
2934 2934
2935 d40_chan_init(base, &base->dma_slave, base->log_chans, 2935 d40_chan_init(base, &base->dma_slave, base->log_chans,
2936 0, base->num_log_chans); 2936 0, base->num_log_chans);
2937 2937
2938 dma_cap_zero(base->dma_slave.cap_mask); 2938 dma_cap_zero(base->dma_slave.cap_mask);
2939 dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask); 2939 dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask);
2940 dma_cap_set(DMA_CYCLIC, base->dma_slave.cap_mask); 2940 dma_cap_set(DMA_CYCLIC, base->dma_slave.cap_mask);
2941 2941
2942 d40_ops_init(base, &base->dma_slave); 2942 d40_ops_init(base, &base->dma_slave);
2943 2943
2944 err = dma_async_device_register(&base->dma_slave); 2944 err = dma_async_device_register(&base->dma_slave);
2945 2945
2946 if (err) { 2946 if (err) {
2947 d40_err(base->dev, "Failed to register slave channels\n"); 2947 d40_err(base->dev, "Failed to register slave channels\n");
2948 goto failure1; 2948 goto failure1;
2949 } 2949 }
2950 2950
2951 d40_chan_init(base, &base->dma_memcpy, base->log_chans, 2951 d40_chan_init(base, &base->dma_memcpy, base->log_chans,
2952 base->num_log_chans, base->num_memcpy_chans); 2952 base->num_log_chans, base->num_memcpy_chans);
2953 2953
2954 dma_cap_zero(base->dma_memcpy.cap_mask); 2954 dma_cap_zero(base->dma_memcpy.cap_mask);
2955 dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask); 2955 dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask);
2956 dma_cap_set(DMA_SG, base->dma_memcpy.cap_mask); 2956 dma_cap_set(DMA_SG, base->dma_memcpy.cap_mask);
2957 2957
2958 d40_ops_init(base, &base->dma_memcpy); 2958 d40_ops_init(base, &base->dma_memcpy);
2959 2959
2960 err = dma_async_device_register(&base->dma_memcpy); 2960 err = dma_async_device_register(&base->dma_memcpy);
2961 2961
2962 if (err) { 2962 if (err) {
2963 d40_err(base->dev, 2963 d40_err(base->dev,
2964 "Failed to register memcpy only channels\n"); 2964 "Failed to register memcpy only channels\n");
2965 goto failure2; 2965 goto failure2;
2966 } 2966 }
2967 2967
2968 d40_chan_init(base, &base->dma_both, base->phy_chans, 2968 d40_chan_init(base, &base->dma_both, base->phy_chans,
2969 0, num_reserved_chans); 2969 0, num_reserved_chans);
2970 2970
2971 dma_cap_zero(base->dma_both.cap_mask); 2971 dma_cap_zero(base->dma_both.cap_mask);
2972 dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask); 2972 dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask);
2973 dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask); 2973 dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask);
2974 dma_cap_set(DMA_SG, base->dma_both.cap_mask); 2974 dma_cap_set(DMA_SG, base->dma_both.cap_mask);
2975 dma_cap_set(DMA_CYCLIC, base->dma_slave.cap_mask); 2975 dma_cap_set(DMA_CYCLIC, base->dma_slave.cap_mask);
2976 2976
2977 d40_ops_init(base, &base->dma_both); 2977 d40_ops_init(base, &base->dma_both);
2978 err = dma_async_device_register(&base->dma_both); 2978 err = dma_async_device_register(&base->dma_both);
2979 2979
2980 if (err) { 2980 if (err) {
2981 d40_err(base->dev, 2981 d40_err(base->dev,
2982 "Failed to register logical and physical capable channels\n"); 2982 "Failed to register logical and physical capable channels\n");
2983 goto failure3; 2983 goto failure3;
2984 } 2984 }
2985 return 0; 2985 return 0;
2986 failure3: 2986 failure3:
2987 dma_async_device_unregister(&base->dma_memcpy); 2987 dma_async_device_unregister(&base->dma_memcpy);
2988 failure2: 2988 failure2:
2989 dma_async_device_unregister(&base->dma_slave); 2989 dma_async_device_unregister(&base->dma_slave);
2990 failure1: 2990 failure1:
2991 return err; 2991 return err;
2992 } 2992 }
2993 2993
2994 /* Suspend/resume functionality */ 2994 /* Suspend/resume functionality */
2995 #ifdef CONFIG_PM 2995 #ifdef CONFIG_PM
2996 static int dma40_pm_suspend(struct device *dev) 2996 static int dma40_pm_suspend(struct device *dev)
2997 { 2997 {
2998 struct platform_device *pdev = to_platform_device(dev); 2998 struct platform_device *pdev = to_platform_device(dev);
2999 struct d40_base *base = platform_get_drvdata(pdev); 2999 struct d40_base *base = platform_get_drvdata(pdev);
3000 int ret = 0; 3000 int ret = 0;
3001 3001
3002 if (base->lcpa_regulator) 3002 if (base->lcpa_regulator)
3003 ret = regulator_disable(base->lcpa_regulator); 3003 ret = regulator_disable(base->lcpa_regulator);
3004 return ret; 3004 return ret;
3005 } 3005 }
3006 3006
3007 static int dma40_runtime_suspend(struct device *dev) 3007 static int dma40_runtime_suspend(struct device *dev)
3008 { 3008 {
3009 struct platform_device *pdev = to_platform_device(dev); 3009 struct platform_device *pdev = to_platform_device(dev);
3010 struct d40_base *base = platform_get_drvdata(pdev); 3010 struct d40_base *base = platform_get_drvdata(pdev);
3011 3011
3012 d40_save_restore_registers(base, true); 3012 d40_save_restore_registers(base, true);
3013 3013
3014 /* Don't disable/enable clocks for v1 due to HW bugs */ 3014 /* Don't disable/enable clocks for v1 due to HW bugs */
3015 if (base->rev != 1) 3015 if (base->rev != 1)
3016 writel_relaxed(base->gcc_pwr_off_mask, 3016 writel_relaxed(base->gcc_pwr_off_mask,
3017 base->virtbase + D40_DREG_GCC); 3017 base->virtbase + D40_DREG_GCC);
3018 3018
3019 return 0; 3019 return 0;
3020 } 3020 }
3021 3021
3022 static int dma40_runtime_resume(struct device *dev) 3022 static int dma40_runtime_resume(struct device *dev)
3023 { 3023 {
3024 struct platform_device *pdev = to_platform_device(dev); 3024 struct platform_device *pdev = to_platform_device(dev);
3025 struct d40_base *base = platform_get_drvdata(pdev); 3025 struct d40_base *base = platform_get_drvdata(pdev);
3026 3026
3027 if (base->initialized) 3027 if (base->initialized)
3028 d40_save_restore_registers(base, false); 3028 d40_save_restore_registers(base, false);
3029 3029
3030 writel_relaxed(D40_DREG_GCC_ENABLE_ALL, 3030 writel_relaxed(D40_DREG_GCC_ENABLE_ALL,
3031 base->virtbase + D40_DREG_GCC); 3031 base->virtbase + D40_DREG_GCC);
3032 return 0; 3032 return 0;
3033 } 3033 }
3034 3034
3035 static int dma40_resume(struct device *dev) 3035 static int dma40_resume(struct device *dev)
3036 { 3036 {
3037 struct platform_device *pdev = to_platform_device(dev); 3037 struct platform_device *pdev = to_platform_device(dev);
3038 struct d40_base *base = platform_get_drvdata(pdev); 3038 struct d40_base *base = platform_get_drvdata(pdev);
3039 int ret = 0; 3039 int ret = 0;
3040 3040
3041 if (base->lcpa_regulator) 3041 if (base->lcpa_regulator)
3042 ret = regulator_enable(base->lcpa_regulator); 3042 ret = regulator_enable(base->lcpa_regulator);
3043 3043
3044 return ret; 3044 return ret;
3045 } 3045 }
3046 3046
3047 static const struct dev_pm_ops dma40_pm_ops = { 3047 static const struct dev_pm_ops dma40_pm_ops = {
3048 .suspend = dma40_pm_suspend, 3048 .suspend = dma40_pm_suspend,
3049 .runtime_suspend = dma40_runtime_suspend, 3049 .runtime_suspend = dma40_runtime_suspend,
3050 .runtime_resume = dma40_runtime_resume, 3050 .runtime_resume = dma40_runtime_resume,
3051 .resume = dma40_resume, 3051 .resume = dma40_resume,
3052 }; 3052 };
3053 #define DMA40_PM_OPS (&dma40_pm_ops) 3053 #define DMA40_PM_OPS (&dma40_pm_ops)
3054 #else 3054 #else
3055 #define DMA40_PM_OPS NULL 3055 #define DMA40_PM_OPS NULL
3056 #endif 3056 #endif
3057 3057
3058 /* Initialization functions. */ 3058 /* Initialization functions. */
3059 3059
3060 static int __init d40_phy_res_init(struct d40_base *base) 3060 static int __init d40_phy_res_init(struct d40_base *base)
3061 { 3061 {
3062 int i; 3062 int i;
3063 int num_phy_chans_avail = 0; 3063 int num_phy_chans_avail = 0;
3064 u32 val[2]; 3064 u32 val[2];
3065 int odd_even_bit = -2; 3065 int odd_even_bit = -2;
3066 int gcc = D40_DREG_GCC_ENA; 3066 int gcc = D40_DREG_GCC_ENA;
3067 3067
3068 val[0] = readl(base->virtbase + D40_DREG_PRSME); 3068 val[0] = readl(base->virtbase + D40_DREG_PRSME);
3069 val[1] = readl(base->virtbase + D40_DREG_PRSMO); 3069 val[1] = readl(base->virtbase + D40_DREG_PRSMO);
3070 3070
3071 for (i = 0; i < base->num_phy_chans; i++) { 3071 for (i = 0; i < base->num_phy_chans; i++) {
3072 base->phy_res[i].num = i; 3072 base->phy_res[i].num = i;
3073 odd_even_bit += 2 * ((i % 2) == 0); 3073 odd_even_bit += 2 * ((i % 2) == 0);
3074 if (((val[i % 2] >> odd_even_bit) & 3) == 1) { 3074 if (((val[i % 2] >> odd_even_bit) & 3) == 1) {
3075 /* Mark security-only channels as occupied */ 3075 /* Mark security-only channels as occupied */
3076 base->phy_res[i].allocated_src = D40_ALLOC_PHY; 3076 base->phy_res[i].allocated_src = D40_ALLOC_PHY;
3077 base->phy_res[i].allocated_dst = D40_ALLOC_PHY; 3077 base->phy_res[i].allocated_dst = D40_ALLOC_PHY;
3078 base->phy_res[i].reserved = true; 3078 base->phy_res[i].reserved = true;
3079 gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(i), 3079 gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(i),
3080 D40_DREG_GCC_SRC); 3080 D40_DREG_GCC_SRC);
3081 gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(i), 3081 gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(i),
3082 D40_DREG_GCC_DST); 3082 D40_DREG_GCC_DST);
3083 3083
3084 3084
3085 } else { 3085 } else {
3086 base->phy_res[i].allocated_src = D40_ALLOC_FREE; 3086 base->phy_res[i].allocated_src = D40_ALLOC_FREE;
3087 base->phy_res[i].allocated_dst = D40_ALLOC_FREE; 3087 base->phy_res[i].allocated_dst = D40_ALLOC_FREE;
3088 base->phy_res[i].reserved = false; 3088 base->phy_res[i].reserved = false;
3089 num_phy_chans_avail++; 3089 num_phy_chans_avail++;
3090 } 3090 }
3091 spin_lock_init(&base->phy_res[i].lock); 3091 spin_lock_init(&base->phy_res[i].lock);
3092 } 3092 }
3093 3093
3094 /* Mark disabled channels as occupied */ 3094 /* Mark disabled channels as occupied */
3095 for (i = 0; base->plat_data->disabled_channels[i] != -1; i++) { 3095 for (i = 0; base->plat_data->disabled_channels[i] != -1; i++) {
3096 int chan = base->plat_data->disabled_channels[i]; 3096 int chan = base->plat_data->disabled_channels[i];
3097 3097
3098 base->phy_res[chan].allocated_src = D40_ALLOC_PHY; 3098 base->phy_res[chan].allocated_src = D40_ALLOC_PHY;
3099 base->phy_res[chan].allocated_dst = D40_ALLOC_PHY; 3099 base->phy_res[chan].allocated_dst = D40_ALLOC_PHY;
3100 base->phy_res[chan].reserved = true; 3100 base->phy_res[chan].reserved = true;
3101 gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(chan), 3101 gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(chan),
3102 D40_DREG_GCC_SRC); 3102 D40_DREG_GCC_SRC);
3103 gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(chan), 3103 gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(chan),
3104 D40_DREG_GCC_DST); 3104 D40_DREG_GCC_DST);
3105 num_phy_chans_avail--; 3105 num_phy_chans_avail--;
3106 } 3106 }
3107 3107
3108 /* Mark soft_lli channels */ 3108 /* Mark soft_lli channels */
3109 for (i = 0; i < base->plat_data->num_of_soft_lli_chans; i++) { 3109 for (i = 0; i < base->plat_data->num_of_soft_lli_chans; i++) {
3110 int chan = base->plat_data->soft_lli_chans[i]; 3110 int chan = base->plat_data->soft_lli_chans[i];
3111 3111
3112 base->phy_res[chan].use_soft_lli = true; 3112 base->phy_res[chan].use_soft_lli = true;
3113 } 3113 }
3114 3114
3115 dev_info(base->dev, "%d of %d physical DMA channels available\n", 3115 dev_info(base->dev, "%d of %d physical DMA channels available\n",
3116 num_phy_chans_avail, base->num_phy_chans); 3116 num_phy_chans_avail, base->num_phy_chans);
3117 3117
3118 /* Verify settings extended vs standard */ 3118 /* Verify settings extended vs standard */
3119 val[0] = readl(base->virtbase + D40_DREG_PRTYP); 3119 val[0] = readl(base->virtbase + D40_DREG_PRTYP);
3120 3120
3121 for (i = 0; i < base->num_phy_chans; i++) { 3121 for (i = 0; i < base->num_phy_chans; i++) {
3122 3122
3123 if (base->phy_res[i].allocated_src == D40_ALLOC_FREE && 3123 if (base->phy_res[i].allocated_src == D40_ALLOC_FREE &&
3124 (val[0] & 0x3) != 1) 3124 (val[0] & 0x3) != 1)
3125 dev_info(base->dev, 3125 dev_info(base->dev,
3126 "[%s] INFO: channel %d is misconfigured (%d)\n", 3126 "[%s] INFO: channel %d is misconfigured (%d)\n",
3127 __func__, i, val[0] & 0x3); 3127 __func__, i, val[0] & 0x3);
3128 3128
3129 val[0] = val[0] >> 2; 3129 val[0] = val[0] >> 2;
3130 } 3130 }
3131 3131
3132 /* 3132 /*
3133 * To keep things simple, enable all clocks initially. 3133 * To keep things simple, enable all clocks initially.
3134 * The clocks will get managed later, after channel allocation. 3134 * The clocks will get managed later, after channel allocation.
3135 * The clocks for the event lines on which reserved channels exist 3135 * The clocks for the event lines on which reserved channels exist
3136 * are not managed here. 3136 * are not managed here.
3137 */ 3137 */
3138 writel(D40_DREG_GCC_ENABLE_ALL, base->virtbase + D40_DREG_GCC); 3138 writel(D40_DREG_GCC_ENABLE_ALL, base->virtbase + D40_DREG_GCC);
3139 base->gcc_pwr_off_mask = gcc; 3139 base->gcc_pwr_off_mask = gcc;
3140 3140
3141 return num_phy_chans_avail; 3141 return num_phy_chans_avail;
3142 } 3142 }
3143 3143
3144 static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) 3144 static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
3145 { 3145 {
3146 struct stedma40_platform_data *plat_data = dev_get_platdata(&pdev->dev); 3146 struct stedma40_platform_data *plat_data = dev_get_platdata(&pdev->dev);
3147 struct clk *clk = NULL; 3147 struct clk *clk = NULL;
3148 void __iomem *virtbase = NULL; 3148 void __iomem *virtbase = NULL;
3149 struct resource *res = NULL; 3149 struct resource *res = NULL;
3150 struct d40_base *base = NULL; 3150 struct d40_base *base = NULL;
3151 int num_log_chans = 0; 3151 int num_log_chans = 0;
3152 int num_phy_chans; 3152 int num_phy_chans;
3153 int num_memcpy_chans; 3153 int num_memcpy_chans;
3154 int clk_ret = -EINVAL; 3154 int clk_ret = -EINVAL;
3155 int i; 3155 int i;
3156 u32 pid; 3156 u32 pid;
3157 u32 cid; 3157 u32 cid;
3158 u8 rev; 3158 u8 rev;
3159 3159
3160 clk = clk_get(&pdev->dev, NULL); 3160 clk = clk_get(&pdev->dev, NULL);
3161 if (IS_ERR(clk)) { 3161 if (IS_ERR(clk)) {
3162 d40_err(&pdev->dev, "No matching clock found\n"); 3162 d40_err(&pdev->dev, "No matching clock found\n");
3163 goto failure; 3163 goto failure;
3164 } 3164 }
3165 3165
3166 clk_ret = clk_prepare_enable(clk); 3166 clk_ret = clk_prepare_enable(clk);
3167 if (clk_ret) { 3167 if (clk_ret) {
3168 d40_err(&pdev->dev, "Failed to prepare/enable clock\n"); 3168 d40_err(&pdev->dev, "Failed to prepare/enable clock\n");
3169 goto failure; 3169 goto failure;
3170 } 3170 }
3171 3171
3172 /* Get IO for DMAC base address */ 3172 /* Get IO for DMAC base address */
3173 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base"); 3173 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
3174 if (!res) 3174 if (!res)
3175 goto failure; 3175 goto failure;
3176 3176
3177 if (request_mem_region(res->start, resource_size(res), 3177 if (request_mem_region(res->start, resource_size(res),
3178 D40_NAME " I/O base") == NULL) 3178 D40_NAME " I/O base") == NULL)
3179 goto failure; 3179 goto failure;
3180 3180
3181 virtbase = ioremap(res->start, resource_size(res)); 3181 virtbase = ioremap(res->start, resource_size(res));
3182 if (!virtbase) 3182 if (!virtbase)
3183 goto failure; 3183 goto failure;
3184 3184
3185 /* This is just a regular AMBA PrimeCell ID actually */ 3185 /* This is just a regular AMBA PrimeCell ID actually */
3186 for (pid = 0, i = 0; i < 4; i++) 3186 for (pid = 0, i = 0; i < 4; i++)
3187 pid |= (readl(virtbase + resource_size(res) - 0x20 + 4 * i) 3187 pid |= (readl(virtbase + resource_size(res) - 0x20 + 4 * i)
3188 & 255) << (i * 8); 3188 & 255) << (i * 8);
3189 for (cid = 0, i = 0; i < 4; i++) 3189 for (cid = 0, i = 0; i < 4; i++)
3190 cid |= (readl(virtbase + resource_size(res) - 0x10 + 4 * i) 3190 cid |= (readl(virtbase + resource_size(res) - 0x10 + 4 * i)
3191 & 255) << (i * 8); 3191 & 255) << (i * 8);
3192 3192
3193 if (cid != AMBA_CID) { 3193 if (cid != AMBA_CID) {
3194 d40_err(&pdev->dev, "Unknown hardware! No PrimeCell ID\n"); 3194 d40_err(&pdev->dev, "Unknown hardware! No PrimeCell ID\n");
3195 goto failure; 3195 goto failure;
3196 } 3196 }
3197 if (AMBA_MANF_BITS(pid) != AMBA_VENDOR_ST) { 3197 if (AMBA_MANF_BITS(pid) != AMBA_VENDOR_ST) {
3198 d40_err(&pdev->dev, "Unknown designer! Got %x wanted %x\n", 3198 d40_err(&pdev->dev, "Unknown designer! Got %x wanted %x\n",
3199 AMBA_MANF_BITS(pid), 3199 AMBA_MANF_BITS(pid),
3200 AMBA_VENDOR_ST); 3200 AMBA_VENDOR_ST);
3201 goto failure; 3201 goto failure;
3202 } 3202 }
3203 /* 3203 /*
3204 * HW revision: 3204 * HW revision:
3205 * DB8500ed has revision 0 3205 * DB8500ed has revision 0
3206 * ? has revision 1 3206 * ? has revision 1
3207 * DB8500v1 has revision 2 3207 * DB8500v1 has revision 2
3208 * DB8500v2 has revision 3 3208 * DB8500v2 has revision 3
3209 * AP9540v1 has revision 4 3209 * AP9540v1 has revision 4
3210 * DB8540v1 has revision 4 3210 * DB8540v1 has revision 4
3211 */ 3211 */
3212 rev = AMBA_REV_BITS(pid); 3212 rev = AMBA_REV_BITS(pid);
3213 if (rev < 2) { 3213 if (rev < 2) {
3214 d40_err(&pdev->dev, "hardware revision: %d is not supported", rev); 3214 d40_err(&pdev->dev, "hardware revision: %d is not supported", rev);
3215 goto failure; 3215 goto failure;
3216 } 3216 }
3217 3217
3218 /* The number of physical channels on this HW */ 3218 /* The number of physical channels on this HW */
3219 if (plat_data->num_of_phy_chans) 3219 if (plat_data->num_of_phy_chans)
3220 num_phy_chans = plat_data->num_of_phy_chans; 3220 num_phy_chans = plat_data->num_of_phy_chans;
3221 else 3221 else
3222 num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4; 3222 num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4;
3223 3223
3224 /* The number of channels used for memcpy */ 3224 /* The number of channels used for memcpy */
3225 if (plat_data->num_of_memcpy_chans) 3225 if (plat_data->num_of_memcpy_chans)
3226 num_memcpy_chans = plat_data->num_of_memcpy_chans; 3226 num_memcpy_chans = plat_data->num_of_memcpy_chans;
3227 else 3227 else
3228 num_memcpy_chans = ARRAY_SIZE(dma40_memcpy_channels); 3228 num_memcpy_chans = ARRAY_SIZE(dma40_memcpy_channels);
3229 3229
3230 num_log_chans = num_phy_chans * D40_MAX_LOG_CHAN_PER_PHY; 3230 num_log_chans = num_phy_chans * D40_MAX_LOG_CHAN_PER_PHY;
3231 3231
3232 dev_info(&pdev->dev, 3232 dev_info(&pdev->dev,
3233 "hardware rev: %d @ %pa with %d physical and %d logical channels\n", 3233 "hardware rev: %d @ %pa with %d physical and %d logical channels\n",
3234 rev, &res->start, num_phy_chans, num_log_chans); 3234 rev, &res->start, num_phy_chans, num_log_chans);
3235 3235
3236 base = kzalloc(ALIGN(sizeof(struct d40_base), 4) + 3236 base = kzalloc(ALIGN(sizeof(struct d40_base), 4) +
3237 (num_phy_chans + num_log_chans + num_memcpy_chans) * 3237 (num_phy_chans + num_log_chans + num_memcpy_chans) *
3238 sizeof(struct d40_chan), GFP_KERNEL); 3238 sizeof(struct d40_chan), GFP_KERNEL);
3239 3239
3240 if (base == NULL) { 3240 if (base == NULL) {
3241 d40_err(&pdev->dev, "Out of memory\n"); 3241 d40_err(&pdev->dev, "Out of memory\n");
3242 goto failure; 3242 goto failure;
3243 } 3243 }
3244 3244
3245 base->rev = rev; 3245 base->rev = rev;
3246 base->clk = clk; 3246 base->clk = clk;
3247 base->num_memcpy_chans = num_memcpy_chans; 3247 base->num_memcpy_chans = num_memcpy_chans;
3248 base->num_phy_chans = num_phy_chans; 3248 base->num_phy_chans = num_phy_chans;
3249 base->num_log_chans = num_log_chans; 3249 base->num_log_chans = num_log_chans;
3250 base->phy_start = res->start; 3250 base->phy_start = res->start;
3251 base->phy_size = resource_size(res); 3251 base->phy_size = resource_size(res);
3252 base->virtbase = virtbase; 3252 base->virtbase = virtbase;
3253 base->plat_data = plat_data; 3253 base->plat_data = plat_data;
3254 base->dev = &pdev->dev; 3254 base->dev = &pdev->dev;
3255 base->phy_chans = ((void *)base) + ALIGN(sizeof(struct d40_base), 4); 3255 base->phy_chans = ((void *)base) + ALIGN(sizeof(struct d40_base), 4);
3256 base->log_chans = &base->phy_chans[num_phy_chans]; 3256 base->log_chans = &base->phy_chans[num_phy_chans];
3257 3257
3258 if (base->plat_data->num_of_phy_chans == 14) { 3258 if (base->plat_data->num_of_phy_chans == 14) {
3259 base->gen_dmac.backup = d40_backup_regs_v4b; 3259 base->gen_dmac.backup = d40_backup_regs_v4b;
3260 base->gen_dmac.backup_size = BACKUP_REGS_SZ_V4B; 3260 base->gen_dmac.backup_size = BACKUP_REGS_SZ_V4B;
3261 base->gen_dmac.interrupt_en = D40_DREG_CPCMIS; 3261 base->gen_dmac.interrupt_en = D40_DREG_CPCMIS;
3262 base->gen_dmac.interrupt_clear = D40_DREG_CPCICR; 3262 base->gen_dmac.interrupt_clear = D40_DREG_CPCICR;
3263 base->gen_dmac.realtime_en = D40_DREG_CRSEG1; 3263 base->gen_dmac.realtime_en = D40_DREG_CRSEG1;
3264 base->gen_dmac.realtime_clear = D40_DREG_CRCEG1; 3264 base->gen_dmac.realtime_clear = D40_DREG_CRCEG1;
3265 base->gen_dmac.high_prio_en = D40_DREG_CPSEG1; 3265 base->gen_dmac.high_prio_en = D40_DREG_CPSEG1;
3266 base->gen_dmac.high_prio_clear = D40_DREG_CPCEG1; 3266 base->gen_dmac.high_prio_clear = D40_DREG_CPCEG1;
3267 base->gen_dmac.il = il_v4b; 3267 base->gen_dmac.il = il_v4b;
3268 base->gen_dmac.il_size = ARRAY_SIZE(il_v4b); 3268 base->gen_dmac.il_size = ARRAY_SIZE(il_v4b);
3269 base->gen_dmac.init_reg = dma_init_reg_v4b; 3269 base->gen_dmac.init_reg = dma_init_reg_v4b;
3270 base->gen_dmac.init_reg_size = ARRAY_SIZE(dma_init_reg_v4b); 3270 base->gen_dmac.init_reg_size = ARRAY_SIZE(dma_init_reg_v4b);
3271 } else { 3271 } else {
3272 if (base->rev >= 3) { 3272 if (base->rev >= 3) {
3273 base->gen_dmac.backup = d40_backup_regs_v4a; 3273 base->gen_dmac.backup = d40_backup_regs_v4a;
3274 base->gen_dmac.backup_size = BACKUP_REGS_SZ_V4A; 3274 base->gen_dmac.backup_size = BACKUP_REGS_SZ_V4A;
3275 } 3275 }
3276 base->gen_dmac.interrupt_en = D40_DREG_PCMIS; 3276 base->gen_dmac.interrupt_en = D40_DREG_PCMIS;
3277 base->gen_dmac.interrupt_clear = D40_DREG_PCICR; 3277 base->gen_dmac.interrupt_clear = D40_DREG_PCICR;
3278 base->gen_dmac.realtime_en = D40_DREG_RSEG1; 3278 base->gen_dmac.realtime_en = D40_DREG_RSEG1;
3279 base->gen_dmac.realtime_clear = D40_DREG_RCEG1; 3279 base->gen_dmac.realtime_clear = D40_DREG_RCEG1;
3280 base->gen_dmac.high_prio_en = D40_DREG_PSEG1; 3280 base->gen_dmac.high_prio_en = D40_DREG_PSEG1;
3281 base->gen_dmac.high_prio_clear = D40_DREG_PCEG1; 3281 base->gen_dmac.high_prio_clear = D40_DREG_PCEG1;
3282 base->gen_dmac.il = il_v4a; 3282 base->gen_dmac.il = il_v4a;
3283 base->gen_dmac.il_size = ARRAY_SIZE(il_v4a); 3283 base->gen_dmac.il_size = ARRAY_SIZE(il_v4a);
3284 base->gen_dmac.init_reg = dma_init_reg_v4a; 3284 base->gen_dmac.init_reg = dma_init_reg_v4a;
3285 base->gen_dmac.init_reg_size = ARRAY_SIZE(dma_init_reg_v4a); 3285 base->gen_dmac.init_reg_size = ARRAY_SIZE(dma_init_reg_v4a);
3286 } 3286 }
3287 3287
3288 base->phy_res = kzalloc(num_phy_chans * sizeof(struct d40_phy_res), 3288 base->phy_res = kzalloc(num_phy_chans * sizeof(struct d40_phy_res),
3289 GFP_KERNEL); 3289 GFP_KERNEL);
3290 if (!base->phy_res) 3290 if (!base->phy_res)
3291 goto failure; 3291 goto failure;
3292 3292
3293 base->lookup_phy_chans = kzalloc(num_phy_chans * 3293 base->lookup_phy_chans = kzalloc(num_phy_chans *
3294 sizeof(struct d40_chan *), 3294 sizeof(struct d40_chan *),
3295 GFP_KERNEL); 3295 GFP_KERNEL);
3296 if (!base->lookup_phy_chans) 3296 if (!base->lookup_phy_chans)
3297 goto failure; 3297 goto failure;
3298 3298
3299 base->lookup_log_chans = kzalloc(num_log_chans * 3299 base->lookup_log_chans = kzalloc(num_log_chans *
3300 sizeof(struct d40_chan *), 3300 sizeof(struct d40_chan *),
3301 GFP_KERNEL); 3301 GFP_KERNEL);
3302 if (!base->lookup_log_chans) 3302 if (!base->lookup_log_chans)
3303 goto failure; 3303 goto failure;
3304 3304
3305 base->reg_val_backup_chan = kmalloc(base->num_phy_chans * 3305 base->reg_val_backup_chan = kmalloc(base->num_phy_chans *
3306 sizeof(d40_backup_regs_chan), 3306 sizeof(d40_backup_regs_chan),
3307 GFP_KERNEL); 3307 GFP_KERNEL);
3308 if (!base->reg_val_backup_chan) 3308 if (!base->reg_val_backup_chan)
3309 goto failure; 3309 goto failure;
3310 3310
3311 base->lcla_pool.alloc_map = 3311 base->lcla_pool.alloc_map =
3312 kzalloc(num_phy_chans * sizeof(struct d40_desc *) 3312 kzalloc(num_phy_chans * sizeof(struct d40_desc *)
3313 * D40_LCLA_LINK_PER_EVENT_GRP, GFP_KERNEL); 3313 * D40_LCLA_LINK_PER_EVENT_GRP, GFP_KERNEL);
3314 if (!base->lcla_pool.alloc_map) 3314 if (!base->lcla_pool.alloc_map)
3315 goto failure; 3315 goto failure;
3316 3316
3317 base->desc_slab = kmem_cache_create(D40_NAME, sizeof(struct d40_desc), 3317 base->desc_slab = kmem_cache_create(D40_NAME, sizeof(struct d40_desc),
3318 0, SLAB_HWCACHE_ALIGN, 3318 0, SLAB_HWCACHE_ALIGN,
3319 NULL); 3319 NULL);
3320 if (base->desc_slab == NULL) 3320 if (base->desc_slab == NULL)
3321 goto failure; 3321 goto failure;
3322 3322
3323 return base; 3323 return base;
3324 3324
3325 failure: 3325 failure:
3326 if (!clk_ret) 3326 if (!clk_ret)
3327 clk_disable_unprepare(clk); 3327 clk_disable_unprepare(clk);
3328 if (!IS_ERR(clk)) 3328 if (!IS_ERR(clk))
3329 clk_put(clk); 3329 clk_put(clk);
3330 if (virtbase) 3330 if (virtbase)
3331 iounmap(virtbase); 3331 iounmap(virtbase);
3332 if (res) 3332 if (res)
3333 release_mem_region(res->start, 3333 release_mem_region(res->start,
3334 resource_size(res)); 3334 resource_size(res));
3335 if (virtbase) 3335 if (virtbase)
3336 iounmap(virtbase); 3336 iounmap(virtbase);
3337 3337
3338 if (base) { 3338 if (base) {
3339 kfree(base->lcla_pool.alloc_map); 3339 kfree(base->lcla_pool.alloc_map);
3340 kfree(base->reg_val_backup_chan); 3340 kfree(base->reg_val_backup_chan);
3341 kfree(base->lookup_log_chans); 3341 kfree(base->lookup_log_chans);
3342 kfree(base->lookup_phy_chans); 3342 kfree(base->lookup_phy_chans);
3343 kfree(base->phy_res); 3343 kfree(base->phy_res);
3344 kfree(base); 3344 kfree(base);
3345 } 3345 }
3346 3346
3347 return NULL; 3347 return NULL;
3348 } 3348 }
3349 3349
3350 static void __init d40_hw_init(struct d40_base *base) 3350 static void __init d40_hw_init(struct d40_base *base)
3351 { 3351 {
3352 3352
3353 int i; 3353 int i;
3354 u32 prmseo[2] = {0, 0}; 3354 u32 prmseo[2] = {0, 0};
3355 u32 activeo[2] = {0xFFFFFFFF, 0xFFFFFFFF}; 3355 u32 activeo[2] = {0xFFFFFFFF, 0xFFFFFFFF};
3356 u32 pcmis = 0; 3356 u32 pcmis = 0;
3357 u32 pcicr = 0; 3357 u32 pcicr = 0;
3358 struct d40_reg_val *dma_init_reg = base->gen_dmac.init_reg; 3358 struct d40_reg_val *dma_init_reg = base->gen_dmac.init_reg;
3359 u32 reg_size = base->gen_dmac.init_reg_size; 3359 u32 reg_size = base->gen_dmac.init_reg_size;
3360 3360
3361 for (i = 0; i < reg_size; i++) 3361 for (i = 0; i < reg_size; i++)
3362 writel(dma_init_reg[i].val, 3362 writel(dma_init_reg[i].val,
3363 base->virtbase + dma_init_reg[i].reg); 3363 base->virtbase + dma_init_reg[i].reg);
3364 3364
3365 /* Configure all our dma channels to default settings */ 3365 /* Configure all our dma channels to default settings */
3366 for (i = 0; i < base->num_phy_chans; i++) { 3366 for (i = 0; i < base->num_phy_chans; i++) {
3367 3367
3368 activeo[i % 2] = activeo[i % 2] << 2; 3368 activeo[i % 2] = activeo[i % 2] << 2;
3369 3369
3370 if (base->phy_res[base->num_phy_chans - i - 1].allocated_src 3370 if (base->phy_res[base->num_phy_chans - i - 1].allocated_src
3371 == D40_ALLOC_PHY) { 3371 == D40_ALLOC_PHY) {
3372 activeo[i % 2] |= 3; 3372 activeo[i % 2] |= 3;
3373 continue; 3373 continue;
3374 } 3374 }
3375 3375
3376 /* Enable interrupt # */ 3376 /* Enable interrupt # */
3377 pcmis = (pcmis << 1) | 1; 3377 pcmis = (pcmis << 1) | 1;
3378 3378
3379 /* Clear interrupt # */ 3379 /* Clear interrupt # */
3380 pcicr = (pcicr << 1) | 1; 3380 pcicr = (pcicr << 1) | 1;
3381 3381
3382 /* Set channel to physical mode */ 3382 /* Set channel to physical mode */
3383 prmseo[i % 2] = prmseo[i % 2] << 2; 3383 prmseo[i % 2] = prmseo[i % 2] << 2;
3384 prmseo[i % 2] |= 1; 3384 prmseo[i % 2] |= 1;
3385 3385
3386 } 3386 }
3387 3387
3388 writel(prmseo[1], base->virtbase + D40_DREG_PRMSE); 3388 writel(prmseo[1], base->virtbase + D40_DREG_PRMSE);
3389 writel(prmseo[0], base->virtbase + D40_DREG_PRMSO); 3389 writel(prmseo[0], base->virtbase + D40_DREG_PRMSO);
3390 writel(activeo[1], base->virtbase + D40_DREG_ACTIVE); 3390 writel(activeo[1], base->virtbase + D40_DREG_ACTIVE);
3391 writel(activeo[0], base->virtbase + D40_DREG_ACTIVO); 3391 writel(activeo[0], base->virtbase + D40_DREG_ACTIVO);
3392 3392
3393 /* Write which interrupt to enable */ 3393 /* Write which interrupt to enable */
3394 writel(pcmis, base->virtbase + base->gen_dmac.interrupt_en); 3394 writel(pcmis, base->virtbase + base->gen_dmac.interrupt_en);
3395 3395
3396 /* Write which interrupt to clear */ 3396 /* Write which interrupt to clear */
3397 writel(pcicr, base->virtbase + base->gen_dmac.interrupt_clear); 3397 writel(pcicr, base->virtbase + base->gen_dmac.interrupt_clear);
3398 3398
3399 /* These are __initdata and cannot be accessed after init */ 3399 /* These are __initdata and cannot be accessed after init */
3400 base->gen_dmac.init_reg = NULL; 3400 base->gen_dmac.init_reg = NULL;
3401 base->gen_dmac.init_reg_size = 0; 3401 base->gen_dmac.init_reg_size = 0;
3402 } 3402 }
3403 3403
3404 static int __init d40_lcla_allocate(struct d40_base *base) 3404 static int __init d40_lcla_allocate(struct d40_base *base)
3405 { 3405 {
3406 struct d40_lcla_pool *pool = &base->lcla_pool; 3406 struct d40_lcla_pool *pool = &base->lcla_pool;
3407 unsigned long *page_list; 3407 unsigned long *page_list;
3408 int i, j; 3408 int i, j;
3409 int ret = 0; 3409 int ret = 0;
3410 3410
3411 /* 3411 /*
3412 * This is somewhat ugly. We need 8192 bytes that are 18-bit aligned. 3412 * This is somewhat ugly. We need 8192 bytes that are 18-bit aligned.
3413 * To fulfill this hardware requirement without wasting 256 kb 3413 * To fulfill this hardware requirement without wasting 256 kb
3414 * we allocate pages until we get an aligned one. 3414 * we allocate pages until we get an aligned one.
3415 */ 3415 */
3416 page_list = kmalloc(sizeof(unsigned long) * MAX_LCLA_ALLOC_ATTEMPTS, 3416 page_list = kmalloc(sizeof(unsigned long) * MAX_LCLA_ALLOC_ATTEMPTS,
3417 GFP_KERNEL); 3417 GFP_KERNEL);
3418 3418
3419 if (!page_list) { 3419 if (!page_list) {
3420 ret = -ENOMEM; 3420 ret = -ENOMEM;
3421 goto failure; 3421 goto failure;
3422 } 3422 }
3423 3423
3424 /* Calculate how many pages are required */ 3424 /* Calculate how many pages are required */
3425 base->lcla_pool.pages = SZ_1K * base->num_phy_chans / PAGE_SIZE; 3425 base->lcla_pool.pages = SZ_1K * base->num_phy_chans / PAGE_SIZE;
3426 3426
3427 for (i = 0; i < MAX_LCLA_ALLOC_ATTEMPTS; i++) { 3427 for (i = 0; i < MAX_LCLA_ALLOC_ATTEMPTS; i++) {
3428 page_list[i] = __get_free_pages(GFP_KERNEL, 3428 page_list[i] = __get_free_pages(GFP_KERNEL,
3429 base->lcla_pool.pages); 3429 base->lcla_pool.pages);
3430 if (!page_list[i]) { 3430 if (!page_list[i]) {
3431 3431
3432 d40_err(base->dev, "Failed to allocate %d pages.\n", 3432 d40_err(base->dev, "Failed to allocate %d pages.\n",
3433 base->lcla_pool.pages); 3433 base->lcla_pool.pages);
3434 3434
3435 for (j = 0; j < i; j++) 3435 for (j = 0; j < i; j++)
3436 free_pages(page_list[j], base->lcla_pool.pages); 3436 free_pages(page_list[j], base->lcla_pool.pages);
3437 goto failure; 3437 goto failure;
3438 } 3438 }
3439 3439
3440 if ((virt_to_phys((void *)page_list[i]) & 3440 if ((virt_to_phys((void *)page_list[i]) &
3441 (LCLA_ALIGNMENT - 1)) == 0) 3441 (LCLA_ALIGNMENT - 1)) == 0)
3442 break; 3442 break;
3443 } 3443 }
3444 3444
3445 for (j = 0; j < i; j++) 3445 for (j = 0; j < i; j++)
3446 free_pages(page_list[j], base->lcla_pool.pages); 3446 free_pages(page_list[j], base->lcla_pool.pages);
3447 3447
3448 if (i < MAX_LCLA_ALLOC_ATTEMPTS) { 3448 if (i < MAX_LCLA_ALLOC_ATTEMPTS) {
3449 base->lcla_pool.base = (void *)page_list[i]; 3449 base->lcla_pool.base = (void *)page_list[i];
3450 } else { 3450 } else {
3451 /* 3451 /*
3452 * After many attempts with no success in finding the correct 3452 * After many attempts with no success in finding the correct
3453 * alignment, try with allocating a big buffer. 3453 * alignment, try with allocating a big buffer.
3454 */ 3454 */
3455 dev_warn(base->dev, 3455 dev_warn(base->dev,
3456 "[%s] Failed to get %d pages @ 18 bit align.\n", 3456 "[%s] Failed to get %d pages @ 18 bit align.\n",
3457 __func__, base->lcla_pool.pages); 3457 __func__, base->lcla_pool.pages);
3458 base->lcla_pool.base_unaligned = kmalloc(SZ_1K * 3458 base->lcla_pool.base_unaligned = kmalloc(SZ_1K *
3459 base->num_phy_chans + 3459 base->num_phy_chans +
3460 LCLA_ALIGNMENT, 3460 LCLA_ALIGNMENT,
3461 GFP_KERNEL); 3461 GFP_KERNEL);
3462 if (!base->lcla_pool.base_unaligned) { 3462 if (!base->lcla_pool.base_unaligned) {
3463 ret = -ENOMEM; 3463 ret = -ENOMEM;
3464 goto failure; 3464 goto failure;
3465 } 3465 }
3466 3466
3467 base->lcla_pool.base = PTR_ALIGN(base->lcla_pool.base_unaligned, 3467 base->lcla_pool.base = PTR_ALIGN(base->lcla_pool.base_unaligned,
3468 LCLA_ALIGNMENT); 3468 LCLA_ALIGNMENT);
3469 } 3469 }
3470 3470
3471 pool->dma_addr = dma_map_single(base->dev, pool->base, 3471 pool->dma_addr = dma_map_single(base->dev, pool->base,
3472 SZ_1K * base->num_phy_chans, 3472 SZ_1K * base->num_phy_chans,
3473 DMA_TO_DEVICE); 3473 DMA_TO_DEVICE);
3474 if (dma_mapping_error(base->dev, pool->dma_addr)) { 3474 if (dma_mapping_error(base->dev, pool->dma_addr)) {
3475 pool->dma_addr = 0; 3475 pool->dma_addr = 0;
3476 ret = -ENOMEM; 3476 ret = -ENOMEM;
3477 goto failure; 3477 goto failure;
3478 } 3478 }
3479 3479
3480 writel(virt_to_phys(base->lcla_pool.base), 3480 writel(virt_to_phys(base->lcla_pool.base),
3481 base->virtbase + D40_DREG_LCLA); 3481 base->virtbase + D40_DREG_LCLA);
3482 failure: 3482 failure:
3483 kfree(page_list); 3483 kfree(page_list);
3484 return ret; 3484 return ret;
3485 } 3485 }
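A minimal sketch of the allocate-until-aligned idiom that the comment at the top of d40_lcla_allocate() describes; it reuses MAX_LCLA_ALLOC_ATTEMPTS, __get_free_pages() and virt_to_phys() only to illustrate the idea and is not part of this diff.

static unsigned long example_get_aligned_pages(unsigned int order,
					       unsigned long align)
{
	/* the real code above kmalloc()s this list to keep it off the stack */
	unsigned long tries[MAX_LCLA_ALLOC_ATTEMPTS];
	unsigned long addr = 0;
	int i, j;

	for (i = 0; i < MAX_LCLA_ALLOC_ATTEMPTS; i++) {
		tries[i] = __get_free_pages(GFP_KERNEL, order);
		if (!tries[i])
			break;
		if ((virt_to_phys((void *)tries[i]) & (align - 1)) == 0) {
			addr = tries[i];	/* keep the aligned block */
			break;
		}
	}

	/* free every misaligned (or unused) attempt before the winner */
	for (j = 0; j < i; j++)
		free_pages(tries[j], order);

	return addr;	/* 0 if no aligned block was found */
}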
3486 3486
3487 static int __init d40_of_probe(struct platform_device *pdev, 3487 static int __init d40_of_probe(struct platform_device *pdev,
3488 struct device_node *np) 3488 struct device_node *np)
3489 { 3489 {
3490 struct stedma40_platform_data *pdata; 3490 struct stedma40_platform_data *pdata;
3491 int num_phy = 0, num_memcpy = 0, num_disabled = 0; 3491 int num_phy = 0, num_memcpy = 0, num_disabled = 0;
3492 const __be32 *list; 3492 const __be32 *list;
3493 3493
3494 pdata = devm_kzalloc(&pdev->dev, 3494 pdata = devm_kzalloc(&pdev->dev,
3495 sizeof(struct stedma40_platform_data), 3495 sizeof(struct stedma40_platform_data),
3496 GFP_KERNEL); 3496 GFP_KERNEL);
3497 if (!pdata) 3497 if (!pdata)
3498 return -ENOMEM; 3498 return -ENOMEM;
3499 3499
3500 /* If absent, this value will be obtained from h/w. */ 3500 /* If absent, this value will be obtained from h/w. */
3501 of_property_read_u32(np, "dma-channels", &num_phy); 3501 of_property_read_u32(np, "dma-channels", &num_phy);
3502 if (num_phy > 0) 3502 if (num_phy > 0)
3503 pdata->num_of_phy_chans = num_phy; 3503 pdata->num_of_phy_chans = num_phy;
3504 3504
3505 list = of_get_property(np, "memcpy-channels", &num_memcpy); 3505 list = of_get_property(np, "memcpy-channels", &num_memcpy);
3506 num_memcpy /= sizeof(*list); 3506 num_memcpy /= sizeof(*list);
3507 3507
3508 if (num_memcpy > D40_MEMCPY_MAX_CHANS || num_memcpy <= 0) { 3508 if (num_memcpy > D40_MEMCPY_MAX_CHANS || num_memcpy <= 0) {
3509 d40_err(&pdev->dev, 3509 d40_err(&pdev->dev,
3510 "Invalid number of memcpy channels specified (%d)\n", 3510 "Invalid number of memcpy channels specified (%d)\n",
3511 num_memcpy); 3511 num_memcpy);
3512 return -EINVAL; 3512 return -EINVAL;
3513 } 3513 }
3514 pdata->num_of_memcpy_chans = num_memcpy; 3514 pdata->num_of_memcpy_chans = num_memcpy;
3515 3515
3516 of_property_read_u32_array(np, "memcpy-channels", 3516 of_property_read_u32_array(np, "memcpy-channels",
3517 dma40_memcpy_channels, 3517 dma40_memcpy_channels,
3518 num_memcpy); 3518 num_memcpy);
3519 3519
3520 list = of_get_property(np, "disabled-channels", &num_disabled); 3520 list = of_get_property(np, "disabled-channels", &num_disabled);
3521 num_disabled /= sizeof(*list); 3521 num_disabled /= sizeof(*list);
3522 3522
3523 if (num_disabled >= STEDMA40_MAX_PHYS || num_disabled < 0) { 3523 if (num_disabled >= STEDMA40_MAX_PHYS || num_disabled < 0) {
3524 d40_err(&pdev->dev, 3524 d40_err(&pdev->dev,
3525 "Invalid number of disabled channels specified (%d)\n", 3525 "Invalid number of disabled channels specified (%d)\n",
3526 num_disabled); 3526 num_disabled);
3527 return -EINVAL; 3527 return -EINVAL;
3528 } 3528 }
3529 3529
3530 of_property_read_u32_array(np, "disabled-channels", 3530 of_property_read_u32_array(np, "disabled-channels",
3531 pdata->disabled_channels, 3531 pdata->disabled_channels,
3532 num_disabled); 3532 num_disabled);
3533 pdata->disabled_channels[num_disabled] = -1; 3533 pdata->disabled_channels[num_disabled] = -1;
3534 3534
3535 pdev->dev.platform_data = pdata; 3535 pdev->dev.platform_data = pdata;
3536 3536
3537 return 0; 3537 return 0;
3538 } 3538 }
3539 3539
3540 static int __init d40_probe(struct platform_device *pdev) 3540 static int __init d40_probe(struct platform_device *pdev)
3541 { 3541 {
3542 struct stedma40_platform_data *plat_data = dev_get_platdata(&pdev->dev); 3542 struct stedma40_platform_data *plat_data = dev_get_platdata(&pdev->dev);
3543 struct device_node *np = pdev->dev.of_node; 3543 struct device_node *np = pdev->dev.of_node;
3544 int ret = -ENOENT; 3544 int ret = -ENOENT;
3545 struct d40_base *base = NULL; 3545 struct d40_base *base = NULL;
3546 struct resource *res = NULL; 3546 struct resource *res = NULL;
3547 int num_reserved_chans; 3547 int num_reserved_chans;
3548 u32 val; 3548 u32 val;
3549 3549
3550 if (!plat_data) { 3550 if (!plat_data) {
3551 if (np) { 3551 if (np) {
3552 if (d40_of_probe(pdev, np)) { 3552 if (d40_of_probe(pdev, np)) {
3553 ret = -ENOMEM; 3553 ret = -ENOMEM;
3554 goto failure; 3554 goto failure;
3555 } 3555 }
3556 } else { 3556 } else {
3557 d40_err(&pdev->dev, "No pdata or Device Tree provided\n"); 3557 d40_err(&pdev->dev, "No pdata or Device Tree provided\n");
3558 goto failure; 3558 goto failure;
3559 } 3559 }
3560 } 3560 }
3561 3561
3562 base = d40_hw_detect_init(pdev); 3562 base = d40_hw_detect_init(pdev);
3563 if (!base) 3563 if (!base)
3564 goto failure; 3564 goto failure;
3565 3565
3566 num_reserved_chans = d40_phy_res_init(base); 3566 num_reserved_chans = d40_phy_res_init(base);
3567 3567
3568 platform_set_drvdata(pdev, base); 3568 platform_set_drvdata(pdev, base);
3569 3569
3570 spin_lock_init(&base->interrupt_lock); 3570 spin_lock_init(&base->interrupt_lock);
3571 spin_lock_init(&base->execmd_lock); 3571 spin_lock_init(&base->execmd_lock);
3572 3572
3573 /* Get IO for logical channel parameter address */ 3573 /* Get IO for logical channel parameter address */
3574 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcpa"); 3574 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcpa");
3575 if (!res) { 3575 if (!res) {
3576 ret = -ENOENT; 3576 ret = -ENOENT;
3577 d40_err(&pdev->dev, "No \"lcpa\" memory resource\n"); 3577 d40_err(&pdev->dev, "No \"lcpa\" memory resource\n");
3578 goto failure; 3578 goto failure;
3579 } 3579 }
3580 base->lcpa_size = resource_size(res); 3580 base->lcpa_size = resource_size(res);
3581 base->phy_lcpa = res->start; 3581 base->phy_lcpa = res->start;
3582 3582
3583 if (request_mem_region(res->start, resource_size(res), 3583 if (request_mem_region(res->start, resource_size(res),
3584 D40_NAME " I/O lcpa") == NULL) { 3584 D40_NAME " I/O lcpa") == NULL) {
3585 ret = -EBUSY; 3585 ret = -EBUSY;
3586 d40_err(&pdev->dev, "Failed to request LCPA region %pR\n", res); 3586 d40_err(&pdev->dev, "Failed to request LCPA region %pR\n", res);
3587 goto failure; 3587 goto failure;
3588 } 3588 }
3589 3589
3590 /* We make use of ESRAM memory for this. */ 3590 /* We make use of ESRAM memory for this. */
3591 val = readl(base->virtbase + D40_DREG_LCPA); 3591 val = readl(base->virtbase + D40_DREG_LCPA);
3592 if (res->start != val && val != 0) { 3592 if (res->start != val && val != 0) {
3593 dev_warn(&pdev->dev, 3593 dev_warn(&pdev->dev,
3594 "[%s] Mismatch LCPA dma 0x%x, def %pa\n", 3594 "[%s] Mismatch LCPA dma 0x%x, def %pa\n",
3595 __func__, val, &res->start); 3595 __func__, val, &res->start);
3596 } else 3596 } else
3597 writel(res->start, base->virtbase + D40_DREG_LCPA); 3597 writel(res->start, base->virtbase + D40_DREG_LCPA);
3598 3598
3599 base->lcpa_base = ioremap(res->start, resource_size(res)); 3599 base->lcpa_base = ioremap(res->start, resource_size(res));
3600 if (!base->lcpa_base) { 3600 if (!base->lcpa_base) {
3601 ret = -ENOMEM; 3601 ret = -ENOMEM;
3602 d40_err(&pdev->dev, "Failed to ioremap LCPA region\n"); 3602 d40_err(&pdev->dev, "Failed to ioremap LCPA region\n");
3603 goto failure; 3603 goto failure;
3604 } 3604 }
3605 /* If lcla has to be located in ESRAM we don't need to allocate */ 3605 /* If lcla has to be located in ESRAM we don't need to allocate */
3606 if (base->plat_data->use_esram_lcla) { 3606 if (base->plat_data->use_esram_lcla) {
3607 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, 3607 res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
3608 "lcla_esram"); 3608 "lcla_esram");
3609 if (!res) { 3609 if (!res) {
3610 ret = -ENOENT; 3610 ret = -ENOENT;
3611 d40_err(&pdev->dev, 3611 d40_err(&pdev->dev,
3612 "No \"lcla_esram\" memory resource\n"); 3612 "No \"lcla_esram\" memory resource\n");
3613 goto failure; 3613 goto failure;
3614 } 3614 }
3615 base->lcla_pool.base = ioremap(res->start, 3615 base->lcla_pool.base = ioremap(res->start,
3616 resource_size(res)); 3616 resource_size(res));
3617 if (!base->lcla_pool.base) { 3617 if (!base->lcla_pool.base) {
3618 ret = -ENOMEM; 3618 ret = -ENOMEM;
3619 d40_err(&pdev->dev, "Failed to ioremap LCLA region\n"); 3619 d40_err(&pdev->dev, "Failed to ioremap LCLA region\n");
3620 goto failure; 3620 goto failure;
3621 } 3621 }
3622 writel(res->start, base->virtbase + D40_DREG_LCLA); 3622 writel(res->start, base->virtbase + D40_DREG_LCLA);
3623 3623
3624 } else { 3624 } else {
3625 ret = d40_lcla_allocate(base); 3625 ret = d40_lcla_allocate(base);
3626 if (ret) { 3626 if (ret) {
3627 d40_err(&pdev->dev, "Failed to allocate LCLA area\n"); 3627 d40_err(&pdev->dev, "Failed to allocate LCLA area\n");
3628 goto failure; 3628 goto failure;
3629 } 3629 }
3630 } 3630 }
3631 3631
3632 spin_lock_init(&base->lcla_pool.lock); 3632 spin_lock_init(&base->lcla_pool.lock);
3633 3633
3634 base->irq = platform_get_irq(pdev, 0); 3634 base->irq = platform_get_irq(pdev, 0);
3635 3635
3636 ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base); 3636 ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base);
3637 if (ret) { 3637 if (ret) {
3638 d40_err(&pdev->dev, "No IRQ defined\n"); 3638 d40_err(&pdev->dev, "No IRQ defined\n");
3639 goto failure; 3639 goto failure;
3640 } 3640 }
3641 3641
3642 pm_runtime_irq_safe(base->dev); 3642 pm_runtime_irq_safe(base->dev);
3643 pm_runtime_set_autosuspend_delay(base->dev, DMA40_AUTOSUSPEND_DELAY); 3643 pm_runtime_set_autosuspend_delay(base->dev, DMA40_AUTOSUSPEND_DELAY);
3644 pm_runtime_use_autosuspend(base->dev); 3644 pm_runtime_use_autosuspend(base->dev);
3645 pm_runtime_enable(base->dev); 3645 pm_runtime_enable(base->dev);
3646 pm_runtime_resume(base->dev); 3646 pm_runtime_resume(base->dev);
3647 3647
3648 if (base->plat_data->use_esram_lcla) { 3648 if (base->plat_data->use_esram_lcla) {
3649 3649
3650 base->lcpa_regulator = regulator_get(base->dev, "lcla_esram"); 3650 base->lcpa_regulator = regulator_get(base->dev, "lcla_esram");
3651 if (IS_ERR(base->lcpa_regulator)) { 3651 if (IS_ERR(base->lcpa_regulator)) {
3652 d40_err(&pdev->dev, "Failed to get lcpa_regulator\n"); 3652 d40_err(&pdev->dev, "Failed to get lcpa_regulator\n");
3653 ret = PTR_ERR(base->lcpa_regulator); 3653 ret = PTR_ERR(base->lcpa_regulator);
3654 base->lcpa_regulator = NULL; 3654 base->lcpa_regulator = NULL;
3655 goto failure; 3655 goto failure;
3656 } 3656 }
3657 3657
3658 ret = regulator_enable(base->lcpa_regulator); 3658 ret = regulator_enable(base->lcpa_regulator);
3659 if (ret) { 3659 if (ret) {
3660 d40_err(&pdev->dev, 3660 d40_err(&pdev->dev,
3661 "Failed to enable lcpa_regulator\n"); 3661 "Failed to enable lcpa_regulator\n");
3662 regulator_put(base->lcpa_regulator); 3662 regulator_put(base->lcpa_regulator);
3663 base->lcpa_regulator = NULL; 3663 base->lcpa_regulator = NULL;
3664 goto failure; 3664 goto failure;
3665 } 3665 }
3666 } 3666 }
3667 3667
3668 base->initialized = true; 3668 base->initialized = true;
3669 ret = d40_dmaengine_init(base, num_reserved_chans); 3669 ret = d40_dmaengine_init(base, num_reserved_chans);
3670 if (ret) 3670 if (ret)
3671 goto failure; 3671 goto failure;
3672 3672
3673 base->dev->dma_parms = &base->dma_parms; 3673 base->dev->dma_parms = &base->dma_parms;
3674 ret = dma_set_max_seg_size(base->dev, STEDMA40_MAX_SEG_SIZE); 3674 ret = dma_set_max_seg_size(base->dev, STEDMA40_MAX_SEG_SIZE);
3675 if (ret) { 3675 if (ret) {
3676 d40_err(&pdev->dev, "Failed to set dma max seg size\n"); 3676 d40_err(&pdev->dev, "Failed to set dma max seg size\n");
3677 goto failure; 3677 goto failure;
3678 } 3678 }
3679 3679
3680 d40_hw_init(base); 3680 d40_hw_init(base);
3681 3681
3682 if (np) { 3682 if (np) {
3683 ret = of_dma_controller_register(np, d40_xlate, NULL); 3683 ret = of_dma_controller_register(np, d40_xlate, NULL);
3684 if (ret) 3684 if (ret)
3685 dev_err(&pdev->dev, 3685 dev_err(&pdev->dev,
3686 "could not register of_dma_controller\n"); 3686 "could not register of_dma_controller\n");
3687 } 3687 }
3688 3688
3689 dev_info(base->dev, "initialized\n"); 3689 dev_info(base->dev, "initialized\n");
3690 return 0; 3690 return 0;
3691 3691
3692 failure: 3692 failure:
3693 if (base) { 3693 if (base) {
3694 if (base->desc_slab) 3694 if (base->desc_slab)
3695 kmem_cache_destroy(base->desc_slab); 3695 kmem_cache_destroy(base->desc_slab);
3696 if (base->virtbase) 3696 if (base->virtbase)
3697 iounmap(base->virtbase); 3697 iounmap(base->virtbase);
3698 3698
3699 if (base->lcla_pool.base && base->plat_data->use_esram_lcla) { 3699 if (base->lcla_pool.base && base->plat_data->use_esram_lcla) {
3700 iounmap(base->lcla_pool.base); 3700 iounmap(base->lcla_pool.base);
3701 base->lcla_pool.base = NULL; 3701 base->lcla_pool.base = NULL;
3702 } 3702 }
3703 3703
3704 if (base->lcla_pool.dma_addr) 3704 if (base->lcla_pool.dma_addr)
3705 dma_unmap_single(base->dev, base->lcla_pool.dma_addr, 3705 dma_unmap_single(base->dev, base->lcla_pool.dma_addr,
3706 SZ_1K * base->num_phy_chans, 3706 SZ_1K * base->num_phy_chans,
3707 DMA_TO_DEVICE); 3707 DMA_TO_DEVICE);
3708 3708
3709 if (!base->lcla_pool.base_unaligned && base->lcla_pool.base) 3709 if (!base->lcla_pool.base_unaligned && base->lcla_pool.base)
3710 free_pages((unsigned long)base->lcla_pool.base, 3710 free_pages((unsigned long)base->lcla_pool.base,
3711 base->lcla_pool.pages); 3711 base->lcla_pool.pages);
3712 3712
3713 kfree(base->lcla_pool.base_unaligned); 3713 kfree(base->lcla_pool.base_unaligned);
3714 3714
3715 if (base->phy_lcpa) 3715 if (base->phy_lcpa)
3716 release_mem_region(base->phy_lcpa, 3716 release_mem_region(base->phy_lcpa,
3717 base->lcpa_size); 3717 base->lcpa_size);
3718 if (base->phy_start) 3718 if (base->phy_start)
3719 release_mem_region(base->phy_start, 3719 release_mem_region(base->phy_start,
3720 base->phy_size); 3720 base->phy_size);
3721 if (base->clk) { 3721 if (base->clk) {
3722 clk_disable_unprepare(base->clk); 3722 clk_disable_unprepare(base->clk);
3723 clk_put(base->clk); 3723 clk_put(base->clk);
3724 } 3724 }
3725 3725
3726 if (base->lcpa_regulator) { 3726 if (base->lcpa_regulator) {
3727 regulator_disable(base->lcpa_regulator); 3727 regulator_disable(base->lcpa_regulator);
3728 regulator_put(base->lcpa_regulator); 3728 regulator_put(base->lcpa_regulator);
3729 } 3729 }
3730 3730
3731 kfree(base->lcla_pool.alloc_map); 3731 kfree(base->lcla_pool.alloc_map);
3732 kfree(base->lookup_log_chans); 3732 kfree(base->lookup_log_chans);
3733 kfree(base->lookup_phy_chans); 3733 kfree(base->lookup_phy_chans);
3734 kfree(base->phy_res); 3734 kfree(base->phy_res);
3735 kfree(base); 3735 kfree(base);
3736 } 3736 }
3737 3737
3738 d40_err(&pdev->dev, "probe failed\n"); 3738 d40_err(&pdev->dev, "probe failed\n");
3739 return ret; 3739 return ret;
3740 } 3740 }
3741 3741
3742 static const struct of_device_id d40_match[] = { 3742 static const struct of_device_id d40_match[] = {
3743 { .compatible = "stericsson,dma40", }, 3743 { .compatible = "stericsson,dma40", },
3744 {} 3744 {}
3745 }; 3745 };
3746 3746
3747 static struct platform_driver d40_driver = { 3747 static struct platform_driver d40_driver = {
3748 .driver = { 3748 .driver = {
3749 .owner = THIS_MODULE, 3749 .owner = THIS_MODULE,
3750 .name = D40_NAME, 3750 .name = D40_NAME,
3751 .pm = DMA40_PM_OPS, 3751 .pm = DMA40_PM_OPS,
3752 .of_match_table = d40_match, 3752 .of_match_table = d40_match,
3753 }, 3753 },
3754 }; 3754 };
3755 3755
3756 static int __init stedma40_init(void) 3756 static int __init stedma40_init(void)
3757 { 3757 {
3758 return platform_driver_probe(&d40_driver, d40_probe); 3758 return platform_driver_probe(&d40_driver, d40_probe);
3759 } 3759 }
3760 subsys_initcall(stedma40_init); 3760 subsys_initcall(stedma40_init);
3761 3761
drivers/dma/tegra20-apb-dma.c
1 /* 1 /*
2 * DMA driver for Nvidia's Tegra20 APB DMA controller. 2 * DMA driver for Nvidia's Tegra20 APB DMA controller.
3 * 3 *
4 * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved. 4 * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License, 7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation. 8 * version 2, as published by the Free Software Foundation.
9 * 9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT 10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details. 13 * more details.
14 * 14 *
15 * You should have received a copy of the GNU General Public License 15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>. 16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */ 17 */
18 18
19 #include <linux/bitops.h> 19 #include <linux/bitops.h>
20 #include <linux/clk.h> 20 #include <linux/clk.h>
21 #include <linux/delay.h> 21 #include <linux/delay.h>
22 #include <linux/dmaengine.h> 22 #include <linux/dmaengine.h>
23 #include <linux/dma-mapping.h> 23 #include <linux/dma-mapping.h>
24 #include <linux/err.h> 24 #include <linux/err.h>
25 #include <linux/init.h> 25 #include <linux/init.h>
26 #include <linux/interrupt.h> 26 #include <linux/interrupt.h>
27 #include <linux/io.h> 27 #include <linux/io.h>
28 #include <linux/mm.h> 28 #include <linux/mm.h>
29 #include <linux/module.h> 29 #include <linux/module.h>
30 #include <linux/of.h> 30 #include <linux/of.h>
31 #include <linux/of_device.h> 31 #include <linux/of_device.h>
32 #include <linux/platform_device.h> 32 #include <linux/platform_device.h>
33 #include <linux/pm.h> 33 #include <linux/pm.h>
34 #include <linux/pm_runtime.h> 34 #include <linux/pm_runtime.h>
35 #include <linux/slab.h> 35 #include <linux/slab.h>
36 #include <linux/clk/tegra.h> 36 #include <linux/clk/tegra.h>
37 37
38 #include "dmaengine.h" 38 #include "dmaengine.h"
39 39
40 #define TEGRA_APBDMA_GENERAL 0x0 40 #define TEGRA_APBDMA_GENERAL 0x0
41 #define TEGRA_APBDMA_GENERAL_ENABLE BIT(31) 41 #define TEGRA_APBDMA_GENERAL_ENABLE BIT(31)
42 42
43 #define TEGRA_APBDMA_CONTROL 0x010 43 #define TEGRA_APBDMA_CONTROL 0x010
44 #define TEGRA_APBDMA_IRQ_MASK 0x01c 44 #define TEGRA_APBDMA_IRQ_MASK 0x01c
45 #define TEGRA_APBDMA_IRQ_MASK_SET 0x020 45 #define TEGRA_APBDMA_IRQ_MASK_SET 0x020
46 46
47 /* CSR register */ 47 /* CSR register */
48 #define TEGRA_APBDMA_CHAN_CSR 0x00 48 #define TEGRA_APBDMA_CHAN_CSR 0x00
49 #define TEGRA_APBDMA_CSR_ENB BIT(31) 49 #define TEGRA_APBDMA_CSR_ENB BIT(31)
50 #define TEGRA_APBDMA_CSR_IE_EOC BIT(30) 50 #define TEGRA_APBDMA_CSR_IE_EOC BIT(30)
51 #define TEGRA_APBDMA_CSR_HOLD BIT(29) 51 #define TEGRA_APBDMA_CSR_HOLD BIT(29)
52 #define TEGRA_APBDMA_CSR_DIR BIT(28) 52 #define TEGRA_APBDMA_CSR_DIR BIT(28)
53 #define TEGRA_APBDMA_CSR_ONCE BIT(27) 53 #define TEGRA_APBDMA_CSR_ONCE BIT(27)
54 #define TEGRA_APBDMA_CSR_FLOW BIT(21) 54 #define TEGRA_APBDMA_CSR_FLOW BIT(21)
55 #define TEGRA_APBDMA_CSR_REQ_SEL_SHIFT 16 55 #define TEGRA_APBDMA_CSR_REQ_SEL_SHIFT 16
56 #define TEGRA_APBDMA_CSR_WCOUNT_MASK 0xFFFC 56 #define TEGRA_APBDMA_CSR_WCOUNT_MASK 0xFFFC
57 57
58 /* STATUS register */ 58 /* STATUS register */
59 #define TEGRA_APBDMA_CHAN_STATUS 0x004 59 #define TEGRA_APBDMA_CHAN_STATUS 0x004
60 #define TEGRA_APBDMA_STATUS_BUSY BIT(31) 60 #define TEGRA_APBDMA_STATUS_BUSY BIT(31)
61 #define TEGRA_APBDMA_STATUS_ISE_EOC BIT(30) 61 #define TEGRA_APBDMA_STATUS_ISE_EOC BIT(30)
62 #define TEGRA_APBDMA_STATUS_HALT BIT(29) 62 #define TEGRA_APBDMA_STATUS_HALT BIT(29)
63 #define TEGRA_APBDMA_STATUS_PING_PONG BIT(28) 63 #define TEGRA_APBDMA_STATUS_PING_PONG BIT(28)
64 #define TEGRA_APBDMA_STATUS_COUNT_SHIFT 2 64 #define TEGRA_APBDMA_STATUS_COUNT_SHIFT 2
65 #define TEGRA_APBDMA_STATUS_COUNT_MASK 0xFFFC 65 #define TEGRA_APBDMA_STATUS_COUNT_MASK 0xFFFC
66 66
67 #define TEGRA_APBDMA_CHAN_CSRE 0x00C 67 #define TEGRA_APBDMA_CHAN_CSRE 0x00C
68 #define TEGRA_APBDMA_CHAN_CSRE_PAUSE (1 << 31) 68 #define TEGRA_APBDMA_CHAN_CSRE_PAUSE (1 << 31)
69 69
70 /* AHB memory address */ 70 /* AHB memory address */
71 #define TEGRA_APBDMA_CHAN_AHBPTR 0x010 71 #define TEGRA_APBDMA_CHAN_AHBPTR 0x010
72 72
73 /* AHB sequence register */ 73 /* AHB sequence register */
74 #define TEGRA_APBDMA_CHAN_AHBSEQ 0x14 74 #define TEGRA_APBDMA_CHAN_AHBSEQ 0x14
75 #define TEGRA_APBDMA_AHBSEQ_INTR_ENB BIT(31) 75 #define TEGRA_APBDMA_AHBSEQ_INTR_ENB BIT(31)
76 #define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_8 (0 << 28) 76 #define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_8 (0 << 28)
77 #define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_16 (1 << 28) 77 #define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_16 (1 << 28)
78 #define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32 (2 << 28) 78 #define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32 (2 << 28)
79 #define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_64 (3 << 28) 79 #define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_64 (3 << 28)
80 #define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_128 (4 << 28) 80 #define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_128 (4 << 28)
81 #define TEGRA_APBDMA_AHBSEQ_DATA_SWAP BIT(27) 81 #define TEGRA_APBDMA_AHBSEQ_DATA_SWAP BIT(27)
82 #define TEGRA_APBDMA_AHBSEQ_BURST_1 (4 << 24) 82 #define TEGRA_APBDMA_AHBSEQ_BURST_1 (4 << 24)
83 #define TEGRA_APBDMA_AHBSEQ_BURST_4 (5 << 24) 83 #define TEGRA_APBDMA_AHBSEQ_BURST_4 (5 << 24)
84 #define TEGRA_APBDMA_AHBSEQ_BURST_8 (6 << 24) 84 #define TEGRA_APBDMA_AHBSEQ_BURST_8 (6 << 24)
85 #define TEGRA_APBDMA_AHBSEQ_DBL_BUF BIT(19) 85 #define TEGRA_APBDMA_AHBSEQ_DBL_BUF BIT(19)
86 #define TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT 16 86 #define TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT 16
87 #define TEGRA_APBDMA_AHBSEQ_WRAP_NONE 0 87 #define TEGRA_APBDMA_AHBSEQ_WRAP_NONE 0
88 88
89 /* APB address */ 89 /* APB address */
90 #define TEGRA_APBDMA_CHAN_APBPTR 0x018 90 #define TEGRA_APBDMA_CHAN_APBPTR 0x018
91 91
92 /* APB sequence register */ 92 /* APB sequence register */
93 #define TEGRA_APBDMA_CHAN_APBSEQ 0x01c 93 #define TEGRA_APBDMA_CHAN_APBSEQ 0x01c
94 #define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_8 (0 << 28) 94 #define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_8 (0 << 28)
95 #define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_16 (1 << 28) 95 #define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_16 (1 << 28)
96 #define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_32 (2 << 28) 96 #define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_32 (2 << 28)
97 #define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_64 (3 << 28) 97 #define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_64 (3 << 28)
98 #define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_128 (4 << 28) 98 #define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_128 (4 << 28)
99 #define TEGRA_APBDMA_APBSEQ_DATA_SWAP BIT(27) 99 #define TEGRA_APBDMA_APBSEQ_DATA_SWAP BIT(27)
100 #define TEGRA_APBDMA_APBSEQ_WRAP_WORD_1 (1 << 16) 100 #define TEGRA_APBDMA_APBSEQ_WRAP_WORD_1 (1 << 16)
101 101
102 /* 102 /*
103 * If any burst is in flight and the DMA is paused then this is the time 103 * If any burst is in flight and the DMA is paused then this is the time
104 * needed for the in-flight burst to complete and the DMA status register to update. 104 * needed for the in-flight burst to complete and the DMA status register to update.
105 */ 105 */
106 #define TEGRA_APBDMA_BURST_COMPLETE_TIME 20 106 #define TEGRA_APBDMA_BURST_COMPLETE_TIME 20
107 107
108 /* Channel base address offset from APBDMA base address */ 108 /* Channel base address offset from APBDMA base address */
109 #define TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET 0x1000 109 #define TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET 0x1000
110 110
111 /* DMA channel register space size */ 111 /* DMA channel register space size */
112 #define TEGRA_APBDMA_CHANNEL_REGISTER_SIZE 0x20 112 #define TEGRA_APBDMA_CHANNEL_REGISTER_SIZE 0x20
113 113
114 struct tegra_dma; 114 struct tegra_dma;
115 115
116 /* 116 /*
117 * tegra_dma_chip_data: Tegra chip-specific DMA data 117 * tegra_dma_chip_data: Tegra chip-specific DMA data
118 * @nr_channels: Number of channels available in the controller. 118 * @nr_channels: Number of channels available in the controller.
119 * @max_dma_count: Maximum DMA transfer count supported by DMA controller. 119 * @max_dma_count: Maximum DMA transfer count supported by DMA controller.
120 * @support_channel_pause: Support channel-wise pause of DMA. 120 * @support_channel_pause: Support channel-wise pause of DMA.
121 */ 121 */
122 struct tegra_dma_chip_data { 122 struct tegra_dma_chip_data {
123 int nr_channels; 123 int nr_channels;
124 int max_dma_count; 124 int max_dma_count;
125 bool support_channel_pause; 125 bool support_channel_pause;
126 }; 126 };
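An illustrative chip-data initializer for the structure above; the field values are assumptions chosen for the sake of example and are not taken from this diff.

/* Illustrative only: values are assumptions, not part of this commit. */
static const struct tegra_dma_chip_data example_chip_data = {
	.nr_channels		= 16,		/* illustrative value */
	.max_dma_count		= 1024UL * 64,	/* illustrative value */
	.support_channel_pause	= false,
};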
127 127
128 /* DMA channel registers */ 128 /* DMA channel registers */
129 struct tegra_dma_channel_regs { 129 struct tegra_dma_channel_regs {
130 unsigned long csr; 130 unsigned long csr;
131 unsigned long ahb_ptr; 131 unsigned long ahb_ptr;
132 unsigned long apb_ptr; 132 unsigned long apb_ptr;
133 unsigned long ahb_seq; 133 unsigned long ahb_seq;
134 unsigned long apb_seq; 134 unsigned long apb_seq;
135 }; 135 };
136 136
137 /* 137 /*
138 * tegra_dma_sg_req: DMA request details used to configure the hardware. This 138 * tegra_dma_sg_req: DMA request details used to configure the hardware. This
139 * contains the details for one transfer to configure the DMA hw. 139 * contains the details for one transfer to configure the DMA hw.
140 * The client's request for a data transfer can be broken into multiple 140 * The client's request for a data transfer can be broken into multiple
141 * sub-transfers as per the requester details and hw support. 141 * sub-transfers as per the requester details and hw support.
142 * Each sub-transfer is added to the list of transfers and points to the Tegra 142 * Each sub-transfer is added to the list of transfers and points to the Tegra
143 * DMA descriptor which manages the transfer details. 143 * DMA descriptor which manages the transfer details.
144 */ 144 */
145 struct tegra_dma_sg_req { 145 struct tegra_dma_sg_req {
146 struct tegra_dma_channel_regs ch_regs; 146 struct tegra_dma_channel_regs ch_regs;
147 int req_len; 147 int req_len;
148 bool configured; 148 bool configured;
149 bool last_sg; 149 bool last_sg;
150 bool half_done; 150 bool half_done;
151 struct list_head node; 151 struct list_head node;
152 struct tegra_dma_desc *dma_desc; 152 struct tegra_dma_desc *dma_desc;
153 }; 153 };
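A hedged sketch of the splitting described in the comment above: one client request is carved into hardware-sized chunks, each of which would become one tegra_dma_sg_req on the pending list. The helper name and parameters are illustrative only, not part of this driver.

static unsigned int example_count_sub_transfers(unsigned int len,
						unsigned int max_dma_count)
{
	unsigned int offset, chunk, nr_sg_req = 0;

	for (offset = 0; offset < len; offset += chunk) {
		chunk = len - offset;
		if (chunk > max_dma_count)
			chunk = max_dma_count;
		/* one sg_req would describe bytes [offset, offset + chunk) */
		nr_sg_req++;
	}

	return nr_sg_req;
}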
154 154
155 /* 155 /*
156 * tegra_dma_desc: Tegra DMA descriptor which manages the client requests. 156 * tegra_dma_desc: Tegra DMA descriptor which manages the client requests.
157 * This descriptor keeps track of transfer status, callbacks, request 157 * This descriptor keeps track of transfer status, callbacks, request
158 * counts, etc. 158 * counts, etc.
159 */ 159 */
160 struct tegra_dma_desc { 160 struct tegra_dma_desc {
161 struct dma_async_tx_descriptor txd; 161 struct dma_async_tx_descriptor txd;
162 int bytes_requested; 162 int bytes_requested;
163 int bytes_transferred; 163 int bytes_transferred;
164 enum dma_status dma_status; 164 enum dma_status dma_status;
165 struct list_head node; 165 struct list_head node;
166 struct list_head tx_list; 166 struct list_head tx_list;
167 struct list_head cb_node; 167 struct list_head cb_node;
168 int cb_count; 168 int cb_count;
169 }; 169 };
170 170
171 struct tegra_dma_channel; 171 struct tegra_dma_channel;
172 172
173 typedef void (*dma_isr_handler)(struct tegra_dma_channel *tdc, 173 typedef void (*dma_isr_handler)(struct tegra_dma_channel *tdc,
174 bool to_terminate); 174 bool to_terminate);
175 175
176 /* tegra_dma_channel: Channel specific information */ 176 /* tegra_dma_channel: Channel specific information */
177 struct tegra_dma_channel { 177 struct tegra_dma_channel {
178 struct dma_chan dma_chan; 178 struct dma_chan dma_chan;
179 char name[30]; 179 char name[30];
180 bool config_init; 180 bool config_init;
181 int id; 181 int id;
182 int irq; 182 int irq;
183 unsigned long chan_base_offset; 183 unsigned long chan_base_offset;
184 spinlock_t lock; 184 spinlock_t lock;
185 bool busy; 185 bool busy;
186 struct tegra_dma *tdma; 186 struct tegra_dma *tdma;
187 bool cyclic; 187 bool cyclic;
188 188
189 /* Different lists for managing the requests */ 189 /* Different lists for managing the requests */
190 struct list_head free_sg_req; 190 struct list_head free_sg_req;
191 struct list_head pending_sg_req; 191 struct list_head pending_sg_req;
192 struct list_head free_dma_desc; 192 struct list_head free_dma_desc;
193 struct list_head cb_desc; 193 struct list_head cb_desc;
194 194
195 /* ISR handler and tasklet for bottom half of ISR handling */ 195 /* ISR handler and tasklet for bottom half of ISR handling */
196 dma_isr_handler isr_handler; 196 dma_isr_handler isr_handler;
197 struct tasklet_struct tasklet; 197 struct tasklet_struct tasklet;
198 dma_async_tx_callback callback; 198 dma_async_tx_callback callback;
199 void *callback_param; 199 void *callback_param;
200 200
201 /* Channel-slave specific configuration */ 201 /* Channel-slave specific configuration */
202 struct dma_slave_config dma_sconfig; 202 struct dma_slave_config dma_sconfig;
203 struct tegra_dma_channel_regs channel_reg; 203 struct tegra_dma_channel_regs channel_reg;
204 }; 204 };
205 205
206 /* tegra_dma: Tegra DMA specific information */ 206 /* tegra_dma: Tegra DMA specific information */
207 struct tegra_dma { 207 struct tegra_dma {
208 struct dma_device dma_dev; 208 struct dma_device dma_dev;
209 struct device *dev; 209 struct device *dev;
210 struct clk *dma_clk; 210 struct clk *dma_clk;
211 spinlock_t global_lock; 211 spinlock_t global_lock;
212 void __iomem *base_addr; 212 void __iomem *base_addr;
213 const struct tegra_dma_chip_data *chip_data; 213 const struct tegra_dma_chip_data *chip_data;
214 214
215 /* Some registers need to be cached before suspend */ 215 /* Some registers need to be cached before suspend */
216 u32 reg_gen; 216 u32 reg_gen;
217 217
218 /* Last member of the structure */ 218 /* Last member of the structure */
219 struct tegra_dma_channel channels[0]; 219 struct tegra_dma_channel channels[0];
220 }; 220 };
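The channels[0] member above is the usual flexible-array tail, so the controller and all of its channels come from a single allocation. Below is a sketch of that idiom under the assumption of a probe-style context; it is not the driver's actual allocation code.

static struct tegra_dma *example_alloc_tdma(struct platform_device *pdev,
					    const struct tegra_dma_chip_data *cdata)
{
	/* one allocation: tegra_dma header plus every per-channel slot */
	return devm_kzalloc(&pdev->dev,
			    sizeof(struct tegra_dma) +
			    cdata->nr_channels *
			    sizeof(struct tegra_dma_channel),
			    GFP_KERNEL);
}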
221 221
222 static inline void tdma_write(struct tegra_dma *tdma, u32 reg, u32 val) 222 static inline void tdma_write(struct tegra_dma *tdma, u32 reg, u32 val)
223 { 223 {
224 writel(val, tdma->base_addr + reg); 224 writel(val, tdma->base_addr + reg);
225 } 225 }
226 226
227 static inline u32 tdma_read(struct tegra_dma *tdma, u32 reg) 227 static inline u32 tdma_read(struct tegra_dma *tdma, u32 reg)
228 { 228 {
229 return readl(tdma->base_addr + reg); 229 return readl(tdma->base_addr + reg);
230 } 230 }
231 231
232 static inline void tdc_write(struct tegra_dma_channel *tdc, 232 static inline void tdc_write(struct tegra_dma_channel *tdc,
233 u32 reg, u32 val) 233 u32 reg, u32 val)
234 { 234 {
235 writel(val, tdc->tdma->base_addr + tdc->chan_base_offset + reg); 235 writel(val, tdc->tdma->base_addr + tdc->chan_base_offset + reg);
236 } 236 }
237 237
238 static inline u32 tdc_read(struct tegra_dma_channel *tdc, u32 reg) 238 static inline u32 tdc_read(struct tegra_dma_channel *tdc, u32 reg)
239 { 239 {
240 return readl(tdc->tdma->base_addr + tdc->chan_base_offset + reg); 240 return readl(tdc->tdma->base_addr + tdc->chan_base_offset + reg);
241 } 241 }
242 242
243 static inline struct tegra_dma_channel *to_tegra_dma_chan(struct dma_chan *dc) 243 static inline struct tegra_dma_channel *to_tegra_dma_chan(struct dma_chan *dc)
244 { 244 {
245 return container_of(dc, struct tegra_dma_channel, dma_chan); 245 return container_of(dc, struct tegra_dma_channel, dma_chan);
246 } 246 }
247 247
248 static inline struct tegra_dma_desc *txd_to_tegra_dma_desc( 248 static inline struct tegra_dma_desc *txd_to_tegra_dma_desc(
249 struct dma_async_tx_descriptor *td) 249 struct dma_async_tx_descriptor *td)
250 { 250 {
251 return container_of(td, struct tegra_dma_desc, txd); 251 return container_of(td, struct tegra_dma_desc, txd);
252 } 252 }
253 253
254 static inline struct device *tdc2dev(struct tegra_dma_channel *tdc) 254 static inline struct device *tdc2dev(struct tegra_dma_channel *tdc)
255 { 255 {
256 return &tdc->dma_chan.dev->device; 256 return &tdc->dma_chan.dev->device;
257 } 257 }
258 258
259 static dma_cookie_t tegra_dma_tx_submit(struct dma_async_tx_descriptor *tx); 259 static dma_cookie_t tegra_dma_tx_submit(struct dma_async_tx_descriptor *tx);
260 static int tegra_dma_runtime_suspend(struct device *dev); 260 static int tegra_dma_runtime_suspend(struct device *dev);
261 static int tegra_dma_runtime_resume(struct device *dev); 261 static int tegra_dma_runtime_resume(struct device *dev);
262 262
263 /* Get a DMA desc from the free list; if none is available, allocate one. */ 263 /* Get a DMA desc from the free list; if none is available, allocate one. */
264 static struct tegra_dma_desc *tegra_dma_desc_get( 264 static struct tegra_dma_desc *tegra_dma_desc_get(
265 struct tegra_dma_channel *tdc) 265 struct tegra_dma_channel *tdc)
266 { 266 {
267 struct tegra_dma_desc *dma_desc; 267 struct tegra_dma_desc *dma_desc;
268 unsigned long flags; 268 unsigned long flags;
269 269
270 spin_lock_irqsave(&tdc->lock, flags); 270 spin_lock_irqsave(&tdc->lock, flags);
271 271
272 /* Do not reuse descs that are still waiting for ack */ 272 /* Do not reuse descs that are still waiting for ack */
273 list_for_each_entry(dma_desc, &tdc->free_dma_desc, node) { 273 list_for_each_entry(dma_desc, &tdc->free_dma_desc, node) {
274 if (async_tx_test_ack(&dma_desc->txd)) { 274 if (async_tx_test_ack(&dma_desc->txd)) {
275 list_del(&dma_desc->node); 275 list_del(&dma_desc->node);
276 spin_unlock_irqrestore(&tdc->lock, flags); 276 spin_unlock_irqrestore(&tdc->lock, flags);
277 dma_desc->txd.flags = 0; 277 dma_desc->txd.flags = 0;
278 return dma_desc; 278 return dma_desc;
279 } 279 }
280 } 280 }
281 281
282 spin_unlock_irqrestore(&tdc->lock, flags); 282 spin_unlock_irqrestore(&tdc->lock, flags);
283 283
284 /* Allocate DMA desc */ 284 /* Allocate DMA desc */
285 dma_desc = kzalloc(sizeof(*dma_desc), GFP_ATOMIC); 285 dma_desc = kzalloc(sizeof(*dma_desc), GFP_ATOMIC);
286 if (!dma_desc) { 286 if (!dma_desc) {
287 dev_err(tdc2dev(tdc), "dma_desc alloc failed\n"); 287 dev_err(tdc2dev(tdc), "dma_desc alloc failed\n");
288 return NULL; 288 return NULL;
289 } 289 }
290 290
291 dma_async_tx_descriptor_init(&dma_desc->txd, &tdc->dma_chan); 291 dma_async_tx_descriptor_init(&dma_desc->txd, &tdc->dma_chan);
292 dma_desc->txd.tx_submit = tegra_dma_tx_submit; 292 dma_desc->txd.tx_submit = tegra_dma_tx_submit;
293 dma_desc->txd.flags = 0; 293 dma_desc->txd.flags = 0;
294 return dma_desc; 294 return dma_desc;
295 } 295 }
296 296
297 static void tegra_dma_desc_put(struct tegra_dma_channel *tdc, 297 static void tegra_dma_desc_put(struct tegra_dma_channel *tdc,
298 struct tegra_dma_desc *dma_desc) 298 struct tegra_dma_desc *dma_desc)
299 { 299 {
300 unsigned long flags; 300 unsigned long flags;
301 301
302 spin_lock_irqsave(&tdc->lock, flags); 302 spin_lock_irqsave(&tdc->lock, flags);
303 if (!list_empty(&dma_desc->tx_list)) 303 if (!list_empty(&dma_desc->tx_list))
304 list_splice_init(&dma_desc->tx_list, &tdc->free_sg_req); 304 list_splice_init(&dma_desc->tx_list, &tdc->free_sg_req);
305 list_add_tail(&dma_desc->node, &tdc->free_dma_desc); 305 list_add_tail(&dma_desc->node, &tdc->free_dma_desc);
306 spin_unlock_irqrestore(&tdc->lock, flags); 306 spin_unlock_irqrestore(&tdc->lock, flags);
307 } 307 }
308 308
309 static struct tegra_dma_sg_req *tegra_dma_sg_req_get( 309 static struct tegra_dma_sg_req *tegra_dma_sg_req_get(
310 struct tegra_dma_channel *tdc) 310 struct tegra_dma_channel *tdc)
311 { 311 {
312 struct tegra_dma_sg_req *sg_req = NULL; 312 struct tegra_dma_sg_req *sg_req = NULL;
313 unsigned long flags; 313 unsigned long flags;
314 314
315 spin_lock_irqsave(&tdc->lock, flags); 315 spin_lock_irqsave(&tdc->lock, flags);
316 if (!list_empty(&tdc->free_sg_req)) { 316 if (!list_empty(&tdc->free_sg_req)) {
317 sg_req = list_first_entry(&tdc->free_sg_req, 317 sg_req = list_first_entry(&tdc->free_sg_req,
318 typeof(*sg_req), node); 318 typeof(*sg_req), node);
319 list_del(&sg_req->node); 319 list_del(&sg_req->node);
320 spin_unlock_irqrestore(&tdc->lock, flags); 320 spin_unlock_irqrestore(&tdc->lock, flags);
321 return sg_req; 321 return sg_req;
322 } 322 }
323 spin_unlock_irqrestore(&tdc->lock, flags); 323 spin_unlock_irqrestore(&tdc->lock, flags);
324 324
325 sg_req = kzalloc(sizeof(struct tegra_dma_sg_req), GFP_ATOMIC); 325 sg_req = kzalloc(sizeof(struct tegra_dma_sg_req), GFP_ATOMIC);
326 if (!sg_req) 326 if (!sg_req)
327 dev_err(tdc2dev(tdc), "sg_req alloc failed\n"); 327 dev_err(tdc2dev(tdc), "sg_req alloc failed\n");
328 return sg_req; 328 return sg_req;
329 } 329 }
330 330
331 static int tegra_dma_slave_config(struct dma_chan *dc, 331 static int tegra_dma_slave_config(struct dma_chan *dc,
332 struct dma_slave_config *sconfig) 332 struct dma_slave_config *sconfig)
333 { 333 {
334 struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); 334 struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
335 335
336 if (!list_empty(&tdc->pending_sg_req)) { 336 if (!list_empty(&tdc->pending_sg_req)) {
337 dev_err(tdc2dev(tdc), "Configuration not allowed\n"); 337 dev_err(tdc2dev(tdc), "Configuration not allowed\n");
338 return -EBUSY; 338 return -EBUSY;
339 } 339 }
340 340
341 memcpy(&tdc->dma_sconfig, sconfig, sizeof(*sconfig)); 341 memcpy(&tdc->dma_sconfig, sconfig, sizeof(*sconfig));
342 tdc->config_init = true; 342 tdc->config_init = true;
343 return 0; 343 return 0;
344 } 344 }
345 345
346 static void tegra_dma_global_pause(struct tegra_dma_channel *tdc, 346 static void tegra_dma_global_pause(struct tegra_dma_channel *tdc,
347 bool wait_for_burst_complete) 347 bool wait_for_burst_complete)
348 { 348 {
349 struct tegra_dma *tdma = tdc->tdma; 349 struct tegra_dma *tdma = tdc->tdma;
350 350
351 spin_lock(&tdma->global_lock); 351 spin_lock(&tdma->global_lock);
352 tdma_write(tdma, TEGRA_APBDMA_GENERAL, 0); 352 tdma_write(tdma, TEGRA_APBDMA_GENERAL, 0);
353 if (wait_for_burst_complete) 353 if (wait_for_burst_complete)
354 udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME); 354 udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME);
355 } 355 }
356 356
357 static void tegra_dma_global_resume(struct tegra_dma_channel *tdc) 357 static void tegra_dma_global_resume(struct tegra_dma_channel *tdc)
358 { 358 {
359 struct tegra_dma *tdma = tdc->tdma; 359 struct tegra_dma *tdma = tdc->tdma;
360 360
361 tdma_write(tdma, TEGRA_APBDMA_GENERAL, TEGRA_APBDMA_GENERAL_ENABLE); 361 tdma_write(tdma, TEGRA_APBDMA_GENERAL, TEGRA_APBDMA_GENERAL_ENABLE);
362 spin_unlock(&tdma->global_lock); 362 spin_unlock(&tdma->global_lock);
363 } 363 }
364 364
365 static void tegra_dma_pause(struct tegra_dma_channel *tdc, 365 static void tegra_dma_pause(struct tegra_dma_channel *tdc,
366 bool wait_for_burst_complete) 366 bool wait_for_burst_complete)
367 { 367 {
368 struct tegra_dma *tdma = tdc->tdma; 368 struct tegra_dma *tdma = tdc->tdma;
369 369
370 if (tdma->chip_data->support_channel_pause) { 370 if (tdma->chip_data->support_channel_pause) {
371 tdc_write(tdc, TEGRA_APBDMA_CHAN_CSRE, 371 tdc_write(tdc, TEGRA_APBDMA_CHAN_CSRE,
372 TEGRA_APBDMA_CHAN_CSRE_PAUSE); 372 TEGRA_APBDMA_CHAN_CSRE_PAUSE);
373 if (wait_for_burst_complete) 373 if (wait_for_burst_complete)
374 udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME); 374 udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME);
375 } else { 375 } else {
376 tegra_dma_global_pause(tdc, wait_for_burst_complete); 376 tegra_dma_global_pause(tdc, wait_for_burst_complete);
377 } 377 }
378 } 378 }
379 379
380 static void tegra_dma_resume(struct tegra_dma_channel *tdc) 380 static void tegra_dma_resume(struct tegra_dma_channel *tdc)
381 { 381 {
382 struct tegra_dma *tdma = tdc->tdma; 382 struct tegra_dma *tdma = tdc->tdma;
383 383
384 if (tdma->chip_data->support_channel_pause) { 384 if (tdma->chip_data->support_channel_pause) {
385 tdc_write(tdc, TEGRA_APBDMA_CHAN_CSRE, 0); 385 tdc_write(tdc, TEGRA_APBDMA_CHAN_CSRE, 0);
386 } else { 386 } else {
387 tegra_dma_global_resume(tdc); 387 tegra_dma_global_resume(tdc);
388 } 388 }
389 } 389 }
390 390
391 static void tegra_dma_stop(struct tegra_dma_channel *tdc) 391 static void tegra_dma_stop(struct tegra_dma_channel *tdc)
392 { 392 {
393 u32 csr; 393 u32 csr;
394 u32 status; 394 u32 status;
395 395
396 /* Disable interrupts */ 396 /* Disable interrupts */
397 csr = tdc_read(tdc, TEGRA_APBDMA_CHAN_CSR); 397 csr = tdc_read(tdc, TEGRA_APBDMA_CHAN_CSR);
398 csr &= ~TEGRA_APBDMA_CSR_IE_EOC; 398 csr &= ~TEGRA_APBDMA_CSR_IE_EOC;
399 tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, csr); 399 tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, csr);
400 400
401 /* Disable DMA */ 401 /* Disable DMA */
402 csr &= ~TEGRA_APBDMA_CSR_ENB; 402 csr &= ~TEGRA_APBDMA_CSR_ENB;
403 tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, csr); 403 tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, csr);
404 404
405 /* Clear interrupt status if it is there */ 405 /* Clear interrupt status if it is there */
406 status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS); 406 status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
407 if (status & TEGRA_APBDMA_STATUS_ISE_EOC) { 407 if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
408 dev_dbg(tdc2dev(tdc), "%s():clearing interrupt\n", __func__); 408 dev_dbg(tdc2dev(tdc), "%s():clearing interrupt\n", __func__);
409 tdc_write(tdc, TEGRA_APBDMA_CHAN_STATUS, status); 409 tdc_write(tdc, TEGRA_APBDMA_CHAN_STATUS, status);
410 } 410 }
411 tdc->busy = false; 411 tdc->busy = false;
412 } 412 }
413 413
414 static void tegra_dma_start(struct tegra_dma_channel *tdc, 414 static void tegra_dma_start(struct tegra_dma_channel *tdc,
415 struct tegra_dma_sg_req *sg_req) 415 struct tegra_dma_sg_req *sg_req)
416 { 416 {
417 struct tegra_dma_channel_regs *ch_regs = &sg_req->ch_regs; 417 struct tegra_dma_channel_regs *ch_regs = &sg_req->ch_regs;
418 418
419 tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, ch_regs->csr); 419 tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, ch_regs->csr);
420 tdc_write(tdc, TEGRA_APBDMA_CHAN_APBSEQ, ch_regs->apb_seq); 420 tdc_write(tdc, TEGRA_APBDMA_CHAN_APBSEQ, ch_regs->apb_seq);
421 tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, ch_regs->apb_ptr); 421 tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, ch_regs->apb_ptr);
422 tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBSEQ, ch_regs->ahb_seq); 422 tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBSEQ, ch_regs->ahb_seq);
423 tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, ch_regs->ahb_ptr); 423 tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, ch_regs->ahb_ptr);
424 424
425 /* Start DMA */ 425 /* Start DMA */
426 tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, 426 tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR,
427 ch_regs->csr | TEGRA_APBDMA_CSR_ENB); 427 ch_regs->csr | TEGRA_APBDMA_CSR_ENB);
428 } 428 }
429 429
430 static void tegra_dma_configure_for_next(struct tegra_dma_channel *tdc, 430 static void tegra_dma_configure_for_next(struct tegra_dma_channel *tdc,
431 struct tegra_dma_sg_req *nsg_req) 431 struct tegra_dma_sg_req *nsg_req)
432 { 432 {
433 unsigned long status; 433 unsigned long status;
434 434
435 /* 435 /*
436 * The DMA controller reloads the new configuration for the next transfer 436 * The DMA controller reloads the new configuration for the next transfer
437 * after the last burst of the current transfer completes. 437 * after the last burst of the current transfer completes.
438 * If there is no IEC status then this makes sure that the last burst 438 * If there is no IEC status then this makes sure that the last burst
439 * has not completed. There may be a case where the last burst is in 439 * has not completed. There may be a case where the last burst is in
440 * flight and so can complete, but because the DMA is paused it will 440 * flight and so can complete, but because the DMA is paused it will
441 * neither generate an interrupt nor reload the new 441 * neither generate an interrupt nor reload the new
442 * configuration. 442 * configuration.
443 * If there is already an IEC status then the interrupt handler needs to 443 * If there is already an IEC status then the interrupt handler needs to
444 * load the new configuration. 444 * load the new configuration.
445 */ 445 */
446 tegra_dma_pause(tdc, false); 446 tegra_dma_pause(tdc, false);
447 status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS); 447 status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
448 448
449 /* 449 /*
450 * If interrupt is pending then do nothing as the ISR will handle 450 * If interrupt is pending then do nothing as the ISR will handle
451 * the programing for new request. 451 * the programing for new request.
452 */ 452 */
453 if (status & TEGRA_APBDMA_STATUS_ISE_EOC) { 453 if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
454 dev_err(tdc2dev(tdc), 454 dev_err(tdc2dev(tdc),
455 "Skipping new configuration as interrupt is pending\n"); 455 "Skipping new configuration as interrupt is pending\n");
456 tegra_dma_resume(tdc); 456 tegra_dma_resume(tdc);
457 return; 457 return;
458 } 458 }
459 459
460 /* Safe to program new configuration */ 460 /* Safe to program new configuration */
461 tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, nsg_req->ch_regs.apb_ptr); 461 tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, nsg_req->ch_regs.apb_ptr);
462 tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, nsg_req->ch_regs.ahb_ptr); 462 tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, nsg_req->ch_regs.ahb_ptr);
463 tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, 463 tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR,
464 nsg_req->ch_regs.csr | TEGRA_APBDMA_CSR_ENB); 464 nsg_req->ch_regs.csr | TEGRA_APBDMA_CSR_ENB);
465 nsg_req->configured = true; 465 nsg_req->configured = true;
466 466
467 tegra_dma_resume(tdc); 467 tegra_dma_resume(tdc);
468 } 468 }
469 469
470 static void tdc_start_head_req(struct tegra_dma_channel *tdc) 470 static void tdc_start_head_req(struct tegra_dma_channel *tdc)
471 { 471 {
472 struct tegra_dma_sg_req *sg_req; 472 struct tegra_dma_sg_req *sg_req;
473 473
474 if (list_empty(&tdc->pending_sg_req)) 474 if (list_empty(&tdc->pending_sg_req))
475 return; 475 return;
476 476
477 sg_req = list_first_entry(&tdc->pending_sg_req, 477 sg_req = list_first_entry(&tdc->pending_sg_req,
478 typeof(*sg_req), node); 478 typeof(*sg_req), node);
479 tegra_dma_start(tdc, sg_req); 479 tegra_dma_start(tdc, sg_req);
480 sg_req->configured = true; 480 sg_req->configured = true;
481 tdc->busy = true; 481 tdc->busy = true;
482 } 482 }
483 483
484 static void tdc_configure_next_head_desc(struct tegra_dma_channel *tdc) 484 static void tdc_configure_next_head_desc(struct tegra_dma_channel *tdc)
485 { 485 {
486 struct tegra_dma_sg_req *hsgreq; 486 struct tegra_dma_sg_req *hsgreq;
487 struct tegra_dma_sg_req *hnsgreq; 487 struct tegra_dma_sg_req *hnsgreq;
488 488
489 if (list_empty(&tdc->pending_sg_req)) 489 if (list_empty(&tdc->pending_sg_req))
490 return; 490 return;
491 491
492 hsgreq = list_first_entry(&tdc->pending_sg_req, typeof(*hsgreq), node); 492 hsgreq = list_first_entry(&tdc->pending_sg_req, typeof(*hsgreq), node);
493 if (!list_is_last(&hsgreq->node, &tdc->pending_sg_req)) { 493 if (!list_is_last(&hsgreq->node, &tdc->pending_sg_req)) {
494 hnsgreq = list_first_entry(&hsgreq->node, 494 hnsgreq = list_first_entry(&hsgreq->node,
495 typeof(*hnsgreq), node); 495 typeof(*hnsgreq), node);
496 tegra_dma_configure_for_next(tdc, hnsgreq); 496 tegra_dma_configure_for_next(tdc, hnsgreq);
497 } 497 }
498 } 498 }
499 499
500 static inline int get_current_xferred_count(struct tegra_dma_channel *tdc, 500 static inline int get_current_xferred_count(struct tegra_dma_channel *tdc,
501 struct tegra_dma_sg_req *sg_req, unsigned long status) 501 struct tegra_dma_sg_req *sg_req, unsigned long status)
502 { 502 {
503 return sg_req->req_len - (status & TEGRA_APBDMA_STATUS_COUNT_MASK) - 4; 503 return sg_req->req_len - (status & TEGRA_APBDMA_STATUS_COUNT_MASK) - 4;
504 } 504 }
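A worked example of the formula above, with illustrative numbers only:

/*
 * Example (illustrative values): req_len = 4096 and a masked STATUS
 * count of 2044 give 4096 - 2044 - 4 = 2048 bytes reported as
 * already transferred.
 */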
505 505
506 static void tegra_dma_abort_all(struct tegra_dma_channel *tdc) 506 static void tegra_dma_abort_all(struct tegra_dma_channel *tdc)
507 { 507 {
508 struct tegra_dma_sg_req *sgreq; 508 struct tegra_dma_sg_req *sgreq;
509 struct tegra_dma_desc *dma_desc; 509 struct tegra_dma_desc *dma_desc;
510 510
511 while (!list_empty(&tdc->pending_sg_req)) { 511 while (!list_empty(&tdc->pending_sg_req)) {
512 sgreq = list_first_entry(&tdc->pending_sg_req, 512 sgreq = list_first_entry(&tdc->pending_sg_req,
513 typeof(*sgreq), node); 513 typeof(*sgreq), node);
514 list_move_tail(&sgreq->node, &tdc->free_sg_req); 514 list_move_tail(&sgreq->node, &tdc->free_sg_req);
515 if (sgreq->last_sg) { 515 if (sgreq->last_sg) {
516 dma_desc = sgreq->dma_desc; 516 dma_desc = sgreq->dma_desc;
517 dma_desc->dma_status = DMA_ERROR; 517 dma_desc->dma_status = DMA_ERROR;
518 list_add_tail(&dma_desc->node, &tdc->free_dma_desc); 518 list_add_tail(&dma_desc->node, &tdc->free_dma_desc);
519 519
520 /* Add to the cb list if it is not already there. */ 520 /* Add to the cb list if it is not already there. */
521 if (!dma_desc->cb_count) 521 if (!dma_desc->cb_count)
522 list_add_tail(&dma_desc->cb_node, 522 list_add_tail(&dma_desc->cb_node,
523 &tdc->cb_desc); 523 &tdc->cb_desc);
524 dma_desc->cb_count++; 524 dma_desc->cb_count++;
525 } 525 }
526 } 526 }
527 tdc->isr_handler = NULL; 527 tdc->isr_handler = NULL;
528 } 528 }
529 529
530 static bool handle_continuous_head_request(struct tegra_dma_channel *tdc, 530 static bool handle_continuous_head_request(struct tegra_dma_channel *tdc,
531 struct tegra_dma_sg_req *last_sg_req, bool to_terminate) 531 struct tegra_dma_sg_req *last_sg_req, bool to_terminate)
532 { 532 {
533 struct tegra_dma_sg_req *hsgreq = NULL; 533 struct tegra_dma_sg_req *hsgreq = NULL;
534 534
535 if (list_empty(&tdc->pending_sg_req)) { 535 if (list_empty(&tdc->pending_sg_req)) {
536 dev_err(tdc2dev(tdc), "Dma is running without req\n"); 536 dev_err(tdc2dev(tdc), "Dma is running without req\n");
537 tegra_dma_stop(tdc); 537 tegra_dma_stop(tdc);
538 return false; 538 return false;
539 } 539 }
540 540
541 /* 541 /*
542 * Check that the head req on the list is in flight. 542 * Check that the head req on the list is in flight.
543 * If it is not in flight then abort the transfer, as 543 * If it is not in flight then abort the transfer, as
544 * looping of the transfer cannot continue. 544 * looping of the transfer cannot continue.
545 */ 545 */
546 hsgreq = list_first_entry(&tdc->pending_sg_req, typeof(*hsgreq), node); 546 hsgreq = list_first_entry(&tdc->pending_sg_req, typeof(*hsgreq), node);
547 if (!hsgreq->configured) { 547 if (!hsgreq->configured) {
548 tegra_dma_stop(tdc); 548 tegra_dma_stop(tdc);
549 dev_err(tdc2dev(tdc), "Error in dma transfer, aborting dma\n"); 549 dev_err(tdc2dev(tdc), "Error in dma transfer, aborting dma\n");
550 tegra_dma_abort_all(tdc); 550 tegra_dma_abort_all(tdc);
551 return false; 551 return false;
552 } 552 }
553 553
554 /* Configure next request */ 554 /* Configure next request */
555 if (!to_terminate) 555 if (!to_terminate)
556 tdc_configure_next_head_desc(tdc); 556 tdc_configure_next_head_desc(tdc);
557 return true; 557 return true;
558 } 558 }
559 559
560 static void handle_once_dma_done(struct tegra_dma_channel *tdc, 560 static void handle_once_dma_done(struct tegra_dma_channel *tdc,
561 bool to_terminate) 561 bool to_terminate)
562 { 562 {
563 struct tegra_dma_sg_req *sgreq; 563 struct tegra_dma_sg_req *sgreq;
564 struct tegra_dma_desc *dma_desc; 564 struct tegra_dma_desc *dma_desc;
565 565
566 tdc->busy = false; 566 tdc->busy = false;
567 sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq), node); 567 sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq), node);
568 dma_desc = sgreq->dma_desc; 568 dma_desc = sgreq->dma_desc;
569 dma_desc->bytes_transferred += sgreq->req_len; 569 dma_desc->bytes_transferred += sgreq->req_len;
570 570
571 list_del(&sgreq->node); 571 list_del(&sgreq->node);
572 if (sgreq->last_sg) { 572 if (sgreq->last_sg) {
573 dma_desc->dma_status = DMA_SUCCESS; 573 dma_desc->dma_status = DMA_COMPLETE;
574 dma_cookie_complete(&dma_desc->txd); 574 dma_cookie_complete(&dma_desc->txd);
575 if (!dma_desc->cb_count) 575 if (!dma_desc->cb_count)
576 list_add_tail(&dma_desc->cb_node, &tdc->cb_desc); 576 list_add_tail(&dma_desc->cb_node, &tdc->cb_desc);
577 dma_desc->cb_count++; 577 dma_desc->cb_count++;
578 list_add_tail(&dma_desc->node, &tdc->free_dma_desc); 578 list_add_tail(&dma_desc->node, &tdc->free_dma_desc);
579 } 579 }
580 list_add_tail(&sgreq->node, &tdc->free_sg_req); 580 list_add_tail(&sgreq->node, &tdc->free_sg_req);
581 581
582 /* Do not start DMA if it is going to be terminated */ 582 /* Do not start DMA if it is going to be terminated */
583 if (to_terminate || list_empty(&tdc->pending_sg_req)) 583 if (to_terminate || list_empty(&tdc->pending_sg_req))
584 return; 584 return;
585 585
586 tdc_start_head_req(tdc); 586 tdc_start_head_req(tdc);
587 return; 587 return;
588 } 588 }
589 589
590 static void handle_cont_sngl_cycle_dma_done(struct tegra_dma_channel *tdc, 590 static void handle_cont_sngl_cycle_dma_done(struct tegra_dma_channel *tdc,
591 bool to_terminate) 591 bool to_terminate)
592 { 592 {
593 struct tegra_dma_sg_req *sgreq; 593 struct tegra_dma_sg_req *sgreq;
594 struct tegra_dma_desc *dma_desc; 594 struct tegra_dma_desc *dma_desc;
595 bool st; 595 bool st;
596 596
597 sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq), node); 597 sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq), node);
598 dma_desc = sgreq->dma_desc; 598 dma_desc = sgreq->dma_desc;
599 dma_desc->bytes_transferred += sgreq->req_len; 599 dma_desc->bytes_transferred += sgreq->req_len;
600 600
601 /* Callback needs to be called */ 601 /* Callback needs to be called */
602 if (!dma_desc->cb_count) 602 if (!dma_desc->cb_count)
603 list_add_tail(&dma_desc->cb_node, &tdc->cb_desc); 603 list_add_tail(&dma_desc->cb_node, &tdc->cb_desc);
604 dma_desc->cb_count++; 604 dma_desc->cb_count++;
605 605
606 /* If not last req then put at end of pending list */ 606 /* If not last req then put at end of pending list */
607 if (!list_is_last(&sgreq->node, &tdc->pending_sg_req)) { 607 if (!list_is_last(&sgreq->node, &tdc->pending_sg_req)) {
608 list_move_tail(&sgreq->node, &tdc->pending_sg_req); 608 list_move_tail(&sgreq->node, &tdc->pending_sg_req);
609 sgreq->configured = false; 609 sgreq->configured = false;
610 st = handle_continuous_head_request(tdc, sgreq, to_terminate); 610 st = handle_continuous_head_request(tdc, sgreq, to_terminate);
611 if (!st) 611 if (!st)
612 dma_desc->dma_status = DMA_ERROR; 612 dma_desc->dma_status = DMA_ERROR;
613 } 613 }
614 return; 614 return;
615 } 615 }
616 616
617 static void tegra_dma_tasklet(unsigned long data) 617 static void tegra_dma_tasklet(unsigned long data)
618 { 618 {
619 struct tegra_dma_channel *tdc = (struct tegra_dma_channel *)data; 619 struct tegra_dma_channel *tdc = (struct tegra_dma_channel *)data;
620 dma_async_tx_callback callback = NULL; 620 dma_async_tx_callback callback = NULL;
621 void *callback_param = NULL; 621 void *callback_param = NULL;
622 struct tegra_dma_desc *dma_desc; 622 struct tegra_dma_desc *dma_desc;
623 unsigned long flags; 623 unsigned long flags;
624 int cb_count; 624 int cb_count;
625 625
626 spin_lock_irqsave(&tdc->lock, flags); 626 spin_lock_irqsave(&tdc->lock, flags);
627 while (!list_empty(&tdc->cb_desc)) { 627 while (!list_empty(&tdc->cb_desc)) {
628 dma_desc = list_first_entry(&tdc->cb_desc, 628 dma_desc = list_first_entry(&tdc->cb_desc,
629 typeof(*dma_desc), cb_node); 629 typeof(*dma_desc), cb_node);
630 list_del(&dma_desc->cb_node); 630 list_del(&dma_desc->cb_node);
631 callback = dma_desc->txd.callback; 631 callback = dma_desc->txd.callback;
632 callback_param = dma_desc->txd.callback_param; 632 callback_param = dma_desc->txd.callback_param;
633 cb_count = dma_desc->cb_count; 633 cb_count = dma_desc->cb_count;
634 dma_desc->cb_count = 0; 634 dma_desc->cb_count = 0;
635 spin_unlock_irqrestore(&tdc->lock, flags); 635 spin_unlock_irqrestore(&tdc->lock, flags);
636 while (cb_count-- && callback) 636 while (cb_count-- && callback)
637 callback(callback_param); 637 callback(callback_param);
638 spin_lock_irqsave(&tdc->lock, flags); 638 spin_lock_irqsave(&tdc->lock, flags);
639 } 639 }
640 spin_unlock_irqrestore(&tdc->lock, flags); 640 spin_unlock_irqrestore(&tdc->lock, flags);
641 } 641 }
642 642
643 static irqreturn_t tegra_dma_isr(int irq, void *dev_id) 643 static irqreturn_t tegra_dma_isr(int irq, void *dev_id)
644 { 644 {
645 struct tegra_dma_channel *tdc = dev_id; 645 struct tegra_dma_channel *tdc = dev_id;
646 unsigned long status; 646 unsigned long status;
647 unsigned long flags; 647 unsigned long flags;
648 648
649 spin_lock_irqsave(&tdc->lock, flags); 649 spin_lock_irqsave(&tdc->lock, flags);
650 650
651 status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS); 651 status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
652 if (status & TEGRA_APBDMA_STATUS_ISE_EOC) { 652 if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
653 tdc_write(tdc, TEGRA_APBDMA_CHAN_STATUS, status); 653 tdc_write(tdc, TEGRA_APBDMA_CHAN_STATUS, status);
654 tdc->isr_handler(tdc, false); 654 tdc->isr_handler(tdc, false);
655 tasklet_schedule(&tdc->tasklet); 655 tasklet_schedule(&tdc->tasklet);
656 spin_unlock_irqrestore(&tdc->lock, flags); 656 spin_unlock_irqrestore(&tdc->lock, flags);
657 return IRQ_HANDLED; 657 return IRQ_HANDLED;
658 } 658 }
659 659
660 spin_unlock_irqrestore(&tdc->lock, flags); 660 spin_unlock_irqrestore(&tdc->lock, flags);
661 dev_info(tdc2dev(tdc), 661 dev_info(tdc2dev(tdc),
662 "Interrupt already served status 0x%08lx\n", status); 662 "Interrupt already served status 0x%08lx\n", status);
663 return IRQ_NONE; 663 return IRQ_NONE;
664 } 664 }
665 665
666 static dma_cookie_t tegra_dma_tx_submit(struct dma_async_tx_descriptor *txd) 666 static dma_cookie_t tegra_dma_tx_submit(struct dma_async_tx_descriptor *txd)
667 { 667 {
668 struct tegra_dma_desc *dma_desc = txd_to_tegra_dma_desc(txd); 668 struct tegra_dma_desc *dma_desc = txd_to_tegra_dma_desc(txd);
669 struct tegra_dma_channel *tdc = to_tegra_dma_chan(txd->chan); 669 struct tegra_dma_channel *tdc = to_tegra_dma_chan(txd->chan);
670 unsigned long flags; 670 unsigned long flags;
671 dma_cookie_t cookie; 671 dma_cookie_t cookie;
672 672
673 spin_lock_irqsave(&tdc->lock, flags); 673 spin_lock_irqsave(&tdc->lock, flags);
674 dma_desc->dma_status = DMA_IN_PROGRESS; 674 dma_desc->dma_status = DMA_IN_PROGRESS;
675 cookie = dma_cookie_assign(&dma_desc->txd); 675 cookie = dma_cookie_assign(&dma_desc->txd);
676 list_splice_tail_init(&dma_desc->tx_list, &tdc->pending_sg_req); 676 list_splice_tail_init(&dma_desc->tx_list, &tdc->pending_sg_req);
677 spin_unlock_irqrestore(&tdc->lock, flags); 677 spin_unlock_irqrestore(&tdc->lock, flags);
678 return cookie; 678 return cookie;
679 } 679 }
680 680
681 static void tegra_dma_issue_pending(struct dma_chan *dc) 681 static void tegra_dma_issue_pending(struct dma_chan *dc)
682 { 682 {
683 struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); 683 struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
684 unsigned long flags; 684 unsigned long flags;
685 685
686 spin_lock_irqsave(&tdc->lock, flags); 686 spin_lock_irqsave(&tdc->lock, flags);
687 if (list_empty(&tdc->pending_sg_req)) { 687 if (list_empty(&tdc->pending_sg_req)) {
688 dev_err(tdc2dev(tdc), "No DMA request\n"); 688 dev_err(tdc2dev(tdc), "No DMA request\n");
689 goto end; 689 goto end;
690 } 690 }
691 if (!tdc->busy) { 691 if (!tdc->busy) {
692 tdc_start_head_req(tdc); 692 tdc_start_head_req(tdc);
693 693
694 /* Continuous single mode: Configure next req */ 694 /* Continuous single mode: Configure next req */
695 if (tdc->cyclic) { 695 if (tdc->cyclic) {
696 /* 696 /*
697 * Wait for 1 burst time for configure DMA for 697 * Wait for 1 burst time for configure DMA for
698 * next transfer. 698 * next transfer.
699 */ 699 */
700 udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME); 700 udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME);
701 tdc_configure_next_head_desc(tdc); 701 tdc_configure_next_head_desc(tdc);
702 } 702 }
703 } 703 }
704 end: 704 end:
705 spin_unlock_irqrestore(&tdc->lock, flags); 705 spin_unlock_irqrestore(&tdc->lock, flags);
706 return; 706 return;
707 } 707 }
708 708
709 static void tegra_dma_terminate_all(struct dma_chan *dc) 709 static void tegra_dma_terminate_all(struct dma_chan *dc)
710 { 710 {
711 struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); 711 struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
712 struct tegra_dma_sg_req *sgreq; 712 struct tegra_dma_sg_req *sgreq;
713 struct tegra_dma_desc *dma_desc; 713 struct tegra_dma_desc *dma_desc;
714 unsigned long flags; 714 unsigned long flags;
715 unsigned long status; 715 unsigned long status;
716 bool was_busy; 716 bool was_busy;
717 717
718 spin_lock_irqsave(&tdc->lock, flags); 718 spin_lock_irqsave(&tdc->lock, flags);
719 if (list_empty(&tdc->pending_sg_req)) { 719 if (list_empty(&tdc->pending_sg_req)) {
720 spin_unlock_irqrestore(&tdc->lock, flags); 720 spin_unlock_irqrestore(&tdc->lock, flags);
721 return; 721 return;
722 } 722 }
723 723
724 if (!tdc->busy) 724 if (!tdc->busy)
725 goto skip_dma_stop; 725 goto skip_dma_stop;
726 726
727 /* Pause DMA before checking the queue status */ 727 /* Pause DMA before checking the queue status */
728 tegra_dma_pause(tdc, true); 728 tegra_dma_pause(tdc, true);
729 729
730 status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS); 730 status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
731 if (status & TEGRA_APBDMA_STATUS_ISE_EOC) { 731 if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
732 dev_dbg(tdc2dev(tdc), "%s():handling isr\n", __func__); 732 dev_dbg(tdc2dev(tdc), "%s():handling isr\n", __func__);
733 tdc->isr_handler(tdc, true); 733 tdc->isr_handler(tdc, true);
734 status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS); 734 status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
735 } 735 }
736 736
737 was_busy = tdc->busy; 737 was_busy = tdc->busy;
738 tegra_dma_stop(tdc); 738 tegra_dma_stop(tdc);
739 739
740 if (!list_empty(&tdc->pending_sg_req) && was_busy) { 740 if (!list_empty(&tdc->pending_sg_req) && was_busy) {
741 sgreq = list_first_entry(&tdc->pending_sg_req, 741 sgreq = list_first_entry(&tdc->pending_sg_req,
742 typeof(*sgreq), node); 742 typeof(*sgreq), node);
743 sgreq->dma_desc->bytes_transferred += 743 sgreq->dma_desc->bytes_transferred +=
744 get_current_xferred_count(tdc, sgreq, status); 744 get_current_xferred_count(tdc, sgreq, status);
745 } 745 }
746 tegra_dma_resume(tdc); 746 tegra_dma_resume(tdc);
747 747
748 skip_dma_stop: 748 skip_dma_stop:
749 tegra_dma_abort_all(tdc); 749 tegra_dma_abort_all(tdc);
750 750
751 while (!list_empty(&tdc->cb_desc)) { 751 while (!list_empty(&tdc->cb_desc)) {
752 dma_desc = list_first_entry(&tdc->cb_desc, 752 dma_desc = list_first_entry(&tdc->cb_desc,
753 typeof(*dma_desc), cb_node); 753 typeof(*dma_desc), cb_node);
754 list_del(&dma_desc->cb_node); 754 list_del(&dma_desc->cb_node);
755 dma_desc->cb_count = 0; 755 dma_desc->cb_count = 0;
756 } 756 }
757 spin_unlock_irqrestore(&tdc->lock, flags); 757 spin_unlock_irqrestore(&tdc->lock, flags);
758 } 758 }
759 759
760 static enum dma_status tegra_dma_tx_status(struct dma_chan *dc, 760 static enum dma_status tegra_dma_tx_status(struct dma_chan *dc,
761 dma_cookie_t cookie, struct dma_tx_state *txstate) 761 dma_cookie_t cookie, struct dma_tx_state *txstate)
762 { 762 {
763 struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); 763 struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
764 struct tegra_dma_desc *dma_desc; 764 struct tegra_dma_desc *dma_desc;
765 struct tegra_dma_sg_req *sg_req; 765 struct tegra_dma_sg_req *sg_req;
766 enum dma_status ret; 766 enum dma_status ret;
767 unsigned long flags; 767 unsigned long flags;
768 unsigned int residual; 768 unsigned int residual;
769 769
770 ret = dma_cookie_status(dc, cookie, txstate); 770 ret = dma_cookie_status(dc, cookie, txstate);
771 if (ret == DMA_SUCCESS) 771 if (ret == DMA_COMPLETE)
772 return ret; 772 return ret;
773 773
774 spin_lock_irqsave(&tdc->lock, flags); 774 spin_lock_irqsave(&tdc->lock, flags);
775 775
776 /* Check on wait_ack desc status */ 776 /* Check on wait_ack desc status */
777 list_for_each_entry(dma_desc, &tdc->free_dma_desc, node) { 777 list_for_each_entry(dma_desc, &tdc->free_dma_desc, node) {
778 if (dma_desc->txd.cookie == cookie) { 778 if (dma_desc->txd.cookie == cookie) {
779 residual = dma_desc->bytes_requested - 779 residual = dma_desc->bytes_requested -
780 (dma_desc->bytes_transferred % 780 (dma_desc->bytes_transferred %
781 dma_desc->bytes_requested); 781 dma_desc->bytes_requested);
782 dma_set_residue(txstate, residual); 782 dma_set_residue(txstate, residual);
783 ret = dma_desc->dma_status; 783 ret = dma_desc->dma_status;
784 spin_unlock_irqrestore(&tdc->lock, flags); 784 spin_unlock_irqrestore(&tdc->lock, flags);
785 return ret; 785 return ret;
786 } 786 }
787 } 787 }
788 788
789 /* Check in pending list */ 789 /* Check in pending list */
790 list_for_each_entry(sg_req, &tdc->pending_sg_req, node) { 790 list_for_each_entry(sg_req, &tdc->pending_sg_req, node) {
791 dma_desc = sg_req->dma_desc; 791 dma_desc = sg_req->dma_desc;
792 if (dma_desc->txd.cookie == cookie) { 792 if (dma_desc->txd.cookie == cookie) {
793 residual = dma_desc->bytes_requested - 793 residual = dma_desc->bytes_requested -
794 (dma_desc->bytes_transferred % 794 (dma_desc->bytes_transferred %
795 dma_desc->bytes_requested); 795 dma_desc->bytes_requested);
796 dma_set_residue(txstate, residual); 796 dma_set_residue(txstate, residual);
797 ret = dma_desc->dma_status; 797 ret = dma_desc->dma_status;
798 spin_unlock_irqrestore(&tdc->lock, flags); 798 spin_unlock_irqrestore(&tdc->lock, flags);
799 return ret; 799 return ret;
800 } 800 }
801 } 801 }
802 802
803 dev_dbg(tdc2dev(tdc), "cookie %d not found\n", cookie); 803 dev_dbg(tdc2dev(tdc), "cookie %d not found\n", cookie);
804 spin_unlock_irqrestore(&tdc->lock, flags); 804 spin_unlock_irqrestore(&tdc->lock, flags);
805 return ret; 805 return ret;
806 } 806 }
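The residue reported by tegra_dma_tx_status() comes purely from the two byte counters kept in the descriptor; once dma_cookie_status() already answers DMA_COMPLETE the counters are not consulted at all. The stand-alone sketch below (an illustration, not driver code) replays that arithmetic; the modulo is what keeps the result meaningful for cyclic descriptors, whose bytes_transferred keeps growing past bytes_requested as the buffer wraps.

/* illustration only: the residue arithmetic from tegra_dma_tx_status() */
#include <stdio.h>

static unsigned int residue(unsigned int requested, unsigned int transferred)
{
	return requested - (transferred % requested);
}

int main(void)
{
	/* one-shot transfer: 1024 of 4096 bytes moved -> 3072 bytes left */
	printf("%u\n", residue(4096, 1024));
	/* cyclic transfer that wrapped once: 5120 % 4096 = 1024 -> 3072 left */
	printf("%u\n", residue(4096, 5120));
	return 0;
}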
807 807
808 static int tegra_dma_device_control(struct dma_chan *dc, enum dma_ctrl_cmd cmd, 808 static int tegra_dma_device_control(struct dma_chan *dc, enum dma_ctrl_cmd cmd,
809 unsigned long arg) 809 unsigned long arg)
810 { 810 {
811 switch (cmd) { 811 switch (cmd) {
812 case DMA_SLAVE_CONFIG: 812 case DMA_SLAVE_CONFIG:
813 return tegra_dma_slave_config(dc, 813 return tegra_dma_slave_config(dc,
814 (struct dma_slave_config *)arg); 814 (struct dma_slave_config *)arg);
815 815
816 case DMA_TERMINATE_ALL: 816 case DMA_TERMINATE_ALL:
817 tegra_dma_terminate_all(dc); 817 tegra_dma_terminate_all(dc);
818 return 0; 818 return 0;
819 819
820 default: 820 default:
821 break; 821 break;
822 } 822 }
823 823
824 return -ENXIO; 824 return -ENXIO;
825 } 825 }
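A peripheral driver never calls tegra_dma_device_control() directly; it reaches these commands through the generic dmaengine wrappers. The hypothetical client sketch below shows that path (placeholder names such as my_dev_fifo_addr are not part of this driver):

#include <linux/dmaengine.h>

/* hypothetical client: configure the slave side, later tear it down */
static int my_client_configure(struct dma_chan *chan, dma_addr_t my_dev_fifo_addr)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_MEM_TO_DEV,
		.dst_addr	= my_dev_fifo_addr,
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst	= 8,
	};

	/* DMA_SLAVE_CONFIG -> tegra_dma_slave_config() */
	return dmaengine_slave_config(chan, &cfg);
}

static void my_client_stop(struct dma_chan *chan)
{
	/* DMA_TERMINATE_ALL -> tegra_dma_terminate_all() */
	dmaengine_terminate_all(chan);
}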
826 826
827 static inline int get_bus_width(struct tegra_dma_channel *tdc, 827 static inline int get_bus_width(struct tegra_dma_channel *tdc,
828 enum dma_slave_buswidth slave_bw) 828 enum dma_slave_buswidth slave_bw)
829 { 829 {
830 switch (slave_bw) { 830 switch (slave_bw) {
831 case DMA_SLAVE_BUSWIDTH_1_BYTE: 831 case DMA_SLAVE_BUSWIDTH_1_BYTE:
832 return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_8; 832 return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_8;
833 case DMA_SLAVE_BUSWIDTH_2_BYTES: 833 case DMA_SLAVE_BUSWIDTH_2_BYTES:
834 return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_16; 834 return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_16;
835 case DMA_SLAVE_BUSWIDTH_4_BYTES: 835 case DMA_SLAVE_BUSWIDTH_4_BYTES:
836 return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_32; 836 return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_32;
837 case DMA_SLAVE_BUSWIDTH_8_BYTES: 837 case DMA_SLAVE_BUSWIDTH_8_BYTES:
838 return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_64; 838 return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_64;
839 default: 839 default:
840 dev_warn(tdc2dev(tdc), 840 dev_warn(tdc2dev(tdc),
841 "slave bw is not supported, using 32bits\n"); 841 "slave bw is not supported, using 32bits\n");
842 return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_32; 842 return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_32;
843 } 843 }
844 } 844 }
845 845
846 static inline int get_burst_size(struct tegra_dma_channel *tdc, 846 static inline int get_burst_size(struct tegra_dma_channel *tdc,
847 u32 burst_size, enum dma_slave_buswidth slave_bw, int len) 847 u32 burst_size, enum dma_slave_buswidth slave_bw, int len)
848 { 848 {
849 int burst_byte; 849 int burst_byte;
850 int burst_ahb_width; 850 int burst_ahb_width;
851 851
852 /* 852 /*
853 * The burst_size from the client is in units of the bus width; 853 * The burst_size from the client is in units of the bus width;
854 * convert it to the AHB memory width, which is 4 bytes. 854 * convert it to the AHB memory width, which is 4 bytes.
855 */ 855 */
856 burst_byte = burst_size * slave_bw; 856 burst_byte = burst_size * slave_bw;
857 burst_ahb_width = burst_byte / 4; 857 burst_ahb_width = burst_byte / 4;
858 858
859 /* If burst size is 0 then calculate the burst size based on length */ 859 /* If burst size is 0 then calculate the burst size based on length */
860 if (!burst_ahb_width) { 860 if (!burst_ahb_width) {
861 if (len & 0xF) 861 if (len & 0xF)
862 return TEGRA_APBDMA_AHBSEQ_BURST_1; 862 return TEGRA_APBDMA_AHBSEQ_BURST_1;
863 else if ((len >> 4) & 0x1) 863 else if ((len >> 4) & 0x1)
864 return TEGRA_APBDMA_AHBSEQ_BURST_4; 864 return TEGRA_APBDMA_AHBSEQ_BURST_4;
865 else 865 else
866 return TEGRA_APBDMA_AHBSEQ_BURST_8; 866 return TEGRA_APBDMA_AHBSEQ_BURST_8;
867 } 867 }
868 if (burst_ahb_width < 4) 868 if (burst_ahb_width < 4)
869 return TEGRA_APBDMA_AHBSEQ_BURST_1; 869 return TEGRA_APBDMA_AHBSEQ_BURST_1;
870 else if (burst_ahb_width < 8) 870 else if (burst_ahb_width < 8)
871 return TEGRA_APBDMA_AHBSEQ_BURST_4; 871 return TEGRA_APBDMA_AHBSEQ_BURST_4;
872 else 872 else
873 return TEGRA_APBDMA_AHBSEQ_BURST_8; 873 return TEGRA_APBDMA_AHBSEQ_BURST_8;
874 } 874 }
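get_burst_size() maps a client burst, expressed in units of the slave bus width, onto one of three fixed AHB burst encodings. A stand-alone sketch of that mapping (illustration only, assuming the same 4-byte AHB word as the comment above):

#include <stdio.h>

enum ahb_burst { BURST_1 = 1, BURST_4 = 4, BURST_8 = 8 };

/* burst_size is in units of the slave bus width (slave_bw bytes) */
static enum ahb_burst pick_burst(int burst_size, int slave_bw, int len)
{
	int burst_ahb_words = (burst_size * slave_bw) / 4;

	if (!burst_ahb_words) {			/* no hint: derive from length */
		if (len & 0xF)
			return BURST_1;
		return ((len >> 4) & 0x1) ? BURST_4 : BURST_8;
	}
	if (burst_ahb_words < 4)
		return BURST_1;
	return (burst_ahb_words < 8) ? BURST_4 : BURST_8;
}

int main(void)
{
	/* 8-beat burst on a 32-bit slave = 32 bytes = 8 AHB words -> 8 */
	printf("%d\n", pick_burst(8, 4, 4096));
	/* no hint, 48-byte transfer: 16-byte aligned but odd 16-count -> 4 */
	printf("%d\n", pick_burst(0, 4, 48));
	return 0;
}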
875 875
876 static int get_transfer_param(struct tegra_dma_channel *tdc, 876 static int get_transfer_param(struct tegra_dma_channel *tdc,
877 enum dma_transfer_direction direction, unsigned long *apb_addr, 877 enum dma_transfer_direction direction, unsigned long *apb_addr,
878 unsigned long *apb_seq, unsigned long *csr, unsigned int *burst_size, 878 unsigned long *apb_seq, unsigned long *csr, unsigned int *burst_size,
879 enum dma_slave_buswidth *slave_bw) 879 enum dma_slave_buswidth *slave_bw)
880 { 880 {
881 881
882 switch (direction) { 882 switch (direction) {
883 case DMA_MEM_TO_DEV: 883 case DMA_MEM_TO_DEV:
884 *apb_addr = tdc->dma_sconfig.dst_addr; 884 *apb_addr = tdc->dma_sconfig.dst_addr;
885 *apb_seq = get_bus_width(tdc, tdc->dma_sconfig.dst_addr_width); 885 *apb_seq = get_bus_width(tdc, tdc->dma_sconfig.dst_addr_width);
886 *burst_size = tdc->dma_sconfig.dst_maxburst; 886 *burst_size = tdc->dma_sconfig.dst_maxburst;
887 *slave_bw = tdc->dma_sconfig.dst_addr_width; 887 *slave_bw = tdc->dma_sconfig.dst_addr_width;
888 *csr = TEGRA_APBDMA_CSR_DIR; 888 *csr = TEGRA_APBDMA_CSR_DIR;
889 return 0; 889 return 0;
890 890
891 case DMA_DEV_TO_MEM: 891 case DMA_DEV_TO_MEM:
892 *apb_addr = tdc->dma_sconfig.src_addr; 892 *apb_addr = tdc->dma_sconfig.src_addr;
893 *apb_seq = get_bus_width(tdc, tdc->dma_sconfig.src_addr_width); 893 *apb_seq = get_bus_width(tdc, tdc->dma_sconfig.src_addr_width);
894 *burst_size = tdc->dma_sconfig.src_maxburst; 894 *burst_size = tdc->dma_sconfig.src_maxburst;
895 *slave_bw = tdc->dma_sconfig.src_addr_width; 895 *slave_bw = tdc->dma_sconfig.src_addr_width;
896 *csr = 0; 896 *csr = 0;
897 return 0; 897 return 0;
898 898
899 default: 899 default:
900 dev_err(tdc2dev(tdc), "Dma direction is not supported\n"); 900 dev_err(tdc2dev(tdc), "Dma direction is not supported\n");
901 return -EINVAL; 901 return -EINVAL;
902 } 902 }
903 return -EINVAL; 903 return -EINVAL;
904 } 904 }
905 905
906 static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg( 906 static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg(
907 struct dma_chan *dc, struct scatterlist *sgl, unsigned int sg_len, 907 struct dma_chan *dc, struct scatterlist *sgl, unsigned int sg_len,
908 enum dma_transfer_direction direction, unsigned long flags, 908 enum dma_transfer_direction direction, unsigned long flags,
909 void *context) 909 void *context)
910 { 910 {
911 struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); 911 struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
912 struct tegra_dma_desc *dma_desc; 912 struct tegra_dma_desc *dma_desc;
913 unsigned int i; 913 unsigned int i;
914 struct scatterlist *sg; 914 struct scatterlist *sg;
915 unsigned long csr, ahb_seq, apb_ptr, apb_seq; 915 unsigned long csr, ahb_seq, apb_ptr, apb_seq;
916 struct list_head req_list; 916 struct list_head req_list;
917 struct tegra_dma_sg_req *sg_req = NULL; 917 struct tegra_dma_sg_req *sg_req = NULL;
918 u32 burst_size; 918 u32 burst_size;
919 enum dma_slave_buswidth slave_bw; 919 enum dma_slave_buswidth slave_bw;
920 int ret; 920 int ret;
921 921
922 if (!tdc->config_init) { 922 if (!tdc->config_init) {
923 dev_err(tdc2dev(tdc), "dma channel is not configured\n"); 923 dev_err(tdc2dev(tdc), "dma channel is not configured\n");
924 return NULL; 924 return NULL;
925 } 925 }
926 if (sg_len < 1) { 926 if (sg_len < 1) {
927 dev_err(tdc2dev(tdc), "Invalid segment length %d\n", sg_len); 927 dev_err(tdc2dev(tdc), "Invalid segment length %d\n", sg_len);
928 return NULL; 928 return NULL;
929 } 929 }
930 930
931 ret = get_transfer_param(tdc, direction, &apb_ptr, &apb_seq, &csr, 931 ret = get_transfer_param(tdc, direction, &apb_ptr, &apb_seq, &csr,
932 &burst_size, &slave_bw); 932 &burst_size, &slave_bw);
933 if (ret < 0) 933 if (ret < 0)
934 return NULL; 934 return NULL;
935 935
936 INIT_LIST_HEAD(&req_list); 936 INIT_LIST_HEAD(&req_list);
937 937
938 ahb_seq = TEGRA_APBDMA_AHBSEQ_INTR_ENB; 938 ahb_seq = TEGRA_APBDMA_AHBSEQ_INTR_ENB;
939 ahb_seq |= TEGRA_APBDMA_AHBSEQ_WRAP_NONE << 939 ahb_seq |= TEGRA_APBDMA_AHBSEQ_WRAP_NONE <<
940 TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT; 940 TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT;
941 ahb_seq |= TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32; 941 ahb_seq |= TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32;
942 942
943 csr |= TEGRA_APBDMA_CSR_ONCE | TEGRA_APBDMA_CSR_FLOW; 943 csr |= TEGRA_APBDMA_CSR_ONCE | TEGRA_APBDMA_CSR_FLOW;
944 csr |= tdc->dma_sconfig.slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT; 944 csr |= tdc->dma_sconfig.slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT;
945 if (flags & DMA_PREP_INTERRUPT) 945 if (flags & DMA_PREP_INTERRUPT)
946 csr |= TEGRA_APBDMA_CSR_IE_EOC; 946 csr |= TEGRA_APBDMA_CSR_IE_EOC;
947 947
948 apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1; 948 apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1;
949 949
950 dma_desc = tegra_dma_desc_get(tdc); 950 dma_desc = tegra_dma_desc_get(tdc);
951 if (!dma_desc) { 951 if (!dma_desc) {
952 dev_err(tdc2dev(tdc), "Dma descriptors not available\n"); 952 dev_err(tdc2dev(tdc), "Dma descriptors not available\n");
953 return NULL; 953 return NULL;
954 } 954 }
955 INIT_LIST_HEAD(&dma_desc->tx_list); 955 INIT_LIST_HEAD(&dma_desc->tx_list);
956 INIT_LIST_HEAD(&dma_desc->cb_node); 956 INIT_LIST_HEAD(&dma_desc->cb_node);
957 dma_desc->cb_count = 0; 957 dma_desc->cb_count = 0;
958 dma_desc->bytes_requested = 0; 958 dma_desc->bytes_requested = 0;
959 dma_desc->bytes_transferred = 0; 959 dma_desc->bytes_transferred = 0;
960 dma_desc->dma_status = DMA_IN_PROGRESS; 960 dma_desc->dma_status = DMA_IN_PROGRESS;
961 961
962 /* Make transfer requests */ 962 /* Make transfer requests */
963 for_each_sg(sgl, sg, sg_len, i) { 963 for_each_sg(sgl, sg, sg_len, i) {
964 u32 len, mem; 964 u32 len, mem;
965 965
966 mem = sg_dma_address(sg); 966 mem = sg_dma_address(sg);
967 len = sg_dma_len(sg); 967 len = sg_dma_len(sg);
968 968
969 if ((len & 3) || (mem & 3) || 969 if ((len & 3) || (mem & 3) ||
970 (len > tdc->tdma->chip_data->max_dma_count)) { 970 (len > tdc->tdma->chip_data->max_dma_count)) {
971 dev_err(tdc2dev(tdc), 971 dev_err(tdc2dev(tdc),
972 "Dma length/memory address is not supported\n"); 972 "Dma length/memory address is not supported\n");
973 tegra_dma_desc_put(tdc, dma_desc); 973 tegra_dma_desc_put(tdc, dma_desc);
974 return NULL; 974 return NULL;
975 } 975 }
976 976
977 sg_req = tegra_dma_sg_req_get(tdc); 977 sg_req = tegra_dma_sg_req_get(tdc);
978 if (!sg_req) { 978 if (!sg_req) {
979 dev_err(tdc2dev(tdc), "Dma sg-req not available\n"); 979 dev_err(tdc2dev(tdc), "Dma sg-req not available\n");
980 tegra_dma_desc_put(tdc, dma_desc); 980 tegra_dma_desc_put(tdc, dma_desc);
981 return NULL; 981 return NULL;
982 } 982 }
983 983
984 ahb_seq |= get_burst_size(tdc, burst_size, slave_bw, len); 984 ahb_seq |= get_burst_size(tdc, burst_size, slave_bw, len);
985 dma_desc->bytes_requested += len; 985 dma_desc->bytes_requested += len;
986 986
987 sg_req->ch_regs.apb_ptr = apb_ptr; 987 sg_req->ch_regs.apb_ptr = apb_ptr;
988 sg_req->ch_regs.ahb_ptr = mem; 988 sg_req->ch_regs.ahb_ptr = mem;
989 sg_req->ch_regs.csr = csr | ((len - 4) & 0xFFFC); 989 sg_req->ch_regs.csr = csr | ((len - 4) & 0xFFFC);
990 sg_req->ch_regs.apb_seq = apb_seq; 990 sg_req->ch_regs.apb_seq = apb_seq;
991 sg_req->ch_regs.ahb_seq = ahb_seq; 991 sg_req->ch_regs.ahb_seq = ahb_seq;
992 sg_req->configured = false; 992 sg_req->configured = false;
993 sg_req->last_sg = false; 993 sg_req->last_sg = false;
994 sg_req->dma_desc = dma_desc; 994 sg_req->dma_desc = dma_desc;
995 sg_req->req_len = len; 995 sg_req->req_len = len;
996 996
997 list_add_tail(&sg_req->node, &dma_desc->tx_list); 997 list_add_tail(&sg_req->node, &dma_desc->tx_list);
998 } 998 }
999 sg_req->last_sg = true; 999 sg_req->last_sg = true;
1000 if (flags & DMA_CTRL_ACK) 1000 if (flags & DMA_CTRL_ACK)
1001 dma_desc->txd.flags = DMA_CTRL_ACK; 1001 dma_desc->txd.flags = DMA_CTRL_ACK;
1002 1002
1003 /* 1003 /*
1004 * Make sure that the requested mode does not conflict with the 1004 * Make sure that the requested mode does not conflict with the
1005 * currently configured mode. 1005 * currently configured mode.
1006 */ 1006 */
1007 if (!tdc->isr_handler) { 1007 if (!tdc->isr_handler) {
1008 tdc->isr_handler = handle_once_dma_done; 1008 tdc->isr_handler = handle_once_dma_done;
1009 tdc->cyclic = false; 1009 tdc->cyclic = false;
1010 } else { 1010 } else {
1011 if (tdc->cyclic) { 1011 if (tdc->cyclic) {
1012 dev_err(tdc2dev(tdc), "DMA configured in cyclic mode\n"); 1012 dev_err(tdc2dev(tdc), "DMA configured in cyclic mode\n");
1013 tegra_dma_desc_put(tdc, dma_desc); 1013 tegra_dma_desc_put(tdc, dma_desc);
1014 return NULL; 1014 return NULL;
1015 } 1015 }
1016 } 1016 }
1017 1017
1018 return &dma_desc->txd; 1018 return &dma_desc->txd;
1019 } 1019 }
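For completeness, a hypothetical client-side sketch of the prepare/submit path that ends up in tegra_dma_prep_slave_sg(), tegra_dma_tx_submit() and tegra_dma_issue_pending(); my_done() and my_data are placeholders, not part of this driver:

#include <linux/dmaengine.h>
#include <linux/errno.h>
#include <linux/scatterlist.h>

static void my_done(void *my_data)
{
	/* invoked from tegra_dma_tasklet() once the descriptor completes */
}

static int my_client_start_tx(struct dma_chan *chan, struct scatterlist *sgl,
			      unsigned int sg_len, void *my_data)
{
	struct dma_async_tx_descriptor *txd;

	txd = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_MEM_TO_DEV,
				      DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!txd)
		return -ENOMEM;

	txd->callback = my_done;
	txd->callback_param = my_data;
	dmaengine_submit(txd);		/* tegra_dma_tx_submit(): cookie assigned */
	dma_async_issue_pending(chan);	/* tegra_dma_issue_pending(): start HW */
	return 0;
}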
1020 1020
1021 static struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic( 1021 static struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic(
1022 struct dma_chan *dc, dma_addr_t buf_addr, size_t buf_len, 1022 struct dma_chan *dc, dma_addr_t buf_addr, size_t buf_len,
1023 size_t period_len, enum dma_transfer_direction direction, 1023 size_t period_len, enum dma_transfer_direction direction,
1024 unsigned long flags, void *context) 1024 unsigned long flags, void *context)
1025 { 1025 {
1026 struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); 1026 struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
1027 struct tegra_dma_desc *dma_desc = NULL; 1027 struct tegra_dma_desc *dma_desc = NULL;
1028 struct tegra_dma_sg_req *sg_req = NULL; 1028 struct tegra_dma_sg_req *sg_req = NULL;
1029 unsigned long csr, ahb_seq, apb_ptr, apb_seq; 1029 unsigned long csr, ahb_seq, apb_ptr, apb_seq;
1030 int len; 1030 int len;
1031 size_t remain_len; 1031 size_t remain_len;
1032 dma_addr_t mem = buf_addr; 1032 dma_addr_t mem = buf_addr;
1033 u32 burst_size; 1033 u32 burst_size;
1034 enum dma_slave_buswidth slave_bw; 1034 enum dma_slave_buswidth slave_bw;
1035 int ret; 1035 int ret;
1036 1036
1037 if (!buf_len || !period_len) { 1037 if (!buf_len || !period_len) {
1038 dev_err(tdc2dev(tdc), "Invalid buffer/period len\n"); 1038 dev_err(tdc2dev(tdc), "Invalid buffer/period len\n");
1039 return NULL; 1039 return NULL;
1040 } 1040 }
1041 1041
1042 if (!tdc->config_init) { 1042 if (!tdc->config_init) {
1043 dev_err(tdc2dev(tdc), "DMA slave is not configured\n"); 1043 dev_err(tdc2dev(tdc), "DMA slave is not configured\n");
1044 return NULL; 1044 return NULL;
1045 } 1045 }
1046 1046
1047 /* 1047 /*
1048 * More requests may be queued as long as the DMA has not been 1048 * More requests may be queued as long as the DMA has not been
1049 * started; the driver will loop over all of them. 1049 * started; the driver will loop over all of them.
1050 * Once the DMA is started, new requests can be queued only after 1050 * Once the DMA is started, new requests can be queued only after
1051 * terminating the DMA. 1051 * terminating the DMA.
1052 */ 1052 */
1053 if (tdc->busy) { 1053 if (tdc->busy) {
1054 dev_err(tdc2dev(tdc), "Request not allowed when dma running\n"); 1054 dev_err(tdc2dev(tdc), "Request not allowed when dma running\n");
1055 return NULL; 1055 return NULL;
1056 } 1056 }
1057 1057
1058 /* 1058 /*
1059 * We only support cyclic transfers when buf_len is a multiple of 1059 * We only support cyclic transfers when buf_len is a multiple of
1060 * period_len. 1060 * period_len.
1061 */ 1061 */
1062 if (buf_len % period_len) { 1062 if (buf_len % period_len) {
1063 dev_err(tdc2dev(tdc), "buf_len is not multiple of period_len\n"); 1063 dev_err(tdc2dev(tdc), "buf_len is not multiple of period_len\n");
1064 return NULL; 1064 return NULL;
1065 } 1065 }
1066 1066
1067 len = period_len; 1067 len = period_len;
1068 if ((len & 3) || (buf_addr & 3) || 1068 if ((len & 3) || (buf_addr & 3) ||
1069 (len > tdc->tdma->chip_data->max_dma_count)) { 1069 (len > tdc->tdma->chip_data->max_dma_count)) {
1070 dev_err(tdc2dev(tdc), "Req len/mem address is not correct\n"); 1070 dev_err(tdc2dev(tdc), "Req len/mem address is not correct\n");
1071 return NULL; 1071 return NULL;
1072 } 1072 }
1073 1073
1074 ret = get_transfer_param(tdc, direction, &apb_ptr, &apb_seq, &csr, 1074 ret = get_transfer_param(tdc, direction, &apb_ptr, &apb_seq, &csr,
1075 &burst_size, &slave_bw); 1075 &burst_size, &slave_bw);
1076 if (ret < 0) 1076 if (ret < 0)
1077 return NULL; 1077 return NULL;
1078 1078
1079 1079
1080 ahb_seq = TEGRA_APBDMA_AHBSEQ_INTR_ENB; 1080 ahb_seq = TEGRA_APBDMA_AHBSEQ_INTR_ENB;
1081 ahb_seq |= TEGRA_APBDMA_AHBSEQ_WRAP_NONE << 1081 ahb_seq |= TEGRA_APBDMA_AHBSEQ_WRAP_NONE <<
1082 TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT; 1082 TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT;
1083 ahb_seq |= TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32; 1083 ahb_seq |= TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32;
1084 1084
1085 csr |= TEGRA_APBDMA_CSR_FLOW; 1085 csr |= TEGRA_APBDMA_CSR_FLOW;
1086 if (flags & DMA_PREP_INTERRUPT) 1086 if (flags & DMA_PREP_INTERRUPT)
1087 csr |= TEGRA_APBDMA_CSR_IE_EOC; 1087 csr |= TEGRA_APBDMA_CSR_IE_EOC;
1088 csr |= tdc->dma_sconfig.slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT; 1088 csr |= tdc->dma_sconfig.slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT;
1089 1089
1090 apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1; 1090 apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1;
1091 1091
1092 dma_desc = tegra_dma_desc_get(tdc); 1092 dma_desc = tegra_dma_desc_get(tdc);
1093 if (!dma_desc) { 1093 if (!dma_desc) {
1094 dev_err(tdc2dev(tdc), "not enough descriptors available\n"); 1094 dev_err(tdc2dev(tdc), "not enough descriptors available\n");
1095 return NULL; 1095 return NULL;
1096 } 1096 }
1097 1097
1098 INIT_LIST_HEAD(&dma_desc->tx_list); 1098 INIT_LIST_HEAD(&dma_desc->tx_list);
1099 INIT_LIST_HEAD(&dma_desc->cb_node); 1099 INIT_LIST_HEAD(&dma_desc->cb_node);
1100 dma_desc->cb_count = 0; 1100 dma_desc->cb_count = 0;
1101 1101
1102 dma_desc->bytes_transferred = 0; 1102 dma_desc->bytes_transferred = 0;
1103 dma_desc->bytes_requested = buf_len; 1103 dma_desc->bytes_requested = buf_len;
1104 remain_len = buf_len; 1104 remain_len = buf_len;
1105 1105
1106 /* Split the transfer into period-sized requests */ 1106 /* Split the transfer into period-sized requests */
1107 while (remain_len) { 1107 while (remain_len) {
1108 sg_req = tegra_dma_sg_req_get(tdc); 1108 sg_req = tegra_dma_sg_req_get(tdc);
1109 if (!sg_req) { 1109 if (!sg_req) {
1110 dev_err(tdc2dev(tdc), "Dma sg-req not available\n"); 1110 dev_err(tdc2dev(tdc), "Dma sg-req not available\n");
1111 tegra_dma_desc_put(tdc, dma_desc); 1111 tegra_dma_desc_put(tdc, dma_desc);
1112 return NULL; 1112 return NULL;
1113 } 1113 }
1114 1114
1115 ahb_seq |= get_burst_size(tdc, burst_size, slave_bw, len); 1115 ahb_seq |= get_burst_size(tdc, burst_size, slave_bw, len);
1116 sg_req->ch_regs.apb_ptr = apb_ptr; 1116 sg_req->ch_regs.apb_ptr = apb_ptr;
1117 sg_req->ch_regs.ahb_ptr = mem; 1117 sg_req->ch_regs.ahb_ptr = mem;
1118 sg_req->ch_regs.csr = csr | ((len - 4) & 0xFFFC); 1118 sg_req->ch_regs.csr = csr | ((len - 4) & 0xFFFC);
1119 sg_req->ch_regs.apb_seq = apb_seq; 1119 sg_req->ch_regs.apb_seq = apb_seq;
1120 sg_req->ch_regs.ahb_seq = ahb_seq; 1120 sg_req->ch_regs.ahb_seq = ahb_seq;
1121 sg_req->configured = false; 1121 sg_req->configured = false;
1122 sg_req->half_done = false; 1122 sg_req->half_done = false;
1123 sg_req->last_sg = false; 1123 sg_req->last_sg = false;
1124 sg_req->dma_desc = dma_desc; 1124 sg_req->dma_desc = dma_desc;
1125 sg_req->req_len = len; 1125 sg_req->req_len = len;
1126 1126
1127 list_add_tail(&sg_req->node, &dma_desc->tx_list); 1127 list_add_tail(&sg_req->node, &dma_desc->tx_list);
1128 remain_len -= len; 1128 remain_len -= len;
1129 mem += len; 1129 mem += len;
1130 } 1130 }
1131 sg_req->last_sg = true; 1131 sg_req->last_sg = true;
1132 if (flags & DMA_CTRL_ACK) 1132 if (flags & DMA_CTRL_ACK)
1133 dma_desc->txd.flags = DMA_CTRL_ACK; 1133 dma_desc->txd.flags = DMA_CTRL_ACK;
1134 1134
1135 /* 1135 /*
1136 * Make sure that the requested mode does not conflict with the 1136 * Make sure that the requested mode does not conflict with the
1137 * currently configured mode. 1137 * currently configured mode.
1138 */ 1138 */
1139 if (!tdc->isr_handler) { 1139 if (!tdc->isr_handler) {
1140 tdc->isr_handler = handle_cont_sngl_cycle_dma_done; 1140 tdc->isr_handler = handle_cont_sngl_cycle_dma_done;
1141 tdc->cyclic = true; 1141 tdc->cyclic = true;
1142 } else { 1142 } else {
1143 if (!tdc->cyclic) { 1143 if (!tdc->cyclic) {
1144 dev_err(tdc2dev(tdc), "DMA configuration conflict\n"); 1144 dev_err(tdc2dev(tdc), "DMA configuration conflict\n");
1145 tegra_dma_desc_put(tdc, dma_desc); 1145 tegra_dma_desc_put(tdc, dma_desc);
1146 return NULL; 1146 return NULL;
1147 } 1147 }
1148 } 1148 }
1149 1149
1150 return &dma_desc->txd; 1150 return &dma_desc->txd;
1151 } 1151 }
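tegra_dma_prep_dma_cyclic() splits buf_len into buf_len / period_len equal requests that the ISR keeps re-queueing, raising one EOC interrupt (and hence one callback) per period. A tiny stand-alone illustration of that split (not driver code):

#include <stddef.h>
#include <stdio.h>

int main(void)
{
	size_t buf_len = 8192, period_len = 2048, offset;

	/* one sg request per period, exactly as the while loop above builds */
	for (offset = 0; offset < buf_len; offset += period_len)
		printf("sg request: offset %zu, len %zu\n", offset, period_len);
	/* -> 4 requests; the callback fires once per completed period */
	return 0;
}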
1152 1152
1153 static int tegra_dma_alloc_chan_resources(struct dma_chan *dc) 1153 static int tegra_dma_alloc_chan_resources(struct dma_chan *dc)
1154 { 1154 {
1155 struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); 1155 struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
1156 struct tegra_dma *tdma = tdc->tdma; 1156 struct tegra_dma *tdma = tdc->tdma;
1157 int ret; 1157 int ret;
1158 1158
1159 dma_cookie_init(&tdc->dma_chan); 1159 dma_cookie_init(&tdc->dma_chan);
1160 tdc->config_init = false; 1160 tdc->config_init = false;
1161 ret = clk_prepare_enable(tdma->dma_clk); 1161 ret = clk_prepare_enable(tdma->dma_clk);
1162 if (ret < 0) 1162 if (ret < 0)
1163 dev_err(tdc2dev(tdc), "clk_prepare_enable failed: %d\n", ret); 1163 dev_err(tdc2dev(tdc), "clk_prepare_enable failed: %d\n", ret);
1164 return ret; 1164 return ret;
1165 } 1165 }
1166 1166
1167 static void tegra_dma_free_chan_resources(struct dma_chan *dc) 1167 static void tegra_dma_free_chan_resources(struct dma_chan *dc)
1168 { 1168 {
1169 struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); 1169 struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
1170 struct tegra_dma *tdma = tdc->tdma; 1170 struct tegra_dma *tdma = tdc->tdma;
1171 1171
1172 struct tegra_dma_desc *dma_desc; 1172 struct tegra_dma_desc *dma_desc;
1173 struct tegra_dma_sg_req *sg_req; 1173 struct tegra_dma_sg_req *sg_req;
1174 struct list_head dma_desc_list; 1174 struct list_head dma_desc_list;
1175 struct list_head sg_req_list; 1175 struct list_head sg_req_list;
1176 unsigned long flags; 1176 unsigned long flags;
1177 1177
1178 INIT_LIST_HEAD(&dma_desc_list); 1178 INIT_LIST_HEAD(&dma_desc_list);
1179 INIT_LIST_HEAD(&sg_req_list); 1179 INIT_LIST_HEAD(&sg_req_list);
1180 1180
1181 dev_dbg(tdc2dev(tdc), "Freeing channel %d\n", tdc->id); 1181 dev_dbg(tdc2dev(tdc), "Freeing channel %d\n", tdc->id);
1182 1182
1183 if (tdc->busy) 1183 if (tdc->busy)
1184 tegra_dma_terminate_all(dc); 1184 tegra_dma_terminate_all(dc);
1185 1185
1186 spin_lock_irqsave(&tdc->lock, flags); 1186 spin_lock_irqsave(&tdc->lock, flags);
1187 list_splice_init(&tdc->pending_sg_req, &sg_req_list); 1187 list_splice_init(&tdc->pending_sg_req, &sg_req_list);
1188 list_splice_init(&tdc->free_sg_req, &sg_req_list); 1188 list_splice_init(&tdc->free_sg_req, &sg_req_list);
1189 list_splice_init(&tdc->free_dma_desc, &dma_desc_list); 1189 list_splice_init(&tdc->free_dma_desc, &dma_desc_list);
1190 INIT_LIST_HEAD(&tdc->cb_desc); 1190 INIT_LIST_HEAD(&tdc->cb_desc);
1191 tdc->config_init = false; 1191 tdc->config_init = false;
1192 tdc->isr_handler = NULL; 1192 tdc->isr_handler = NULL;
1193 spin_unlock_irqrestore(&tdc->lock, flags); 1193 spin_unlock_irqrestore(&tdc->lock, flags);
1194 1194
1195 while (!list_empty(&dma_desc_list)) { 1195 while (!list_empty(&dma_desc_list)) {
1196 dma_desc = list_first_entry(&dma_desc_list, 1196 dma_desc = list_first_entry(&dma_desc_list,
1197 typeof(*dma_desc), node); 1197 typeof(*dma_desc), node);
1198 list_del(&dma_desc->node); 1198 list_del(&dma_desc->node);
1199 kfree(dma_desc); 1199 kfree(dma_desc);
1200 } 1200 }
1201 1201
1202 while (!list_empty(&sg_req_list)) { 1202 while (!list_empty(&sg_req_list)) {
1203 sg_req = list_first_entry(&sg_req_list, typeof(*sg_req), node); 1203 sg_req = list_first_entry(&sg_req_list, typeof(*sg_req), node);
1204 list_del(&sg_req->node); 1204 list_del(&sg_req->node);
1205 kfree(sg_req); 1205 kfree(sg_req);
1206 } 1206 }
1207 clk_disable_unprepare(tdma->dma_clk); 1207 clk_disable_unprepare(tdma->dma_clk);
1208 } 1208 }
1209 1209
1210 /* Tegra20 specific DMA controller information */ 1210 /* Tegra20 specific DMA controller information */
1211 static const struct tegra_dma_chip_data tegra20_dma_chip_data = { 1211 static const struct tegra_dma_chip_data tegra20_dma_chip_data = {
1212 .nr_channels = 16, 1212 .nr_channels = 16,
1213 .max_dma_count = 1024UL * 64, 1213 .max_dma_count = 1024UL * 64,
1214 .support_channel_pause = false, 1214 .support_channel_pause = false,
1215 }; 1215 };
1216 1216
1217 /* Tegra30 specific DMA controller information */ 1217 /* Tegra30 specific DMA controller information */
1218 static const struct tegra_dma_chip_data tegra30_dma_chip_data = { 1218 static const struct tegra_dma_chip_data tegra30_dma_chip_data = {
1219 .nr_channels = 32, 1219 .nr_channels = 32,
1220 .max_dma_count = 1024UL * 64, 1220 .max_dma_count = 1024UL * 64,
1221 .support_channel_pause = false, 1221 .support_channel_pause = false,
1222 }; 1222 };
1223 1223
1224 /* Tegra114 specific DMA controller information */ 1224 /* Tegra114 specific DMA controller information */
1225 static const struct tegra_dma_chip_data tegra114_dma_chip_data = { 1225 static const struct tegra_dma_chip_data tegra114_dma_chip_data = {
1226 .nr_channels = 32, 1226 .nr_channels = 32,
1227 .max_dma_count = 1024UL * 64, 1227 .max_dma_count = 1024UL * 64,
1228 .support_channel_pause = true, 1228 .support_channel_pause = true,
1229 }; 1229 };
1230 1230
1231 1231
1232 static const struct of_device_id tegra_dma_of_match[] = { 1232 static const struct of_device_id tegra_dma_of_match[] = {
1233 { 1233 {
1234 .compatible = "nvidia,tegra114-apbdma", 1234 .compatible = "nvidia,tegra114-apbdma",
1235 .data = &tegra114_dma_chip_data, 1235 .data = &tegra114_dma_chip_data,
1236 }, { 1236 }, {
1237 .compatible = "nvidia,tegra30-apbdma", 1237 .compatible = "nvidia,tegra30-apbdma",
1238 .data = &tegra30_dma_chip_data, 1238 .data = &tegra30_dma_chip_data,
1239 }, { 1239 }, {
1240 .compatible = "nvidia,tegra20-apbdma", 1240 .compatible = "nvidia,tegra20-apbdma",
1241 .data = &tegra20_dma_chip_data, 1241 .data = &tegra20_dma_chip_data,
1242 }, { 1242 }, {
1243 }, 1243 },
1244 }; 1244 };
1245 MODULE_DEVICE_TABLE(of, tegra_dma_of_match); 1245 MODULE_DEVICE_TABLE(of, tegra_dma_of_match);
1246 1246
1247 static int tegra_dma_probe(struct platform_device *pdev) 1247 static int tegra_dma_probe(struct platform_device *pdev)
1248 { 1248 {
1249 struct resource *res; 1249 struct resource *res;
1250 struct tegra_dma *tdma; 1250 struct tegra_dma *tdma;
1251 int ret; 1251 int ret;
1252 int i; 1252 int i;
1253 const struct tegra_dma_chip_data *cdata = NULL; 1253 const struct tegra_dma_chip_data *cdata = NULL;
1254 const struct of_device_id *match; 1254 const struct of_device_id *match;
1255 1255
1256 match = of_match_device(tegra_dma_of_match, &pdev->dev); 1256 match = of_match_device(tegra_dma_of_match, &pdev->dev);
1257 if (!match) { 1257 if (!match) {
1258 dev_err(&pdev->dev, "Error: No device match found\n"); 1258 dev_err(&pdev->dev, "Error: No device match found\n");
1259 return -ENODEV; 1259 return -ENODEV;
1260 } 1260 }
1261 cdata = match->data; 1261 cdata = match->data;
1262 1262
1263 tdma = devm_kzalloc(&pdev->dev, sizeof(*tdma) + cdata->nr_channels * 1263 tdma = devm_kzalloc(&pdev->dev, sizeof(*tdma) + cdata->nr_channels *
1264 sizeof(struct tegra_dma_channel), GFP_KERNEL); 1264 sizeof(struct tegra_dma_channel), GFP_KERNEL);
1265 if (!tdma) { 1265 if (!tdma) {
1266 dev_err(&pdev->dev, "Error: memory allocation failed\n"); 1266 dev_err(&pdev->dev, "Error: memory allocation failed\n");
1267 return -ENOMEM; 1267 return -ENOMEM;
1268 } 1268 }
1269 1269
1270 tdma->dev = &pdev->dev; 1270 tdma->dev = &pdev->dev;
1271 tdma->chip_data = cdata; 1271 tdma->chip_data = cdata;
1272 platform_set_drvdata(pdev, tdma); 1272 platform_set_drvdata(pdev, tdma);
1273 1273
1274 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1274 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1275 tdma->base_addr = devm_ioremap_resource(&pdev->dev, res); 1275 tdma->base_addr = devm_ioremap_resource(&pdev->dev, res);
1276 if (IS_ERR(tdma->base_addr)) 1276 if (IS_ERR(tdma->base_addr))
1277 return PTR_ERR(tdma->base_addr); 1277 return PTR_ERR(tdma->base_addr);
1278 1278
1279 tdma->dma_clk = devm_clk_get(&pdev->dev, NULL); 1279 tdma->dma_clk = devm_clk_get(&pdev->dev, NULL);
1280 if (IS_ERR(tdma->dma_clk)) { 1280 if (IS_ERR(tdma->dma_clk)) {
1281 dev_err(&pdev->dev, "Error: Missing controller clock\n"); 1281 dev_err(&pdev->dev, "Error: Missing controller clock\n");
1282 return PTR_ERR(tdma->dma_clk); 1282 return PTR_ERR(tdma->dma_clk);
1283 } 1283 }
1284 1284
1285 spin_lock_init(&tdma->global_lock); 1285 spin_lock_init(&tdma->global_lock);
1286 1286
1287 pm_runtime_enable(&pdev->dev); 1287 pm_runtime_enable(&pdev->dev);
1288 if (!pm_runtime_enabled(&pdev->dev)) { 1288 if (!pm_runtime_enabled(&pdev->dev)) {
1289 ret = tegra_dma_runtime_resume(&pdev->dev); 1289 ret = tegra_dma_runtime_resume(&pdev->dev);
1290 if (ret) { 1290 if (ret) {
1291 dev_err(&pdev->dev, "dma_runtime_resume failed %d\n", 1291 dev_err(&pdev->dev, "dma_runtime_resume failed %d\n",
1292 ret); 1292 ret);
1293 goto err_pm_disable; 1293 goto err_pm_disable;
1294 } 1294 }
1295 } 1295 }
1296 1296
1297 /* Enable clock before accessing registers */ 1297 /* Enable clock before accessing registers */
1298 ret = clk_prepare_enable(tdma->dma_clk); 1298 ret = clk_prepare_enable(tdma->dma_clk);
1299 if (ret < 0) { 1299 if (ret < 0) {
1300 dev_err(&pdev->dev, "clk_prepare_enable failed: %d\n", ret); 1300 dev_err(&pdev->dev, "clk_prepare_enable failed: %d\n", ret);
1301 goto err_pm_disable; 1301 goto err_pm_disable;
1302 } 1302 }
1303 1303
1304 /* Reset DMA controller */ 1304 /* Reset DMA controller */
1305 tegra_periph_reset_assert(tdma->dma_clk); 1305 tegra_periph_reset_assert(tdma->dma_clk);
1306 udelay(2); 1306 udelay(2);
1307 tegra_periph_reset_deassert(tdma->dma_clk); 1307 tegra_periph_reset_deassert(tdma->dma_clk);
1308 1308
1309 /* Enable global DMA registers */ 1309 /* Enable global DMA registers */
1310 tdma_write(tdma, TEGRA_APBDMA_GENERAL, TEGRA_APBDMA_GENERAL_ENABLE); 1310 tdma_write(tdma, TEGRA_APBDMA_GENERAL, TEGRA_APBDMA_GENERAL_ENABLE);
1311 tdma_write(tdma, TEGRA_APBDMA_CONTROL, 0); 1311 tdma_write(tdma, TEGRA_APBDMA_CONTROL, 0);
1312 tdma_write(tdma, TEGRA_APBDMA_IRQ_MASK_SET, 0xFFFFFFFFul); 1312 tdma_write(tdma, TEGRA_APBDMA_IRQ_MASK_SET, 0xFFFFFFFFul);
1313 1313
1314 clk_disable_unprepare(tdma->dma_clk); 1314 clk_disable_unprepare(tdma->dma_clk);
1315 1315
1316 INIT_LIST_HEAD(&tdma->dma_dev.channels); 1316 INIT_LIST_HEAD(&tdma->dma_dev.channels);
1317 for (i = 0; i < cdata->nr_channels; i++) { 1317 for (i = 0; i < cdata->nr_channels; i++) {
1318 struct tegra_dma_channel *tdc = &tdma->channels[i]; 1318 struct tegra_dma_channel *tdc = &tdma->channels[i];
1319 1319
1320 tdc->chan_base_offset = TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET + 1320 tdc->chan_base_offset = TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET +
1321 i * TEGRA_APBDMA_CHANNEL_REGISTER_SIZE; 1321 i * TEGRA_APBDMA_CHANNEL_REGISTER_SIZE;
1322 1322
1323 res = platform_get_resource(pdev, IORESOURCE_IRQ, i); 1323 res = platform_get_resource(pdev, IORESOURCE_IRQ, i);
1324 if (!res) { 1324 if (!res) {
1325 ret = -EINVAL; 1325 ret = -EINVAL;
1326 dev_err(&pdev->dev, "No irq resource for chan %d\n", i); 1326 dev_err(&pdev->dev, "No irq resource for chan %d\n", i);
1327 goto err_irq; 1327 goto err_irq;
1328 } 1328 }
1329 tdc->irq = res->start; 1329 tdc->irq = res->start;
1330 snprintf(tdc->name, sizeof(tdc->name), "apbdma.%d", i); 1330 snprintf(tdc->name, sizeof(tdc->name), "apbdma.%d", i);
1331 ret = devm_request_irq(&pdev->dev, tdc->irq, 1331 ret = devm_request_irq(&pdev->dev, tdc->irq,
1332 tegra_dma_isr, 0, tdc->name, tdc); 1332 tegra_dma_isr, 0, tdc->name, tdc);
1333 if (ret) { 1333 if (ret) {
1334 dev_err(&pdev->dev, 1334 dev_err(&pdev->dev,
1335 "request_irq failed with err %d channel %d\n", 1335 "request_irq failed with err %d channel %d\n",
1336 ret, i); 1336 ret, i);
1337 goto err_irq; 1337 goto err_irq;
1338 } 1338 }
1339 1339
1340 tdc->dma_chan.device = &tdma->dma_dev; 1340 tdc->dma_chan.device = &tdma->dma_dev;
1341 dma_cookie_init(&tdc->dma_chan); 1341 dma_cookie_init(&tdc->dma_chan);
1342 list_add_tail(&tdc->dma_chan.device_node, 1342 list_add_tail(&tdc->dma_chan.device_node,
1343 &tdma->dma_dev.channels); 1343 &tdma->dma_dev.channels);
1344 tdc->tdma = tdma; 1344 tdc->tdma = tdma;
1345 tdc->id = i; 1345 tdc->id = i;
1346 1346
1347 tasklet_init(&tdc->tasklet, tegra_dma_tasklet, 1347 tasklet_init(&tdc->tasklet, tegra_dma_tasklet,
1348 (unsigned long)tdc); 1348 (unsigned long)tdc);
1349 spin_lock_init(&tdc->lock); 1349 spin_lock_init(&tdc->lock);
1350 1350
1351 INIT_LIST_HEAD(&tdc->pending_sg_req); 1351 INIT_LIST_HEAD(&tdc->pending_sg_req);
1352 INIT_LIST_HEAD(&tdc->free_sg_req); 1352 INIT_LIST_HEAD(&tdc->free_sg_req);
1353 INIT_LIST_HEAD(&tdc->free_dma_desc); 1353 INIT_LIST_HEAD(&tdc->free_dma_desc);
1354 INIT_LIST_HEAD(&tdc->cb_desc); 1354 INIT_LIST_HEAD(&tdc->cb_desc);
1355 } 1355 }
1356 1356
1357 dma_cap_set(DMA_SLAVE, tdma->dma_dev.cap_mask); 1357 dma_cap_set(DMA_SLAVE, tdma->dma_dev.cap_mask);
1358 dma_cap_set(DMA_PRIVATE, tdma->dma_dev.cap_mask); 1358 dma_cap_set(DMA_PRIVATE, tdma->dma_dev.cap_mask);
1359 dma_cap_set(DMA_CYCLIC, tdma->dma_dev.cap_mask); 1359 dma_cap_set(DMA_CYCLIC, tdma->dma_dev.cap_mask);
1360 1360
1361 tdma->dma_dev.dev = &pdev->dev; 1361 tdma->dma_dev.dev = &pdev->dev;
1362 tdma->dma_dev.device_alloc_chan_resources = 1362 tdma->dma_dev.device_alloc_chan_resources =
1363 tegra_dma_alloc_chan_resources; 1363 tegra_dma_alloc_chan_resources;
1364 tdma->dma_dev.device_free_chan_resources = 1364 tdma->dma_dev.device_free_chan_resources =
1365 tegra_dma_free_chan_resources; 1365 tegra_dma_free_chan_resources;
1366 tdma->dma_dev.device_prep_slave_sg = tegra_dma_prep_slave_sg; 1366 tdma->dma_dev.device_prep_slave_sg = tegra_dma_prep_slave_sg;
1367 tdma->dma_dev.device_prep_dma_cyclic = tegra_dma_prep_dma_cyclic; 1367 tdma->dma_dev.device_prep_dma_cyclic = tegra_dma_prep_dma_cyclic;
1368 tdma->dma_dev.device_control = tegra_dma_device_control; 1368 tdma->dma_dev.device_control = tegra_dma_device_control;
1369 tdma->dma_dev.device_tx_status = tegra_dma_tx_status; 1369 tdma->dma_dev.device_tx_status = tegra_dma_tx_status;
1370 tdma->dma_dev.device_issue_pending = tegra_dma_issue_pending; 1370 tdma->dma_dev.device_issue_pending = tegra_dma_issue_pending;
1371 1371
1372 ret = dma_async_device_register(&tdma->dma_dev); 1372 ret = dma_async_device_register(&tdma->dma_dev);
1373 if (ret < 0) { 1373 if (ret < 0) {
1374 dev_err(&pdev->dev, 1374 dev_err(&pdev->dev,
1375 "Tegra20 APB DMA driver registration failed %d\n", ret); 1375 "Tegra20 APB DMA driver registration failed %d\n", ret);
1376 goto err_irq; 1376 goto err_irq;
1377 } 1377 }
1378 1378
1379 dev_info(&pdev->dev, "Tegra20 APB DMA driver registered %d channels\n", 1379 dev_info(&pdev->dev, "Tegra20 APB DMA driver registered %d channels\n",
1380 cdata->nr_channels); 1380 cdata->nr_channels);
1381 return 0; 1381 return 0;
1382 1382
1383 err_irq: 1383 err_irq:
1384 while (--i >= 0) { 1384 while (--i >= 0) {
1385 struct tegra_dma_channel *tdc = &tdma->channels[i]; 1385 struct tegra_dma_channel *tdc = &tdma->channels[i];
1386 tasklet_kill(&tdc->tasklet); 1386 tasklet_kill(&tdc->tasklet);
1387 } 1387 }
1388 1388
1389 err_pm_disable: 1389 err_pm_disable:
1390 pm_runtime_disable(&pdev->dev); 1390 pm_runtime_disable(&pdev->dev);
1391 if (!pm_runtime_status_suspended(&pdev->dev)) 1391 if (!pm_runtime_status_suspended(&pdev->dev))
1392 tegra_dma_runtime_suspend(&pdev->dev); 1392 tegra_dma_runtime_suspend(&pdev->dev);
1393 return ret; 1393 return ret;
1394 } 1394 }
1395 1395
1396 static int tegra_dma_remove(struct platform_device *pdev) 1396 static int tegra_dma_remove(struct platform_device *pdev)
1397 { 1397 {
1398 struct tegra_dma *tdma = platform_get_drvdata(pdev); 1398 struct tegra_dma *tdma = platform_get_drvdata(pdev);
1399 int i; 1399 int i;
1400 struct tegra_dma_channel *tdc; 1400 struct tegra_dma_channel *tdc;
1401 1401
1402 dma_async_device_unregister(&tdma->dma_dev); 1402 dma_async_device_unregister(&tdma->dma_dev);
1403 1403
1404 for (i = 0; i < tdma->chip_data->nr_channels; ++i) { 1404 for (i = 0; i < tdma->chip_data->nr_channels; ++i) {
1405 tdc = &tdma->channels[i]; 1405 tdc = &tdma->channels[i];
1406 tasklet_kill(&tdc->tasklet); 1406 tasklet_kill(&tdc->tasklet);
1407 } 1407 }
1408 1408
1409 pm_runtime_disable(&pdev->dev); 1409 pm_runtime_disable(&pdev->dev);
1410 if (!pm_runtime_status_suspended(&pdev->dev)) 1410 if (!pm_runtime_status_suspended(&pdev->dev))
1411 tegra_dma_runtime_suspend(&pdev->dev); 1411 tegra_dma_runtime_suspend(&pdev->dev);
1412 1412
1413 return 0; 1413 return 0;
1414 } 1414 }
1415 1415
1416 static int tegra_dma_runtime_suspend(struct device *dev) 1416 static int tegra_dma_runtime_suspend(struct device *dev)
1417 { 1417 {
1418 struct platform_device *pdev = to_platform_device(dev); 1418 struct platform_device *pdev = to_platform_device(dev);
1419 struct tegra_dma *tdma = platform_get_drvdata(pdev); 1419 struct tegra_dma *tdma = platform_get_drvdata(pdev);
1420 1420
1421 clk_disable_unprepare(tdma->dma_clk); 1421 clk_disable_unprepare(tdma->dma_clk);
1422 return 0; 1422 return 0;
1423 } 1423 }
1424 1424
1425 static int tegra_dma_runtime_resume(struct device *dev) 1425 static int tegra_dma_runtime_resume(struct device *dev)
1426 { 1426 {
1427 struct platform_device *pdev = to_platform_device(dev); 1427 struct platform_device *pdev = to_platform_device(dev);
1428 struct tegra_dma *tdma = platform_get_drvdata(pdev); 1428 struct tegra_dma *tdma = platform_get_drvdata(pdev);
1429 int ret; 1429 int ret;
1430 1430
1431 ret = clk_prepare_enable(tdma->dma_clk); 1431 ret = clk_prepare_enable(tdma->dma_clk);
1432 if (ret < 0) { 1432 if (ret < 0) {
1433 dev_err(dev, "clk_enable failed: %d\n", ret); 1433 dev_err(dev, "clk_enable failed: %d\n", ret);
1434 return ret; 1434 return ret;
1435 } 1435 }
1436 return 0; 1436 return 0;
1437 } 1437 }
1438 1438
1439 #ifdef CONFIG_PM_SLEEP 1439 #ifdef CONFIG_PM_SLEEP
1440 static int tegra_dma_pm_suspend(struct device *dev) 1440 static int tegra_dma_pm_suspend(struct device *dev)
1441 { 1441 {
1442 struct tegra_dma *tdma = dev_get_drvdata(dev); 1442 struct tegra_dma *tdma = dev_get_drvdata(dev);
1443 int i; 1443 int i;
1444 int ret; 1444 int ret;
1445 1445
1446 /* Enable the clock before accessing the registers */ 1446 /* Enable the clock before accessing the registers */
1447 ret = tegra_dma_runtime_resume(dev); 1447 ret = tegra_dma_runtime_resume(dev);
1448 if (ret < 0) 1448 if (ret < 0)
1449 return ret; 1449 return ret;
1450 1450
1451 tdma->reg_gen = tdma_read(tdma, TEGRA_APBDMA_GENERAL); 1451 tdma->reg_gen = tdma_read(tdma, TEGRA_APBDMA_GENERAL);
1452 for (i = 0; i < tdma->chip_data->nr_channels; i++) { 1452 for (i = 0; i < tdma->chip_data->nr_channels; i++) {
1453 struct tegra_dma_channel *tdc = &tdma->channels[i]; 1453 struct tegra_dma_channel *tdc = &tdma->channels[i];
1454 struct tegra_dma_channel_regs *ch_reg = &tdc->channel_reg; 1454 struct tegra_dma_channel_regs *ch_reg = &tdc->channel_reg;
1455 1455
1456 ch_reg->csr = tdc_read(tdc, TEGRA_APBDMA_CHAN_CSR); 1456 ch_reg->csr = tdc_read(tdc, TEGRA_APBDMA_CHAN_CSR);
1457 ch_reg->ahb_ptr = tdc_read(tdc, TEGRA_APBDMA_CHAN_AHBPTR); 1457 ch_reg->ahb_ptr = tdc_read(tdc, TEGRA_APBDMA_CHAN_AHBPTR);
1458 ch_reg->apb_ptr = tdc_read(tdc, TEGRA_APBDMA_CHAN_APBPTR); 1458 ch_reg->apb_ptr = tdc_read(tdc, TEGRA_APBDMA_CHAN_APBPTR);
1459 ch_reg->ahb_seq = tdc_read(tdc, TEGRA_APBDMA_CHAN_AHBSEQ); 1459 ch_reg->ahb_seq = tdc_read(tdc, TEGRA_APBDMA_CHAN_AHBSEQ);
1460 ch_reg->apb_seq = tdc_read(tdc, TEGRA_APBDMA_CHAN_APBSEQ); 1460 ch_reg->apb_seq = tdc_read(tdc, TEGRA_APBDMA_CHAN_APBSEQ);
1461 } 1461 }
1462 1462
1463 /* Disable clock */ 1463 /* Disable clock */
1464 tegra_dma_runtime_suspend(dev); 1464 tegra_dma_runtime_suspend(dev);
1465 return 0; 1465 return 0;
1466 } 1466 }
1467 1467
1468 static int tegra_dma_pm_resume(struct device *dev) 1468 static int tegra_dma_pm_resume(struct device *dev)
1469 { 1469 {
1470 struct tegra_dma *tdma = dev_get_drvdata(dev); 1470 struct tegra_dma *tdma = dev_get_drvdata(dev);
1471 int i; 1471 int i;
1472 int ret; 1472 int ret;
1473 1473
1474 /* Enable the clock before accessing the registers */ 1474 /* Enable the clock before accessing the registers */
1475 ret = tegra_dma_runtime_resume(dev); 1475 ret = tegra_dma_runtime_resume(dev);
1476 if (ret < 0) 1476 if (ret < 0)
1477 return ret; 1477 return ret;
1478 1478
1479 tdma_write(tdma, TEGRA_APBDMA_GENERAL, tdma->reg_gen); 1479 tdma_write(tdma, TEGRA_APBDMA_GENERAL, tdma->reg_gen);
1480 tdma_write(tdma, TEGRA_APBDMA_CONTROL, 0); 1480 tdma_write(tdma, TEGRA_APBDMA_CONTROL, 0);
1481 tdma_write(tdma, TEGRA_APBDMA_IRQ_MASK_SET, 0xFFFFFFFFul); 1481 tdma_write(tdma, TEGRA_APBDMA_IRQ_MASK_SET, 0xFFFFFFFFul);
1482 1482
1483 for (i = 0; i < tdma->chip_data->nr_channels; i++) { 1483 for (i = 0; i < tdma->chip_data->nr_channels; i++) {
1484 struct tegra_dma_channel *tdc = &tdma->channels[i]; 1484 struct tegra_dma_channel *tdc = &tdma->channels[i];
1485 struct tegra_dma_channel_regs *ch_reg = &tdc->channel_reg; 1485 struct tegra_dma_channel_regs *ch_reg = &tdc->channel_reg;
1486 1486
1487 tdc_write(tdc, TEGRA_APBDMA_CHAN_APBSEQ, ch_reg->apb_seq); 1487 tdc_write(tdc, TEGRA_APBDMA_CHAN_APBSEQ, ch_reg->apb_seq);
1488 tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, ch_reg->apb_ptr); 1488 tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, ch_reg->apb_ptr);
1489 tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBSEQ, ch_reg->ahb_seq); 1489 tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBSEQ, ch_reg->ahb_seq);
1490 tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, ch_reg->ahb_ptr); 1490 tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, ch_reg->ahb_ptr);
1491 tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, 1491 tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR,
1492 (ch_reg->csr & ~TEGRA_APBDMA_CSR_ENB)); 1492 (ch_reg->csr & ~TEGRA_APBDMA_CSR_ENB));
1493 } 1493 }
1494 1494
1495 /* Disable clock */ 1495 /* Disable clock */
1496 tegra_dma_runtime_suspend(dev); 1496 tegra_dma_runtime_suspend(dev);
1497 return 0; 1497 return 0;
1498 } 1498 }
1499 #endif 1499 #endif
1500 1500
1501 static const struct dev_pm_ops tegra_dma_dev_pm_ops = { 1501 static const struct dev_pm_ops tegra_dma_dev_pm_ops = {
1502 #ifdef CONFIG_PM_RUNTIME 1502 #ifdef CONFIG_PM_RUNTIME
1503 .runtime_suspend = tegra_dma_runtime_suspend, 1503 .runtime_suspend = tegra_dma_runtime_suspend,
1504 .runtime_resume = tegra_dma_runtime_resume, 1504 .runtime_resume = tegra_dma_runtime_resume,
1505 #endif 1505 #endif
1506 SET_SYSTEM_SLEEP_PM_OPS(tegra_dma_pm_suspend, tegra_dma_pm_resume) 1506 SET_SYSTEM_SLEEP_PM_OPS(tegra_dma_pm_suspend, tegra_dma_pm_resume)
1507 }; 1507 };
1508 1508
1509 static struct platform_driver tegra_dmac_driver = { 1509 static struct platform_driver tegra_dmac_driver = {
1510 .driver = { 1510 .driver = {
1511 .name = "tegra-apbdma", 1511 .name = "tegra-apbdma",
1512 .owner = THIS_MODULE, 1512 .owner = THIS_MODULE,
1513 .pm = &tegra_dma_dev_pm_ops, 1513 .pm = &tegra_dma_dev_pm_ops,
1514 .of_match_table = tegra_dma_of_match, 1514 .of_match_table = tegra_dma_of_match,
1515 }, 1515 },
1516 .probe = tegra_dma_probe, 1516 .probe = tegra_dma_probe,
1517 .remove = tegra_dma_remove, 1517 .remove = tegra_dma_remove,
1518 }; 1518 };
1519 1519
1520 module_platform_driver(tegra_dmac_driver); 1520 module_platform_driver(tegra_dmac_driver);
1521 1521
1522 MODULE_ALIAS("platform:tegra20-apbdma"); 1522 MODULE_ALIAS("platform:tegra20-apbdma");
1523 MODULE_DESCRIPTION("NVIDIA Tegra APB DMA Controller driver"); 1523 MODULE_DESCRIPTION("NVIDIA Tegra APB DMA Controller driver");
1524 MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>"); 1524 MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
1525 MODULE_LICENSE("GPL v2"); 1525 MODULE_LICENSE("GPL v2");
1526 1526
drivers/dma/txx9dmac.c
1 /* 1 /*
2 * Driver for the TXx9 SoC DMA Controller 2 * Driver for the TXx9 SoC DMA Controller
3 * 3 *
4 * Copyright (C) 2009 Atsushi Nemoto 4 * Copyright (C) 2009 Atsushi Nemoto
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as 7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation. 8 * published by the Free Software Foundation.
9 */ 9 */
10 #include <linux/dma-mapping.h> 10 #include <linux/dma-mapping.h>
11 #include <linux/init.h> 11 #include <linux/init.h>
12 #include <linux/interrupt.h> 12 #include <linux/interrupt.h>
13 #include <linux/io.h> 13 #include <linux/io.h>
14 #include <linux/module.h> 14 #include <linux/module.h>
15 #include <linux/platform_device.h> 15 #include <linux/platform_device.h>
16 #include <linux/slab.h> 16 #include <linux/slab.h>
17 #include <linux/scatterlist.h> 17 #include <linux/scatterlist.h>
18 18
19 #include "dmaengine.h" 19 #include "dmaengine.h"
20 #include "txx9dmac.h" 20 #include "txx9dmac.h"
21 21
22 static struct txx9dmac_chan *to_txx9dmac_chan(struct dma_chan *chan) 22 static struct txx9dmac_chan *to_txx9dmac_chan(struct dma_chan *chan)
23 { 23 {
24 return container_of(chan, struct txx9dmac_chan, chan); 24 return container_of(chan, struct txx9dmac_chan, chan);
25 } 25 }
26 26
27 static struct txx9dmac_cregs __iomem *__dma_regs(const struct txx9dmac_chan *dc) 27 static struct txx9dmac_cregs __iomem *__dma_regs(const struct txx9dmac_chan *dc)
28 { 28 {
29 return dc->ch_regs; 29 return dc->ch_regs;
30 } 30 }
31 31
32 static struct txx9dmac_cregs32 __iomem *__dma_regs32( 32 static struct txx9dmac_cregs32 __iomem *__dma_regs32(
33 const struct txx9dmac_chan *dc) 33 const struct txx9dmac_chan *dc)
34 { 34 {
35 return dc->ch_regs; 35 return dc->ch_regs;
36 } 36 }
37 37
38 #define channel64_readq(dc, name) \ 38 #define channel64_readq(dc, name) \
39 __raw_readq(&(__dma_regs(dc)->name)) 39 __raw_readq(&(__dma_regs(dc)->name))
40 #define channel64_writeq(dc, name, val) \ 40 #define channel64_writeq(dc, name, val) \
41 __raw_writeq((val), &(__dma_regs(dc)->name)) 41 __raw_writeq((val), &(__dma_regs(dc)->name))
42 #define channel64_readl(dc, name) \ 42 #define channel64_readl(dc, name) \
43 __raw_readl(&(__dma_regs(dc)->name)) 43 __raw_readl(&(__dma_regs(dc)->name))
44 #define channel64_writel(dc, name, val) \ 44 #define channel64_writel(dc, name, val) \
45 __raw_writel((val), &(__dma_regs(dc)->name)) 45 __raw_writel((val), &(__dma_regs(dc)->name))
46 46
47 #define channel32_readl(dc, name) \ 47 #define channel32_readl(dc, name) \
48 __raw_readl(&(__dma_regs32(dc)->name)) 48 __raw_readl(&(__dma_regs32(dc)->name))
49 #define channel32_writel(dc, name, val) \ 49 #define channel32_writel(dc, name, val) \
50 __raw_writel((val), &(__dma_regs32(dc)->name)) 50 __raw_writel((val), &(__dma_regs32(dc)->name))
51 51
52 #define channel_readq(dc, name) channel64_readq(dc, name) 52 #define channel_readq(dc, name) channel64_readq(dc, name)
53 #define channel_writeq(dc, name, val) channel64_writeq(dc, name, val) 53 #define channel_writeq(dc, name, val) channel64_writeq(dc, name, val)
54 #define channel_readl(dc, name) \ 54 #define channel_readl(dc, name) \
55 (is_dmac64(dc) ? \ 55 (is_dmac64(dc) ? \
56 channel64_readl(dc, name) : channel32_readl(dc, name)) 56 channel64_readl(dc, name) : channel32_readl(dc, name))
57 #define channel_writel(dc, name, val) \ 57 #define channel_writel(dc, name, val) \
58 (is_dmac64(dc) ? \ 58 (is_dmac64(dc) ? \
59 channel64_writel(dc, name, val) : channel32_writel(dc, name, val)) 59 channel64_writel(dc, name, val) : channel32_writel(dc, name, val))
60 60
61 static dma_addr_t channel64_read_CHAR(const struct txx9dmac_chan *dc) 61 static dma_addr_t channel64_read_CHAR(const struct txx9dmac_chan *dc)
62 { 62 {
63 if (sizeof(__dma_regs(dc)->CHAR) == sizeof(u64)) 63 if (sizeof(__dma_regs(dc)->CHAR) == sizeof(u64))
64 return channel64_readq(dc, CHAR); 64 return channel64_readq(dc, CHAR);
65 else 65 else
66 return channel64_readl(dc, CHAR); 66 return channel64_readl(dc, CHAR);
67 } 67 }
68 68
69 static void channel64_write_CHAR(const struct txx9dmac_chan *dc, dma_addr_t val) 69 static void channel64_write_CHAR(const struct txx9dmac_chan *dc, dma_addr_t val)
70 { 70 {
71 if (sizeof(__dma_regs(dc)->CHAR) == sizeof(u64)) 71 if (sizeof(__dma_regs(dc)->CHAR) == sizeof(u64))
72 channel64_writeq(dc, CHAR, val); 72 channel64_writeq(dc, CHAR, val);
73 else 73 else
74 channel64_writel(dc, CHAR, val); 74 channel64_writel(dc, CHAR, val);
75 } 75 }
76 76
77 static void channel64_clear_CHAR(const struct txx9dmac_chan *dc) 77 static void channel64_clear_CHAR(const struct txx9dmac_chan *dc)
78 { 78 {
79 #if defined(CONFIG_32BIT) && !defined(CONFIG_64BIT_PHYS_ADDR) 79 #if defined(CONFIG_32BIT) && !defined(CONFIG_64BIT_PHYS_ADDR)
80 channel64_writel(dc, CHAR, 0); 80 channel64_writel(dc, CHAR, 0);
81 channel64_writel(dc, __pad_CHAR, 0); 81 channel64_writel(dc, __pad_CHAR, 0);
82 #else 82 #else
83 channel64_writeq(dc, CHAR, 0); 83 channel64_writeq(dc, CHAR, 0);
84 #endif 84 #endif
85 } 85 }
86 86
87 static dma_addr_t channel_read_CHAR(const struct txx9dmac_chan *dc) 87 static dma_addr_t channel_read_CHAR(const struct txx9dmac_chan *dc)
88 { 88 {
89 if (is_dmac64(dc)) 89 if (is_dmac64(dc))
90 return channel64_read_CHAR(dc); 90 return channel64_read_CHAR(dc);
91 else 91 else
92 return channel32_readl(dc, CHAR); 92 return channel32_readl(dc, CHAR);
93 } 93 }
94 94
95 static void channel_write_CHAR(const struct txx9dmac_chan *dc, dma_addr_t val) 95 static void channel_write_CHAR(const struct txx9dmac_chan *dc, dma_addr_t val)
96 { 96 {
97 if (is_dmac64(dc)) 97 if (is_dmac64(dc))
98 channel64_write_CHAR(dc, val); 98 channel64_write_CHAR(dc, val);
99 else 99 else
100 channel32_writel(dc, CHAR, val); 100 channel32_writel(dc, CHAR, val);
101 } 101 }
102 102
103 static struct txx9dmac_regs __iomem *__txx9dmac_regs( 103 static struct txx9dmac_regs __iomem *__txx9dmac_regs(
104 const struct txx9dmac_dev *ddev) 104 const struct txx9dmac_dev *ddev)
105 { 105 {
106 return ddev->regs; 106 return ddev->regs;
107 } 107 }
108 108
109 static struct txx9dmac_regs32 __iomem *__txx9dmac_regs32( 109 static struct txx9dmac_regs32 __iomem *__txx9dmac_regs32(
110 const struct txx9dmac_dev *ddev) 110 const struct txx9dmac_dev *ddev)
111 { 111 {
112 return ddev->regs; 112 return ddev->regs;
113 } 113 }
114 114
115 #define dma64_readl(ddev, name) \ 115 #define dma64_readl(ddev, name) \
116 __raw_readl(&(__txx9dmac_regs(ddev)->name)) 116 __raw_readl(&(__txx9dmac_regs(ddev)->name))
117 #define dma64_writel(ddev, name, val) \ 117 #define dma64_writel(ddev, name, val) \
118 __raw_writel((val), &(__txx9dmac_regs(ddev)->name)) 118 __raw_writel((val), &(__txx9dmac_regs(ddev)->name))
119 119
120 #define dma32_readl(ddev, name) \ 120 #define dma32_readl(ddev, name) \
121 __raw_readl(&(__txx9dmac_regs32(ddev)->name)) 121 __raw_readl(&(__txx9dmac_regs32(ddev)->name))
122 #define dma32_writel(ddev, name, val) \ 122 #define dma32_writel(ddev, name, val) \
123 __raw_writel((val), &(__txx9dmac_regs32(ddev)->name)) 123 __raw_writel((val), &(__txx9dmac_regs32(ddev)->name))
124 124
125 #define dma_readl(ddev, name) \ 125 #define dma_readl(ddev, name) \
126 (__is_dmac64(ddev) ? \ 126 (__is_dmac64(ddev) ? \
127 dma64_readl(ddev, name) : dma32_readl(ddev, name)) 127 dma64_readl(ddev, name) : dma32_readl(ddev, name))
128 #define dma_writel(ddev, name, val) \ 128 #define dma_writel(ddev, name, val) \
129 (__is_dmac64(ddev) ? \ 129 (__is_dmac64(ddev) ? \
130 dma64_writel(ddev, name, val) : dma32_writel(ddev, name, val)) 130 dma64_writel(ddev, name, val) : dma32_writel(ddev, name, val))
131 131
132 static struct device *chan2dev(struct dma_chan *chan) 132 static struct device *chan2dev(struct dma_chan *chan)
133 { 133 {
134 return &chan->dev->device; 134 return &chan->dev->device;
135 } 135 }
136 static struct device *chan2parent(struct dma_chan *chan) 136 static struct device *chan2parent(struct dma_chan *chan)
137 { 137 {
138 return chan->dev->device.parent; 138 return chan->dev->device.parent;
139 } 139 }
140 140
141 static struct txx9dmac_desc * 141 static struct txx9dmac_desc *
142 txd_to_txx9dmac_desc(struct dma_async_tx_descriptor *txd) 142 txd_to_txx9dmac_desc(struct dma_async_tx_descriptor *txd)
143 { 143 {
144 return container_of(txd, struct txx9dmac_desc, txd); 144 return container_of(txd, struct txx9dmac_desc, txd);
145 } 145 }
146 146
147 static dma_addr_t desc_read_CHAR(const struct txx9dmac_chan *dc, 147 static dma_addr_t desc_read_CHAR(const struct txx9dmac_chan *dc,
148 const struct txx9dmac_desc *desc) 148 const struct txx9dmac_desc *desc)
149 { 149 {
150 return is_dmac64(dc) ? desc->hwdesc.CHAR : desc->hwdesc32.CHAR; 150 return is_dmac64(dc) ? desc->hwdesc.CHAR : desc->hwdesc32.CHAR;
151 } 151 }
152 152
153 static void desc_write_CHAR(const struct txx9dmac_chan *dc, 153 static void desc_write_CHAR(const struct txx9dmac_chan *dc,
154 struct txx9dmac_desc *desc, dma_addr_t val) 154 struct txx9dmac_desc *desc, dma_addr_t val)
155 { 155 {
156 if (is_dmac64(dc)) 156 if (is_dmac64(dc))
157 desc->hwdesc.CHAR = val; 157 desc->hwdesc.CHAR = val;
158 else 158 else
159 desc->hwdesc32.CHAR = val; 159 desc->hwdesc32.CHAR = val;
160 } 160 }
161 161
162 #define TXX9_DMA_MAX_COUNT 0x04000000 162 #define TXX9_DMA_MAX_COUNT 0x04000000
163 163
164 #define TXX9_DMA_INITIAL_DESC_COUNT 64 164 #define TXX9_DMA_INITIAL_DESC_COUNT 64
165 165
166 static struct txx9dmac_desc *txx9dmac_first_active(struct txx9dmac_chan *dc) 166 static struct txx9dmac_desc *txx9dmac_first_active(struct txx9dmac_chan *dc)
167 { 167 {
168 return list_entry(dc->active_list.next, 168 return list_entry(dc->active_list.next,
169 struct txx9dmac_desc, desc_node); 169 struct txx9dmac_desc, desc_node);
170 } 170 }
171 171
172 static struct txx9dmac_desc *txx9dmac_last_active(struct txx9dmac_chan *dc) 172 static struct txx9dmac_desc *txx9dmac_last_active(struct txx9dmac_chan *dc)
173 { 173 {
174 return list_entry(dc->active_list.prev, 174 return list_entry(dc->active_list.prev,
175 struct txx9dmac_desc, desc_node); 175 struct txx9dmac_desc, desc_node);
176 } 176 }
177 177
178 static struct txx9dmac_desc *txx9dmac_first_queued(struct txx9dmac_chan *dc) 178 static struct txx9dmac_desc *txx9dmac_first_queued(struct txx9dmac_chan *dc)
179 { 179 {
180 return list_entry(dc->queue.next, struct txx9dmac_desc, desc_node); 180 return list_entry(dc->queue.next, struct txx9dmac_desc, desc_node);
181 } 181 }
182 182
183 static struct txx9dmac_desc *txx9dmac_last_child(struct txx9dmac_desc *desc) 183 static struct txx9dmac_desc *txx9dmac_last_child(struct txx9dmac_desc *desc)
184 { 184 {
185 if (!list_empty(&desc->tx_list)) 185 if (!list_empty(&desc->tx_list))
186 desc = list_entry(desc->tx_list.prev, typeof(*desc), desc_node); 186 desc = list_entry(desc->tx_list.prev, typeof(*desc), desc_node);
187 return desc; 187 return desc;
188 } 188 }
189 189
190 static dma_cookie_t txx9dmac_tx_submit(struct dma_async_tx_descriptor *tx); 190 static dma_cookie_t txx9dmac_tx_submit(struct dma_async_tx_descriptor *tx);
191 191
192 static struct txx9dmac_desc *txx9dmac_desc_alloc(struct txx9dmac_chan *dc, 192 static struct txx9dmac_desc *txx9dmac_desc_alloc(struct txx9dmac_chan *dc,
193 gfp_t flags) 193 gfp_t flags)
194 { 194 {
195 struct txx9dmac_dev *ddev = dc->ddev; 195 struct txx9dmac_dev *ddev = dc->ddev;
196 struct txx9dmac_desc *desc; 196 struct txx9dmac_desc *desc;
197 197
198 desc = kzalloc(sizeof(*desc), flags); 198 desc = kzalloc(sizeof(*desc), flags);
199 if (!desc) 199 if (!desc)
200 return NULL; 200 return NULL;
201 INIT_LIST_HEAD(&desc->tx_list); 201 INIT_LIST_HEAD(&desc->tx_list);
202 dma_async_tx_descriptor_init(&desc->txd, &dc->chan); 202 dma_async_tx_descriptor_init(&desc->txd, &dc->chan);
203 desc->txd.tx_submit = txx9dmac_tx_submit; 203 desc->txd.tx_submit = txx9dmac_tx_submit;
204 /* txd.flags will be overwritten in prep funcs */ 204 /* txd.flags will be overwritten in prep funcs */
205 desc->txd.flags = DMA_CTRL_ACK; 205 desc->txd.flags = DMA_CTRL_ACK;
206 desc->txd.phys = dma_map_single(chan2parent(&dc->chan), &desc->hwdesc, 206 desc->txd.phys = dma_map_single(chan2parent(&dc->chan), &desc->hwdesc,
207 ddev->descsize, DMA_TO_DEVICE); 207 ddev->descsize, DMA_TO_DEVICE);
208 return desc; 208 return desc;
209 } 209 }
210 210
211 static struct txx9dmac_desc *txx9dmac_desc_get(struct txx9dmac_chan *dc) 211 static struct txx9dmac_desc *txx9dmac_desc_get(struct txx9dmac_chan *dc)
212 { 212 {
213 struct txx9dmac_desc *desc, *_desc; 213 struct txx9dmac_desc *desc, *_desc;
214 struct txx9dmac_desc *ret = NULL; 214 struct txx9dmac_desc *ret = NULL;
215 unsigned int i = 0; 215 unsigned int i = 0;
216 216
217 spin_lock_bh(&dc->lock); 217 spin_lock_bh(&dc->lock);
218 list_for_each_entry_safe(desc, _desc, &dc->free_list, desc_node) { 218 list_for_each_entry_safe(desc, _desc, &dc->free_list, desc_node) {
219 if (async_tx_test_ack(&desc->txd)) { 219 if (async_tx_test_ack(&desc->txd)) {
220 list_del(&desc->desc_node); 220 list_del(&desc->desc_node);
221 ret = desc; 221 ret = desc;
222 break; 222 break;
223 } 223 }
224 dev_dbg(chan2dev(&dc->chan), "desc %p not ACKed\n", desc); 224 dev_dbg(chan2dev(&dc->chan), "desc %p not ACKed\n", desc);
225 i++; 225 i++;
226 } 226 }
227 spin_unlock_bh(&dc->lock); 227 spin_unlock_bh(&dc->lock);
228 228
229 dev_vdbg(chan2dev(&dc->chan), "scanned %u descriptors on freelist\n", 229 dev_vdbg(chan2dev(&dc->chan), "scanned %u descriptors on freelist\n",
230 i); 230 i);
231 if (!ret) { 231 if (!ret) {
232 ret = txx9dmac_desc_alloc(dc, GFP_ATOMIC); 232 ret = txx9dmac_desc_alloc(dc, GFP_ATOMIC);
233 if (ret) { 233 if (ret) {
234 spin_lock_bh(&dc->lock); 234 spin_lock_bh(&dc->lock);
235 dc->descs_allocated++; 235 dc->descs_allocated++;
236 spin_unlock_bh(&dc->lock); 236 spin_unlock_bh(&dc->lock);
237 } else 237 } else
238 dev_err(chan2dev(&dc->chan), 238 dev_err(chan2dev(&dc->chan),
239 "not enough descriptors available\n"); 239 "not enough descriptors available\n");
240 } 240 }
241 return ret; 241 return ret;
242 } 242 }
243 243
244 static void txx9dmac_sync_desc_for_cpu(struct txx9dmac_chan *dc, 244 static void txx9dmac_sync_desc_for_cpu(struct txx9dmac_chan *dc,
245 struct txx9dmac_desc *desc) 245 struct txx9dmac_desc *desc)
246 { 246 {
247 struct txx9dmac_dev *ddev = dc->ddev; 247 struct txx9dmac_dev *ddev = dc->ddev;
248 struct txx9dmac_desc *child; 248 struct txx9dmac_desc *child;
249 249
250 list_for_each_entry(child, &desc->tx_list, desc_node) 250 list_for_each_entry(child, &desc->tx_list, desc_node)
251 dma_sync_single_for_cpu(chan2parent(&dc->chan), 251 dma_sync_single_for_cpu(chan2parent(&dc->chan),
252 child->txd.phys, ddev->descsize, 252 child->txd.phys, ddev->descsize,
253 DMA_TO_DEVICE); 253 DMA_TO_DEVICE);
254 dma_sync_single_for_cpu(chan2parent(&dc->chan), 254 dma_sync_single_for_cpu(chan2parent(&dc->chan),
255 desc->txd.phys, ddev->descsize, 255 desc->txd.phys, ddev->descsize,
256 DMA_TO_DEVICE); 256 DMA_TO_DEVICE);
257 } 257 }
258 258
259 /* 259 /*
260 * Move a descriptor, including any children, to the free list. 260 * Move a descriptor, including any children, to the free list.
261 * `desc' must not be on any lists. 261 * `desc' must not be on any lists.
262 */ 262 */
263 static void txx9dmac_desc_put(struct txx9dmac_chan *dc, 263 static void txx9dmac_desc_put(struct txx9dmac_chan *dc,
264 struct txx9dmac_desc *desc) 264 struct txx9dmac_desc *desc)
265 { 265 {
266 if (desc) { 266 if (desc) {
267 struct txx9dmac_desc *child; 267 struct txx9dmac_desc *child;
268 268
269 txx9dmac_sync_desc_for_cpu(dc, desc); 269 txx9dmac_sync_desc_for_cpu(dc, desc);
270 270
271 spin_lock_bh(&dc->lock); 271 spin_lock_bh(&dc->lock);
272 list_for_each_entry(child, &desc->tx_list, desc_node) 272 list_for_each_entry(child, &desc->tx_list, desc_node)
273 dev_vdbg(chan2dev(&dc->chan), 273 dev_vdbg(chan2dev(&dc->chan),
274 "moving child desc %p to freelist\n", 274 "moving child desc %p to freelist\n",
275 child); 275 child);
276 list_splice_init(&desc->tx_list, &dc->free_list); 276 list_splice_init(&desc->tx_list, &dc->free_list);
277 dev_vdbg(chan2dev(&dc->chan), "moving desc %p to freelist\n", 277 dev_vdbg(chan2dev(&dc->chan), "moving desc %p to freelist\n",
278 desc); 278 desc);
279 list_add(&desc->desc_node, &dc->free_list); 279 list_add(&desc->desc_node, &dc->free_list);
280 spin_unlock_bh(&dc->lock); 280 spin_unlock_bh(&dc->lock);
281 } 281 }
282 } 282 }
283 283
284 /*----------------------------------------------------------------------*/ 284 /*----------------------------------------------------------------------*/
285 285
286 static void txx9dmac_dump_regs(struct txx9dmac_chan *dc) 286 static void txx9dmac_dump_regs(struct txx9dmac_chan *dc)
287 { 287 {
288 if (is_dmac64(dc)) 288 if (is_dmac64(dc))
289 dev_err(chan2dev(&dc->chan), 289 dev_err(chan2dev(&dc->chan),
290 " CHAR: %#llx SAR: %#llx DAR: %#llx CNTR: %#x" 290 " CHAR: %#llx SAR: %#llx DAR: %#llx CNTR: %#x"
291 " SAIR: %#x DAIR: %#x CCR: %#x CSR: %#x\n", 291 " SAIR: %#x DAIR: %#x CCR: %#x CSR: %#x\n",
292 (u64)channel64_read_CHAR(dc), 292 (u64)channel64_read_CHAR(dc),
293 channel64_readq(dc, SAR), 293 channel64_readq(dc, SAR),
294 channel64_readq(dc, DAR), 294 channel64_readq(dc, DAR),
295 channel64_readl(dc, CNTR), 295 channel64_readl(dc, CNTR),
296 channel64_readl(dc, SAIR), 296 channel64_readl(dc, SAIR),
297 channel64_readl(dc, DAIR), 297 channel64_readl(dc, DAIR),
298 channel64_readl(dc, CCR), 298 channel64_readl(dc, CCR),
299 channel64_readl(dc, CSR)); 299 channel64_readl(dc, CSR));
300 else 300 else
301 dev_err(chan2dev(&dc->chan), 301 dev_err(chan2dev(&dc->chan),
302 " CHAR: %#x SAR: %#x DAR: %#x CNTR: %#x" 302 " CHAR: %#x SAR: %#x DAR: %#x CNTR: %#x"
303 " SAIR: %#x DAIR: %#x CCR: %#x CSR: %#x\n", 303 " SAIR: %#x DAIR: %#x CCR: %#x CSR: %#x\n",
304 channel32_readl(dc, CHAR), 304 channel32_readl(dc, CHAR),
305 channel32_readl(dc, SAR), 305 channel32_readl(dc, SAR),
306 channel32_readl(dc, DAR), 306 channel32_readl(dc, DAR),
307 channel32_readl(dc, CNTR), 307 channel32_readl(dc, CNTR),
308 channel32_readl(dc, SAIR), 308 channel32_readl(dc, SAIR),
309 channel32_readl(dc, DAIR), 309 channel32_readl(dc, DAIR),
310 channel32_readl(dc, CCR), 310 channel32_readl(dc, CCR),
311 channel32_readl(dc, CSR)); 311 channel32_readl(dc, CSR));
312 } 312 }
313 313
314 static void txx9dmac_reset_chan(struct txx9dmac_chan *dc) 314 static void txx9dmac_reset_chan(struct txx9dmac_chan *dc)
315 { 315 {
316 channel_writel(dc, CCR, TXX9_DMA_CCR_CHRST); 316 channel_writel(dc, CCR, TXX9_DMA_CCR_CHRST);
317 if (is_dmac64(dc)) { 317 if (is_dmac64(dc)) {
318 channel64_clear_CHAR(dc); 318 channel64_clear_CHAR(dc);
319 channel_writeq(dc, SAR, 0); 319 channel_writeq(dc, SAR, 0);
320 channel_writeq(dc, DAR, 0); 320 channel_writeq(dc, DAR, 0);
321 } else { 321 } else {
322 channel_writel(dc, CHAR, 0); 322 channel_writel(dc, CHAR, 0);
323 channel_writel(dc, SAR, 0); 323 channel_writel(dc, SAR, 0);
324 channel_writel(dc, DAR, 0); 324 channel_writel(dc, DAR, 0);
325 } 325 }
326 channel_writel(dc, CNTR, 0); 326 channel_writel(dc, CNTR, 0);
327 channel_writel(dc, SAIR, 0); 327 channel_writel(dc, SAIR, 0);
328 channel_writel(dc, DAIR, 0); 328 channel_writel(dc, DAIR, 0);
329 channel_writel(dc, CCR, 0); 329 channel_writel(dc, CCR, 0);
330 mmiowb(); 330 mmiowb();
331 } 331 }
332 332
333 /* Called with dc->lock held and bh disabled */ 333 /* Called with dc->lock held and bh disabled */
334 static void txx9dmac_dostart(struct txx9dmac_chan *dc, 334 static void txx9dmac_dostart(struct txx9dmac_chan *dc,
335 struct txx9dmac_desc *first) 335 struct txx9dmac_desc *first)
336 { 336 {
337 struct txx9dmac_slave *ds = dc->chan.private; 337 struct txx9dmac_slave *ds = dc->chan.private;
338 u32 sai, dai; 338 u32 sai, dai;
339 339
340 dev_vdbg(chan2dev(&dc->chan), "dostart %u %p\n", 340 dev_vdbg(chan2dev(&dc->chan), "dostart %u %p\n",
341 first->txd.cookie, first); 341 first->txd.cookie, first);
342 /* ASSERT: channel is idle */ 342 /* ASSERT: channel is idle */
343 if (channel_readl(dc, CSR) & TXX9_DMA_CSR_XFACT) { 343 if (channel_readl(dc, CSR) & TXX9_DMA_CSR_XFACT) {
344 dev_err(chan2dev(&dc->chan), 344 dev_err(chan2dev(&dc->chan),
345 "BUG: Attempted to start non-idle channel\n"); 345 "BUG: Attempted to start non-idle channel\n");
346 txx9dmac_dump_regs(dc); 346 txx9dmac_dump_regs(dc);
347 /* The tasklet will hopefully advance the queue... */ 347 /* The tasklet will hopefully advance the queue... */
348 return; 348 return;
349 } 349 }
350 350
351 if (is_dmac64(dc)) { 351 if (is_dmac64(dc)) {
352 channel64_writel(dc, CNTR, 0); 352 channel64_writel(dc, CNTR, 0);
353 channel64_writel(dc, CSR, 0xffffffff); 353 channel64_writel(dc, CSR, 0xffffffff);
354 if (ds) { 354 if (ds) {
355 if (ds->tx_reg) { 355 if (ds->tx_reg) {
356 sai = ds->reg_width; 356 sai = ds->reg_width;
357 dai = 0; 357 dai = 0;
358 } else { 358 } else {
359 sai = 0; 359 sai = 0;
360 dai = ds->reg_width; 360 dai = ds->reg_width;
361 } 361 }
362 } else { 362 } else {
363 sai = 8; 363 sai = 8;
364 dai = 8; 364 dai = 8;
365 } 365 }
366 channel64_writel(dc, SAIR, sai); 366 channel64_writel(dc, SAIR, sai);
367 channel64_writel(dc, DAIR, dai); 367 channel64_writel(dc, DAIR, dai);
368 		/* All 64-bit DMACs support SMPCHN */ 368 		/* All 64-bit DMACs support SMPCHN */
369 channel64_writel(dc, CCR, dc->ccr); 369 channel64_writel(dc, CCR, dc->ccr);
370 		/* Writing a non-zero value to CHAR will assert XFACT */ 370 		/* Writing a non-zero value to CHAR will assert XFACT */
371 channel64_write_CHAR(dc, first->txd.phys); 371 channel64_write_CHAR(dc, first->txd.phys);
372 } else { 372 } else {
373 channel32_writel(dc, CNTR, 0); 373 channel32_writel(dc, CNTR, 0);
374 channel32_writel(dc, CSR, 0xffffffff); 374 channel32_writel(dc, CSR, 0xffffffff);
375 if (ds) { 375 if (ds) {
376 if (ds->tx_reg) { 376 if (ds->tx_reg) {
377 sai = ds->reg_width; 377 sai = ds->reg_width;
378 dai = 0; 378 dai = 0;
379 } else { 379 } else {
380 sai = 0; 380 sai = 0;
381 dai = ds->reg_width; 381 dai = ds->reg_width;
382 } 382 }
383 } else { 383 } else {
384 sai = 4; 384 sai = 4;
385 dai = 4; 385 dai = 4;
386 } 386 }
387 channel32_writel(dc, SAIR, sai); 387 channel32_writel(dc, SAIR, sai);
388 channel32_writel(dc, DAIR, dai); 388 channel32_writel(dc, DAIR, dai);
389 if (txx9_dma_have_SMPCHN()) { 389 if (txx9_dma_have_SMPCHN()) {
390 channel32_writel(dc, CCR, dc->ccr); 390 channel32_writel(dc, CCR, dc->ccr);
391 			/* Writing a non-zero value to CHAR will assert XFACT */ 391 			/* Writing a non-zero value to CHAR will assert XFACT */
392 channel32_writel(dc, CHAR, first->txd.phys); 392 channel32_writel(dc, CHAR, first->txd.phys);
393 } else { 393 } else {
394 channel32_writel(dc, CHAR, first->txd.phys); 394 channel32_writel(dc, CHAR, first->txd.phys);
395 channel32_writel(dc, CCR, dc->ccr); 395 channel32_writel(dc, CCR, dc->ccr);
396 } 396 }
397 } 397 }
398 } 398 }
399 399
400 /*----------------------------------------------------------------------*/ 400 /*----------------------------------------------------------------------*/
401 401
402 static void 402 static void
403 txx9dmac_descriptor_complete(struct txx9dmac_chan *dc, 403 txx9dmac_descriptor_complete(struct txx9dmac_chan *dc,
404 struct txx9dmac_desc *desc) 404 struct txx9dmac_desc *desc)
405 { 405 {
406 dma_async_tx_callback callback; 406 dma_async_tx_callback callback;
407 void *param; 407 void *param;
408 struct dma_async_tx_descriptor *txd = &desc->txd; 408 struct dma_async_tx_descriptor *txd = &desc->txd;
409 struct txx9dmac_slave *ds = dc->chan.private; 409 struct txx9dmac_slave *ds = dc->chan.private;
410 410
411 dev_vdbg(chan2dev(&dc->chan), "descriptor %u %p complete\n", 411 dev_vdbg(chan2dev(&dc->chan), "descriptor %u %p complete\n",
412 txd->cookie, desc); 412 txd->cookie, desc);
413 413
414 dma_cookie_complete(txd); 414 dma_cookie_complete(txd);
415 callback = txd->callback; 415 callback = txd->callback;
416 param = txd->callback_param; 416 param = txd->callback_param;
417 417
418 txx9dmac_sync_desc_for_cpu(dc, desc); 418 txx9dmac_sync_desc_for_cpu(dc, desc);
419 list_splice_init(&desc->tx_list, &dc->free_list); 419 list_splice_init(&desc->tx_list, &dc->free_list);
420 list_move(&desc->desc_node, &dc->free_list); 420 list_move(&desc->desc_node, &dc->free_list);
421 421
422 if (!ds) { 422 if (!ds) {
423 dma_addr_t dmaaddr; 423 dma_addr_t dmaaddr;
424 if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) { 424 if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
425 dmaaddr = is_dmac64(dc) ? 425 dmaaddr = is_dmac64(dc) ?
426 desc->hwdesc.DAR : desc->hwdesc32.DAR; 426 desc->hwdesc.DAR : desc->hwdesc32.DAR;
427 if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE) 427 if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
428 dma_unmap_single(chan2parent(&dc->chan), 428 dma_unmap_single(chan2parent(&dc->chan),
429 dmaaddr, desc->len, DMA_FROM_DEVICE); 429 dmaaddr, desc->len, DMA_FROM_DEVICE);
430 else 430 else
431 dma_unmap_page(chan2parent(&dc->chan), 431 dma_unmap_page(chan2parent(&dc->chan),
432 dmaaddr, desc->len, DMA_FROM_DEVICE); 432 dmaaddr, desc->len, DMA_FROM_DEVICE);
433 } 433 }
434 if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) { 434 if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
435 dmaaddr = is_dmac64(dc) ? 435 dmaaddr = is_dmac64(dc) ?
436 desc->hwdesc.SAR : desc->hwdesc32.SAR; 436 desc->hwdesc.SAR : desc->hwdesc32.SAR;
437 if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE) 437 if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
438 dma_unmap_single(chan2parent(&dc->chan), 438 dma_unmap_single(chan2parent(&dc->chan),
439 dmaaddr, desc->len, DMA_TO_DEVICE); 439 dmaaddr, desc->len, DMA_TO_DEVICE);
440 else 440 else
441 dma_unmap_page(chan2parent(&dc->chan), 441 dma_unmap_page(chan2parent(&dc->chan),
442 dmaaddr, desc->len, DMA_TO_DEVICE); 442 dmaaddr, desc->len, DMA_TO_DEVICE);
443 } 443 }
444 } 444 }
445 445
446 /* 446 /*
447 * The API requires that no submissions are done from a 447 * The API requires that no submissions are done from a
448 * callback, so we don't need to drop the lock here 448 * callback, so we don't need to drop the lock here
449 */ 449 */
450 if (callback) 450 if (callback)
451 callback(param); 451 callback(param);
452 dma_run_dependencies(txd); 452 dma_run_dependencies(txd);
453 } 453 }
454 454
455 static void txx9dmac_dequeue(struct txx9dmac_chan *dc, struct list_head *list) 455 static void txx9dmac_dequeue(struct txx9dmac_chan *dc, struct list_head *list)
456 { 456 {
457 struct txx9dmac_dev *ddev = dc->ddev; 457 struct txx9dmac_dev *ddev = dc->ddev;
458 struct txx9dmac_desc *desc; 458 struct txx9dmac_desc *desc;
459 struct txx9dmac_desc *prev = NULL; 459 struct txx9dmac_desc *prev = NULL;
460 460
461 BUG_ON(!list_empty(list)); 461 BUG_ON(!list_empty(list));
462 do { 462 do {
463 desc = txx9dmac_first_queued(dc); 463 desc = txx9dmac_first_queued(dc);
464 if (prev) { 464 if (prev) {
465 desc_write_CHAR(dc, prev, desc->txd.phys); 465 desc_write_CHAR(dc, prev, desc->txd.phys);
466 dma_sync_single_for_device(chan2parent(&dc->chan), 466 dma_sync_single_for_device(chan2parent(&dc->chan),
467 prev->txd.phys, ddev->descsize, 467 prev->txd.phys, ddev->descsize,
468 DMA_TO_DEVICE); 468 DMA_TO_DEVICE);
469 } 469 }
470 prev = txx9dmac_last_child(desc); 470 prev = txx9dmac_last_child(desc);
471 list_move_tail(&desc->desc_node, list); 471 list_move_tail(&desc->desc_node, list);
472 /* Make chain-completion interrupt happen */ 472 /* Make chain-completion interrupt happen */
473 if ((desc->txd.flags & DMA_PREP_INTERRUPT) && 473 if ((desc->txd.flags & DMA_PREP_INTERRUPT) &&
474 !txx9dmac_chan_INTENT(dc)) 474 !txx9dmac_chan_INTENT(dc))
475 break; 475 break;
476 } while (!list_empty(&dc->queue)); 476 } while (!list_empty(&dc->queue));
477 } 477 }
478 478
479 static void txx9dmac_complete_all(struct txx9dmac_chan *dc) 479 static void txx9dmac_complete_all(struct txx9dmac_chan *dc)
480 { 480 {
481 struct txx9dmac_desc *desc, *_desc; 481 struct txx9dmac_desc *desc, *_desc;
482 LIST_HEAD(list); 482 LIST_HEAD(list);
483 483
484 /* 484 /*
485 * Submit queued descriptors ASAP, i.e. before we go through 485 * Submit queued descriptors ASAP, i.e. before we go through
486 * the completed ones. 486 * the completed ones.
487 */ 487 */
488 list_splice_init(&dc->active_list, &list); 488 list_splice_init(&dc->active_list, &list);
489 if (!list_empty(&dc->queue)) { 489 if (!list_empty(&dc->queue)) {
490 txx9dmac_dequeue(dc, &dc->active_list); 490 txx9dmac_dequeue(dc, &dc->active_list);
491 txx9dmac_dostart(dc, txx9dmac_first_active(dc)); 491 txx9dmac_dostart(dc, txx9dmac_first_active(dc));
492 } 492 }
493 493
494 list_for_each_entry_safe(desc, _desc, &list, desc_node) 494 list_for_each_entry_safe(desc, _desc, &list, desc_node)
495 txx9dmac_descriptor_complete(dc, desc); 495 txx9dmac_descriptor_complete(dc, desc);
496 } 496 }
497 497
498 static void txx9dmac_dump_desc(struct txx9dmac_chan *dc, 498 static void txx9dmac_dump_desc(struct txx9dmac_chan *dc,
499 struct txx9dmac_hwdesc *desc) 499 struct txx9dmac_hwdesc *desc)
500 { 500 {
501 if (is_dmac64(dc)) { 501 if (is_dmac64(dc)) {
502 #ifdef TXX9_DMA_USE_SIMPLE_CHAIN 502 #ifdef TXX9_DMA_USE_SIMPLE_CHAIN
503 dev_crit(chan2dev(&dc->chan), 503 dev_crit(chan2dev(&dc->chan),
504 " desc: ch%#llx s%#llx d%#llx c%#x\n", 504 " desc: ch%#llx s%#llx d%#llx c%#x\n",
505 (u64)desc->CHAR, desc->SAR, desc->DAR, desc->CNTR); 505 (u64)desc->CHAR, desc->SAR, desc->DAR, desc->CNTR);
506 #else 506 #else
507 dev_crit(chan2dev(&dc->chan), 507 dev_crit(chan2dev(&dc->chan),
508 " desc: ch%#llx s%#llx d%#llx c%#x" 508 " desc: ch%#llx s%#llx d%#llx c%#x"
509 " si%#x di%#x cc%#x cs%#x\n", 509 " si%#x di%#x cc%#x cs%#x\n",
510 (u64)desc->CHAR, desc->SAR, desc->DAR, desc->CNTR, 510 (u64)desc->CHAR, desc->SAR, desc->DAR, desc->CNTR,
511 desc->SAIR, desc->DAIR, desc->CCR, desc->CSR); 511 desc->SAIR, desc->DAIR, desc->CCR, desc->CSR);
512 #endif 512 #endif
513 } else { 513 } else {
514 struct txx9dmac_hwdesc32 *d = (struct txx9dmac_hwdesc32 *)desc; 514 struct txx9dmac_hwdesc32 *d = (struct txx9dmac_hwdesc32 *)desc;
515 #ifdef TXX9_DMA_USE_SIMPLE_CHAIN 515 #ifdef TXX9_DMA_USE_SIMPLE_CHAIN
516 dev_crit(chan2dev(&dc->chan), 516 dev_crit(chan2dev(&dc->chan),
517 " desc: ch%#x s%#x d%#x c%#x\n", 517 " desc: ch%#x s%#x d%#x c%#x\n",
518 d->CHAR, d->SAR, d->DAR, d->CNTR); 518 d->CHAR, d->SAR, d->DAR, d->CNTR);
519 #else 519 #else
520 dev_crit(chan2dev(&dc->chan), 520 dev_crit(chan2dev(&dc->chan),
521 " desc: ch%#x s%#x d%#x c%#x" 521 " desc: ch%#x s%#x d%#x c%#x"
522 " si%#x di%#x cc%#x cs%#x\n", 522 " si%#x di%#x cc%#x cs%#x\n",
523 d->CHAR, d->SAR, d->DAR, d->CNTR, 523 d->CHAR, d->SAR, d->DAR, d->CNTR,
524 d->SAIR, d->DAIR, d->CCR, d->CSR); 524 d->SAIR, d->DAIR, d->CCR, d->CSR);
525 #endif 525 #endif
526 } 526 }
527 } 527 }
528 528
529 static void txx9dmac_handle_error(struct txx9dmac_chan *dc, u32 csr) 529 static void txx9dmac_handle_error(struct txx9dmac_chan *dc, u32 csr)
530 { 530 {
531 struct txx9dmac_desc *bad_desc; 531 struct txx9dmac_desc *bad_desc;
532 struct txx9dmac_desc *child; 532 struct txx9dmac_desc *child;
533 u32 errors; 533 u32 errors;
534 534
535 /* 535 /*
536 * The descriptor currently at the head of the active list is 536 * The descriptor currently at the head of the active list is
537 * borked. Since we don't have any way to report errors, we'll 537 * borked. Since we don't have any way to report errors, we'll
538 * just have to scream loudly and try to carry on. 538 * just have to scream loudly and try to carry on.
539 */ 539 */
540 dev_crit(chan2dev(&dc->chan), "Abnormal Chain Completion\n"); 540 dev_crit(chan2dev(&dc->chan), "Abnormal Chain Completion\n");
541 txx9dmac_dump_regs(dc); 541 txx9dmac_dump_regs(dc);
542 542
543 bad_desc = txx9dmac_first_active(dc); 543 bad_desc = txx9dmac_first_active(dc);
544 list_del_init(&bad_desc->desc_node); 544 list_del_init(&bad_desc->desc_node);
545 545
546 /* Clear all error flags and try to restart the controller */ 546 /* Clear all error flags and try to restart the controller */
547 errors = csr & (TXX9_DMA_CSR_ABCHC | 547 errors = csr & (TXX9_DMA_CSR_ABCHC |
548 TXX9_DMA_CSR_CFERR | TXX9_DMA_CSR_CHERR | 548 TXX9_DMA_CSR_CFERR | TXX9_DMA_CSR_CHERR |
549 TXX9_DMA_CSR_DESERR | TXX9_DMA_CSR_SORERR); 549 TXX9_DMA_CSR_DESERR | TXX9_DMA_CSR_SORERR);
550 channel_writel(dc, CSR, errors); 550 channel_writel(dc, CSR, errors);
551 551
552 if (list_empty(&dc->active_list) && !list_empty(&dc->queue)) 552 if (list_empty(&dc->active_list) && !list_empty(&dc->queue))
553 txx9dmac_dequeue(dc, &dc->active_list); 553 txx9dmac_dequeue(dc, &dc->active_list);
554 if (!list_empty(&dc->active_list)) 554 if (!list_empty(&dc->active_list))
555 txx9dmac_dostart(dc, txx9dmac_first_active(dc)); 555 txx9dmac_dostart(dc, txx9dmac_first_active(dc));
556 556
557 dev_crit(chan2dev(&dc->chan), 557 dev_crit(chan2dev(&dc->chan),
558 "Bad descriptor submitted for DMA! (cookie: %d)\n", 558 "Bad descriptor submitted for DMA! (cookie: %d)\n",
559 bad_desc->txd.cookie); 559 bad_desc->txd.cookie);
560 txx9dmac_dump_desc(dc, &bad_desc->hwdesc); 560 txx9dmac_dump_desc(dc, &bad_desc->hwdesc);
561 list_for_each_entry(child, &bad_desc->tx_list, desc_node) 561 list_for_each_entry(child, &bad_desc->tx_list, desc_node)
562 txx9dmac_dump_desc(dc, &child->hwdesc); 562 txx9dmac_dump_desc(dc, &child->hwdesc);
563 /* Pretend the descriptor completed successfully */ 563 /* Pretend the descriptor completed successfully */
564 txx9dmac_descriptor_complete(dc, bad_desc); 564 txx9dmac_descriptor_complete(dc, bad_desc);
565 } 565 }
566 566
567 static void txx9dmac_scan_descriptors(struct txx9dmac_chan *dc) 567 static void txx9dmac_scan_descriptors(struct txx9dmac_chan *dc)
568 { 568 {
569 dma_addr_t chain; 569 dma_addr_t chain;
570 struct txx9dmac_desc *desc, *_desc; 570 struct txx9dmac_desc *desc, *_desc;
571 struct txx9dmac_desc *child; 571 struct txx9dmac_desc *child;
572 u32 csr; 572 u32 csr;
573 573
574 if (is_dmac64(dc)) { 574 if (is_dmac64(dc)) {
575 chain = channel64_read_CHAR(dc); 575 chain = channel64_read_CHAR(dc);
576 csr = channel64_readl(dc, CSR); 576 csr = channel64_readl(dc, CSR);
577 channel64_writel(dc, CSR, csr); 577 channel64_writel(dc, CSR, csr);
578 } else { 578 } else {
579 chain = channel32_readl(dc, CHAR); 579 chain = channel32_readl(dc, CHAR);
580 csr = channel32_readl(dc, CSR); 580 csr = channel32_readl(dc, CSR);
581 channel32_writel(dc, CSR, csr); 581 channel32_writel(dc, CSR, csr);
582 } 582 }
583 /* For dynamic chain, we should look at XFACT instead of NCHNC */ 583 /* For dynamic chain, we should look at XFACT instead of NCHNC */
584 if (!(csr & (TXX9_DMA_CSR_XFACT | TXX9_DMA_CSR_ABCHC))) { 584 if (!(csr & (TXX9_DMA_CSR_XFACT | TXX9_DMA_CSR_ABCHC))) {
585 /* Everything we've submitted is done */ 585 /* Everything we've submitted is done */
586 txx9dmac_complete_all(dc); 586 txx9dmac_complete_all(dc);
587 return; 587 return;
588 } 588 }
589 if (!(csr & TXX9_DMA_CSR_CHNEN)) 589 if (!(csr & TXX9_DMA_CSR_CHNEN))
590 chain = 0; /* last descriptor of this chain */ 590 chain = 0; /* last descriptor of this chain */
591 591
592 dev_vdbg(chan2dev(&dc->chan), "scan_descriptors: char=%#llx\n", 592 dev_vdbg(chan2dev(&dc->chan), "scan_descriptors: char=%#llx\n",
593 (u64)chain); 593 (u64)chain);
594 594
595 list_for_each_entry_safe(desc, _desc, &dc->active_list, desc_node) { 595 list_for_each_entry_safe(desc, _desc, &dc->active_list, desc_node) {
596 if (desc_read_CHAR(dc, desc) == chain) { 596 if (desc_read_CHAR(dc, desc) == chain) {
597 /* This one is currently in progress */ 597 /* This one is currently in progress */
598 if (csr & TXX9_DMA_CSR_ABCHC) 598 if (csr & TXX9_DMA_CSR_ABCHC)
599 goto scan_done; 599 goto scan_done;
600 return; 600 return;
601 } 601 }
602 602
603 list_for_each_entry(child, &desc->tx_list, desc_node) 603 list_for_each_entry(child, &desc->tx_list, desc_node)
604 if (desc_read_CHAR(dc, child) == chain) { 604 if (desc_read_CHAR(dc, child) == chain) {
605 /* Currently in progress */ 605 /* Currently in progress */
606 if (csr & TXX9_DMA_CSR_ABCHC) 606 if (csr & TXX9_DMA_CSR_ABCHC)
607 goto scan_done; 607 goto scan_done;
608 return; 608 return;
609 } 609 }
610 610
611 /* 611 /*
612 * No descriptors so far seem to be in progress, i.e. 612 * No descriptors so far seem to be in progress, i.e.
613 * this one must be done. 613 * this one must be done.
614 */ 614 */
615 txx9dmac_descriptor_complete(dc, desc); 615 txx9dmac_descriptor_complete(dc, desc);
616 } 616 }
617 scan_done: 617 scan_done:
618 if (csr & TXX9_DMA_CSR_ABCHC) { 618 if (csr & TXX9_DMA_CSR_ABCHC) {
619 txx9dmac_handle_error(dc, csr); 619 txx9dmac_handle_error(dc, csr);
620 return; 620 return;
621 } 621 }
622 622
623 dev_err(chan2dev(&dc->chan), 623 dev_err(chan2dev(&dc->chan),
624 "BUG: All descriptors done, but channel not idle!\n"); 624 "BUG: All descriptors done, but channel not idle!\n");
625 625
626 /* Try to continue after resetting the channel... */ 626 /* Try to continue after resetting the channel... */
627 txx9dmac_reset_chan(dc); 627 txx9dmac_reset_chan(dc);
628 628
629 if (!list_empty(&dc->queue)) { 629 if (!list_empty(&dc->queue)) {
630 txx9dmac_dequeue(dc, &dc->active_list); 630 txx9dmac_dequeue(dc, &dc->active_list);
631 txx9dmac_dostart(dc, txx9dmac_first_active(dc)); 631 txx9dmac_dostart(dc, txx9dmac_first_active(dc));
632 } 632 }
633 } 633 }
634 634
635 static void txx9dmac_chan_tasklet(unsigned long data) 635 static void txx9dmac_chan_tasklet(unsigned long data)
636 { 636 {
637 int irq; 637 int irq;
638 u32 csr; 638 u32 csr;
639 struct txx9dmac_chan *dc; 639 struct txx9dmac_chan *dc;
640 640
641 dc = (struct txx9dmac_chan *)data; 641 dc = (struct txx9dmac_chan *)data;
642 csr = channel_readl(dc, CSR); 642 csr = channel_readl(dc, CSR);
643 dev_vdbg(chan2dev(&dc->chan), "tasklet: status=%x\n", csr); 643 dev_vdbg(chan2dev(&dc->chan), "tasklet: status=%x\n", csr);
644 644
645 spin_lock(&dc->lock); 645 spin_lock(&dc->lock);
646 if (csr & (TXX9_DMA_CSR_ABCHC | TXX9_DMA_CSR_NCHNC | 646 if (csr & (TXX9_DMA_CSR_ABCHC | TXX9_DMA_CSR_NCHNC |
647 TXX9_DMA_CSR_NTRNFC)) 647 TXX9_DMA_CSR_NTRNFC))
648 txx9dmac_scan_descriptors(dc); 648 txx9dmac_scan_descriptors(dc);
649 spin_unlock(&dc->lock); 649 spin_unlock(&dc->lock);
650 irq = dc->irq; 650 irq = dc->irq;
651 651
652 enable_irq(irq); 652 enable_irq(irq);
653 } 653 }
654 654
655 static irqreturn_t txx9dmac_chan_interrupt(int irq, void *dev_id) 655 static irqreturn_t txx9dmac_chan_interrupt(int irq, void *dev_id)
656 { 656 {
657 struct txx9dmac_chan *dc = dev_id; 657 struct txx9dmac_chan *dc = dev_id;
658 658
659 dev_vdbg(chan2dev(&dc->chan), "interrupt: status=%#x\n", 659 dev_vdbg(chan2dev(&dc->chan), "interrupt: status=%#x\n",
660 channel_readl(dc, CSR)); 660 channel_readl(dc, CSR));
661 661
662 tasklet_schedule(&dc->tasklet); 662 tasklet_schedule(&dc->tasklet);
663 /* 663 /*
664 * Just disable the interrupts. We'll turn them back on in the 664 * Just disable the interrupts. We'll turn them back on in the
665 * softirq handler. 665 * softirq handler.
666 */ 666 */
667 disable_irq_nosync(irq); 667 disable_irq_nosync(irq);
668 668
669 return IRQ_HANDLED; 669 return IRQ_HANDLED;
670 } 670 }
671 671
672 static void txx9dmac_tasklet(unsigned long data) 672 static void txx9dmac_tasklet(unsigned long data)
673 { 673 {
674 int irq; 674 int irq;
675 u32 csr; 675 u32 csr;
676 struct txx9dmac_chan *dc; 676 struct txx9dmac_chan *dc;
677 677
678 struct txx9dmac_dev *ddev = (struct txx9dmac_dev *)data; 678 struct txx9dmac_dev *ddev = (struct txx9dmac_dev *)data;
679 u32 mcr; 679 u32 mcr;
680 int i; 680 int i;
681 681
682 mcr = dma_readl(ddev, MCR); 682 mcr = dma_readl(ddev, MCR);
683 dev_vdbg(ddev->chan[0]->dma.dev, "tasklet: mcr=%x\n", mcr); 683 dev_vdbg(ddev->chan[0]->dma.dev, "tasklet: mcr=%x\n", mcr);
684 for (i = 0; i < TXX9_DMA_MAX_NR_CHANNELS; i++) { 684 for (i = 0; i < TXX9_DMA_MAX_NR_CHANNELS; i++) {
685 if ((mcr >> (24 + i)) & 0x11) { 685 if ((mcr >> (24 + i)) & 0x11) {
686 dc = ddev->chan[i]; 686 dc = ddev->chan[i];
687 csr = channel_readl(dc, CSR); 687 csr = channel_readl(dc, CSR);
688 dev_vdbg(chan2dev(&dc->chan), "tasklet: status=%x\n", 688 dev_vdbg(chan2dev(&dc->chan), "tasklet: status=%x\n",
689 csr); 689 csr);
690 spin_lock(&dc->lock); 690 spin_lock(&dc->lock);
691 if (csr & (TXX9_DMA_CSR_ABCHC | TXX9_DMA_CSR_NCHNC | 691 if (csr & (TXX9_DMA_CSR_ABCHC | TXX9_DMA_CSR_NCHNC |
692 TXX9_DMA_CSR_NTRNFC)) 692 TXX9_DMA_CSR_NTRNFC))
693 txx9dmac_scan_descriptors(dc); 693 txx9dmac_scan_descriptors(dc);
694 spin_unlock(&dc->lock); 694 spin_unlock(&dc->lock);
695 } 695 }
696 } 696 }
697 irq = ddev->irq; 697 irq = ddev->irq;
698 698
699 enable_irq(irq); 699 enable_irq(irq);
700 } 700 }
701 701
702 static irqreturn_t txx9dmac_interrupt(int irq, void *dev_id) 702 static irqreturn_t txx9dmac_interrupt(int irq, void *dev_id)
703 { 703 {
704 struct txx9dmac_dev *ddev = dev_id; 704 struct txx9dmac_dev *ddev = dev_id;
705 705
706 dev_vdbg(ddev->chan[0]->dma.dev, "interrupt: status=%#x\n", 706 dev_vdbg(ddev->chan[0]->dma.dev, "interrupt: status=%#x\n",
707 dma_readl(ddev, MCR)); 707 dma_readl(ddev, MCR));
708 708
709 tasklet_schedule(&ddev->tasklet); 709 tasklet_schedule(&ddev->tasklet);
710 /* 710 /*
711 * Just disable the interrupts. We'll turn them back on in the 711 * Just disable the interrupts. We'll turn them back on in the
712 * softirq handler. 712 * softirq handler.
713 */ 713 */
714 disable_irq_nosync(irq); 714 disable_irq_nosync(irq);
715 715
716 return IRQ_HANDLED; 716 return IRQ_HANDLED;
717 } 717 }
718 718
719 /*----------------------------------------------------------------------*/ 719 /*----------------------------------------------------------------------*/
720 720
721 static dma_cookie_t txx9dmac_tx_submit(struct dma_async_tx_descriptor *tx) 721 static dma_cookie_t txx9dmac_tx_submit(struct dma_async_tx_descriptor *tx)
722 { 722 {
723 struct txx9dmac_desc *desc = txd_to_txx9dmac_desc(tx); 723 struct txx9dmac_desc *desc = txd_to_txx9dmac_desc(tx);
724 struct txx9dmac_chan *dc = to_txx9dmac_chan(tx->chan); 724 struct txx9dmac_chan *dc = to_txx9dmac_chan(tx->chan);
725 dma_cookie_t cookie; 725 dma_cookie_t cookie;
726 726
727 spin_lock_bh(&dc->lock); 727 spin_lock_bh(&dc->lock);
728 cookie = dma_cookie_assign(tx); 728 cookie = dma_cookie_assign(tx);
729 729
730 dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u %p\n", 730 dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u %p\n",
731 desc->txd.cookie, desc); 731 desc->txd.cookie, desc);
732 732
733 list_add_tail(&desc->desc_node, &dc->queue); 733 list_add_tail(&desc->desc_node, &dc->queue);
734 spin_unlock_bh(&dc->lock); 734 spin_unlock_bh(&dc->lock);
735 735
736 return cookie; 736 return cookie;
737 } 737 }
738 738
739 static struct dma_async_tx_descriptor * 739 static struct dma_async_tx_descriptor *
740 txx9dmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, 740 txx9dmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
741 size_t len, unsigned long flags) 741 size_t len, unsigned long flags)
742 { 742 {
743 struct txx9dmac_chan *dc = to_txx9dmac_chan(chan); 743 struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
744 struct txx9dmac_dev *ddev = dc->ddev; 744 struct txx9dmac_dev *ddev = dc->ddev;
745 struct txx9dmac_desc *desc; 745 struct txx9dmac_desc *desc;
746 struct txx9dmac_desc *first; 746 struct txx9dmac_desc *first;
747 struct txx9dmac_desc *prev; 747 struct txx9dmac_desc *prev;
748 size_t xfer_count; 748 size_t xfer_count;
749 size_t offset; 749 size_t offset;
750 750
751 dev_vdbg(chan2dev(chan), "prep_dma_memcpy d%#llx s%#llx l%#zx f%#lx\n", 751 dev_vdbg(chan2dev(chan), "prep_dma_memcpy d%#llx s%#llx l%#zx f%#lx\n",
752 (u64)dest, (u64)src, len, flags); 752 (u64)dest, (u64)src, len, flags);
753 753
754 if (unlikely(!len)) { 754 if (unlikely(!len)) {
755 dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n"); 755 dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
756 return NULL; 756 return NULL;
757 } 757 }
758 758
759 prev = first = NULL; 759 prev = first = NULL;
760 760
761 for (offset = 0; offset < len; offset += xfer_count) { 761 for (offset = 0; offset < len; offset += xfer_count) {
762 xfer_count = min_t(size_t, len - offset, TXX9_DMA_MAX_COUNT); 762 xfer_count = min_t(size_t, len - offset, TXX9_DMA_MAX_COUNT);
763 /* 763 /*
764 * Workaround for ERT-TX49H2-033, ERT-TX49H3-020, 764 * Workaround for ERT-TX49H2-033, ERT-TX49H3-020,
765 * ERT-TX49H4-016 (slightly conservative) 765 * ERT-TX49H4-016 (slightly conservative)
766 */ 766 */
767 if (__is_dmac64(ddev)) { 767 if (__is_dmac64(ddev)) {
768 if (xfer_count > 0x100 && 768 if (xfer_count > 0x100 &&
769 (xfer_count & 0xff) >= 0xfa && 769 (xfer_count & 0xff) >= 0xfa &&
770 (xfer_count & 0xff) <= 0xff) 770 (xfer_count & 0xff) <= 0xff)
771 xfer_count -= 0x20; 771 xfer_count -= 0x20;
772 } else { 772 } else {
773 if (xfer_count > 0x80 && 773 if (xfer_count > 0x80 &&
774 (xfer_count & 0x7f) >= 0x7e && 774 (xfer_count & 0x7f) >= 0x7e &&
775 (xfer_count & 0x7f) <= 0x7f) 775 (xfer_count & 0x7f) <= 0x7f)
776 xfer_count -= 0x20; 776 xfer_count -= 0x20;
777 } 777 }
778 778
779 desc = txx9dmac_desc_get(dc); 779 desc = txx9dmac_desc_get(dc);
780 if (!desc) { 780 if (!desc) {
781 txx9dmac_desc_put(dc, first); 781 txx9dmac_desc_put(dc, first);
782 return NULL; 782 return NULL;
783 } 783 }
784 784
785 if (__is_dmac64(ddev)) { 785 if (__is_dmac64(ddev)) {
786 desc->hwdesc.SAR = src + offset; 786 desc->hwdesc.SAR = src + offset;
787 desc->hwdesc.DAR = dest + offset; 787 desc->hwdesc.DAR = dest + offset;
788 desc->hwdesc.CNTR = xfer_count; 788 desc->hwdesc.CNTR = xfer_count;
789 txx9dmac_desc_set_nosimple(ddev, desc, 8, 8, 789 txx9dmac_desc_set_nosimple(ddev, desc, 8, 8,
790 dc->ccr | TXX9_DMA_CCR_XFACT); 790 dc->ccr | TXX9_DMA_CCR_XFACT);
791 } else { 791 } else {
792 desc->hwdesc32.SAR = src + offset; 792 desc->hwdesc32.SAR = src + offset;
793 desc->hwdesc32.DAR = dest + offset; 793 desc->hwdesc32.DAR = dest + offset;
794 desc->hwdesc32.CNTR = xfer_count; 794 desc->hwdesc32.CNTR = xfer_count;
795 txx9dmac_desc_set_nosimple(ddev, desc, 4, 4, 795 txx9dmac_desc_set_nosimple(ddev, desc, 4, 4,
796 dc->ccr | TXX9_DMA_CCR_XFACT); 796 dc->ccr | TXX9_DMA_CCR_XFACT);
797 } 797 }
798 798
799 /* 799 /*
800 * The descriptors on tx_list are not reachable from 800 * The descriptors on tx_list are not reachable from
801 * the dc->queue list or dc->active_list after a 801 * the dc->queue list or dc->active_list after a
802 * submit. If we put all descriptors on active_list, 802 * submit. If we put all descriptors on active_list,
803 		 * invoking the callback on completion would be more 803 		 * invoking the callback on completion would be more
804 * complex. 804 * complex.
805 */ 805 */
806 if (!first) { 806 if (!first) {
807 first = desc; 807 first = desc;
808 } else { 808 } else {
809 desc_write_CHAR(dc, prev, desc->txd.phys); 809 desc_write_CHAR(dc, prev, desc->txd.phys);
810 dma_sync_single_for_device(chan2parent(&dc->chan), 810 dma_sync_single_for_device(chan2parent(&dc->chan),
811 prev->txd.phys, ddev->descsize, 811 prev->txd.phys, ddev->descsize,
812 DMA_TO_DEVICE); 812 DMA_TO_DEVICE);
813 list_add_tail(&desc->desc_node, &first->tx_list); 813 list_add_tail(&desc->desc_node, &first->tx_list);
814 } 814 }
815 prev = desc; 815 prev = desc;
816 } 816 }
817 817
818 /* Trigger interrupt after last block */ 818 /* Trigger interrupt after last block */
819 if (flags & DMA_PREP_INTERRUPT) 819 if (flags & DMA_PREP_INTERRUPT)
820 txx9dmac_desc_set_INTENT(ddev, prev); 820 txx9dmac_desc_set_INTENT(ddev, prev);
821 821
822 desc_write_CHAR(dc, prev, 0); 822 desc_write_CHAR(dc, prev, 0);
823 dma_sync_single_for_device(chan2parent(&dc->chan), 823 dma_sync_single_for_device(chan2parent(&dc->chan),
824 prev->txd.phys, ddev->descsize, 824 prev->txd.phys, ddev->descsize,
825 DMA_TO_DEVICE); 825 DMA_TO_DEVICE);
826 826
827 first->txd.flags = flags; 827 first->txd.flags = flags;
828 first->len = len; 828 first->len = len;
829 829
830 return &first->txd; 830 return &first->txd;
831 } 831 }
832 832
833 static struct dma_async_tx_descriptor * 833 static struct dma_async_tx_descriptor *
834 txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, 834 txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
835 unsigned int sg_len, enum dma_transfer_direction direction, 835 unsigned int sg_len, enum dma_transfer_direction direction,
836 unsigned long flags, void *context) 836 unsigned long flags, void *context)
837 { 837 {
838 struct txx9dmac_chan *dc = to_txx9dmac_chan(chan); 838 struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
839 struct txx9dmac_dev *ddev = dc->ddev; 839 struct txx9dmac_dev *ddev = dc->ddev;
840 struct txx9dmac_slave *ds = chan->private; 840 struct txx9dmac_slave *ds = chan->private;
841 struct txx9dmac_desc *prev; 841 struct txx9dmac_desc *prev;
842 struct txx9dmac_desc *first; 842 struct txx9dmac_desc *first;
843 unsigned int i; 843 unsigned int i;
844 struct scatterlist *sg; 844 struct scatterlist *sg;
845 845
846 dev_vdbg(chan2dev(chan), "prep_dma_slave\n"); 846 dev_vdbg(chan2dev(chan), "prep_dma_slave\n");
847 847
848 BUG_ON(!ds || !ds->reg_width); 848 BUG_ON(!ds || !ds->reg_width);
849 if (ds->tx_reg) 849 if (ds->tx_reg)
850 BUG_ON(direction != DMA_MEM_TO_DEV); 850 BUG_ON(direction != DMA_MEM_TO_DEV);
851 else 851 else
852 BUG_ON(direction != DMA_DEV_TO_MEM); 852 BUG_ON(direction != DMA_DEV_TO_MEM);
853 if (unlikely(!sg_len)) 853 if (unlikely(!sg_len))
854 return NULL; 854 return NULL;
855 855
856 prev = first = NULL; 856 prev = first = NULL;
857 857
858 for_each_sg(sgl, sg, sg_len, i) { 858 for_each_sg(sgl, sg, sg_len, i) {
859 struct txx9dmac_desc *desc; 859 struct txx9dmac_desc *desc;
860 dma_addr_t mem; 860 dma_addr_t mem;
861 u32 sai, dai; 861 u32 sai, dai;
862 862
863 desc = txx9dmac_desc_get(dc); 863 desc = txx9dmac_desc_get(dc);
864 if (!desc) { 864 if (!desc) {
865 txx9dmac_desc_put(dc, first); 865 txx9dmac_desc_put(dc, first);
866 return NULL; 866 return NULL;
867 } 867 }
868 868
869 mem = sg_dma_address(sg); 869 mem = sg_dma_address(sg);
870 870
871 if (__is_dmac64(ddev)) { 871 if (__is_dmac64(ddev)) {
872 if (direction == DMA_MEM_TO_DEV) { 872 if (direction == DMA_MEM_TO_DEV) {
873 desc->hwdesc.SAR = mem; 873 desc->hwdesc.SAR = mem;
874 desc->hwdesc.DAR = ds->tx_reg; 874 desc->hwdesc.DAR = ds->tx_reg;
875 } else { 875 } else {
876 desc->hwdesc.SAR = ds->rx_reg; 876 desc->hwdesc.SAR = ds->rx_reg;
877 desc->hwdesc.DAR = mem; 877 desc->hwdesc.DAR = mem;
878 } 878 }
879 desc->hwdesc.CNTR = sg_dma_len(sg); 879 desc->hwdesc.CNTR = sg_dma_len(sg);
880 } else { 880 } else {
881 if (direction == DMA_MEM_TO_DEV) { 881 if (direction == DMA_MEM_TO_DEV) {
882 desc->hwdesc32.SAR = mem; 882 desc->hwdesc32.SAR = mem;
883 desc->hwdesc32.DAR = ds->tx_reg; 883 desc->hwdesc32.DAR = ds->tx_reg;
884 } else { 884 } else {
885 desc->hwdesc32.SAR = ds->rx_reg; 885 desc->hwdesc32.SAR = ds->rx_reg;
886 desc->hwdesc32.DAR = mem; 886 desc->hwdesc32.DAR = mem;
887 } 887 }
888 desc->hwdesc32.CNTR = sg_dma_len(sg); 888 desc->hwdesc32.CNTR = sg_dma_len(sg);
889 } 889 }
890 if (direction == DMA_MEM_TO_DEV) { 890 if (direction == DMA_MEM_TO_DEV) {
891 sai = ds->reg_width; 891 sai = ds->reg_width;
892 dai = 0; 892 dai = 0;
893 } else { 893 } else {
894 sai = 0; 894 sai = 0;
895 dai = ds->reg_width; 895 dai = ds->reg_width;
896 } 896 }
897 txx9dmac_desc_set_nosimple(ddev, desc, sai, dai, 897 txx9dmac_desc_set_nosimple(ddev, desc, sai, dai,
898 dc->ccr | TXX9_DMA_CCR_XFACT); 898 dc->ccr | TXX9_DMA_CCR_XFACT);
899 899
900 if (!first) { 900 if (!first) {
901 first = desc; 901 first = desc;
902 } else { 902 } else {
903 desc_write_CHAR(dc, prev, desc->txd.phys); 903 desc_write_CHAR(dc, prev, desc->txd.phys);
904 dma_sync_single_for_device(chan2parent(&dc->chan), 904 dma_sync_single_for_device(chan2parent(&dc->chan),
905 prev->txd.phys, 905 prev->txd.phys,
906 ddev->descsize, 906 ddev->descsize,
907 DMA_TO_DEVICE); 907 DMA_TO_DEVICE);
908 list_add_tail(&desc->desc_node, &first->tx_list); 908 list_add_tail(&desc->desc_node, &first->tx_list);
909 } 909 }
910 prev = desc; 910 prev = desc;
911 } 911 }
912 912
913 /* Trigger interrupt after last block */ 913 /* Trigger interrupt after last block */
914 if (flags & DMA_PREP_INTERRUPT) 914 if (flags & DMA_PREP_INTERRUPT)
915 txx9dmac_desc_set_INTENT(ddev, prev); 915 txx9dmac_desc_set_INTENT(ddev, prev);
916 916
917 desc_write_CHAR(dc, prev, 0); 917 desc_write_CHAR(dc, prev, 0);
918 dma_sync_single_for_device(chan2parent(&dc->chan), 918 dma_sync_single_for_device(chan2parent(&dc->chan),
919 prev->txd.phys, ddev->descsize, 919 prev->txd.phys, ddev->descsize,
920 DMA_TO_DEVICE); 920 DMA_TO_DEVICE);
921 921
922 first->txd.flags = flags; 922 first->txd.flags = flags;
923 first->len = 0; 923 first->len = 0;
924 924
925 return &first->txd; 925 return &first->txd;
926 } 926 }
927 927
928 static int txx9dmac_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, 928 static int txx9dmac_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
929 unsigned long arg) 929 unsigned long arg)
930 { 930 {
931 struct txx9dmac_chan *dc = to_txx9dmac_chan(chan); 931 struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
932 struct txx9dmac_desc *desc, *_desc; 932 struct txx9dmac_desc *desc, *_desc;
933 LIST_HEAD(list); 933 LIST_HEAD(list);
934 934
935 /* Only supports DMA_TERMINATE_ALL */ 935 /* Only supports DMA_TERMINATE_ALL */
936 if (cmd != DMA_TERMINATE_ALL) 936 if (cmd != DMA_TERMINATE_ALL)
937 return -EINVAL; 937 return -EINVAL;
938 938
939 dev_vdbg(chan2dev(chan), "terminate_all\n"); 939 dev_vdbg(chan2dev(chan), "terminate_all\n");
940 spin_lock_bh(&dc->lock); 940 spin_lock_bh(&dc->lock);
941 941
942 txx9dmac_reset_chan(dc); 942 txx9dmac_reset_chan(dc);
943 943
944 /* active_list entries will end up before queued entries */ 944 /* active_list entries will end up before queued entries */
945 list_splice_init(&dc->queue, &list); 945 list_splice_init(&dc->queue, &list);
946 list_splice_init(&dc->active_list, &list); 946 list_splice_init(&dc->active_list, &list);
947 947
948 spin_unlock_bh(&dc->lock); 948 spin_unlock_bh(&dc->lock);
949 949
950 /* Flush all pending and queued descriptors */ 950 /* Flush all pending and queued descriptors */
951 list_for_each_entry_safe(desc, _desc, &list, desc_node) 951 list_for_each_entry_safe(desc, _desc, &list, desc_node)
952 txx9dmac_descriptor_complete(dc, desc); 952 txx9dmac_descriptor_complete(dc, desc);
953 953
954 return 0; 954 return 0;
955 } 955 }
956 956
957 static enum dma_status 957 static enum dma_status
958 txx9dmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie, 958 txx9dmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
959 struct dma_tx_state *txstate) 959 struct dma_tx_state *txstate)
960 { 960 {
961 struct txx9dmac_chan *dc = to_txx9dmac_chan(chan); 961 struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
962 enum dma_status ret; 962 enum dma_status ret;
963 963
964 ret = dma_cookie_status(chan, cookie, txstate); 964 ret = dma_cookie_status(chan, cookie, txstate);
965 if (ret == DMA_SUCCESS) 965 if (ret == DMA_COMPLETE)
966 return DMA_SUCCESS; 966 return DMA_COMPLETE;
967 967
968 spin_lock_bh(&dc->lock); 968 spin_lock_bh(&dc->lock);
969 txx9dmac_scan_descriptors(dc); 969 txx9dmac_scan_descriptors(dc);
970 spin_unlock_bh(&dc->lock); 970 spin_unlock_bh(&dc->lock);
971 971
972 return dma_cookie_status(chan, cookie, txstate); 972 return dma_cookie_status(chan, cookie, txstate);
973 } 973 }
974 974
975 static void txx9dmac_chain_dynamic(struct txx9dmac_chan *dc, 975 static void txx9dmac_chain_dynamic(struct txx9dmac_chan *dc,
976 struct txx9dmac_desc *prev) 976 struct txx9dmac_desc *prev)
977 { 977 {
978 struct txx9dmac_dev *ddev = dc->ddev; 978 struct txx9dmac_dev *ddev = dc->ddev;
979 struct txx9dmac_desc *desc; 979 struct txx9dmac_desc *desc;
980 LIST_HEAD(list); 980 LIST_HEAD(list);
981 981
982 prev = txx9dmac_last_child(prev); 982 prev = txx9dmac_last_child(prev);
983 txx9dmac_dequeue(dc, &list); 983 txx9dmac_dequeue(dc, &list);
984 desc = list_entry(list.next, struct txx9dmac_desc, desc_node); 984 desc = list_entry(list.next, struct txx9dmac_desc, desc_node);
985 desc_write_CHAR(dc, prev, desc->txd.phys); 985 desc_write_CHAR(dc, prev, desc->txd.phys);
986 dma_sync_single_for_device(chan2parent(&dc->chan), 986 dma_sync_single_for_device(chan2parent(&dc->chan),
987 prev->txd.phys, ddev->descsize, 987 prev->txd.phys, ddev->descsize,
988 DMA_TO_DEVICE); 988 DMA_TO_DEVICE);
989 mmiowb(); 989 mmiowb();
990 if (!(channel_readl(dc, CSR) & TXX9_DMA_CSR_CHNEN) && 990 if (!(channel_readl(dc, CSR) & TXX9_DMA_CSR_CHNEN) &&
991 channel_read_CHAR(dc) == prev->txd.phys) 991 channel_read_CHAR(dc) == prev->txd.phys)
992 /* Restart chain DMA */ 992 /* Restart chain DMA */
993 channel_write_CHAR(dc, desc->txd.phys); 993 channel_write_CHAR(dc, desc->txd.phys);
994 list_splice_tail(&list, &dc->active_list); 994 list_splice_tail(&list, &dc->active_list);
995 } 995 }
996 996
997 static void txx9dmac_issue_pending(struct dma_chan *chan) 997 static void txx9dmac_issue_pending(struct dma_chan *chan)
998 { 998 {
999 struct txx9dmac_chan *dc = to_txx9dmac_chan(chan); 999 struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
1000 1000
1001 spin_lock_bh(&dc->lock); 1001 spin_lock_bh(&dc->lock);
1002 1002
1003 if (!list_empty(&dc->active_list)) 1003 if (!list_empty(&dc->active_list))
1004 txx9dmac_scan_descriptors(dc); 1004 txx9dmac_scan_descriptors(dc);
1005 if (!list_empty(&dc->queue)) { 1005 if (!list_empty(&dc->queue)) {
1006 if (list_empty(&dc->active_list)) { 1006 if (list_empty(&dc->active_list)) {
1007 txx9dmac_dequeue(dc, &dc->active_list); 1007 txx9dmac_dequeue(dc, &dc->active_list);
1008 txx9dmac_dostart(dc, txx9dmac_first_active(dc)); 1008 txx9dmac_dostart(dc, txx9dmac_first_active(dc));
1009 } else if (txx9_dma_have_SMPCHN()) { 1009 } else if (txx9_dma_have_SMPCHN()) {
1010 struct txx9dmac_desc *prev = txx9dmac_last_active(dc); 1010 struct txx9dmac_desc *prev = txx9dmac_last_active(dc);
1011 1011
1012 if (!(prev->txd.flags & DMA_PREP_INTERRUPT) || 1012 if (!(prev->txd.flags & DMA_PREP_INTERRUPT) ||
1013 txx9dmac_chan_INTENT(dc)) 1013 txx9dmac_chan_INTENT(dc))
1014 txx9dmac_chain_dynamic(dc, prev); 1014 txx9dmac_chain_dynamic(dc, prev);
1015 } 1015 }
1016 } 1016 }
1017 1017
1018 spin_unlock_bh(&dc->lock); 1018 spin_unlock_bh(&dc->lock);
1019 } 1019 }
1020 1020
1021 static int txx9dmac_alloc_chan_resources(struct dma_chan *chan) 1021 static int txx9dmac_alloc_chan_resources(struct dma_chan *chan)
1022 { 1022 {
1023 struct txx9dmac_chan *dc = to_txx9dmac_chan(chan); 1023 struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
1024 struct txx9dmac_slave *ds = chan->private; 1024 struct txx9dmac_slave *ds = chan->private;
1025 struct txx9dmac_desc *desc; 1025 struct txx9dmac_desc *desc;
1026 int i; 1026 int i;
1027 1027
1028 dev_vdbg(chan2dev(chan), "alloc_chan_resources\n"); 1028 dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");
1029 1029
1030 /* ASSERT: channel is idle */ 1030 /* ASSERT: channel is idle */
1031 if (channel_readl(dc, CSR) & TXX9_DMA_CSR_XFACT) { 1031 if (channel_readl(dc, CSR) & TXX9_DMA_CSR_XFACT) {
1032 dev_dbg(chan2dev(chan), "DMA channel not idle?\n"); 1032 dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
1033 return -EIO; 1033 return -EIO;
1034 } 1034 }
1035 1035
1036 dma_cookie_init(chan); 1036 dma_cookie_init(chan);
1037 1037
1038 dc->ccr = TXX9_DMA_CCR_IMMCHN | TXX9_DMA_CCR_INTENE | CCR_LE; 1038 dc->ccr = TXX9_DMA_CCR_IMMCHN | TXX9_DMA_CCR_INTENE | CCR_LE;
1039 txx9dmac_chan_set_SMPCHN(dc); 1039 txx9dmac_chan_set_SMPCHN(dc);
1040 if (!txx9_dma_have_SMPCHN() || (dc->ccr & TXX9_DMA_CCR_SMPCHN)) 1040 if (!txx9_dma_have_SMPCHN() || (dc->ccr & TXX9_DMA_CCR_SMPCHN))
1041 dc->ccr |= TXX9_DMA_CCR_INTENC; 1041 dc->ccr |= TXX9_DMA_CCR_INTENC;
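	/*
	 * A memcpy channel must not carry slave configuration, while a
	 * slave channel needs txx9dmac_slave data with exactly one of
	 * tx_reg or rx_reg set.
	 */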
1042 if (chan->device->device_prep_dma_memcpy) { 1042 if (chan->device->device_prep_dma_memcpy) {
1043 if (ds) 1043 if (ds)
1044 return -EINVAL; 1044 return -EINVAL;
1045 dc->ccr |= TXX9_DMA_CCR_XFSZ_X8; 1045 dc->ccr |= TXX9_DMA_CCR_XFSZ_X8;
1046 } else { 1046 } else {
1047 if (!ds || 1047 if (!ds ||
1048 (ds->tx_reg && ds->rx_reg) || (!ds->tx_reg && !ds->rx_reg)) 1048 (ds->tx_reg && ds->rx_reg) || (!ds->tx_reg && !ds->rx_reg))
1049 return -EINVAL; 1049 return -EINVAL;
1050 dc->ccr |= TXX9_DMA_CCR_EXTRQ | 1050 dc->ccr |= TXX9_DMA_CCR_EXTRQ |
1051 TXX9_DMA_CCR_XFSZ(__ffs(ds->reg_width)); 1051 TXX9_DMA_CCR_XFSZ(__ffs(ds->reg_width));
1052 txx9dmac_chan_set_INTENT(dc); 1052 txx9dmac_chan_set_INTENT(dc);
1053 } 1053 }
1054 1054
1055 spin_lock_bh(&dc->lock); 1055 spin_lock_bh(&dc->lock);
1056 i = dc->descs_allocated; 1056 i = dc->descs_allocated;
1057 while (dc->descs_allocated < TXX9_DMA_INITIAL_DESC_COUNT) { 1057 while (dc->descs_allocated < TXX9_DMA_INITIAL_DESC_COUNT) {
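		/*
		 * txx9dmac_desc_alloc() allocates with GFP_KERNEL and may
		 * sleep, so drop the channel lock around the allocation.
		 */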
1058 spin_unlock_bh(&dc->lock); 1058 spin_unlock_bh(&dc->lock);
1059 1059
1060 desc = txx9dmac_desc_alloc(dc, GFP_KERNEL); 1060 desc = txx9dmac_desc_alloc(dc, GFP_KERNEL);
1061 if (!desc) { 1061 if (!desc) {
1062 dev_info(chan2dev(chan), 1062 dev_info(chan2dev(chan),
1063 "only allocated %d descriptors\n", i); 1063 "only allocated %d descriptors\n", i);
1064 spin_lock_bh(&dc->lock); 1064 spin_lock_bh(&dc->lock);
1065 break; 1065 break;
1066 } 1066 }
1067 txx9dmac_desc_put(dc, desc); 1067 txx9dmac_desc_put(dc, desc);
1068 1068
1069 spin_lock_bh(&dc->lock); 1069 spin_lock_bh(&dc->lock);
1070 i = ++dc->descs_allocated; 1070 i = ++dc->descs_allocated;
1071 } 1071 }
1072 spin_unlock_bh(&dc->lock); 1072 spin_unlock_bh(&dc->lock);
1073 1073
1074 dev_dbg(chan2dev(chan), 1074 dev_dbg(chan2dev(chan),
1075 "alloc_chan_resources allocated %d descriptors\n", i); 1075 "alloc_chan_resources allocated %d descriptors\n", i);
1076 1076
1077 return i; 1077 return i;
1078 } 1078 }
1079 1079
1080 static void txx9dmac_free_chan_resources(struct dma_chan *chan) 1080 static void txx9dmac_free_chan_resources(struct dma_chan *chan)
1081 { 1081 {
1082 struct txx9dmac_chan *dc = to_txx9dmac_chan(chan); 1082 struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
1083 struct txx9dmac_dev *ddev = dc->ddev; 1083 struct txx9dmac_dev *ddev = dc->ddev;
1084 struct txx9dmac_desc *desc, *_desc; 1084 struct txx9dmac_desc *desc, *_desc;
1085 LIST_HEAD(list); 1085 LIST_HEAD(list);
1086 1086
1087 dev_dbg(chan2dev(chan), "free_chan_resources (descs allocated=%u)\n", 1087 dev_dbg(chan2dev(chan), "free_chan_resources (descs allocated=%u)\n",
1088 dc->descs_allocated); 1088 dc->descs_allocated);
1089 1089
1090 /* ASSERT: channel is idle */ 1090 /* ASSERT: channel is idle */
1091 BUG_ON(!list_empty(&dc->active_list)); 1091 BUG_ON(!list_empty(&dc->active_list));
1092 BUG_ON(!list_empty(&dc->queue)); 1092 BUG_ON(!list_empty(&dc->queue));
1093 BUG_ON(channel_readl(dc, CSR) & TXX9_DMA_CSR_XFACT); 1093 BUG_ON(channel_readl(dc, CSR) & TXX9_DMA_CSR_XFACT);
1094 1094
1095 spin_lock_bh(&dc->lock); 1095 spin_lock_bh(&dc->lock);
1096 list_splice_init(&dc->free_list, &list); 1096 list_splice_init(&dc->free_list, &list);
1097 dc->descs_allocated = 0; 1097 dc->descs_allocated = 0;
1098 spin_unlock_bh(&dc->lock); 1098 spin_unlock_bh(&dc->lock);
1099 1099
1100 list_for_each_entry_safe(desc, _desc, &list, desc_node) { 1100 list_for_each_entry_safe(desc, _desc, &list, desc_node) {
1101 dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc); 1101 dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc);
1102 dma_unmap_single(chan2parent(chan), desc->txd.phys, 1102 dma_unmap_single(chan2parent(chan), desc->txd.phys,
1103 ddev->descsize, DMA_TO_DEVICE); 1103 ddev->descsize, DMA_TO_DEVICE);
1104 kfree(desc); 1104 kfree(desc);
1105 } 1105 }
1106 1106
1107 dev_vdbg(chan2dev(chan), "free_chan_resources done\n"); 1107 dev_vdbg(chan2dev(chan), "free_chan_resources done\n");
1108 } 1108 }
1109 1109
1110 /*----------------------------------------------------------------------*/ 1110 /*----------------------------------------------------------------------*/
1111 1111
1112 static void txx9dmac_off(struct txx9dmac_dev *ddev) 1112 static void txx9dmac_off(struct txx9dmac_dev *ddev)
1113 { 1113 {
1114 dma_writel(ddev, MCR, 0); 1114 dma_writel(ddev, MCR, 0);
1115 mmiowb(); 1115 mmiowb();
1116 } 1116 }
1117 1117
1118 static int __init txx9dmac_chan_probe(struct platform_device *pdev) 1118 static int __init txx9dmac_chan_probe(struct platform_device *pdev)
1119 { 1119 {
1120 struct txx9dmac_chan_platform_data *cpdata = 1120 struct txx9dmac_chan_platform_data *cpdata =
1121 dev_get_platdata(&pdev->dev); 1121 dev_get_platdata(&pdev->dev);
1122 struct platform_device *dmac_dev = cpdata->dmac_dev; 1122 struct platform_device *dmac_dev = cpdata->dmac_dev;
1123 struct txx9dmac_platform_data *pdata = dev_get_platdata(&dmac_dev->dev); 1123 struct txx9dmac_platform_data *pdata = dev_get_platdata(&dmac_dev->dev);
1124 struct txx9dmac_chan *dc; 1124 struct txx9dmac_chan *dc;
1125 int err; 1125 int err;
1126 int ch = pdev->id % TXX9_DMA_MAX_NR_CHANNELS; 1126 int ch = pdev->id % TXX9_DMA_MAX_NR_CHANNELS;
1127 int irq; 1127 int irq;
1128 1128
1129 dc = devm_kzalloc(&pdev->dev, sizeof(*dc), GFP_KERNEL); 1129 dc = devm_kzalloc(&pdev->dev, sizeof(*dc), GFP_KERNEL);
1130 if (!dc) 1130 if (!dc)
1131 return -ENOMEM; 1131 return -ENOMEM;
1132 1132
1133 dc->dma.dev = &pdev->dev; 1133 dc->dma.dev = &pdev->dev;
1134 dc->dma.device_alloc_chan_resources = txx9dmac_alloc_chan_resources; 1134 dc->dma.device_alloc_chan_resources = txx9dmac_alloc_chan_resources;
1135 dc->dma.device_free_chan_resources = txx9dmac_free_chan_resources; 1135 dc->dma.device_free_chan_resources = txx9dmac_free_chan_resources;
1136 dc->dma.device_control = txx9dmac_control; 1136 dc->dma.device_control = txx9dmac_control;
1137 dc->dma.device_tx_status = txx9dmac_tx_status; 1137 dc->dma.device_tx_status = txx9dmac_tx_status;
1138 dc->dma.device_issue_pending = txx9dmac_issue_pending; 1138 dc->dma.device_issue_pending = txx9dmac_issue_pending;
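	/*
	 * The platform data nominates at most one channel for memcpy;
	 * every other channel is registered as a private slave-DMA
	 * channel.
	 */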
1139 if (pdata && pdata->memcpy_chan == ch) { 1139 if (pdata && pdata->memcpy_chan == ch) {
1140 dc->dma.device_prep_dma_memcpy = txx9dmac_prep_dma_memcpy; 1140 dc->dma.device_prep_dma_memcpy = txx9dmac_prep_dma_memcpy;
1141 dma_cap_set(DMA_MEMCPY, dc->dma.cap_mask); 1141 dma_cap_set(DMA_MEMCPY, dc->dma.cap_mask);
1142 } else { 1142 } else {
1143 dc->dma.device_prep_slave_sg = txx9dmac_prep_slave_sg; 1143 dc->dma.device_prep_slave_sg = txx9dmac_prep_slave_sg;
1144 dma_cap_set(DMA_SLAVE, dc->dma.cap_mask); 1144 dma_cap_set(DMA_SLAVE, dc->dma.cap_mask);
1145 dma_cap_set(DMA_PRIVATE, dc->dma.cap_mask); 1145 dma_cap_set(DMA_PRIVATE, dc->dma.cap_mask);
1146 } 1146 }
1147 1147
1148 INIT_LIST_HEAD(&dc->dma.channels); 1148 INIT_LIST_HEAD(&dc->dma.channels);
1149 dc->ddev = platform_get_drvdata(dmac_dev); 1149 dc->ddev = platform_get_drvdata(dmac_dev);
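	/*
	 * If the controller itself has no interrupt line, each channel
	 * uses its own IRQ and per-channel tasklet instead.
	 */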
1150 if (dc->ddev->irq < 0) { 1150 if (dc->ddev->irq < 0) {
1151 irq = platform_get_irq(pdev, 0); 1151 irq = platform_get_irq(pdev, 0);
1152 if (irq < 0) 1152 if (irq < 0)
1153 return irq; 1153 return irq;
1154 tasklet_init(&dc->tasklet, txx9dmac_chan_tasklet, 1154 tasklet_init(&dc->tasklet, txx9dmac_chan_tasklet,
1155 (unsigned long)dc); 1155 (unsigned long)dc);
1156 dc->irq = irq; 1156 dc->irq = irq;
1157 err = devm_request_irq(&pdev->dev, dc->irq, 1157 err = devm_request_irq(&pdev->dev, dc->irq,
1158 txx9dmac_chan_interrupt, 0, dev_name(&pdev->dev), dc); 1158 txx9dmac_chan_interrupt, 0, dev_name(&pdev->dev), dc);
1159 if (err) 1159 if (err)
1160 return err; 1160 return err;
1161 } else 1161 } else
1162 dc->irq = -1; 1162 dc->irq = -1;
1163 dc->ddev->chan[ch] = dc; 1163 dc->ddev->chan[ch] = dc;
1164 dc->chan.device = &dc->dma; 1164 dc->chan.device = &dc->dma;
1165 list_add_tail(&dc->chan.device_node, &dc->chan.device->channels); 1165 list_add_tail(&dc->chan.device_node, &dc->chan.device->channels);
1166 dma_cookie_init(&dc->chan); 1166 dma_cookie_init(&dc->chan);
1167 1167
1168 if (is_dmac64(dc)) 1168 if (is_dmac64(dc))
1169 dc->ch_regs = &__txx9dmac_regs(dc->ddev)->CHAN[ch]; 1169 dc->ch_regs = &__txx9dmac_regs(dc->ddev)->CHAN[ch];
1170 else 1170 else
1171 dc->ch_regs = &__txx9dmac_regs32(dc->ddev)->CHAN[ch]; 1171 dc->ch_regs = &__txx9dmac_regs32(dc->ddev)->CHAN[ch];
1172 spin_lock_init(&dc->lock); 1172 spin_lock_init(&dc->lock);
1173 1173
1174 INIT_LIST_HEAD(&dc->active_list); 1174 INIT_LIST_HEAD(&dc->active_list);
1175 INIT_LIST_HEAD(&dc->queue); 1175 INIT_LIST_HEAD(&dc->queue);
1176 INIT_LIST_HEAD(&dc->free_list); 1176 INIT_LIST_HEAD(&dc->free_list);
1177 1177
1178 txx9dmac_reset_chan(dc); 1178 txx9dmac_reset_chan(dc);
1179 1179
1180 platform_set_drvdata(pdev, dc); 1180 platform_set_drvdata(pdev, dc);
1181 1181
1182 err = dma_async_device_register(&dc->dma); 1182 err = dma_async_device_register(&dc->dma);
1183 if (err) 1183 if (err)
1184 return err; 1184 return err;
1185 dev_dbg(&pdev->dev, "TXx9 DMA Channel (dma%d%s%s)\n", 1185 dev_dbg(&pdev->dev, "TXx9 DMA Channel (dma%d%s%s)\n",
1186 dc->dma.dev_id, 1186 dc->dma.dev_id,
1187 dma_has_cap(DMA_MEMCPY, dc->dma.cap_mask) ? " memcpy" : "", 1187 dma_has_cap(DMA_MEMCPY, dc->dma.cap_mask) ? " memcpy" : "",
1188 dma_has_cap(DMA_SLAVE, dc->dma.cap_mask) ? " slave" : ""); 1188 dma_has_cap(DMA_SLAVE, dc->dma.cap_mask) ? " slave" : "");
1189 1189
1190 return 0; 1190 return 0;
1191 } 1191 }
1192 1192
1193 static int txx9dmac_chan_remove(struct platform_device *pdev) 1193 static int txx9dmac_chan_remove(struct platform_device *pdev)
1194 { 1194 {
1195 struct txx9dmac_chan *dc = platform_get_drvdata(pdev); 1195 struct txx9dmac_chan *dc = platform_get_drvdata(pdev);
1196 1196
1197 dma_async_device_unregister(&dc->dma); 1197 dma_async_device_unregister(&dc->dma);
1198 if (dc->irq >= 0) 1198 if (dc->irq >= 0)
1199 tasklet_kill(&dc->tasklet); 1199 tasklet_kill(&dc->tasklet);
1200 dc->ddev->chan[pdev->id % TXX9_DMA_MAX_NR_CHANNELS] = NULL; 1200 dc->ddev->chan[pdev->id % TXX9_DMA_MAX_NR_CHANNELS] = NULL;
1201 return 0; 1201 return 0;
1202 } 1202 }
1203 1203
1204 static int __init txx9dmac_probe(struct platform_device *pdev) 1204 static int __init txx9dmac_probe(struct platform_device *pdev)
1205 { 1205 {
1206 struct txx9dmac_platform_data *pdata = dev_get_platdata(&pdev->dev); 1206 struct txx9dmac_platform_data *pdata = dev_get_platdata(&pdev->dev);
1207 struct resource *io; 1207 struct resource *io;
1208 struct txx9dmac_dev *ddev; 1208 struct txx9dmac_dev *ddev;
1209 u32 mcr; 1209 u32 mcr;
1210 int err; 1210 int err;
1211 1211
1212 io = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1212 io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1213 if (!io) 1213 if (!io)
1214 return -EINVAL; 1214 return -EINVAL;
1215 1215
1216 ddev = devm_kzalloc(&pdev->dev, sizeof(*ddev), GFP_KERNEL); 1216 ddev = devm_kzalloc(&pdev->dev, sizeof(*ddev), GFP_KERNEL);
1217 if (!ddev) 1217 if (!ddev)
1218 return -ENOMEM; 1218 return -ENOMEM;
1219 1219
1220 if (!devm_request_mem_region(&pdev->dev, io->start, resource_size(io), 1220 if (!devm_request_mem_region(&pdev->dev, io->start, resource_size(io),
1221 dev_name(&pdev->dev))) 1221 dev_name(&pdev->dev)))
1222 return -EBUSY; 1222 return -EBUSY;
1223 1223
1224 ddev->regs = devm_ioremap(&pdev->dev, io->start, resource_size(io)); 1224 ddev->regs = devm_ioremap(&pdev->dev, io->start, resource_size(io));
1225 if (!ddev->regs) 1225 if (!ddev->regs)
1226 return -ENOMEM; 1226 return -ENOMEM;
1227 ddev->have_64bit_regs = pdata->have_64bit_regs; 1227 ddev->have_64bit_regs = pdata->have_64bit_regs;
1228 if (__is_dmac64(ddev)) 1228 if (__is_dmac64(ddev))
1229 ddev->descsize = sizeof(struct txx9dmac_hwdesc); 1229 ddev->descsize = sizeof(struct txx9dmac_hwdesc);
1230 else 1230 else
1231 ddev->descsize = sizeof(struct txx9dmac_hwdesc32); 1231 ddev->descsize = sizeof(struct txx9dmac_hwdesc32);
1232 1232
1233 /* force dma off, just in case */ 1233 /* force dma off, just in case */
1234 txx9dmac_off(ddev); 1234 txx9dmac_off(ddev);
1235 1235
1236 ddev->irq = platform_get_irq(pdev, 0); 1236 ddev->irq = platform_get_irq(pdev, 0);
1237 if (ddev->irq >= 0) { 1237 if (ddev->irq >= 0) {
1238 tasklet_init(&ddev->tasklet, txx9dmac_tasklet, 1238 tasklet_init(&ddev->tasklet, txx9dmac_tasklet,
1239 (unsigned long)ddev); 1239 (unsigned long)ddev);
1240 err = devm_request_irq(&pdev->dev, ddev->irq, 1240 err = devm_request_irq(&pdev->dev, ddev->irq,
1241 txx9dmac_interrupt, 0, dev_name(&pdev->dev), ddev); 1241 txx9dmac_interrupt, 0, dev_name(&pdev->dev), ddev);
1242 if (err) 1242 if (err)
1243 return err; 1243 return err;
1244 } 1244 }
1245 1245
1246 mcr = TXX9_DMA_MCR_MSTEN | MCR_LE; 1246 mcr = TXX9_DMA_MCR_MSTEN | MCR_LE;
1247 if (pdata && pdata->memcpy_chan >= 0) 1247 if (pdata && pdata->memcpy_chan >= 0)
1248 mcr |= TXX9_DMA_MCR_FIFUM(pdata->memcpy_chan); 1248 mcr |= TXX9_DMA_MCR_FIFUM(pdata->memcpy_chan);
1249 dma_writel(ddev, MCR, mcr); 1249 dma_writel(ddev, MCR, mcr);
1250 1250
1251 platform_set_drvdata(pdev, ddev); 1251 platform_set_drvdata(pdev, ddev);
1252 return 0; 1252 return 0;
1253 } 1253 }
1254 1254
1255 static int txx9dmac_remove(struct platform_device *pdev) 1255 static int txx9dmac_remove(struct platform_device *pdev)
1256 { 1256 {
1257 struct txx9dmac_dev *ddev = platform_get_drvdata(pdev); 1257 struct txx9dmac_dev *ddev = platform_get_drvdata(pdev);
1258 1258
1259 txx9dmac_off(ddev); 1259 txx9dmac_off(ddev);
1260 if (ddev->irq >= 0) 1260 if (ddev->irq >= 0)
1261 tasklet_kill(&ddev->tasklet); 1261 tasklet_kill(&ddev->tasklet);
1262 return 0; 1262 return 0;
1263 } 1263 }
1264 1264
1265 static void txx9dmac_shutdown(struct platform_device *pdev) 1265 static void txx9dmac_shutdown(struct platform_device *pdev)
1266 { 1266 {
1267 struct txx9dmac_dev *ddev = platform_get_drvdata(pdev); 1267 struct txx9dmac_dev *ddev = platform_get_drvdata(pdev);
1268 1268
1269 txx9dmac_off(ddev); 1269 txx9dmac_off(ddev);
1270 } 1270 }
1271 1271
1272 static int txx9dmac_suspend_noirq(struct device *dev) 1272 static int txx9dmac_suspend_noirq(struct device *dev)
1273 { 1273 {
1274 struct platform_device *pdev = to_platform_device(dev); 1274 struct platform_device *pdev = to_platform_device(dev);
1275 struct txx9dmac_dev *ddev = platform_get_drvdata(pdev); 1275 struct txx9dmac_dev *ddev = platform_get_drvdata(pdev);
1276 1276
1277 txx9dmac_off(ddev); 1277 txx9dmac_off(ddev);
1278 return 0; 1278 return 0;
1279 } 1279 }
1280 1280
1281 static int txx9dmac_resume_noirq(struct device *dev) 1281 static int txx9dmac_resume_noirq(struct device *dev)
1282 { 1282 {
1283 struct platform_device *pdev = to_platform_device(dev); 1283 struct platform_device *pdev = to_platform_device(dev);
1284 struct txx9dmac_dev *ddev = platform_get_drvdata(pdev); 1284 struct txx9dmac_dev *ddev = platform_get_drvdata(pdev);
1285 struct txx9dmac_platform_data *pdata = dev_get_platdata(&pdev->dev); 1285 struct txx9dmac_platform_data *pdata = dev_get_platdata(&pdev->dev);
1286 u32 mcr; 1286 u32 mcr;
1287 1287
1288 mcr = TXX9_DMA_MCR_MSTEN | MCR_LE; 1288 mcr = TXX9_DMA_MCR_MSTEN | MCR_LE;
1289 if (pdata && pdata->memcpy_chan >= 0) 1289 if (pdata && pdata->memcpy_chan >= 0)
1290 mcr |= TXX9_DMA_MCR_FIFUM(pdata->memcpy_chan); 1290 mcr |= TXX9_DMA_MCR_FIFUM(pdata->memcpy_chan);
1291 dma_writel(ddev, MCR, mcr); 1291 dma_writel(ddev, MCR, mcr);
1292 return 0; 1292 return 0;
1293 1293
1294 } 1294 }
1295 1295
1296 static const struct dev_pm_ops txx9dmac_dev_pm_ops = { 1296 static const struct dev_pm_ops txx9dmac_dev_pm_ops = {
1297 .suspend_noirq = txx9dmac_suspend_noirq, 1297 .suspend_noirq = txx9dmac_suspend_noirq,
1298 .resume_noirq = txx9dmac_resume_noirq, 1298 .resume_noirq = txx9dmac_resume_noirq,
1299 }; 1299 };
1300 1300
1301 static struct platform_driver txx9dmac_chan_driver = { 1301 static struct platform_driver txx9dmac_chan_driver = {
1302 .remove = txx9dmac_chan_remove, 1302 .remove = txx9dmac_chan_remove,
1303 .driver = { 1303 .driver = {
1304 .name = "txx9dmac-chan", 1304 .name = "txx9dmac-chan",
1305 }, 1305 },
1306 }; 1306 };
1307 1307
1308 static struct platform_driver txx9dmac_driver = { 1308 static struct platform_driver txx9dmac_driver = {
1309 .remove = txx9dmac_remove, 1309 .remove = txx9dmac_remove,
1310 .shutdown = txx9dmac_shutdown, 1310 .shutdown = txx9dmac_shutdown,
1311 .driver = { 1311 .driver = {
1312 .name = "txx9dmac", 1312 .name = "txx9dmac",
1313 .pm = &txx9dmac_dev_pm_ops, 1313 .pm = &txx9dmac_dev_pm_ops,
1314 }, 1314 },
1315 }; 1315 };
1316 1316
1317 static int __init txx9dmac_init(void) 1317 static int __init txx9dmac_init(void)
1318 { 1318 {
1319 int rc; 1319 int rc;
1320 1320
1321 rc = platform_driver_probe(&txx9dmac_driver, txx9dmac_probe); 1321 rc = platform_driver_probe(&txx9dmac_driver, txx9dmac_probe);
1322 if (!rc) { 1322 if (!rc) {
1323 rc = platform_driver_probe(&txx9dmac_chan_driver, 1323 rc = platform_driver_probe(&txx9dmac_chan_driver,
1324 txx9dmac_chan_probe); 1324 txx9dmac_chan_probe);
1325 if (rc) 1325 if (rc)
1326 platform_driver_unregister(&txx9dmac_driver); 1326 platform_driver_unregister(&txx9dmac_driver);
1327 } 1327 }
1328 return rc; 1328 return rc;
1329 } 1329 }
1330 module_init(txx9dmac_init); 1330 module_init(txx9dmac_init);
1331 1331
1332 static void __exit txx9dmac_exit(void) 1332 static void __exit txx9dmac_exit(void)
1333 { 1333 {
1334 platform_driver_unregister(&txx9dmac_chan_driver); 1334 platform_driver_unregister(&txx9dmac_chan_driver);
1335 platform_driver_unregister(&txx9dmac_driver); 1335 platform_driver_unregister(&txx9dmac_driver);
1336 } 1336 }
1337 module_exit(txx9dmac_exit); 1337 module_exit(txx9dmac_exit);
1338 1338
1339 MODULE_LICENSE("GPL"); 1339 MODULE_LICENSE("GPL");
1340 MODULE_DESCRIPTION("TXx9 DMA Controller driver"); 1340 MODULE_DESCRIPTION("TXx9 DMA Controller driver");
1341 MODULE_AUTHOR("Atsushi Nemoto <anemo@mba.ocn.ne.jp>"); 1341 MODULE_AUTHOR("Atsushi Nemoto <anemo@mba.ocn.ne.jp>");
1342 MODULE_ALIAS("platform:txx9dmac"); 1342 MODULE_ALIAS("platform:txx9dmac");
1343 MODULE_ALIAS("platform:txx9dmac-chan"); 1343 MODULE_ALIAS("platform:txx9dmac-chan");
1344 1344
drivers/tty/serial/sh-sci.c
1 /* 1 /*
2 * SuperH on-chip serial module support. (SCI with no FIFO / with FIFO) 2 * SuperH on-chip serial module support. (SCI with no FIFO / with FIFO)
3 * 3 *
4 * Copyright (C) 2002 - 2011 Paul Mundt 4 * Copyright (C) 2002 - 2011 Paul Mundt
5 * Modified to support SH7720 SCIF. Markus Brunner, Mark Jonas (Jul 2007). 5 * Modified to support SH7720 SCIF. Markus Brunner, Mark Jonas (Jul 2007).
6 * 6 *
7 * based off of the old drivers/char/sh-sci.c by: 7 * based off of the old drivers/char/sh-sci.c by:
8 * 8 *
9 * Copyright (C) 1999, 2000 Niibe Yutaka 9 * Copyright (C) 1999, 2000 Niibe Yutaka
10 * Copyright (C) 2000 Sugioka Toshinobu 10 * Copyright (C) 2000 Sugioka Toshinobu
11 * Modified to support multiple serial ports. Stuart Menefy (May 2000). 11 * Modified to support multiple serial ports. Stuart Menefy (May 2000).
12 * Modified to support SecureEdge. David McCullough (2002) 12 * Modified to support SecureEdge. David McCullough (2002)
13 * Modified to support SH7300 SCIF. Takashi Kusuda (Jun 2003). 13 * Modified to support SH7300 SCIF. Takashi Kusuda (Jun 2003).
14 * Removed SH7300 support (Jul 2007). 14 * Removed SH7300 support (Jul 2007).
15 * 15 *
16 * This file is subject to the terms and conditions of the GNU General Public 16 * This file is subject to the terms and conditions of the GNU General Public
17 * License. See the file "COPYING" in the main directory of this archive 17 * License. See the file "COPYING" in the main directory of this archive
18 * for more details. 18 * for more details.
19 */ 19 */
20 #if defined(CONFIG_SERIAL_SH_SCI_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) 20 #if defined(CONFIG_SERIAL_SH_SCI_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
21 #define SUPPORT_SYSRQ 21 #define SUPPORT_SYSRQ
22 #endif 22 #endif
23 23
24 #undef DEBUG 24 #undef DEBUG
25 25
26 #include <linux/module.h> 26 #include <linux/module.h>
27 #include <linux/errno.h> 27 #include <linux/errno.h>
28 #include <linux/sh_dma.h> 28 #include <linux/sh_dma.h>
29 #include <linux/timer.h> 29 #include <linux/timer.h>
30 #include <linux/interrupt.h> 30 #include <linux/interrupt.h>
31 #include <linux/tty.h> 31 #include <linux/tty.h>
32 #include <linux/tty_flip.h> 32 #include <linux/tty_flip.h>
33 #include <linux/serial.h> 33 #include <linux/serial.h>
34 #include <linux/major.h> 34 #include <linux/major.h>
35 #include <linux/string.h> 35 #include <linux/string.h>
36 #include <linux/sysrq.h> 36 #include <linux/sysrq.h>
37 #include <linux/ioport.h> 37 #include <linux/ioport.h>
38 #include <linux/mm.h> 38 #include <linux/mm.h>
39 #include <linux/init.h> 39 #include <linux/init.h>
40 #include <linux/delay.h> 40 #include <linux/delay.h>
41 #include <linux/console.h> 41 #include <linux/console.h>
42 #include <linux/platform_device.h> 42 #include <linux/platform_device.h>
43 #include <linux/serial_sci.h> 43 #include <linux/serial_sci.h>
44 #include <linux/notifier.h> 44 #include <linux/notifier.h>
45 #include <linux/pm_runtime.h> 45 #include <linux/pm_runtime.h>
46 #include <linux/cpufreq.h> 46 #include <linux/cpufreq.h>
47 #include <linux/clk.h> 47 #include <linux/clk.h>
48 #include <linux/ctype.h> 48 #include <linux/ctype.h>
49 #include <linux/err.h> 49 #include <linux/err.h>
50 #include <linux/dmaengine.h> 50 #include <linux/dmaengine.h>
51 #include <linux/dma-mapping.h> 51 #include <linux/dma-mapping.h>
52 #include <linux/scatterlist.h> 52 #include <linux/scatterlist.h>
53 #include <linux/slab.h> 53 #include <linux/slab.h>
54 #include <linux/gpio.h> 54 #include <linux/gpio.h>
55 55
56 #ifdef CONFIG_SUPERH 56 #ifdef CONFIG_SUPERH
57 #include <asm/sh_bios.h> 57 #include <asm/sh_bios.h>
58 #endif 58 #endif
59 59
60 #include "sh-sci.h" 60 #include "sh-sci.h"
61 61
62 struct sci_port { 62 struct sci_port {
63 struct uart_port port; 63 struct uart_port port;
64 64
65 /* Platform configuration */ 65 /* Platform configuration */
66 struct plat_sci_port *cfg; 66 struct plat_sci_port *cfg;
67 67
68 /* Break timer */ 68 /* Break timer */
69 struct timer_list break_timer; 69 struct timer_list break_timer;
70 int break_flag; 70 int break_flag;
71 71
72 /* Interface clock */ 72 /* Interface clock */
73 struct clk *iclk; 73 struct clk *iclk;
74 /* Function clock */ 74 /* Function clock */
75 struct clk *fclk; 75 struct clk *fclk;
76 76
77 char *irqstr[SCIx_NR_IRQS]; 77 char *irqstr[SCIx_NR_IRQS];
78 char *gpiostr[SCIx_NR_FNS]; 78 char *gpiostr[SCIx_NR_FNS];
79 79
80 struct dma_chan *chan_tx; 80 struct dma_chan *chan_tx;
81 struct dma_chan *chan_rx; 81 struct dma_chan *chan_rx;
82 82
83 #ifdef CONFIG_SERIAL_SH_SCI_DMA 83 #ifdef CONFIG_SERIAL_SH_SCI_DMA
84 struct dma_async_tx_descriptor *desc_tx; 84 struct dma_async_tx_descriptor *desc_tx;
85 struct dma_async_tx_descriptor *desc_rx[2]; 85 struct dma_async_tx_descriptor *desc_rx[2];
86 dma_cookie_t cookie_tx; 86 dma_cookie_t cookie_tx;
87 dma_cookie_t cookie_rx[2]; 87 dma_cookie_t cookie_rx[2];
88 dma_cookie_t active_rx; 88 dma_cookie_t active_rx;
89 struct scatterlist sg_tx; 89 struct scatterlist sg_tx;
90 unsigned int sg_len_tx; 90 unsigned int sg_len_tx;
91 struct scatterlist sg_rx[2]; 91 struct scatterlist sg_rx[2];
92 size_t buf_len_rx; 92 size_t buf_len_rx;
93 struct sh_dmae_slave param_tx; 93 struct sh_dmae_slave param_tx;
94 struct sh_dmae_slave param_rx; 94 struct sh_dmae_slave param_rx;
95 struct work_struct work_tx; 95 struct work_struct work_tx;
96 struct work_struct work_rx; 96 struct work_struct work_rx;
97 struct timer_list rx_timer; 97 struct timer_list rx_timer;
98 unsigned int rx_timeout; 98 unsigned int rx_timeout;
99 #endif 99 #endif
100 100
101 struct notifier_block freq_transition; 101 struct notifier_block freq_transition;
102 }; 102 };
103 103
104 /* Function prototypes */ 104 /* Function prototypes */
105 static void sci_start_tx(struct uart_port *port); 105 static void sci_start_tx(struct uart_port *port);
106 static void sci_stop_tx(struct uart_port *port); 106 static void sci_stop_tx(struct uart_port *port);
107 static void sci_start_rx(struct uart_port *port); 107 static void sci_start_rx(struct uart_port *port);
108 108
109 #define SCI_NPORTS CONFIG_SERIAL_SH_SCI_NR_UARTS 109 #define SCI_NPORTS CONFIG_SERIAL_SH_SCI_NR_UARTS
110 110
111 static struct sci_port sci_ports[SCI_NPORTS]; 111 static struct sci_port sci_ports[SCI_NPORTS];
112 static struct uart_driver sci_uart_driver; 112 static struct uart_driver sci_uart_driver;
113 113
114 static inline struct sci_port * 114 static inline struct sci_port *
115 to_sci_port(struct uart_port *uart) 115 to_sci_port(struct uart_port *uart)
116 { 116 {
117 return container_of(uart, struct sci_port, port); 117 return container_of(uart, struct sci_port, port);
118 } 118 }
119 119
120 struct plat_sci_reg { 120 struct plat_sci_reg {
121 u8 offset, size; 121 u8 offset, size;
122 }; 122 };
123 123
124 /* Helper for invalidating specific entries of an inherited map. */ 124 /* Helper for invalidating specific entries of an inherited map. */
125 #define sci_reg_invalid { .offset = 0, .size = 0 } 125 #define sci_reg_invalid { .offset = 0, .size = 0 }
126 126
127 static struct plat_sci_reg sci_regmap[SCIx_NR_REGTYPES][SCIx_NR_REGS] = { 127 static struct plat_sci_reg sci_regmap[SCIx_NR_REGTYPES][SCIx_NR_REGS] = {
128 [SCIx_PROBE_REGTYPE] = { 128 [SCIx_PROBE_REGTYPE] = {
129 [0 ... SCIx_NR_REGS - 1] = sci_reg_invalid, 129 [0 ... SCIx_NR_REGS - 1] = sci_reg_invalid,
130 }, 130 },
131 131
132 /* 132 /*
133 * Common SCI definitions, dependent on the port's regshift 133 * Common SCI definitions, dependent on the port's regshift
134 * value. 134 * value.
135 */ 135 */
136 [SCIx_SCI_REGTYPE] = { 136 [SCIx_SCI_REGTYPE] = {
137 [SCSMR] = { 0x00, 8 }, 137 [SCSMR] = { 0x00, 8 },
138 [SCBRR] = { 0x01, 8 }, 138 [SCBRR] = { 0x01, 8 },
139 [SCSCR] = { 0x02, 8 }, 139 [SCSCR] = { 0x02, 8 },
140 [SCxTDR] = { 0x03, 8 }, 140 [SCxTDR] = { 0x03, 8 },
141 [SCxSR] = { 0x04, 8 }, 141 [SCxSR] = { 0x04, 8 },
142 [SCxRDR] = { 0x05, 8 }, 142 [SCxRDR] = { 0x05, 8 },
143 [SCFCR] = sci_reg_invalid, 143 [SCFCR] = sci_reg_invalid,
144 [SCFDR] = sci_reg_invalid, 144 [SCFDR] = sci_reg_invalid,
145 [SCTFDR] = sci_reg_invalid, 145 [SCTFDR] = sci_reg_invalid,
146 [SCRFDR] = sci_reg_invalid, 146 [SCRFDR] = sci_reg_invalid,
147 [SCSPTR] = sci_reg_invalid, 147 [SCSPTR] = sci_reg_invalid,
148 [SCLSR] = sci_reg_invalid, 148 [SCLSR] = sci_reg_invalid,
149 [HSSRR] = sci_reg_invalid, 149 [HSSRR] = sci_reg_invalid,
150 }, 150 },
151 151
152 /* 152 /*
153 * Common definitions for legacy IrDA ports, dependent on 153 * Common definitions for legacy IrDA ports, dependent on
154 * regshift value. 154 * regshift value.
155 */ 155 */
156 [SCIx_IRDA_REGTYPE] = { 156 [SCIx_IRDA_REGTYPE] = {
157 [SCSMR] = { 0x00, 8 }, 157 [SCSMR] = { 0x00, 8 },
158 [SCBRR] = { 0x01, 8 }, 158 [SCBRR] = { 0x01, 8 },
159 [SCSCR] = { 0x02, 8 }, 159 [SCSCR] = { 0x02, 8 },
160 [SCxTDR] = { 0x03, 8 }, 160 [SCxTDR] = { 0x03, 8 },
161 [SCxSR] = { 0x04, 8 }, 161 [SCxSR] = { 0x04, 8 },
162 [SCxRDR] = { 0x05, 8 }, 162 [SCxRDR] = { 0x05, 8 },
163 [SCFCR] = { 0x06, 8 }, 163 [SCFCR] = { 0x06, 8 },
164 [SCFDR] = { 0x07, 16 }, 164 [SCFDR] = { 0x07, 16 },
165 [SCTFDR] = sci_reg_invalid, 165 [SCTFDR] = sci_reg_invalid,
166 [SCRFDR] = sci_reg_invalid, 166 [SCRFDR] = sci_reg_invalid,
167 [SCSPTR] = sci_reg_invalid, 167 [SCSPTR] = sci_reg_invalid,
168 [SCLSR] = sci_reg_invalid, 168 [SCLSR] = sci_reg_invalid,
169 [HSSRR] = sci_reg_invalid, 169 [HSSRR] = sci_reg_invalid,
170 }, 170 },
171 171
172 /* 172 /*
173 * Common SCIFA definitions. 173 * Common SCIFA definitions.
174 */ 174 */
175 [SCIx_SCIFA_REGTYPE] = { 175 [SCIx_SCIFA_REGTYPE] = {
176 [SCSMR] = { 0x00, 16 }, 176 [SCSMR] = { 0x00, 16 },
177 [SCBRR] = { 0x04, 8 }, 177 [SCBRR] = { 0x04, 8 },
178 [SCSCR] = { 0x08, 16 }, 178 [SCSCR] = { 0x08, 16 },
179 [SCxTDR] = { 0x20, 8 }, 179 [SCxTDR] = { 0x20, 8 },
180 [SCxSR] = { 0x14, 16 }, 180 [SCxSR] = { 0x14, 16 },
181 [SCxRDR] = { 0x24, 8 }, 181 [SCxRDR] = { 0x24, 8 },
182 [SCFCR] = { 0x18, 16 }, 182 [SCFCR] = { 0x18, 16 },
183 [SCFDR] = { 0x1c, 16 }, 183 [SCFDR] = { 0x1c, 16 },
184 [SCTFDR] = sci_reg_invalid, 184 [SCTFDR] = sci_reg_invalid,
185 [SCRFDR] = sci_reg_invalid, 185 [SCRFDR] = sci_reg_invalid,
186 [SCSPTR] = sci_reg_invalid, 186 [SCSPTR] = sci_reg_invalid,
187 [SCLSR] = sci_reg_invalid, 187 [SCLSR] = sci_reg_invalid,
188 [HSSRR] = sci_reg_invalid, 188 [HSSRR] = sci_reg_invalid,
189 }, 189 },
190 190
191 /* 191 /*
192 * Common SCIFB definitions. 192 * Common SCIFB definitions.
193 */ 193 */
194 [SCIx_SCIFB_REGTYPE] = { 194 [SCIx_SCIFB_REGTYPE] = {
195 [SCSMR] = { 0x00, 16 }, 195 [SCSMR] = { 0x00, 16 },
196 [SCBRR] = { 0x04, 8 }, 196 [SCBRR] = { 0x04, 8 },
197 [SCSCR] = { 0x08, 16 }, 197 [SCSCR] = { 0x08, 16 },
198 [SCxTDR] = { 0x40, 8 }, 198 [SCxTDR] = { 0x40, 8 },
199 [SCxSR] = { 0x14, 16 }, 199 [SCxSR] = { 0x14, 16 },
200 [SCxRDR] = { 0x60, 8 }, 200 [SCxRDR] = { 0x60, 8 },
201 [SCFCR] = { 0x18, 16 }, 201 [SCFCR] = { 0x18, 16 },
202 [SCFDR] = sci_reg_invalid, 202 [SCFDR] = sci_reg_invalid,
203 [SCTFDR] = { 0x38, 16 }, 203 [SCTFDR] = { 0x38, 16 },
204 [SCRFDR] = { 0x3c, 16 }, 204 [SCRFDR] = { 0x3c, 16 },
205 [SCSPTR] = sci_reg_invalid, 205 [SCSPTR] = sci_reg_invalid,
206 [SCLSR] = sci_reg_invalid, 206 [SCLSR] = sci_reg_invalid,
207 [HSSRR] = sci_reg_invalid, 207 [HSSRR] = sci_reg_invalid,
208 }, 208 },
209 209
210 /* 210 /*
211 * Common SH-2(A) SCIF definitions for ports with FIFO data 211 * Common SH-2(A) SCIF definitions for ports with FIFO data
212 * count registers. 212 * count registers.
213 */ 213 */
214 [SCIx_SH2_SCIF_FIFODATA_REGTYPE] = { 214 [SCIx_SH2_SCIF_FIFODATA_REGTYPE] = {
215 [SCSMR] = { 0x00, 16 }, 215 [SCSMR] = { 0x00, 16 },
216 [SCBRR] = { 0x04, 8 }, 216 [SCBRR] = { 0x04, 8 },
217 [SCSCR] = { 0x08, 16 }, 217 [SCSCR] = { 0x08, 16 },
218 [SCxTDR] = { 0x0c, 8 }, 218 [SCxTDR] = { 0x0c, 8 },
219 [SCxSR] = { 0x10, 16 }, 219 [SCxSR] = { 0x10, 16 },
220 [SCxRDR] = { 0x14, 8 }, 220 [SCxRDR] = { 0x14, 8 },
221 [SCFCR] = { 0x18, 16 }, 221 [SCFCR] = { 0x18, 16 },
222 [SCFDR] = { 0x1c, 16 }, 222 [SCFDR] = { 0x1c, 16 },
223 [SCTFDR] = sci_reg_invalid, 223 [SCTFDR] = sci_reg_invalid,
224 [SCRFDR] = sci_reg_invalid, 224 [SCRFDR] = sci_reg_invalid,
225 [SCSPTR] = { 0x20, 16 }, 225 [SCSPTR] = { 0x20, 16 },
226 [SCLSR] = { 0x24, 16 }, 226 [SCLSR] = { 0x24, 16 },
227 [HSSRR] = sci_reg_invalid, 227 [HSSRR] = sci_reg_invalid,
228 }, 228 },
229 229
230 /* 230 /*
231 * Common SH-3 SCIF definitions. 231 * Common SH-3 SCIF definitions.
232 */ 232 */
233 [SCIx_SH3_SCIF_REGTYPE] = { 233 [SCIx_SH3_SCIF_REGTYPE] = {
234 [SCSMR] = { 0x00, 8 }, 234 [SCSMR] = { 0x00, 8 },
235 [SCBRR] = { 0x02, 8 }, 235 [SCBRR] = { 0x02, 8 },
236 [SCSCR] = { 0x04, 8 }, 236 [SCSCR] = { 0x04, 8 },
237 [SCxTDR] = { 0x06, 8 }, 237 [SCxTDR] = { 0x06, 8 },
238 [SCxSR] = { 0x08, 16 }, 238 [SCxSR] = { 0x08, 16 },
239 [SCxRDR] = { 0x0a, 8 }, 239 [SCxRDR] = { 0x0a, 8 },
240 [SCFCR] = { 0x0c, 8 }, 240 [SCFCR] = { 0x0c, 8 },
241 [SCFDR] = { 0x0e, 16 }, 241 [SCFDR] = { 0x0e, 16 },
242 [SCTFDR] = sci_reg_invalid, 242 [SCTFDR] = sci_reg_invalid,
243 [SCRFDR] = sci_reg_invalid, 243 [SCRFDR] = sci_reg_invalid,
244 [SCSPTR] = sci_reg_invalid, 244 [SCSPTR] = sci_reg_invalid,
245 [SCLSR] = sci_reg_invalid, 245 [SCLSR] = sci_reg_invalid,
246 [HSSRR] = sci_reg_invalid, 246 [HSSRR] = sci_reg_invalid,
247 }, 247 },
248 248
249 /* 249 /*
250 * Common SH-4(A) SCIF(B) definitions. 250 * Common SH-4(A) SCIF(B) definitions.
251 */ 251 */
252 [SCIx_SH4_SCIF_REGTYPE] = { 252 [SCIx_SH4_SCIF_REGTYPE] = {
253 [SCSMR] = { 0x00, 16 }, 253 [SCSMR] = { 0x00, 16 },
254 [SCBRR] = { 0x04, 8 }, 254 [SCBRR] = { 0x04, 8 },
255 [SCSCR] = { 0x08, 16 }, 255 [SCSCR] = { 0x08, 16 },
256 [SCxTDR] = { 0x0c, 8 }, 256 [SCxTDR] = { 0x0c, 8 },
257 [SCxSR] = { 0x10, 16 }, 257 [SCxSR] = { 0x10, 16 },
258 [SCxRDR] = { 0x14, 8 }, 258 [SCxRDR] = { 0x14, 8 },
259 [SCFCR] = { 0x18, 16 }, 259 [SCFCR] = { 0x18, 16 },
260 [SCFDR] = { 0x1c, 16 }, 260 [SCFDR] = { 0x1c, 16 },
261 [SCTFDR] = sci_reg_invalid, 261 [SCTFDR] = sci_reg_invalid,
262 [SCRFDR] = sci_reg_invalid, 262 [SCRFDR] = sci_reg_invalid,
263 [SCSPTR] = { 0x20, 16 }, 263 [SCSPTR] = { 0x20, 16 },
264 [SCLSR] = { 0x24, 16 }, 264 [SCLSR] = { 0x24, 16 },
265 [HSSRR] = sci_reg_invalid, 265 [HSSRR] = sci_reg_invalid,
266 }, 266 },
267 267
268 /* 268 /*
269 * Common HSCIF definitions. 269 * Common HSCIF definitions.
270 */ 270 */
271 [SCIx_HSCIF_REGTYPE] = { 271 [SCIx_HSCIF_REGTYPE] = {
272 [SCSMR] = { 0x00, 16 }, 272 [SCSMR] = { 0x00, 16 },
273 [SCBRR] = { 0x04, 8 }, 273 [SCBRR] = { 0x04, 8 },
274 [SCSCR] = { 0x08, 16 }, 274 [SCSCR] = { 0x08, 16 },
275 [SCxTDR] = { 0x0c, 8 }, 275 [SCxTDR] = { 0x0c, 8 },
276 [SCxSR] = { 0x10, 16 }, 276 [SCxSR] = { 0x10, 16 },
277 [SCxRDR] = { 0x14, 8 }, 277 [SCxRDR] = { 0x14, 8 },
278 [SCFCR] = { 0x18, 16 }, 278 [SCFCR] = { 0x18, 16 },
279 [SCFDR] = { 0x1c, 16 }, 279 [SCFDR] = { 0x1c, 16 },
280 [SCTFDR] = sci_reg_invalid, 280 [SCTFDR] = sci_reg_invalid,
281 [SCRFDR] = sci_reg_invalid, 281 [SCRFDR] = sci_reg_invalid,
282 [SCSPTR] = { 0x20, 16 }, 282 [SCSPTR] = { 0x20, 16 },
283 [SCLSR] = { 0x24, 16 }, 283 [SCLSR] = { 0x24, 16 },
284 [HSSRR] = { 0x40, 16 }, 284 [HSSRR] = { 0x40, 16 },
285 }, 285 },
286 286
287 /* 287 /*
288 * Common SH-4(A) SCIF(B) definitions for ports without an SCSPTR 288 * Common SH-4(A) SCIF(B) definitions for ports without an SCSPTR
289 * register. 289 * register.
290 */ 290 */
291 [SCIx_SH4_SCIF_NO_SCSPTR_REGTYPE] = { 291 [SCIx_SH4_SCIF_NO_SCSPTR_REGTYPE] = {
292 [SCSMR] = { 0x00, 16 }, 292 [SCSMR] = { 0x00, 16 },
293 [SCBRR] = { 0x04, 8 }, 293 [SCBRR] = { 0x04, 8 },
294 [SCSCR] = { 0x08, 16 }, 294 [SCSCR] = { 0x08, 16 },
295 [SCxTDR] = { 0x0c, 8 }, 295 [SCxTDR] = { 0x0c, 8 },
296 [SCxSR] = { 0x10, 16 }, 296 [SCxSR] = { 0x10, 16 },
297 [SCxRDR] = { 0x14, 8 }, 297 [SCxRDR] = { 0x14, 8 },
298 [SCFCR] = { 0x18, 16 }, 298 [SCFCR] = { 0x18, 16 },
299 [SCFDR] = { 0x1c, 16 }, 299 [SCFDR] = { 0x1c, 16 },
300 [SCTFDR] = sci_reg_invalid, 300 [SCTFDR] = sci_reg_invalid,
301 [SCRFDR] = sci_reg_invalid, 301 [SCRFDR] = sci_reg_invalid,
302 [SCSPTR] = sci_reg_invalid, 302 [SCSPTR] = sci_reg_invalid,
303 [SCLSR] = { 0x24, 16 }, 303 [SCLSR] = { 0x24, 16 },
304 [HSSRR] = sci_reg_invalid, 304 [HSSRR] = sci_reg_invalid,
305 }, 305 },
306 306
307 /* 307 /*
308 * Common SH-4(A) SCIF(B) definitions for ports with FIFO data 308 * Common SH-4(A) SCIF(B) definitions for ports with FIFO data
309 * count registers. 309 * count registers.
310 */ 310 */
311 [SCIx_SH4_SCIF_FIFODATA_REGTYPE] = { 311 [SCIx_SH4_SCIF_FIFODATA_REGTYPE] = {
312 [SCSMR] = { 0x00, 16 }, 312 [SCSMR] = { 0x00, 16 },
313 [SCBRR] = { 0x04, 8 }, 313 [SCBRR] = { 0x04, 8 },
314 [SCSCR] = { 0x08, 16 }, 314 [SCSCR] = { 0x08, 16 },
315 [SCxTDR] = { 0x0c, 8 }, 315 [SCxTDR] = { 0x0c, 8 },
316 [SCxSR] = { 0x10, 16 }, 316 [SCxSR] = { 0x10, 16 },
317 [SCxRDR] = { 0x14, 8 }, 317 [SCxRDR] = { 0x14, 8 },
318 [SCFCR] = { 0x18, 16 }, 318 [SCFCR] = { 0x18, 16 },
319 [SCFDR] = { 0x1c, 16 }, 319 [SCFDR] = { 0x1c, 16 },
320 [SCTFDR] = { 0x1c, 16 }, /* aliased to SCFDR */ 320 [SCTFDR] = { 0x1c, 16 }, /* aliased to SCFDR */
321 [SCRFDR] = { 0x20, 16 }, 321 [SCRFDR] = { 0x20, 16 },
322 [SCSPTR] = { 0x24, 16 }, 322 [SCSPTR] = { 0x24, 16 },
323 [SCLSR] = { 0x28, 16 }, 323 [SCLSR] = { 0x28, 16 },
324 [HSSRR] = sci_reg_invalid, 324 [HSSRR] = sci_reg_invalid,
325 }, 325 },
326 326
327 /* 327 /*
328 * SH7705-style SCIF(B) ports, lacking both SCSPTR and SCLSR 328 * SH7705-style SCIF(B) ports, lacking both SCSPTR and SCLSR
329 * registers. 329 * registers.
330 */ 330 */
331 [SCIx_SH7705_SCIF_REGTYPE] = { 331 [SCIx_SH7705_SCIF_REGTYPE] = {
332 [SCSMR] = { 0x00, 16 }, 332 [SCSMR] = { 0x00, 16 },
333 [SCBRR] = { 0x04, 8 }, 333 [SCBRR] = { 0x04, 8 },
334 [SCSCR] = { 0x08, 16 }, 334 [SCSCR] = { 0x08, 16 },
335 [SCxTDR] = { 0x20, 8 }, 335 [SCxTDR] = { 0x20, 8 },
336 [SCxSR] = { 0x14, 16 }, 336 [SCxSR] = { 0x14, 16 },
337 [SCxRDR] = { 0x24, 8 }, 337 [SCxRDR] = { 0x24, 8 },
338 [SCFCR] = { 0x18, 16 }, 338 [SCFCR] = { 0x18, 16 },
339 [SCFDR] = { 0x1c, 16 }, 339 [SCFDR] = { 0x1c, 16 },
340 [SCTFDR] = sci_reg_invalid, 340 [SCTFDR] = sci_reg_invalid,
341 [SCRFDR] = sci_reg_invalid, 341 [SCRFDR] = sci_reg_invalid,
342 [SCSPTR] = sci_reg_invalid, 342 [SCSPTR] = sci_reg_invalid,
343 [SCLSR] = sci_reg_invalid, 343 [SCLSR] = sci_reg_invalid,
344 [HSSRR] = sci_reg_invalid, 344 [HSSRR] = sci_reg_invalid,
345 }, 345 },
346 }; 346 };
347 347
348 #define sci_getreg(up, offset) (sci_regmap[to_sci_port(up)->cfg->regtype] + offset) 348 #define sci_getreg(up, offset) (sci_regmap[to_sci_port(up)->cfg->regtype] + offset)
349 349
350 /* 350 /*
351 * The "offset" here is rather misleading, in that it refers to an enum 351 * The "offset" here is rather misleading, in that it refers to an enum
352 * value relative to the port mapping rather than the fixed offset 352 * value relative to the port mapping rather than the fixed offset
353 * itself, which needs to be manually retrieved from the platform's 353 * itself, which needs to be manually retrieved from the platform's
354 * register map for the given port. 354 * register map for the given port.
355 */ 355 */
356 static unsigned int sci_serial_in(struct uart_port *p, int offset) 356 static unsigned int sci_serial_in(struct uart_port *p, int offset)
357 { 357 {
358 struct plat_sci_reg *reg = sci_getreg(p, offset); 358 struct plat_sci_reg *reg = sci_getreg(p, offset);
359 359
360 if (reg->size == 8) 360 if (reg->size == 8)
361 return ioread8(p->membase + (reg->offset << p->regshift)); 361 return ioread8(p->membase + (reg->offset << p->regshift));
362 else if (reg->size == 16) 362 else if (reg->size == 16)
363 return ioread16(p->membase + (reg->offset << p->regshift)); 363 return ioread16(p->membase + (reg->offset << p->regshift));
364 else 364 else
365 WARN(1, "Invalid register access\n"); 365 WARN(1, "Invalid register access\n");
366 366
367 return 0; 367 return 0;
368 } 368 }
369 369
370 static void sci_serial_out(struct uart_port *p, int offset, int value) 370 static void sci_serial_out(struct uart_port *p, int offset, int value)
371 { 371 {
372 struct plat_sci_reg *reg = sci_getreg(p, offset); 372 struct plat_sci_reg *reg = sci_getreg(p, offset);
373 373
374 if (reg->size == 8) 374 if (reg->size == 8)
375 iowrite8(value, p->membase + (reg->offset << p->regshift)); 375 iowrite8(value, p->membase + (reg->offset << p->regshift));
376 else if (reg->size == 16) 376 else if (reg->size == 16)
377 iowrite16(value, p->membase + (reg->offset << p->regshift)); 377 iowrite16(value, p->membase + (reg->offset << p->regshift));
378 else 378 else
379 WARN(1, "Invalid register access\n"); 379 WARN(1, "Invalid register access\n");
380 } 380 }
381 381
382 static int sci_probe_regmap(struct plat_sci_port *cfg) 382 static int sci_probe_regmap(struct plat_sci_port *cfg)
383 { 383 {
384 switch (cfg->type) { 384 switch (cfg->type) {
385 case PORT_SCI: 385 case PORT_SCI:
386 cfg->regtype = SCIx_SCI_REGTYPE; 386 cfg->regtype = SCIx_SCI_REGTYPE;
387 break; 387 break;
388 case PORT_IRDA: 388 case PORT_IRDA:
389 cfg->regtype = SCIx_IRDA_REGTYPE; 389 cfg->regtype = SCIx_IRDA_REGTYPE;
390 break; 390 break;
391 case PORT_SCIFA: 391 case PORT_SCIFA:
392 cfg->regtype = SCIx_SCIFA_REGTYPE; 392 cfg->regtype = SCIx_SCIFA_REGTYPE;
393 break; 393 break;
394 case PORT_SCIFB: 394 case PORT_SCIFB:
395 cfg->regtype = SCIx_SCIFB_REGTYPE; 395 cfg->regtype = SCIx_SCIFB_REGTYPE;
396 break; 396 break;
397 case PORT_SCIF: 397 case PORT_SCIF:
398 /* 398 /*
399 * The SH-4 is a bit of a misnomer here, although that's 399 * The SH-4 is a bit of a misnomer here, although that's
400 * where this particular port layout originated. This 400 * where this particular port layout originated. This
401 * configuration (or some slight variation thereof) 401 * configuration (or some slight variation thereof)
402 * remains the dominant model for all SCIFs. 402 * remains the dominant model for all SCIFs.
403 */ 403 */
404 cfg->regtype = SCIx_SH4_SCIF_REGTYPE; 404 cfg->regtype = SCIx_SH4_SCIF_REGTYPE;
405 break; 405 break;
406 case PORT_HSCIF: 406 case PORT_HSCIF:
407 cfg->regtype = SCIx_HSCIF_REGTYPE; 407 cfg->regtype = SCIx_HSCIF_REGTYPE;
408 break; 408 break;
409 default: 409 default:
410 printk(KERN_ERR "Can't probe register map for given port\n"); 410 printk(KERN_ERR "Can't probe register map for given port\n");
411 return -EINVAL; 411 return -EINVAL;
412 } 412 }
413 413
414 return 0; 414 return 0;
415 } 415 }
416 416
417 static void sci_port_enable(struct sci_port *sci_port) 417 static void sci_port_enable(struct sci_port *sci_port)
418 { 418 {
419 if (!sci_port->port.dev) 419 if (!sci_port->port.dev)
420 return; 420 return;
421 421
422 pm_runtime_get_sync(sci_port->port.dev); 422 pm_runtime_get_sync(sci_port->port.dev);
423 423
424 clk_enable(sci_port->iclk); 424 clk_enable(sci_port->iclk);
425 sci_port->port.uartclk = clk_get_rate(sci_port->iclk); 425 sci_port->port.uartclk = clk_get_rate(sci_port->iclk);
426 clk_enable(sci_port->fclk); 426 clk_enable(sci_port->fclk);
427 } 427 }
428 428
429 static void sci_port_disable(struct sci_port *sci_port) 429 static void sci_port_disable(struct sci_port *sci_port)
430 { 430 {
431 if (!sci_port->port.dev) 431 if (!sci_port->port.dev)
432 return; 432 return;
433 433
434 clk_disable(sci_port->fclk); 434 clk_disable(sci_port->fclk);
435 clk_disable(sci_port->iclk); 435 clk_disable(sci_port->iclk);
436 436
437 pm_runtime_put_sync(sci_port->port.dev); 437 pm_runtime_put_sync(sci_port->port.dev);
438 } 438 }
439 439
440 #if defined(CONFIG_CONSOLE_POLL) || defined(CONFIG_SERIAL_SH_SCI_CONSOLE) 440 #if defined(CONFIG_CONSOLE_POLL) || defined(CONFIG_SERIAL_SH_SCI_CONSOLE)
441 441
442 #ifdef CONFIG_CONSOLE_POLL 442 #ifdef CONFIG_CONSOLE_POLL
443 static int sci_poll_get_char(struct uart_port *port) 443 static int sci_poll_get_char(struct uart_port *port)
444 { 444 {
445 unsigned short status; 445 unsigned short status;
446 int c; 446 int c;
447 447
448 do { 448 do {
449 status = serial_port_in(port, SCxSR); 449 status = serial_port_in(port, SCxSR);
450 if (status & SCxSR_ERRORS(port)) { 450 if (status & SCxSR_ERRORS(port)) {
451 serial_port_out(port, SCxSR, SCxSR_ERROR_CLEAR(port)); 451 serial_port_out(port, SCxSR, SCxSR_ERROR_CLEAR(port));
452 continue; 452 continue;
453 } 453 }
454 break; 454 break;
455 } while (1); 455 } while (1);
456 456
457 if (!(status & SCxSR_RDxF(port))) 457 if (!(status & SCxSR_RDxF(port)))
458 return NO_POLL_CHAR; 458 return NO_POLL_CHAR;
459 459
460 c = serial_port_in(port, SCxRDR); 460 c = serial_port_in(port, SCxRDR);
461 461
462 /* Dummy read */ 462 /* Dummy read */
463 serial_port_in(port, SCxSR); 463 serial_port_in(port, SCxSR);
464 serial_port_out(port, SCxSR, SCxSR_RDxF_CLEAR(port)); 464 serial_port_out(port, SCxSR, SCxSR_RDxF_CLEAR(port));
465 465
466 return c; 466 return c;
467 } 467 }
468 #endif 468 #endif
469 469
470 static void sci_poll_put_char(struct uart_port *port, unsigned char c) 470 static void sci_poll_put_char(struct uart_port *port, unsigned char c)
471 { 471 {
472 unsigned short status; 472 unsigned short status;
473 473
474 do { 474 do {
475 status = serial_port_in(port, SCxSR); 475 status = serial_port_in(port, SCxSR);
476 } while (!(status & SCxSR_TDxE(port))); 476 } while (!(status & SCxSR_TDxE(port)));
477 477
478 serial_port_out(port, SCxTDR, c); 478 serial_port_out(port, SCxTDR, c);
479 serial_port_out(port, SCxSR, SCxSR_TDxE_CLEAR(port) & ~SCxSR_TEND(port)); 479 serial_port_out(port, SCxSR, SCxSR_TDxE_CLEAR(port) & ~SCxSR_TEND(port));
480 } 480 }
481 #endif /* CONFIG_CONSOLE_POLL || CONFIG_SERIAL_SH_SCI_CONSOLE */ 481 #endif /* CONFIG_CONSOLE_POLL || CONFIG_SERIAL_SH_SCI_CONSOLE */
482 482
483 static void sci_init_pins(struct uart_port *port, unsigned int cflag) 483 static void sci_init_pins(struct uart_port *port, unsigned int cflag)
484 { 484 {
485 struct sci_port *s = to_sci_port(port); 485 struct sci_port *s = to_sci_port(port);
486 struct plat_sci_reg *reg = sci_regmap[s->cfg->regtype] + SCSPTR; 486 struct plat_sci_reg *reg = sci_regmap[s->cfg->regtype] + SCSPTR;
487 487
488 /* 488 /*
489 * Use port-specific handler if provided. 489 * Use port-specific handler if provided.
490 */ 490 */
491 if (s->cfg->ops && s->cfg->ops->init_pins) { 491 if (s->cfg->ops && s->cfg->ops->init_pins) {
492 s->cfg->ops->init_pins(port, cflag); 492 s->cfg->ops->init_pins(port, cflag);
493 return; 493 return;
494 } 494 }
495 495
496 /* 496 /*
497 * For the generic path SCSPTR is necessary. Bail out if that's 497 * For the generic path SCSPTR is necessary. Bail out if that's
498 * unavailable, too. 498 * unavailable, too.
499 */ 499 */
500 if (!reg->size) 500 if (!reg->size)
501 return; 501 return;
502 502
503 if ((s->cfg->capabilities & SCIx_HAVE_RTSCTS) && 503 if ((s->cfg->capabilities & SCIx_HAVE_RTSCTS) &&
 504 !(cflag & CRTSCTS)) { 504 !(cflag & CRTSCTS)) {
505 unsigned short status; 505 unsigned short status;
506 506
507 status = serial_port_in(port, SCSPTR); 507 status = serial_port_in(port, SCSPTR);
508 status &= ~SCSPTR_CTSIO; 508 status &= ~SCSPTR_CTSIO;
509 status |= SCSPTR_RTSIO; 509 status |= SCSPTR_RTSIO;
510 serial_port_out(port, SCSPTR, status); /* Set RTS = 1 */ 510 serial_port_out(port, SCSPTR, status); /* Set RTS = 1 */
511 } 511 }
512 } 512 }
513 513
514 static int sci_txfill(struct uart_port *port) 514 static int sci_txfill(struct uart_port *port)
515 { 515 {
516 struct plat_sci_reg *reg; 516 struct plat_sci_reg *reg;
517 517
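	/*
	 * Prefer a dedicated TX FIFO data count register, fall back to
	 * the upper byte of SCFDR, and finally report 0 or 1 from the
	 * TDRE bit on FIFO-less SCI ports.
	 */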
518 reg = sci_getreg(port, SCTFDR); 518 reg = sci_getreg(port, SCTFDR);
519 if (reg->size) 519 if (reg->size)
520 return serial_port_in(port, SCTFDR) & ((port->fifosize << 1) - 1); 520 return serial_port_in(port, SCTFDR) & ((port->fifosize << 1) - 1);
521 521
522 reg = sci_getreg(port, SCFDR); 522 reg = sci_getreg(port, SCFDR);
523 if (reg->size) 523 if (reg->size)
524 return serial_port_in(port, SCFDR) >> 8; 524 return serial_port_in(port, SCFDR) >> 8;
525 525
526 return !(serial_port_in(port, SCxSR) & SCI_TDRE); 526 return !(serial_port_in(port, SCxSR) & SCI_TDRE);
527 } 527 }
528 528
529 static int sci_txroom(struct uart_port *port) 529 static int sci_txroom(struct uart_port *port)
530 { 530 {
531 return port->fifosize - sci_txfill(port); 531 return port->fifosize - sci_txfill(port);
532 } 532 }
533 533
534 static int sci_rxfill(struct uart_port *port) 534 static int sci_rxfill(struct uart_port *port)
535 { 535 {
536 struct plat_sci_reg *reg; 536 struct plat_sci_reg *reg;
537 537
538 reg = sci_getreg(port, SCRFDR); 538 reg = sci_getreg(port, SCRFDR);
539 if (reg->size) 539 if (reg->size)
540 return serial_port_in(port, SCRFDR) & ((port->fifosize << 1) - 1); 540 return serial_port_in(port, SCRFDR) & ((port->fifosize << 1) - 1);
541 541
542 reg = sci_getreg(port, SCFDR); 542 reg = sci_getreg(port, SCFDR);
543 if (reg->size) 543 if (reg->size)
544 return serial_port_in(port, SCFDR) & ((port->fifosize << 1) - 1); 544 return serial_port_in(port, SCFDR) & ((port->fifosize << 1) - 1);
545 545
546 return (serial_port_in(port, SCxSR) & SCxSR_RDxF(port)) != 0; 546 return (serial_port_in(port, SCxSR) & SCxSR_RDxF(port)) != 0;
547 } 547 }
548 548
549 /* 549 /*
550 * SCI helper for checking the state of the muxed port/RXD pins. 550 * SCI helper for checking the state of the muxed port/RXD pins.
551 */ 551 */
552 static inline int sci_rxd_in(struct uart_port *port) 552 static inline int sci_rxd_in(struct uart_port *port)
553 { 553 {
554 struct sci_port *s = to_sci_port(port); 554 struct sci_port *s = to_sci_port(port);
555 555
556 if (s->cfg->port_reg <= 0) 556 if (s->cfg->port_reg <= 0)
557 return 1; 557 return 1;
558 558
559 /* Cast for ARM damage */ 559 /* Cast for ARM damage */
560 return !!__raw_readb((void __iomem *)s->cfg->port_reg); 560 return !!__raw_readb((void __iomem *)s->cfg->port_reg);
561 } 561 }
562 562
563 /* ********************************************************************** * 563 /* ********************************************************************** *
564 * the interrupt related routines * 564 * the interrupt related routines *
565 * ********************************************************************** */ 565 * ********************************************************************** */
566 566
567 static void sci_transmit_chars(struct uart_port *port) 567 static void sci_transmit_chars(struct uart_port *port)
568 { 568 {
569 struct circ_buf *xmit = &port->state->xmit; 569 struct circ_buf *xmit = &port->state->xmit;
570 unsigned int stopped = uart_tx_stopped(port); 570 unsigned int stopped = uart_tx_stopped(port);
571 unsigned short status; 571 unsigned short status;
572 unsigned short ctrl; 572 unsigned short ctrl;
573 int count; 573 int count;
574 574
575 status = serial_port_in(port, SCxSR); 575 status = serial_port_in(port, SCxSR);
576 if (!(status & SCxSR_TDxE(port))) { 576 if (!(status & SCxSR_TDxE(port))) {
577 ctrl = serial_port_in(port, SCSCR); 577 ctrl = serial_port_in(port, SCSCR);
578 if (uart_circ_empty(xmit)) 578 if (uart_circ_empty(xmit))
579 ctrl &= ~SCSCR_TIE; 579 ctrl &= ~SCSCR_TIE;
580 else 580 else
581 ctrl |= SCSCR_TIE; 581 ctrl |= SCSCR_TIE;
582 serial_port_out(port, SCSCR, ctrl); 582 serial_port_out(port, SCSCR, ctrl);
583 return; 583 return;
584 } 584 }
585 585
586 count = sci_txroom(port); 586 count = sci_txroom(port);
587 587
588 do { 588 do {
589 unsigned char c; 589 unsigned char c;
590 590
591 if (port->x_char) { 591 if (port->x_char) {
592 c = port->x_char; 592 c = port->x_char;
593 port->x_char = 0; 593 port->x_char = 0;
594 } else if (!uart_circ_empty(xmit) && !stopped) { 594 } else if (!uart_circ_empty(xmit) && !stopped) {
595 c = xmit->buf[xmit->tail]; 595 c = xmit->buf[xmit->tail];
596 xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); 596 xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
597 } else { 597 } else {
598 break; 598 break;
599 } 599 }
600 600
601 serial_port_out(port, SCxTDR, c); 601 serial_port_out(port, SCxTDR, c);
602 602
603 port->icount.tx++; 603 port->icount.tx++;
604 } while (--count > 0); 604 } while (--count > 0);
605 605
606 serial_port_out(port, SCxSR, SCxSR_TDxE_CLEAR(port)); 606 serial_port_out(port, SCxSR, SCxSR_TDxE_CLEAR(port));
607 607
608 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) 608 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
609 uart_write_wakeup(port); 609 uart_write_wakeup(port);
610 if (uart_circ_empty(xmit)) { 610 if (uart_circ_empty(xmit)) {
611 sci_stop_tx(port); 611 sci_stop_tx(port);
612 } else { 612 } else {
613 ctrl = serial_port_in(port, SCSCR); 613 ctrl = serial_port_in(port, SCSCR);
614 614
615 if (port->type != PORT_SCI) { 615 if (port->type != PORT_SCI) {
616 serial_port_in(port, SCxSR); /* Dummy read */ 616 serial_port_in(port, SCxSR); /* Dummy read */
617 serial_port_out(port, SCxSR, SCxSR_TDxE_CLEAR(port)); 617 serial_port_out(port, SCxSR, SCxSR_TDxE_CLEAR(port));
618 } 618 }
619 619
620 ctrl |= SCSCR_TIE; 620 ctrl |= SCSCR_TIE;
621 serial_port_out(port, SCSCR, ctrl); 621 serial_port_out(port, SCSCR, ctrl);
622 } 622 }
623 } 623 }
624 624
625 /* On SH3, SCIF may read end-of-break as a space->mark char */ 625 /* On SH3, SCIF may read end-of-break as a space->mark char */
626 #define STEPFN(c) ({int __c = (c); (((__c-1)|(__c)) == -1); }) 626 #define STEPFN(c) ({int __c = (c); (((__c-1)|(__c)) == -1); })
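/*
 * Assuming the signed chars used on SH3 (where this is referenced),
 * STEPFN(c) is true only for "step" shaped bytes -- an all-ones high
 * part over an all-zeros low part (0x00, 0x80, 0xc0, ... 0xfe, 0xff) --
 * which is how a break that ends part-way through a frame reads, since
 * bits arrive LSB first.
 */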
627 627
628 static void sci_receive_chars(struct uart_port *port) 628 static void sci_receive_chars(struct uart_port *port)
629 { 629 {
630 struct sci_port *sci_port = to_sci_port(port); 630 struct sci_port *sci_port = to_sci_port(port);
631 struct tty_port *tport = &port->state->port; 631 struct tty_port *tport = &port->state->port;
632 int i, count, copied = 0; 632 int i, count, copied = 0;
633 unsigned short status; 633 unsigned short status;
634 unsigned char flag; 634 unsigned char flag;
635 635
636 status = serial_port_in(port, SCxSR); 636 status = serial_port_in(port, SCxSR);
637 if (!(status & SCxSR_RDxF(port))) 637 if (!(status & SCxSR_RDxF(port)))
638 return; 638 return;
639 639
640 while (1) { 640 while (1) {
641 /* Don't copy more bytes than there is room for in the buffer */ 641 /* Don't copy more bytes than there is room for in the buffer */
642 count = tty_buffer_request_room(tport, sci_rxfill(port)); 642 count = tty_buffer_request_room(tport, sci_rxfill(port));
643 643
644 /* If for any reason we can't copy more data, we're done! */ 644 /* If for any reason we can't copy more data, we're done! */
645 if (count == 0) 645 if (count == 0)
646 break; 646 break;
647 647
648 if (port->type == PORT_SCI) { 648 if (port->type == PORT_SCI) {
649 char c = serial_port_in(port, SCxRDR); 649 char c = serial_port_in(port, SCxRDR);
650 if (uart_handle_sysrq_char(port, c) || 650 if (uart_handle_sysrq_char(port, c) ||
651 sci_port->break_flag) 651 sci_port->break_flag)
652 count = 0; 652 count = 0;
653 else 653 else
654 tty_insert_flip_char(tport, c, TTY_NORMAL); 654 tty_insert_flip_char(tport, c, TTY_NORMAL);
655 } else { 655 } else {
656 for (i = 0; i < count; i++) { 656 for (i = 0; i < count; i++) {
657 char c = serial_port_in(port, SCxRDR); 657 char c = serial_port_in(port, SCxRDR);
658 658
659 status = serial_port_in(port, SCxSR); 659 status = serial_port_in(port, SCxSR);
660 #if defined(CONFIG_CPU_SH3) 660 #if defined(CONFIG_CPU_SH3)
661 /* Skip "chars" during break */ 661 /* Skip "chars" during break */
662 if (sci_port->break_flag) { 662 if (sci_port->break_flag) {
663 if ((c == 0) && 663 if ((c == 0) &&
664 (status & SCxSR_FER(port))) { 664 (status & SCxSR_FER(port))) {
665 count--; i--; 665 count--; i--;
666 continue; 666 continue;
667 } 667 }
668 668
669 /* Nonzero => end-of-break */ 669 /* Nonzero => end-of-break */
670 dev_dbg(port->dev, "debounce<%02x>\n", c); 670 dev_dbg(port->dev, "debounce<%02x>\n", c);
671 sci_port->break_flag = 0; 671 sci_port->break_flag = 0;
672 672
673 if (STEPFN(c)) { 673 if (STEPFN(c)) {
674 count--; i--; 674 count--; i--;
675 continue; 675 continue;
676 } 676 }
677 } 677 }
678 #endif /* CONFIG_CPU_SH3 */ 678 #endif /* CONFIG_CPU_SH3 */
679 if (uart_handle_sysrq_char(port, c)) { 679 if (uart_handle_sysrq_char(port, c)) {
680 count--; i--; 680 count--; i--;
681 continue; 681 continue;
682 } 682 }
683 683
684 /* Store data and status */ 684 /* Store data and status */
685 if (status & SCxSR_FER(port)) { 685 if (status & SCxSR_FER(port)) {
686 flag = TTY_FRAME; 686 flag = TTY_FRAME;
687 port->icount.frame++; 687 port->icount.frame++;
688 dev_notice(port->dev, "frame error\n"); 688 dev_notice(port->dev, "frame error\n");
689 } else if (status & SCxSR_PER(port)) { 689 } else if (status & SCxSR_PER(port)) {
690 flag = TTY_PARITY; 690 flag = TTY_PARITY;
691 port->icount.parity++; 691 port->icount.parity++;
692 dev_notice(port->dev, "parity error\n"); 692 dev_notice(port->dev, "parity error\n");
693 } else 693 } else
694 flag = TTY_NORMAL; 694 flag = TTY_NORMAL;
695 695
696 tty_insert_flip_char(tport, c, flag); 696 tty_insert_flip_char(tport, c, flag);
697 } 697 }
698 } 698 }
699 699
700 serial_port_in(port, SCxSR); /* dummy read */ 700 serial_port_in(port, SCxSR); /* dummy read */
701 serial_port_out(port, SCxSR, SCxSR_RDxF_CLEAR(port)); 701 serial_port_out(port, SCxSR, SCxSR_RDxF_CLEAR(port));
702 702
703 copied += count; 703 copied += count;
704 port->icount.rx += count; 704 port->icount.rx += count;
705 } 705 }
706 706
707 if (copied) { 707 if (copied) {
708 /* Tell the rest of the system the news. New characters! */ 708 /* Tell the rest of the system the news. New characters! */
709 tty_flip_buffer_push(tport); 709 tty_flip_buffer_push(tport);
710 } else { 710 } else {
711 serial_port_in(port, SCxSR); /* dummy read */ 711 serial_port_in(port, SCxSR); /* dummy read */
712 serial_port_out(port, SCxSR, SCxSR_RDxF_CLEAR(port)); 712 serial_port_out(port, SCxSR, SCxSR_RDxF_CLEAR(port));
713 } 713 }
714 } 714 }
715 715
716 #define SCI_BREAK_JIFFIES (HZ/20) 716 #define SCI_BREAK_JIFFIES (HZ/20)
717 717
718 /* 718 /*
 719 * The SCI generates interrupts during a break, 719 * The SCI generates interrupts during a break,
 720 * roughly one per millisecond at 9600 baud, 720 * roughly one per millisecond at 9600 baud,
 721 * so don't bother disabling interrupts. 721 * so don't bother disabling interrupts.
 722 * We don't want to report more than one break event, though. 722 * We don't want to report more than one break event, though.
723 * Use a kernel timer to periodically poll the rx line until 723 * Use a kernel timer to periodically poll the rx line until
724 * the break is finished. 724 * the break is finished.
725 */ 725 */
726 static inline void sci_schedule_break_timer(struct sci_port *port) 726 static inline void sci_schedule_break_timer(struct sci_port *port)
727 { 727 {
728 mod_timer(&port->break_timer, jiffies + SCI_BREAK_JIFFIES); 728 mod_timer(&port->break_timer, jiffies + SCI_BREAK_JIFFIES);
729 } 729 }
730 730
731 /* Ensure that two consecutive samples find the break over. */ 731 /* Ensure that two consecutive samples find the break over. */
732 static void sci_break_timer(unsigned long data) 732 static void sci_break_timer(unsigned long data)
733 { 733 {
734 struct sci_port *port = (struct sci_port *)data; 734 struct sci_port *port = (struct sci_port *)data;
735 735
736 sci_port_enable(port); 736 sci_port_enable(port);
737 737
738 if (sci_rxd_in(&port->port) == 0) { 738 if (sci_rxd_in(&port->port) == 0) {
739 port->break_flag = 1; 739 port->break_flag = 1;
740 sci_schedule_break_timer(port); 740 sci_schedule_break_timer(port);
741 } else if (port->break_flag == 1) { 741 } else if (port->break_flag == 1) {
742 /* break is over. */ 742 /* break is over. */
743 port->break_flag = 2; 743 port->break_flag = 2;
744 sci_schedule_break_timer(port); 744 sci_schedule_break_timer(port);
745 } else 745 } else
746 port->break_flag = 0; 746 port->break_flag = 0;
747 747
748 sci_port_disable(port); 748 sci_port_disable(port);
749 } 749 }
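
The comment above describes the scheme these two helpers implement: a break keeps the SCI interrupting anyway, so the driver leaves interrupts alone and instead samples the RX line from a timer until two consecutive samples see the line released. A minimal standalone sketch of that state machine, with illustrative names (break_timer_tick and the sample array are not driver API):

#include <stdio.h>

static int break_flag;  /* 0: idle, 1: break in progress, 2: one high sample seen */

static void break_timer_tick(int rx_high)
{
        if (!rx_high)                   /* line still held low: break ongoing */
                break_flag = 1;
        else if (break_flag == 1)       /* first high sample after the break */
                break_flag = 2;
        else                            /* second consecutive high sample: over */
                break_flag = 0;
}

int main(void)
{
        int samples[] = { 0, 0, 1, 1 }; /* low, low, high, high */
        unsigned int i;

        for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
                break_timer_tick(samples[i]);
                printf("tick %u: break_flag=%d\n", i, break_flag);
        }
        return 0;
}

Requiring two high samples, SCI_BREAK_JIFFIES apart, keeps a single stray high reading from ending the break prematurely, and while break_flag remains set sci_handle_errors() below will not count the same break a second time.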
750 750
751 static int sci_handle_errors(struct uart_port *port) 751 static int sci_handle_errors(struct uart_port *port)
752 { 752 {
753 int copied = 0; 753 int copied = 0;
754 unsigned short status = serial_port_in(port, SCxSR); 754 unsigned short status = serial_port_in(port, SCxSR);
755 struct tty_port *tport = &port->state->port; 755 struct tty_port *tport = &port->state->port;
756 struct sci_port *s = to_sci_port(port); 756 struct sci_port *s = to_sci_port(port);
757 757
758 /* 758 /*
759 * Handle overruns, if supported. 759 * Handle overruns, if supported.
760 */ 760 */
761 if (s->cfg->overrun_bit != SCIx_NOT_SUPPORTED) { 761 if (s->cfg->overrun_bit != SCIx_NOT_SUPPORTED) {
762 if (status & (1 << s->cfg->overrun_bit)) { 762 if (status & (1 << s->cfg->overrun_bit)) {
763 port->icount.overrun++; 763 port->icount.overrun++;
764 764
765 /* overrun error */ 765 /* overrun error */
766 if (tty_insert_flip_char(tport, 0, TTY_OVERRUN)) 766 if (tty_insert_flip_char(tport, 0, TTY_OVERRUN))
767 copied++; 767 copied++;
768 768
769 dev_notice(port->dev, "overrun error"); 769 dev_notice(port->dev, "overrun error");
770 } 770 }
771 } 771 }
772 772
773 if (status & SCxSR_FER(port)) { 773 if (status & SCxSR_FER(port)) {
774 if (sci_rxd_in(port) == 0) { 774 if (sci_rxd_in(port) == 0) {
775 /* Notify of BREAK */ 775 /* Notify of BREAK */
776 struct sci_port *sci_port = to_sci_port(port); 776 struct sci_port *sci_port = to_sci_port(port);
777 777
778 if (!sci_port->break_flag) { 778 if (!sci_port->break_flag) {
779 port->icount.brk++; 779 port->icount.brk++;
780 780
781 sci_port->break_flag = 1; 781 sci_port->break_flag = 1;
782 sci_schedule_break_timer(sci_port); 782 sci_schedule_break_timer(sci_port);
783 783
784 /* Do sysrq handling. */ 784 /* Do sysrq handling. */
785 if (uart_handle_break(port)) 785 if (uart_handle_break(port))
786 return 0; 786 return 0;
787 787
788 dev_dbg(port->dev, "BREAK detected\n"); 788 dev_dbg(port->dev, "BREAK detected\n");
789 789
790 if (tty_insert_flip_char(tport, 0, TTY_BREAK)) 790 if (tty_insert_flip_char(tport, 0, TTY_BREAK))
791 copied++; 791 copied++;
792 } 792 }
793 793
794 } else { 794 } else {
795 /* frame error */ 795 /* frame error */
796 port->icount.frame++; 796 port->icount.frame++;
797 797
798 if (tty_insert_flip_char(tport, 0, TTY_FRAME)) 798 if (tty_insert_flip_char(tport, 0, TTY_FRAME))
799 copied++; 799 copied++;
800 800
801 dev_notice(port->dev, "frame error\n"); 801 dev_notice(port->dev, "frame error\n");
802 } 802 }
803 } 803 }
804 804
805 if (status & SCxSR_PER(port)) { 805 if (status & SCxSR_PER(port)) {
806 /* parity error */ 806 /* parity error */
807 port->icount.parity++; 807 port->icount.parity++;
808 808
809 if (tty_insert_flip_char(tport, 0, TTY_PARITY)) 809 if (tty_insert_flip_char(tport, 0, TTY_PARITY))
810 copied++; 810 copied++;
811 811
812 dev_notice(port->dev, "parity error"); 812 dev_notice(port->dev, "parity error");
813 } 813 }
814 814
815 if (copied) 815 if (copied)
816 tty_flip_buffer_push(tport); 816 tty_flip_buffer_push(tport);
817 817
818 return copied; 818 return copied;
819 } 819 }
820 820
821 static int sci_handle_fifo_overrun(struct uart_port *port) 821 static int sci_handle_fifo_overrun(struct uart_port *port)
822 { 822 {
823 struct tty_port *tport = &port->state->port; 823 struct tty_port *tport = &port->state->port;
824 struct sci_port *s = to_sci_port(port); 824 struct sci_port *s = to_sci_port(port);
825 struct plat_sci_reg *reg; 825 struct plat_sci_reg *reg;
826 int copied = 0; 826 int copied = 0;
827 827
828 reg = sci_getreg(port, SCLSR); 828 reg = sci_getreg(port, SCLSR);
829 if (!reg->size) 829 if (!reg->size)
830 return 0; 830 return 0;
831 831
832 if ((serial_port_in(port, SCLSR) & (1 << s->cfg->overrun_bit))) { 832 if ((serial_port_in(port, SCLSR) & (1 << s->cfg->overrun_bit))) {
833 serial_port_out(port, SCLSR, 0); 833 serial_port_out(port, SCLSR, 0);
834 834
835 port->icount.overrun++; 835 port->icount.overrun++;
836 836
837 tty_insert_flip_char(tport, 0, TTY_OVERRUN); 837 tty_insert_flip_char(tport, 0, TTY_OVERRUN);
838 tty_flip_buffer_push(tport); 838 tty_flip_buffer_push(tport);
839 839
840 dev_notice(port->dev, "overrun error\n"); 840 dev_notice(port->dev, "overrun error\n");
841 copied++; 841 copied++;
842 } 842 }
843 843
844 return copied; 844 return copied;
845 } 845 }
846 846
847 static int sci_handle_breaks(struct uart_port *port) 847 static int sci_handle_breaks(struct uart_port *port)
848 { 848 {
849 int copied = 0; 849 int copied = 0;
850 unsigned short status = serial_port_in(port, SCxSR); 850 unsigned short status = serial_port_in(port, SCxSR);
851 struct tty_port *tport = &port->state->port; 851 struct tty_port *tport = &port->state->port;
852 struct sci_port *s = to_sci_port(port); 852 struct sci_port *s = to_sci_port(port);
853 853
854 if (uart_handle_break(port)) 854 if (uart_handle_break(port))
855 return 0; 855 return 0;
856 856
857 if (!s->break_flag && status & SCxSR_BRK(port)) { 857 if (!s->break_flag && status & SCxSR_BRK(port)) {
858 #if defined(CONFIG_CPU_SH3) 858 #if defined(CONFIG_CPU_SH3)
859 /* Debounce break */ 859 /* Debounce break */
860 s->break_flag = 1; 860 s->break_flag = 1;
861 #endif 861 #endif
862 862
863 port->icount.brk++; 863 port->icount.brk++;
864 864
865 /* Notify of BREAK */ 865 /* Notify of BREAK */
866 if (tty_insert_flip_char(tport, 0, TTY_BREAK)) 866 if (tty_insert_flip_char(tport, 0, TTY_BREAK))
867 copied++; 867 copied++;
868 868
869 dev_dbg(port->dev, "BREAK detected\n"); 869 dev_dbg(port->dev, "BREAK detected\n");
870 } 870 }
871 871
872 if (copied) 872 if (copied)
873 tty_flip_buffer_push(tport); 873 tty_flip_buffer_push(tport);
874 874
875 copied += sci_handle_fifo_overrun(port); 875 copied += sci_handle_fifo_overrun(port);
876 876
877 return copied; 877 return copied;
878 } 878 }
879 879
880 static irqreturn_t sci_rx_interrupt(int irq, void *ptr) 880 static irqreturn_t sci_rx_interrupt(int irq, void *ptr)
881 { 881 {
882 #ifdef CONFIG_SERIAL_SH_SCI_DMA 882 #ifdef CONFIG_SERIAL_SH_SCI_DMA
883 struct uart_port *port = ptr; 883 struct uart_port *port = ptr;
884 struct sci_port *s = to_sci_port(port); 884 struct sci_port *s = to_sci_port(port);
885 885
886 if (s->chan_rx) { 886 if (s->chan_rx) {
887 u16 scr = serial_port_in(port, SCSCR); 887 u16 scr = serial_port_in(port, SCSCR);
888 u16 ssr = serial_port_in(port, SCxSR); 888 u16 ssr = serial_port_in(port, SCxSR);
889 889
890 /* Disable future Rx interrupts */ 890 /* Disable future Rx interrupts */
891 if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) { 891 if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
892 disable_irq_nosync(irq); 892 disable_irq_nosync(irq);
893 scr |= 0x4000; 893 scr |= 0x4000;
894 } else { 894 } else {
895 scr &= ~SCSCR_RIE; 895 scr &= ~SCSCR_RIE;
896 } 896 }
897 serial_port_out(port, SCSCR, scr); 897 serial_port_out(port, SCSCR, scr);
898 /* Clear current interrupt */ 898 /* Clear current interrupt */
899 serial_port_out(port, SCxSR, ssr & ~(1 | SCxSR_RDxF(port))); 899 serial_port_out(port, SCxSR, ssr & ~(1 | SCxSR_RDxF(port)));
900 dev_dbg(port->dev, "Rx IRQ %lu: setup t-out in %u jiffies\n", 900 dev_dbg(port->dev, "Rx IRQ %lu: setup t-out in %u jiffies\n",
901 jiffies, s->rx_timeout); 901 jiffies, s->rx_timeout);
902 mod_timer(&s->rx_timer, jiffies + s->rx_timeout); 902 mod_timer(&s->rx_timer, jiffies + s->rx_timeout);
903 903
904 return IRQ_HANDLED; 904 return IRQ_HANDLED;
905 } 905 }
906 #endif 906 #endif
907 907
908 /* sci_receive_chars() probably has to be called regardless of 908 /* sci_receive_chars() probably has to be called regardless of
909 * whether I_IXOFF is set; otherwise, how would the interrupt 909 * whether I_IXOFF is set; otherwise, how would the interrupt
910 * ever be disabled? 910 * ever be disabled?
911 */ 911 */
912 sci_receive_chars(ptr); 912 sci_receive_chars(ptr);
913 913
914 return IRQ_HANDLED; 914 return IRQ_HANDLED;
915 } 915 }
916 916
917 static irqreturn_t sci_tx_interrupt(int irq, void *ptr) 917 static irqreturn_t sci_tx_interrupt(int irq, void *ptr)
918 { 918 {
919 struct uart_port *port = ptr; 919 struct uart_port *port = ptr;
920 unsigned long flags; 920 unsigned long flags;
921 921
922 spin_lock_irqsave(&port->lock, flags); 922 spin_lock_irqsave(&port->lock, flags);
923 sci_transmit_chars(port); 923 sci_transmit_chars(port);
924 spin_unlock_irqrestore(&port->lock, flags); 924 spin_unlock_irqrestore(&port->lock, flags);
925 925
926 return IRQ_HANDLED; 926 return IRQ_HANDLED;
927 } 927 }
928 928
929 static irqreturn_t sci_er_interrupt(int irq, void *ptr) 929 static irqreturn_t sci_er_interrupt(int irq, void *ptr)
930 { 930 {
931 struct uart_port *port = ptr; 931 struct uart_port *port = ptr;
932 932
933 /* Handle errors */ 933 /* Handle errors */
934 if (port->type == PORT_SCI) { 934 if (port->type == PORT_SCI) {
935 if (sci_handle_errors(port)) { 935 if (sci_handle_errors(port)) {
936 /* discard character in rx buffer */ 936 /* discard character in rx buffer */
937 serial_port_in(port, SCxSR); 937 serial_port_in(port, SCxSR);
938 serial_port_out(port, SCxSR, SCxSR_RDxF_CLEAR(port)); 938 serial_port_out(port, SCxSR, SCxSR_RDxF_CLEAR(port));
939 } 939 }
940 } else { 940 } else {
941 sci_handle_fifo_overrun(port); 941 sci_handle_fifo_overrun(port);
942 sci_rx_interrupt(irq, ptr); 942 sci_rx_interrupt(irq, ptr);
943 } 943 }
944 944
945 serial_port_out(port, SCxSR, SCxSR_ERROR_CLEAR(port)); 945 serial_port_out(port, SCxSR, SCxSR_ERROR_CLEAR(port));
946 946
947 /* Kick the transmission */ 947 /* Kick the transmission */
948 sci_tx_interrupt(irq, ptr); 948 sci_tx_interrupt(irq, ptr);
949 949
950 return IRQ_HANDLED; 950 return IRQ_HANDLED;
951 } 951 }
952 952
953 static irqreturn_t sci_br_interrupt(int irq, void *ptr) 953 static irqreturn_t sci_br_interrupt(int irq, void *ptr)
954 { 954 {
955 struct uart_port *port = ptr; 955 struct uart_port *port = ptr;
956 956
957 /* Handle BREAKs */ 957 /* Handle BREAKs */
958 sci_handle_breaks(port); 958 sci_handle_breaks(port);
959 serial_port_out(port, SCxSR, SCxSR_BREAK_CLEAR(port)); 959 serial_port_out(port, SCxSR, SCxSR_BREAK_CLEAR(port));
960 960
961 return IRQ_HANDLED; 961 return IRQ_HANDLED;
962 } 962 }
963 963
964 static inline unsigned long port_rx_irq_mask(struct uart_port *port) 964 static inline unsigned long port_rx_irq_mask(struct uart_port *port)
965 { 965 {
966 /* 966 /*
967 * Not all ports (such as SCIFA) will support REIE. Rather than 967 * Not all ports (such as SCIFA) will support REIE. Rather than
968 * special-casing the port type, we check the port initialization 968 * special-casing the port type, we check the port initialization
969 * IRQ enable mask to see whether the IRQ is desired at all. If 969 * IRQ enable mask to see whether the IRQ is desired at all. If
970 * it's unset, it's logically inferred that there's no point in 970 * it's unset, it's logically inferred that there's no point in
971 * testing for it. 971 * testing for it.
972 */ 972 */
973 return SCSCR_RIE | (to_sci_port(port)->cfg->scscr & SCSCR_REIE); 973 return SCSCR_RIE | (to_sci_port(port)->cfg->scscr & SCSCR_REIE);
974 } 974 }
975 975
976 static irqreturn_t sci_mpxed_interrupt(int irq, void *ptr) 976 static irqreturn_t sci_mpxed_interrupt(int irq, void *ptr)
977 { 977 {
978 unsigned short ssr_status, scr_status, err_enabled; 978 unsigned short ssr_status, scr_status, err_enabled;
979 struct uart_port *port = ptr; 979 struct uart_port *port = ptr;
980 struct sci_port *s = to_sci_port(port); 980 struct sci_port *s = to_sci_port(port);
981 irqreturn_t ret = IRQ_NONE; 981 irqreturn_t ret = IRQ_NONE;
982 982
983 ssr_status = serial_port_in(port, SCxSR); 983 ssr_status = serial_port_in(port, SCxSR);
984 scr_status = serial_port_in(port, SCSCR); 984 scr_status = serial_port_in(port, SCSCR);
985 err_enabled = scr_status & port_rx_irq_mask(port); 985 err_enabled = scr_status & port_rx_irq_mask(port);
986 986
987 /* Tx Interrupt */ 987 /* Tx Interrupt */
988 if ((ssr_status & SCxSR_TDxE(port)) && (scr_status & SCSCR_TIE) && 988 if ((ssr_status & SCxSR_TDxE(port)) && (scr_status & SCSCR_TIE) &&
989 !s->chan_tx) 989 !s->chan_tx)
990 ret = sci_tx_interrupt(irq, ptr); 990 ret = sci_tx_interrupt(irq, ptr);
991 991
992 /* 992 /*
993 * Rx Interrupt: if we're using DMA, the DMA controller clears RDF / 993 * Rx Interrupt: if we're using DMA, the DMA controller clears RDF /
994 * DR flags 994 * DR flags
995 */ 995 */
996 if (((ssr_status & SCxSR_RDxF(port)) || s->chan_rx) && 996 if (((ssr_status & SCxSR_RDxF(port)) || s->chan_rx) &&
997 (scr_status & SCSCR_RIE)) 997 (scr_status & SCSCR_RIE))
998 ret = sci_rx_interrupt(irq, ptr); 998 ret = sci_rx_interrupt(irq, ptr);
999 999
1000 /* Error Interrupt */ 1000 /* Error Interrupt */
1001 if ((ssr_status & SCxSR_ERRORS(port)) && err_enabled) 1001 if ((ssr_status & SCxSR_ERRORS(port)) && err_enabled)
1002 ret = sci_er_interrupt(irq, ptr); 1002 ret = sci_er_interrupt(irq, ptr);
1003 1003
1004 /* Break Interrupt */ 1004 /* Break Interrupt */
1005 if ((ssr_status & SCxSR_BRK(port)) && err_enabled) 1005 if ((ssr_status & SCxSR_BRK(port)) && err_enabled)
1006 ret = sci_br_interrupt(irq, ptr); 1006 ret = sci_br_interrupt(irq, ptr);
1007 1007
1008 return ret; 1008 return ret;
1009 } 1009 }
1010 1010
1011 /* 1011 /*
1012 * Here we define a transition notifier so that we can update all of our 1012 * Here we define a transition notifier so that we can update all of our
1013 * ports' baud rate when the peripheral clock changes. 1013 * ports' baud rate when the peripheral clock changes.
1014 */ 1014 */
1015 static int sci_notifier(struct notifier_block *self, 1015 static int sci_notifier(struct notifier_block *self,
1016 unsigned long phase, void *p) 1016 unsigned long phase, void *p)
1017 { 1017 {
1018 struct sci_port *sci_port; 1018 struct sci_port *sci_port;
1019 unsigned long flags; 1019 unsigned long flags;
1020 1020
1021 sci_port = container_of(self, struct sci_port, freq_transition); 1021 sci_port = container_of(self, struct sci_port, freq_transition);
1022 1022
1023 if ((phase == CPUFREQ_POSTCHANGE) || 1023 if ((phase == CPUFREQ_POSTCHANGE) ||
1024 (phase == CPUFREQ_RESUMECHANGE)) { 1024 (phase == CPUFREQ_RESUMECHANGE)) {
1025 struct uart_port *port = &sci_port->port; 1025 struct uart_port *port = &sci_port->port;
1026 1026
1027 spin_lock_irqsave(&port->lock, flags); 1027 spin_lock_irqsave(&port->lock, flags);
1028 port->uartclk = clk_get_rate(sci_port->iclk); 1028 port->uartclk = clk_get_rate(sci_port->iclk);
1029 spin_unlock_irqrestore(&port->lock, flags); 1029 spin_unlock_irqrestore(&port->lock, flags);
1030 } 1030 }
1031 1031
1032 return NOTIFY_OK; 1032 return NOTIFY_OK;
1033 } 1033 }
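
A notifier like sci_notifier() only fires once it has been registered on the cpufreq transition chain; that registration is not part of this hunk. As context, a minimal sketch of how such a hook-up typically looks, assuming the driver's own struct sci_port (register_freq_notifier is an illustrative name; freq_transition is the notifier_block that sci_notifier() recovers with container_of() above):

#include <linux/cpufreq.h>
#include <linux/notifier.h>

static int register_freq_notifier(struct sci_port *sci_port)
{
        /* Deliver CPUFREQ_POSTCHANGE/CPUFREQ_RESUMECHANGE events to sci_notifier() */
        sci_port->freq_transition.notifier_call = sci_notifier;

        return cpufreq_register_notifier(&sci_port->freq_transition,
                                         CPUFREQ_TRANSITION_NOTIFIER);
}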
1034 1034
1035 static struct sci_irq_desc { 1035 static struct sci_irq_desc {
1036 const char *desc; 1036 const char *desc;
1037 irq_handler_t handler; 1037 irq_handler_t handler;
1038 } sci_irq_desc[] = { 1038 } sci_irq_desc[] = {
1039 /* 1039 /*
1040 * Split out handlers, the default case. 1040 * Split out handlers, the default case.
1041 */ 1041 */
1042 [SCIx_ERI_IRQ] = { 1042 [SCIx_ERI_IRQ] = {
1043 .desc = "rx err", 1043 .desc = "rx err",
1044 .handler = sci_er_interrupt, 1044 .handler = sci_er_interrupt,
1045 }, 1045 },
1046 1046
1047 [SCIx_RXI_IRQ] = { 1047 [SCIx_RXI_IRQ] = {
1048 .desc = "rx full", 1048 .desc = "rx full",
1049 .handler = sci_rx_interrupt, 1049 .handler = sci_rx_interrupt,
1050 }, 1050 },
1051 1051
1052 [SCIx_TXI_IRQ] = { 1052 [SCIx_TXI_IRQ] = {
1053 .desc = "tx empty", 1053 .desc = "tx empty",
1054 .handler = sci_tx_interrupt, 1054 .handler = sci_tx_interrupt,
1055 }, 1055 },
1056 1056
1057 [SCIx_BRI_IRQ] = { 1057 [SCIx_BRI_IRQ] = {
1058 .desc = "break", 1058 .desc = "break",
1059 .handler = sci_br_interrupt, 1059 .handler = sci_br_interrupt,
1060 }, 1060 },
1061 1061
1062 /* 1062 /*
1063 * Special muxed handler. 1063 * Special muxed handler.
1064 */ 1064 */
1065 [SCIx_MUX_IRQ] = { 1065 [SCIx_MUX_IRQ] = {
1066 .desc = "mux", 1066 .desc = "mux",
1067 .handler = sci_mpxed_interrupt, 1067 .handler = sci_mpxed_interrupt,
1068 }, 1068 },
1069 }; 1069 };
1070 1070
1071 static int sci_request_irq(struct sci_port *port) 1071 static int sci_request_irq(struct sci_port *port)
1072 { 1072 {
1073 struct uart_port *up = &port->port; 1073 struct uart_port *up = &port->port;
1074 int i, j, ret = 0; 1074 int i, j, ret = 0;
1075 1075
1076 for (i = j = 0; i < SCIx_NR_IRQS; i++, j++) { 1076 for (i = j = 0; i < SCIx_NR_IRQS; i++, j++) {
1077 struct sci_irq_desc *desc; 1077 struct sci_irq_desc *desc;
1078 unsigned int irq; 1078 unsigned int irq;
1079 1079
1080 if (SCIx_IRQ_IS_MUXED(port)) { 1080 if (SCIx_IRQ_IS_MUXED(port)) {
1081 i = SCIx_MUX_IRQ; 1081 i = SCIx_MUX_IRQ;
1082 irq = up->irq; 1082 irq = up->irq;
1083 } else { 1083 } else {
1084 irq = port->cfg->irqs[i]; 1084 irq = port->cfg->irqs[i];
1085 1085
1086 /* 1086 /*
1087 * Certain port types won't support all of the 1087 * Certain port types won't support all of the
1088 * available interrupt sources. 1088 * available interrupt sources.
1089 */ 1089 */
1090 if (unlikely(!irq)) 1090 if (unlikely(!irq))
1091 continue; 1091 continue;
1092 } 1092 }
1093 1093
1094 desc = sci_irq_desc + i; 1094 desc = sci_irq_desc + i;
1095 port->irqstr[j] = kasprintf(GFP_KERNEL, "%s:%s", 1095 port->irqstr[j] = kasprintf(GFP_KERNEL, "%s:%s",
1096 dev_name(up->dev), desc->desc); 1096 dev_name(up->dev), desc->desc);
1097 if (!port->irqstr[j]) { 1097 if (!port->irqstr[j]) {
1098 dev_err(up->dev, "Failed to allocate %s IRQ string\n", 1098 dev_err(up->dev, "Failed to allocate %s IRQ string\n",
1099 desc->desc); 1099 desc->desc);
1100 goto out_nomem; 1100 goto out_nomem;
1101 } 1101 }
1102 1102
1103 ret = request_irq(irq, desc->handler, up->irqflags, 1103 ret = request_irq(irq, desc->handler, up->irqflags,
1104 port->irqstr[j], port); 1104 port->irqstr[j], port);
1105 if (unlikely(ret)) { 1105 if (unlikely(ret)) {
1106 dev_err(up->dev, "Can't allocate %s IRQ\n", desc->desc); 1106 dev_err(up->dev, "Can't allocate %s IRQ\n", desc->desc);
1107 goto out_noirq; 1107 goto out_noirq;
1108 } 1108 }
1109 } 1109 }
1110 1110
1111 return 0; 1111 return 0;
1112 1112
1113 out_noirq: 1113 out_noirq:
1114 while (--i >= 0) 1114 while (--i >= 0)
1115 free_irq(port->cfg->irqs[i], port); 1115 free_irq(port->cfg->irqs[i], port);
1116 1116
1117 out_nomem: 1117 out_nomem:
1118 while (--j >= 0) 1118 while (--j >= 0)
1119 kfree(port->irqstr[j]); 1119 kfree(port->irqstr[j]);
1120 1120
1121 return ret; 1121 return ret;
1122 } 1122 }
1123 1123
1124 static void sci_free_irq(struct sci_port *port) 1124 static void sci_free_irq(struct sci_port *port)
1125 { 1125 {
1126 int i; 1126 int i;
1127 1127
1128 /* 1128 /*
1129 * Intentionally in reverse order so we iterate over the muxed 1129 * Intentionally in reverse order so we iterate over the muxed
1130 * IRQ first. 1130 * IRQ first.
1131 */ 1131 */
1132 for (i = 0; i < SCIx_NR_IRQS; i++) { 1132 for (i = 0; i < SCIx_NR_IRQS; i++) {
1133 unsigned int irq = port->cfg->irqs[i]; 1133 unsigned int irq = port->cfg->irqs[i];
1134 1134
1135 /* 1135 /*
1136 * Certain port types won't support all of the available 1136 * Certain port types won't support all of the available
1137 * interrupt sources. 1137 * interrupt sources.
1138 */ 1138 */
1139 if (unlikely(!irq)) 1139 if (unlikely(!irq))
1140 continue; 1140 continue;
1141 1141
1142 free_irq(port->cfg->irqs[i], port); 1142 free_irq(port->cfg->irqs[i], port);
1143 kfree(port->irqstr[i]); 1143 kfree(port->irqstr[i]);
1144 1144
1145 if (SCIx_IRQ_IS_MUXED(port)) { 1145 if (SCIx_IRQ_IS_MUXED(port)) {
1146 /* If there's only one IRQ, we're done. */ 1146 /* If there's only one IRQ, we're done. */
1147 return; 1147 return;
1148 } 1148 }
1149 } 1149 }
1150 } 1150 }
1151 1151
1152 static const char *sci_gpio_names[SCIx_NR_FNS] = { 1152 static const char *sci_gpio_names[SCIx_NR_FNS] = {
1153 "sck", "rxd", "txd", "cts", "rts", 1153 "sck", "rxd", "txd", "cts", "rts",
1154 }; 1154 };
1155 1155
1156 static const char *sci_gpio_str(unsigned int index) 1156 static const char *sci_gpio_str(unsigned int index)
1157 { 1157 {
1158 return sci_gpio_names[index]; 1158 return sci_gpio_names[index];
1159 } 1159 }
1160 1160
1161 static void sci_init_gpios(struct sci_port *port) 1161 static void sci_init_gpios(struct sci_port *port)
1162 { 1162 {
1163 struct uart_port *up = &port->port; 1163 struct uart_port *up = &port->port;
1164 int i; 1164 int i;
1165 1165
1166 if (!port->cfg) 1166 if (!port->cfg)
1167 return; 1167 return;
1168 1168
1169 for (i = 0; i < SCIx_NR_FNS; i++) { 1169 for (i = 0; i < SCIx_NR_FNS; i++) {
1170 const char *desc; 1170 const char *desc;
1171 int ret; 1171 int ret;
1172 1172
1173 if (!port->cfg->gpios[i]) 1173 if (!port->cfg->gpios[i])
1174 continue; 1174 continue;
1175 1175
1176 desc = sci_gpio_str(i); 1176 desc = sci_gpio_str(i);
1177 1177
1178 port->gpiostr[i] = kasprintf(GFP_KERNEL, "%s:%s", 1178 port->gpiostr[i] = kasprintf(GFP_KERNEL, "%s:%s",
1179 dev_name(up->dev), desc); 1179 dev_name(up->dev), desc);
1180 1180
1181 /* 1181 /*
1182 * If we've failed the allocation, we can still continue 1182 * If we've failed the allocation, we can still continue
1183 * on with a NULL string. 1183 * on with a NULL string.
1184 */ 1184 */
1185 if (!port->gpiostr[i]) 1185 if (!port->gpiostr[i])
1186 dev_notice(up->dev, "%s string allocation failure\n", 1186 dev_notice(up->dev, "%s string allocation failure\n",
1187 desc); 1187 desc);
1188 1188
1189 ret = gpio_request(port->cfg->gpios[i], port->gpiostr[i]); 1189 ret = gpio_request(port->cfg->gpios[i], port->gpiostr[i]);
1190 if (unlikely(ret != 0)) { 1190 if (unlikely(ret != 0)) {
1191 dev_notice(up->dev, "failed %s gpio request\n", desc); 1191 dev_notice(up->dev, "failed %s gpio request\n", desc);
1192 1192
1193 /* 1193 /*
1194 * If we can't get the GPIO for whatever reason, 1194 * If we can't get the GPIO for whatever reason,
1195 * no point in keeping the verbose string around. 1195 * no point in keeping the verbose string around.
1196 */ 1196 */
1197 kfree(port->gpiostr[i]); 1197 kfree(port->gpiostr[i]);
1198 } 1198 }
1199 } 1199 }
1200 } 1200 }
1201 1201
1202 static void sci_free_gpios(struct sci_port *port) 1202 static void sci_free_gpios(struct sci_port *port)
1203 { 1203 {
1204 int i; 1204 int i;
1205 1205
1206 for (i = 0; i < SCIx_NR_FNS; i++) 1206 for (i = 0; i < SCIx_NR_FNS; i++)
1207 if (port->cfg->gpios[i]) { 1207 if (port->cfg->gpios[i]) {
1208 gpio_free(port->cfg->gpios[i]); 1208 gpio_free(port->cfg->gpios[i]);
1209 kfree(port->gpiostr[i]); 1209 kfree(port->gpiostr[i]);
1210 } 1210 }
1211 } 1211 }
1212 1212
1213 static unsigned int sci_tx_empty(struct uart_port *port) 1213 static unsigned int sci_tx_empty(struct uart_port *port)
1214 { 1214 {
1215 unsigned short status = serial_port_in(port, SCxSR); 1215 unsigned short status = serial_port_in(port, SCxSR);
1216 unsigned short in_tx_fifo = sci_txfill(port); 1216 unsigned short in_tx_fifo = sci_txfill(port);
1217 1217
1218 return (status & SCxSR_TEND(port)) && !in_tx_fifo ? TIOCSER_TEMT : 0; 1218 return (status & SCxSR_TEND(port)) && !in_tx_fifo ? TIOCSER_TEMT : 0;
1219 } 1219 }
1220 1220
1221 /* 1221 /*
1222 * Modem control is a bit of a mixed bag for SCI(F) ports. Generally 1222 * Modem control is a bit of a mixed bag for SCI(F) ports. Generally
1223 * CTS/RTS is supported in hardware by at least one port and controlled 1223 * CTS/RTS is supported in hardware by at least one port and controlled
1224 * via SCSPTR (SCxPCR for SCIFA/B parts), or external pins (presently 1224 * via SCSPTR (SCxPCR for SCIFA/B parts), or external pins (presently
1225 * handled via the ->init_pins() op, which is a bit of a one-way street, 1225 * handled via the ->init_pins() op, which is a bit of a one-way street,
1226 * lacking any ability to defer pin control -- this will later be 1226 * lacking any ability to defer pin control -- this will later be
1227 * converted over to the GPIO framework). 1227 * converted over to the GPIO framework).
1228 * 1228 *
1229 * Other modes (such as loopback) are supported generically on certain 1229 * Other modes (such as loopback) are supported generically on certain
1230 * port types, but not others. For these it's sufficient to test for the 1230 * port types, but not others. For these it's sufficient to test for the
1231 * existence of the support register and simply ignore the port type. 1231 * existence of the support register and simply ignore the port type.
1232 */ 1232 */
1233 static void sci_set_mctrl(struct uart_port *port, unsigned int mctrl) 1233 static void sci_set_mctrl(struct uart_port *port, unsigned int mctrl)
1234 { 1234 {
1235 if (mctrl & TIOCM_LOOP) { 1235 if (mctrl & TIOCM_LOOP) {
1236 struct plat_sci_reg *reg; 1236 struct plat_sci_reg *reg;
1237 1237
1238 /* 1238 /*
1239 * Standard loopback mode for SCFCR ports. 1239 * Standard loopback mode for SCFCR ports.
1240 */ 1240 */
1241 reg = sci_getreg(port, SCFCR); 1241 reg = sci_getreg(port, SCFCR);
1242 if (reg->size) 1242 if (reg->size)
1243 serial_port_out(port, SCFCR, serial_port_in(port, SCFCR) | 1); 1243 serial_port_out(port, SCFCR, serial_port_in(port, SCFCR) | 1);
1244 } 1244 }
1245 } 1245 }
1246 1246
1247 static unsigned int sci_get_mctrl(struct uart_port *port) 1247 static unsigned int sci_get_mctrl(struct uart_port *port)
1248 { 1248 {
1249 /* 1249 /*
1250 * CTS/RTS is handled in hardware when supported, while nothing 1250 * CTS/RTS is handled in hardware when supported, while nothing
1251 * else is wired up. Keep it simple and simply assert DSR/CAR. 1251 * else is wired up. Keep it simple and simply assert DSR/CAR.
1252 */ 1252 */
1253 return TIOCM_DSR | TIOCM_CAR; 1253 return TIOCM_DSR | TIOCM_CAR;
1254 } 1254 }
1255 1255
1256 #ifdef CONFIG_SERIAL_SH_SCI_DMA 1256 #ifdef CONFIG_SERIAL_SH_SCI_DMA
1257 static void sci_dma_tx_complete(void *arg) 1257 static void sci_dma_tx_complete(void *arg)
1258 { 1258 {
1259 struct sci_port *s = arg; 1259 struct sci_port *s = arg;
1260 struct uart_port *port = &s->port; 1260 struct uart_port *port = &s->port;
1261 struct circ_buf *xmit = &port->state->xmit; 1261 struct circ_buf *xmit = &port->state->xmit;
1262 unsigned long flags; 1262 unsigned long flags;
1263 1263
1264 dev_dbg(port->dev, "%s(%d)\n", __func__, port->line); 1264 dev_dbg(port->dev, "%s(%d)\n", __func__, port->line);
1265 1265
1266 spin_lock_irqsave(&port->lock, flags); 1266 spin_lock_irqsave(&port->lock, flags);
1267 1267
1268 xmit->tail += sg_dma_len(&s->sg_tx); 1268 xmit->tail += sg_dma_len(&s->sg_tx);
1269 xmit->tail &= UART_XMIT_SIZE - 1; 1269 xmit->tail &= UART_XMIT_SIZE - 1;
1270 1270
1271 port->icount.tx += sg_dma_len(&s->sg_tx); 1271 port->icount.tx += sg_dma_len(&s->sg_tx);
1272 1272
1273 async_tx_ack(s->desc_tx); 1273 async_tx_ack(s->desc_tx);
1274 s->desc_tx = NULL; 1274 s->desc_tx = NULL;
1275 1275
1276 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) 1276 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
1277 uart_write_wakeup(port); 1277 uart_write_wakeup(port);
1278 1278
1279 if (!uart_circ_empty(xmit)) { 1279 if (!uart_circ_empty(xmit)) {
1280 s->cookie_tx = 0; 1280 s->cookie_tx = 0;
1281 schedule_work(&s->work_tx); 1281 schedule_work(&s->work_tx);
1282 } else { 1282 } else {
1283 s->cookie_tx = -EINVAL; 1283 s->cookie_tx = -EINVAL;
1284 if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) { 1284 if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
1285 u16 ctrl = serial_port_in(port, SCSCR); 1285 u16 ctrl = serial_port_in(port, SCSCR);
1286 serial_port_out(port, SCSCR, ctrl & ~SCSCR_TIE); 1286 serial_port_out(port, SCSCR, ctrl & ~SCSCR_TIE);
1287 } 1287 }
1288 } 1288 }
1289 1289
1290 spin_unlock_irqrestore(&port->lock, flags); 1290 spin_unlock_irqrestore(&port->lock, flags);
1291 } 1291 }
1292 1292
1293 /* Locking: called with port lock held */ 1293 /* Locking: called with port lock held */
1294 static int sci_dma_rx_push(struct sci_port *s, size_t count) 1294 static int sci_dma_rx_push(struct sci_port *s, size_t count)
1295 { 1295 {
1296 struct uart_port *port = &s->port; 1296 struct uart_port *port = &s->port;
1297 struct tty_port *tport = &port->state->port; 1297 struct tty_port *tport = &port->state->port;
1298 int i, active, room; 1298 int i, active, room;
1299 1299
1300 room = tty_buffer_request_room(tport, count); 1300 room = tty_buffer_request_room(tport, count);
1301 1301
1302 if (s->active_rx == s->cookie_rx[0]) { 1302 if (s->active_rx == s->cookie_rx[0]) {
1303 active = 0; 1303 active = 0;
1304 } else if (s->active_rx == s->cookie_rx[1]) { 1304 } else if (s->active_rx == s->cookie_rx[1]) {
1305 active = 1; 1305 active = 1;
1306 } else { 1306 } else {
1307 dev_err(port->dev, "cookie %d not found!\n", s->active_rx); 1307 dev_err(port->dev, "cookie %d not found!\n", s->active_rx);
1308 return 0; 1308 return 0;
1309 } 1309 }
1310 1310
1311 if (room < count) 1311 if (room < count)
1312 dev_warn(port->dev, "Rx overrun: dropping %u bytes\n", 1312 dev_warn(port->dev, "Rx overrun: dropping %u bytes\n",
1313 count - room); 1313 count - room);
1314 if (!room) 1314 if (!room)
1315 return room; 1315 return room;
1316 1316
1317 for (i = 0; i < room; i++) 1317 for (i = 0; i < room; i++)
1318 tty_insert_flip_char(tport, ((u8 *)sg_virt(&s->sg_rx[active]))[i], 1318 tty_insert_flip_char(tport, ((u8 *)sg_virt(&s->sg_rx[active]))[i],
1319 TTY_NORMAL); 1319 TTY_NORMAL);
1320 1320
1321 port->icount.rx += room; 1321 port->icount.rx += room;
1322 1322
1323 return room; 1323 return room;
1324 } 1324 }
1325 1325
1326 static void sci_dma_rx_complete(void *arg) 1326 static void sci_dma_rx_complete(void *arg)
1327 { 1327 {
1328 struct sci_port *s = arg; 1328 struct sci_port *s = arg;
1329 struct uart_port *port = &s->port; 1329 struct uart_port *port = &s->port;
1330 unsigned long flags; 1330 unsigned long flags;
1331 int count; 1331 int count;
1332 1332
1333 dev_dbg(port->dev, "%s(%d) active #%d\n", __func__, port->line, s->active_rx); 1333 dev_dbg(port->dev, "%s(%d) active #%d\n", __func__, port->line, s->active_rx);
1334 1334
1335 spin_lock_irqsave(&port->lock, flags); 1335 spin_lock_irqsave(&port->lock, flags);
1336 1336
1337 count = sci_dma_rx_push(s, s->buf_len_rx); 1337 count = sci_dma_rx_push(s, s->buf_len_rx);
1338 1338
1339 mod_timer(&s->rx_timer, jiffies + s->rx_timeout); 1339 mod_timer(&s->rx_timer, jiffies + s->rx_timeout);
1340 1340
1341 spin_unlock_irqrestore(&port->lock, flags); 1341 spin_unlock_irqrestore(&port->lock, flags);
1342 1342
1343 if (count) 1343 if (count)
1344 tty_flip_buffer_push(&port->state->port); 1344 tty_flip_buffer_push(&port->state->port);
1345 1345
1346 schedule_work(&s->work_rx); 1346 schedule_work(&s->work_rx);
1347 } 1347 }
1348 1348
1349 static void sci_rx_dma_release(struct sci_port *s, bool enable_pio) 1349 static void sci_rx_dma_release(struct sci_port *s, bool enable_pio)
1350 { 1350 {
1351 struct dma_chan *chan = s->chan_rx; 1351 struct dma_chan *chan = s->chan_rx;
1352 struct uart_port *port = &s->port; 1352 struct uart_port *port = &s->port;
1353 1353
1354 s->chan_rx = NULL; 1354 s->chan_rx = NULL;
1355 s->cookie_rx[0] = s->cookie_rx[1] = -EINVAL; 1355 s->cookie_rx[0] = s->cookie_rx[1] = -EINVAL;
1356 dma_release_channel(chan); 1356 dma_release_channel(chan);
1357 if (sg_dma_address(&s->sg_rx[0])) 1357 if (sg_dma_address(&s->sg_rx[0]))
1358 dma_free_coherent(port->dev, s->buf_len_rx * 2, 1358 dma_free_coherent(port->dev, s->buf_len_rx * 2,
1359 sg_virt(&s->sg_rx[0]), sg_dma_address(&s->sg_rx[0])); 1359 sg_virt(&s->sg_rx[0]), sg_dma_address(&s->sg_rx[0]));
1360 if (enable_pio) 1360 if (enable_pio)
1361 sci_start_rx(port); 1361 sci_start_rx(port);
1362 } 1362 }
1363 1363
1364 static void sci_tx_dma_release(struct sci_port *s, bool enable_pio) 1364 static void sci_tx_dma_release(struct sci_port *s, bool enable_pio)
1365 { 1365 {
1366 struct dma_chan *chan = s->chan_tx; 1366 struct dma_chan *chan = s->chan_tx;
1367 struct uart_port *port = &s->port; 1367 struct uart_port *port = &s->port;
1368 1368
1369 s->chan_tx = NULL; 1369 s->chan_tx = NULL;
1370 s->cookie_tx = -EINVAL; 1370 s->cookie_tx = -EINVAL;
1371 dma_release_channel(chan); 1371 dma_release_channel(chan);
1372 if (enable_pio) 1372 if (enable_pio)
1373 sci_start_tx(port); 1373 sci_start_tx(port);
1374 } 1374 }
1375 1375
1376 static void sci_submit_rx(struct sci_port *s) 1376 static void sci_submit_rx(struct sci_port *s)
1377 { 1377 {
1378 struct dma_chan *chan = s->chan_rx; 1378 struct dma_chan *chan = s->chan_rx;
1379 int i; 1379 int i;
1380 1380
1381 for (i = 0; i < 2; i++) { 1381 for (i = 0; i < 2; i++) {
1382 struct scatterlist *sg = &s->sg_rx[i]; 1382 struct scatterlist *sg = &s->sg_rx[i];
1383 struct dma_async_tx_descriptor *desc; 1383 struct dma_async_tx_descriptor *desc;
1384 1384
1385 desc = dmaengine_prep_slave_sg(chan, 1385 desc = dmaengine_prep_slave_sg(chan,
1386 sg, 1, DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT); 1386 sg, 1, DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
1387 1387
1388 if (desc) { 1388 if (desc) {
1389 s->desc_rx[i] = desc; 1389 s->desc_rx[i] = desc;
1390 desc->callback = sci_dma_rx_complete; 1390 desc->callback = sci_dma_rx_complete;
1391 desc->callback_param = s; 1391 desc->callback_param = s;
1392 s->cookie_rx[i] = desc->tx_submit(desc); 1392 s->cookie_rx[i] = desc->tx_submit(desc);
1393 } 1393 }
1394 1394
1395 if (!desc || s->cookie_rx[i] < 0) { 1395 if (!desc || s->cookie_rx[i] < 0) {
1396 if (i) { 1396 if (i) {
1397 async_tx_ack(s->desc_rx[0]); 1397 async_tx_ack(s->desc_rx[0]);
1398 s->cookie_rx[0] = -EINVAL; 1398 s->cookie_rx[0] = -EINVAL;
1399 } 1399 }
1400 if (desc) { 1400 if (desc) {
1401 async_tx_ack(desc); 1401 async_tx_ack(desc);
1402 s->cookie_rx[i] = -EINVAL; 1402 s->cookie_rx[i] = -EINVAL;
1403 } 1403 }
1404 dev_warn(s->port.dev, 1404 dev_warn(s->port.dev,
1405 "failed to re-start DMA, using PIO\n"); 1405 "failed to re-start DMA, using PIO\n");
1406 sci_rx_dma_release(s, true); 1406 sci_rx_dma_release(s, true);
1407 return; 1407 return;
1408 } 1408 }
1409 dev_dbg(s->port.dev, "%s(): cookie %d to #%d\n", __func__, 1409 dev_dbg(s->port.dev, "%s(): cookie %d to #%d\n", __func__,
1410 s->cookie_rx[i], i); 1410 s->cookie_rx[i], i);
1411 } 1411 }
1412 1412
1413 s->active_rx = s->cookie_rx[0]; 1413 s->active_rx = s->cookie_rx[0];
1414 1414
1415 dma_async_issue_pending(chan); 1415 dma_async_issue_pending(chan);
1416 } 1416 }
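
sci_submit_rx() above queues two descriptors that ping-pong over the halves of one coherent buffer; each submission follows the standard dmaengine slave sequence of prepare, set callback, submit, issue. That sequence in isolation, for a single scatterlist entry (submit_one_rx_sg is an illustrative name; the driver's cookie bookkeeping and PIO fallback are omitted):

#include <linux/dmaengine.h>
#include <linux/scatterlist.h>
#include <linux/errno.h>

static dma_cookie_t submit_one_rx_sg(struct dma_chan *chan,
                                     struct scatterlist *sg,
                                     dma_async_tx_callback cb, void *cb_param)
{
        struct dma_async_tx_descriptor *desc;
        dma_cookie_t cookie;

        /* 1. Prepare a device-to-memory transfer covering one sg entry */
        desc = dmaengine_prep_slave_sg(chan, sg, 1, DMA_DEV_TO_MEM,
                                       DMA_PREP_INTERRUPT);
        if (!desc)
                return -EINVAL;

        /* 2. Attach the completion callback and queue the descriptor */
        desc->callback = cb;
        desc->callback_param = cb_param;
        cookie = dmaengine_submit(desc);        /* wraps desc->tx_submit(desc) */

        /* 3. Kick the engine; queued descriptors only start after this call */
        dma_async_issue_pending(chan);

        return cookie;
}

The driver defers dma_async_issue_pending() until both descriptors have been queued, so the two halves of the buffer are armed back to back.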
1417 1417
1418 static void work_fn_rx(struct work_struct *work) 1418 static void work_fn_rx(struct work_struct *work)
1419 { 1419 {
1420 struct sci_port *s = container_of(work, struct sci_port, work_rx); 1420 struct sci_port *s = container_of(work, struct sci_port, work_rx);
1421 struct uart_port *port = &s->port; 1421 struct uart_port *port = &s->port;
1422 struct dma_async_tx_descriptor *desc; 1422 struct dma_async_tx_descriptor *desc;
1423 int new; 1423 int new;
1424 1424
1425 if (s->active_rx == s->cookie_rx[0]) { 1425 if (s->active_rx == s->cookie_rx[0]) {
1426 new = 0; 1426 new = 0;
1427 } else if (s->active_rx == s->cookie_rx[1]) { 1427 } else if (s->active_rx == s->cookie_rx[1]) {
1428 new = 1; 1428 new = 1;
1429 } else { 1429 } else {
1430 dev_err(port->dev, "cookie %d not found!\n", s->active_rx); 1430 dev_err(port->dev, "cookie %d not found!\n", s->active_rx);
1431 return; 1431 return;
1432 } 1432 }
1433 desc = s->desc_rx[new]; 1433 desc = s->desc_rx[new];
1434 1434
1435 if (dma_async_is_tx_complete(s->chan_rx, s->active_rx, NULL, NULL) != 1435 if (dma_async_is_tx_complete(s->chan_rx, s->active_rx, NULL, NULL) !=
1436 DMA_SUCCESS) { 1436 DMA_COMPLETE) {
1437 /* Handle incomplete DMA receive */ 1437 /* Handle incomplete DMA receive */
1438 struct dma_chan *chan = s->chan_rx; 1438 struct dma_chan *chan = s->chan_rx;
1439 struct shdma_desc *sh_desc = container_of(desc, 1439 struct shdma_desc *sh_desc = container_of(desc,
1440 struct shdma_desc, async_tx); 1440 struct shdma_desc, async_tx);
1441 unsigned long flags; 1441 unsigned long flags;
1442 int count; 1442 int count;
1443 1443
1444 chan->device->device_control(chan, DMA_TERMINATE_ALL, 0); 1444 chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
1445 dev_dbg(port->dev, "Read %u bytes with cookie %d\n", 1445 dev_dbg(port->dev, "Read %u bytes with cookie %d\n",
1446 sh_desc->partial, sh_desc->cookie); 1446 sh_desc->partial, sh_desc->cookie);
1447 1447
1448 spin_lock_irqsave(&port->lock, flags); 1448 spin_lock_irqsave(&port->lock, flags);
1449 count = sci_dma_rx_push(s, sh_desc->partial); 1449 count = sci_dma_rx_push(s, sh_desc->partial);
1450 spin_unlock_irqrestore(&port->lock, flags); 1450 spin_unlock_irqrestore(&port->lock, flags);
1451 1451
1452 if (count) 1452 if (count)
1453 tty_flip_buffer_push(&port->state->port); 1453 tty_flip_buffer_push(&port->state->port);
1454 1454
1455 sci_submit_rx(s); 1455 sci_submit_rx(s);
1456 1456
1457 return; 1457 return;
1458 } 1458 }
1459 1459
1460 s->cookie_rx[new] = desc->tx_submit(desc); 1460 s->cookie_rx[new] = desc->tx_submit(desc);
1461 if (s->cookie_rx[new] < 0) { 1461 if (s->cookie_rx[new] < 0) {
1462 dev_warn(port->dev, "Failed submitting Rx DMA descriptor\n"); 1462 dev_warn(port->dev, "Failed submitting Rx DMA descriptor\n");
1463 sci_rx_dma_release(s, true); 1463 sci_rx_dma_release(s, true);
1464 return; 1464 return;
1465 } 1465 }
1466 1466
1467 s->active_rx = s->cookie_rx[!new]; 1467 s->active_rx = s->cookie_rx[!new];
1468 1468
1469 dev_dbg(port->dev, "%s: cookie %d #%d, new active #%d\n", __func__, 1469 dev_dbg(port->dev, "%s: cookie %d #%d, new active #%d\n", __func__,
1470 s->cookie_rx[new], new, s->active_rx); 1470 s->cookie_rx[new], new, s->active_rx);
1471 } 1471 }
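
The one changed line in this function is the point of the 'dma_complete' branch: the dmaengine status formerly named DMA_SUCCESS is now DMA_COMPLETE, so completion checks like the one in work_fn_rx() compare against the new constant. The check in isolation (poll_rx_cookie is an illustrative name; passing NULL for the last/used cookie pointers mirrors the call above):

#include <linux/dmaengine.h>

static bool poll_rx_cookie(struct dma_chan *chan, dma_cookie_t cookie)
{
        enum dma_status status;

        status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);

        return status == DMA_COMPLETE;  /* descriptor has fully retired */
}

The other enum dma_status values (DMA_IN_PROGRESS, DMA_PAUSED, DMA_ERROR) are untouched by the rename; anything other than DMA_COMPLETE sends work_fn_rx() down the partial-transfer path.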
1472 1472
1473 static void work_fn_tx(struct work_struct *work) 1473 static void work_fn_tx(struct work_struct *work)
1474 { 1474 {
1475 struct sci_port *s = container_of(work, struct sci_port, work_tx); 1475 struct sci_port *s = container_of(work, struct sci_port, work_tx);
1476 struct dma_async_tx_descriptor *desc; 1476 struct dma_async_tx_descriptor *desc;
1477 struct dma_chan *chan = s->chan_tx; 1477 struct dma_chan *chan = s->chan_tx;
1478 struct uart_port *port = &s->port; 1478 struct uart_port *port = &s->port;
1479 struct circ_buf *xmit = &port->state->xmit; 1479 struct circ_buf *xmit = &port->state->xmit;
1480 struct scatterlist *sg = &s->sg_tx; 1480 struct scatterlist *sg = &s->sg_tx;
1481 1481
1482 /* 1482 /*
1483 * DMA is idle now. 1483 * DMA is idle now.
1484 * Port xmit buffer is already mapped, and it is one page... Just adjust 1484 * Port xmit buffer is already mapped, and it is one page... Just adjust
1485 * offsets and lengths. Since it is a circular buffer, we have to 1485 * offsets and lengths. Since it is a circular buffer, we have to
1486 * transmit till the end, and then the rest. Take the port lock to get a 1486 * transmit till the end, and then the rest. Take the port lock to get a
1487 * consistent xmit buffer state. 1487 * consistent xmit buffer state.
1488 */ 1488 */
1489 spin_lock_irq(&port->lock); 1489 spin_lock_irq(&port->lock);
1490 sg->offset = xmit->tail & (UART_XMIT_SIZE - 1); 1490 sg->offset = xmit->tail & (UART_XMIT_SIZE - 1);
1491 sg_dma_address(sg) = (sg_dma_address(sg) & ~(UART_XMIT_SIZE - 1)) + 1491 sg_dma_address(sg) = (sg_dma_address(sg) & ~(UART_XMIT_SIZE - 1)) +
1492 sg->offset; 1492 sg->offset;
1493 sg_dma_len(sg) = min((int)CIRC_CNT(xmit->head, xmit->tail, UART_XMIT_SIZE), 1493 sg_dma_len(sg) = min((int)CIRC_CNT(xmit->head, xmit->tail, UART_XMIT_SIZE),
1494 CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE)); 1494 CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE));
1495 spin_unlock_irq(&port->lock); 1495 spin_unlock_irq(&port->lock);
1496 1496
1497 BUG_ON(!sg_dma_len(sg)); 1497 BUG_ON(!sg_dma_len(sg));
1498 1498
1499 desc = dmaengine_prep_slave_sg(chan, 1499 desc = dmaengine_prep_slave_sg(chan,
1500 sg, s->sg_len_tx, DMA_MEM_TO_DEV, 1500 sg, s->sg_len_tx, DMA_MEM_TO_DEV,
1501 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 1501 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
1502 if (!desc) { 1502 if (!desc) {
1503 /* switch to PIO */ 1503 /* switch to PIO */
1504 sci_tx_dma_release(s, true); 1504 sci_tx_dma_release(s, true);
1505 return; 1505 return;
1506 } 1506 }
1507 1507
1508 dma_sync_sg_for_device(port->dev, sg, 1, DMA_TO_DEVICE); 1508 dma_sync_sg_for_device(port->dev, sg, 1, DMA_TO_DEVICE);
1509 1509
1510 spin_lock_irq(&port->lock); 1510 spin_lock_irq(&port->lock);
1511 s->desc_tx = desc; 1511 s->desc_tx = desc;
1512 desc->callback = sci_dma_tx_complete; 1512 desc->callback = sci_dma_tx_complete;
1513 desc->callback_param = s; 1513 desc->callback_param = s;
1514 spin_unlock_irq(&port->lock); 1514 spin_unlock_irq(&port->lock);
1515 s->cookie_tx = desc->tx_submit(desc); 1515 s->cookie_tx = desc->tx_submit(desc);
1516 if (s->cookie_tx < 0) { 1516 if (s->cookie_tx < 0) {
1517 dev_warn(port->dev, "Failed submitting Tx DMA descriptor\n"); 1517 dev_warn(port->dev, "Failed submitting Tx DMA descriptor\n");
1518 /* switch to PIO */ 1518 /* switch to PIO */
1519 sci_tx_dma_release(s, true); 1519 sci_tx_dma_release(s, true);
1520 return; 1520 return;
1521 } 1521 }
1522 1522
1523 dev_dbg(port->dev, "%s: %p: %d...%d, cookie %d\n", __func__, 1523 dev_dbg(port->dev, "%s: %p: %d...%d, cookie %d\n", __func__,
1524 xmit->buf, xmit->tail, xmit->head, s->cookie_tx); 1524 xmit->buf, xmit->tail, xmit->head, s->cookie_tx);
1525 1525
1526 dma_async_issue_pending(chan); 1526 dma_async_issue_pending(chan);
1527 } 1527 }
1528 #endif 1528 #endif
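
work_fn_tx() above sizes a single descriptor as the smaller of the bytes pending in the circular xmit buffer and the contiguous run up to the buffer's end, using CIRC_CNT() and CIRC_CNT_TO_END(). A standalone illustration of that arithmetic, with the two macros copied from <linux/circ_buf.h> and an example size standing in for UART_XMIT_SIZE (builds with GCC/Clang, which provide the statement-expression extension the kernel macro uses):

#include <stdio.h>

#define XMIT_SIZE       4096    /* stand-in for UART_XMIT_SIZE (a power of two) */

#define CIRC_CNT(head, tail, size) (((head) - (tail)) & ((size) - 1))
#define CIRC_CNT_TO_END(head, tail, size) \
        ({ int end = (size) - (tail); \
           int n = (CIRC_CNT(head, tail, size) + end) & ((size) - 1); \
           n < end ? n : end; })

int main(void)
{
        int head = 100, tail = 4000;    /* producer has wrapped past the end */

        /* 196 bytes pending in total, but only 96 are contiguous before the wrap */
        printf("pending=%d contiguous=%d\n",
               CIRC_CNT(head, tail, XMIT_SIZE),
               CIRC_CNT_TO_END(head, tail, XMIT_SIZE));
        return 0;
}

Because only the contiguous run is handed to one descriptor, the bytes after the wrap are picked up on the next pass through work_fn_tx(), which sci_dma_tx_complete() schedules when the buffer is still non-empty.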
1529 1529
1530 static void sci_start_tx(struct uart_port *port) 1530 static void sci_start_tx(struct uart_port *port)
1531 { 1531 {
1532 struct sci_port *s = to_sci_port(port); 1532 struct sci_port *s = to_sci_port(port);
1533 unsigned short ctrl; 1533 unsigned short ctrl;
1534 1534
1535 #ifdef CONFIG_SERIAL_SH_SCI_DMA 1535 #ifdef CONFIG_SERIAL_SH_SCI_DMA
1536 if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) { 1536 if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
1537 u16 new, scr = serial_port_in(port, SCSCR); 1537 u16 new, scr = serial_port_in(port, SCSCR);
1538 if (s->chan_tx) 1538 if (s->chan_tx)
1539 new = scr | 0x8000; 1539 new = scr | 0x8000;
1540 else 1540 else
1541 new = scr & ~0x8000; 1541 new = scr & ~0x8000;
1542 if (new != scr) 1542 if (new != scr)
1543 serial_port_out(port, SCSCR, new); 1543 serial_port_out(port, SCSCR, new);
1544 } 1544 }
1545 1545
1546 if (s->chan_tx && !uart_circ_empty(&s->port.state->xmit) && 1546 if (s->chan_tx && !uart_circ_empty(&s->port.state->xmit) &&
1547 s->cookie_tx < 0) { 1547 s->cookie_tx < 0) {
1548 s->cookie_tx = 0; 1548 s->cookie_tx = 0;
1549 schedule_work(&s->work_tx); 1549 schedule_work(&s->work_tx);
1550 } 1550 }
1551 #endif 1551 #endif
1552 1552
1553 if (!s->chan_tx || port->type == PORT_SCIFA || port->type == PORT_SCIFB) { 1553 if (!s->chan_tx || port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
1554 /* Set TIE (Transmit Interrupt Enable) bit in SCSCR */ 1554 /* Set TIE (Transmit Interrupt Enable) bit in SCSCR */
1555 ctrl = serial_port_in(port, SCSCR); 1555 ctrl = serial_port_in(port, SCSCR);
1556 serial_port_out(port, SCSCR, ctrl | SCSCR_TIE); 1556 serial_port_out(port, SCSCR, ctrl | SCSCR_TIE);
1557 } 1557 }
1558 } 1558 }
1559 1559
1560 static void sci_stop_tx(struct uart_port *port) 1560 static void sci_stop_tx(struct uart_port *port)
1561 { 1561 {
1562 unsigned short ctrl; 1562 unsigned short ctrl;
1563 1563
1564 /* Clear TIE (Transmit Interrupt Enable) bit in SCSCR */ 1564 /* Clear TIE (Transmit Interrupt Enable) bit in SCSCR */
1565 ctrl = serial_port_in(port, SCSCR); 1565 ctrl = serial_port_in(port, SCSCR);
1566 1566
1567 if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) 1567 if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
1568 ctrl &= ~0x8000; 1568 ctrl &= ~0x8000;
1569 1569
1570 ctrl &= ~SCSCR_TIE; 1570 ctrl &= ~SCSCR_TIE;
1571 1571
1572 serial_port_out(port, SCSCR, ctrl); 1572 serial_port_out(port, SCSCR, ctrl);
1573 } 1573 }
1574 1574
1575 static void sci_start_rx(struct uart_port *port) 1575 static void sci_start_rx(struct uart_port *port)
1576 { 1576 {
1577 unsigned short ctrl; 1577 unsigned short ctrl;
1578 1578
1579 ctrl = serial_port_in(port, SCSCR) | port_rx_irq_mask(port); 1579 ctrl = serial_port_in(port, SCSCR) | port_rx_irq_mask(port);
1580 1580
1581 if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) 1581 if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
1582 ctrl &= ~0x4000; 1582 ctrl &= ~0x4000;
1583 1583
1584 serial_port_out(port, SCSCR, ctrl); 1584 serial_port_out(port, SCSCR, ctrl);
1585 } 1585 }
1586 1586
1587 static void sci_stop_rx(struct uart_port *port) 1587 static void sci_stop_rx(struct uart_port *port)
1588 { 1588 {
1589 unsigned short ctrl; 1589 unsigned short ctrl;
1590 1590
1591 ctrl = serial_port_in(port, SCSCR); 1591 ctrl = serial_port_in(port, SCSCR);
1592 1592
1593 if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) 1593 if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
1594 ctrl &= ~0x4000; 1594 ctrl &= ~0x4000;
1595 1595
1596 ctrl &= ~port_rx_irq_mask(port); 1596 ctrl &= ~port_rx_irq_mask(port);
1597 1597
1598 serial_port_out(port, SCSCR, ctrl); 1598 serial_port_out(port, SCSCR, ctrl);
1599 } 1599 }
1600 1600
1601 static void sci_enable_ms(struct uart_port *port) 1601 static void sci_enable_ms(struct uart_port *port)
1602 { 1602 {
1603 /* 1603 /*
1604 * Not supported by hardware, always a nop. 1604 * Not supported by hardware, always a nop.
1605 */ 1605 */
1606 } 1606 }
1607 1607
1608 static void sci_break_ctl(struct uart_port *port, int break_state) 1608 static void sci_break_ctl(struct uart_port *port, int break_state)
1609 { 1609 {
1610 struct sci_port *s = to_sci_port(port); 1610 struct sci_port *s = to_sci_port(port);
1611 struct plat_sci_reg *reg = sci_regmap[s->cfg->regtype] + SCSPTR; 1611 struct plat_sci_reg *reg = sci_regmap[s->cfg->regtype] + SCSPTR;
1612 unsigned short scscr, scsptr; 1612 unsigned short scscr, scsptr;
1613 1613
1614 /* check whether the port has SCSPTR */ 1614 /* check whether the port has SCSPTR */
1615 if (!reg->size) { 1615 if (!reg->size) {
1616 /* 1616 /*
1617 * Not supported by hardware. Most parts couple break and rx 1617 * Not supported by hardware. Most parts couple break and rx
1618 * interrupts together, with break detection always enabled. 1618 * interrupts together, with break detection always enabled.
1619 */ 1619 */
1620 return; 1620 return;
1621 } 1621 }
1622 1622
1623 scsptr = serial_port_in(port, SCSPTR); 1623 scsptr = serial_port_in(port, SCSPTR);
1624 scscr = serial_port_in(port, SCSCR); 1624 scscr = serial_port_in(port, SCSCR);
1625 1625
1626 if (break_state == -1) { 1626 if (break_state == -1) {
1627 scsptr = (scsptr | SCSPTR_SPB2IO) & ~SCSPTR_SPB2DT; 1627 scsptr = (scsptr | SCSPTR_SPB2IO) & ~SCSPTR_SPB2DT;
1628 scscr &= ~SCSCR_TE; 1628 scscr &= ~SCSCR_TE;
1629 } else { 1629 } else {
1630 scsptr = (scsptr | SCSPTR_SPB2DT) & ~SCSPTR_SPB2IO; 1630 scsptr = (scsptr | SCSPTR_SPB2DT) & ~SCSPTR_SPB2IO;
1631 scscr |= SCSCR_TE; 1631 scscr |= SCSCR_TE;
1632 } 1632 }
1633 1633
1634 serial_port_out(port, SCSPTR, scsptr); 1634 serial_port_out(port, SCSPTR, scsptr);
1635 serial_port_out(port, SCSCR, scscr); 1635 serial_port_out(port, SCSCR, scscr);
1636 } 1636 }
1637 1637
1638 #ifdef CONFIG_SERIAL_SH_SCI_DMA 1638 #ifdef CONFIG_SERIAL_SH_SCI_DMA
1639 static bool filter(struct dma_chan *chan, void *slave) 1639 static bool filter(struct dma_chan *chan, void *slave)
1640 { 1640 {
1641 struct sh_dmae_slave *param = slave; 1641 struct sh_dmae_slave *param = slave;
1642 1642
1643 dev_dbg(chan->device->dev, "%s: slave ID %d\n", __func__, 1643 dev_dbg(chan->device->dev, "%s: slave ID %d\n", __func__,
1644 param->shdma_slave.slave_id); 1644 param->shdma_slave.slave_id);
1645 1645
1646 chan->private = &param->shdma_slave; 1646 chan->private = &param->shdma_slave;
1647 return true; 1647 return true;
1648 } 1648 }
1649 1649
1650 static void rx_timer_fn(unsigned long arg) 1650 static void rx_timer_fn(unsigned long arg)
1651 { 1651 {
1652 struct sci_port *s = (struct sci_port *)arg; 1652 struct sci_port *s = (struct sci_port *)arg;
1653 struct uart_port *port = &s->port; 1653 struct uart_port *port = &s->port;
1654 u16 scr = serial_port_in(port, SCSCR); 1654 u16 scr = serial_port_in(port, SCSCR);
1655 1655
1656 if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) { 1656 if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
1657 scr &= ~0x4000; 1657 scr &= ~0x4000;
1658 enable_irq(s->cfg->irqs[1]); 1658 enable_irq(s->cfg->irqs[1]);
1659 } 1659 }
1660 serial_port_out(port, SCSCR, scr | SCSCR_RIE); 1660 serial_port_out(port, SCSCR, scr | SCSCR_RIE);
1661 dev_dbg(port->dev, "DMA Rx timed out\n"); 1661 dev_dbg(port->dev, "DMA Rx timed out\n");
1662 schedule_work(&s->work_rx); 1662 schedule_work(&s->work_rx);
1663 } 1663 }
1664 1664
1665 static void sci_request_dma(struct uart_port *port) 1665 static void sci_request_dma(struct uart_port *port)
1666 { 1666 {
1667 struct sci_port *s = to_sci_port(port); 1667 struct sci_port *s = to_sci_port(port);
1668 struct sh_dmae_slave *param; 1668 struct sh_dmae_slave *param;
1669 struct dma_chan *chan; 1669 struct dma_chan *chan;
1670 dma_cap_mask_t mask; 1670 dma_cap_mask_t mask;
1671 int nent; 1671 int nent;
1672 1672
1673 dev_dbg(port->dev, "%s: port %d\n", __func__, 1673 dev_dbg(port->dev, "%s: port %d\n", __func__,
1674 port->line); 1674 port->line);
1675 1675
1676 if (s->cfg->dma_slave_tx <= 0 || s->cfg->dma_slave_rx <= 0) 1676 if (s->cfg->dma_slave_tx <= 0 || s->cfg->dma_slave_rx <= 0)
1677 return; 1677 return;
1678 1678
1679 dma_cap_zero(mask); 1679 dma_cap_zero(mask);
1680 dma_cap_set(DMA_SLAVE, mask); 1680 dma_cap_set(DMA_SLAVE, mask);
1681 1681
1682 param = &s->param_tx; 1682 param = &s->param_tx;
1683 1683
1684 /* Slave ID, e.g., SHDMA_SLAVE_SCIF0_TX */ 1684 /* Slave ID, e.g., SHDMA_SLAVE_SCIF0_TX */
1685 param->shdma_slave.slave_id = s->cfg->dma_slave_tx; 1685 param->shdma_slave.slave_id = s->cfg->dma_slave_tx;
1686 1686
1687 s->cookie_tx = -EINVAL; 1687 s->cookie_tx = -EINVAL;
1688 chan = dma_request_channel(mask, filter, param); 1688 chan = dma_request_channel(mask, filter, param);
1689 dev_dbg(port->dev, "%s: TX: got channel %p\n", __func__, chan); 1689 dev_dbg(port->dev, "%s: TX: got channel %p\n", __func__, chan);
1690 if (chan) { 1690 if (chan) {
1691 s->chan_tx = chan; 1691 s->chan_tx = chan;
1692 sg_init_table(&s->sg_tx, 1); 1692 sg_init_table(&s->sg_tx, 1);
1693 /* UART circular tx buffer is an aligned page. */ 1693 /* UART circular tx buffer is an aligned page. */
1694 BUG_ON((int)port->state->xmit.buf & ~PAGE_MASK); 1694 BUG_ON((int)port->state->xmit.buf & ~PAGE_MASK);
1695 sg_set_page(&s->sg_tx, virt_to_page(port->state->xmit.buf), 1695 sg_set_page(&s->sg_tx, virt_to_page(port->state->xmit.buf),
1696 UART_XMIT_SIZE, (int)port->state->xmit.buf & ~PAGE_MASK); 1696 UART_XMIT_SIZE, (int)port->state->xmit.buf & ~PAGE_MASK);
1697 nent = dma_map_sg(port->dev, &s->sg_tx, 1, DMA_TO_DEVICE); 1697 nent = dma_map_sg(port->dev, &s->sg_tx, 1, DMA_TO_DEVICE);
1698 if (!nent) 1698 if (!nent)
1699 sci_tx_dma_release(s, false); 1699 sci_tx_dma_release(s, false);
1700 else 1700 else
1701 dev_dbg(port->dev, "%s: mapped %d@%p to %x\n", __func__, 1701 dev_dbg(port->dev, "%s: mapped %d@%p to %x\n", __func__,
1702 sg_dma_len(&s->sg_tx), 1702 sg_dma_len(&s->sg_tx),
1703 port->state->xmit.buf, sg_dma_address(&s->sg_tx)); 1703 port->state->xmit.buf, sg_dma_address(&s->sg_tx));
1704 1704
1705 s->sg_len_tx = nent; 1705 s->sg_len_tx = nent;
1706 1706
1707 INIT_WORK(&s->work_tx, work_fn_tx); 1707 INIT_WORK(&s->work_tx, work_fn_tx);
1708 } 1708 }
1709 1709
1710 param = &s->param_rx; 1710 param = &s->param_rx;
1711 1711
1712 /* Slave ID, e.g., SHDMA_SLAVE_SCIF0_RX */ 1712 /* Slave ID, e.g., SHDMA_SLAVE_SCIF0_RX */
1713 param->shdma_slave.slave_id = s->cfg->dma_slave_rx; 1713 param->shdma_slave.slave_id = s->cfg->dma_slave_rx;
1714 1714
1715 chan = dma_request_channel(mask, filter, param); 1715 chan = dma_request_channel(mask, filter, param);
1716 dev_dbg(port->dev, "%s: RX: got channel %p\n", __func__, chan); 1716 dev_dbg(port->dev, "%s: RX: got channel %p\n", __func__, chan);
1717 if (chan) { 1717 if (chan) {
1718 dma_addr_t dma[2]; 1718 dma_addr_t dma[2];
1719 void *buf[2]; 1719 void *buf[2];
1720 int i; 1720 int i;
1721 1721
1722 s->chan_rx = chan; 1722 s->chan_rx = chan;
1723 1723
1724 s->buf_len_rx = 2 * max(16, (int)port->fifosize); 1724 s->buf_len_rx = 2 * max(16, (int)port->fifosize);
1725 buf[0] = dma_alloc_coherent(port->dev, s->buf_len_rx * 2, 1725 buf[0] = dma_alloc_coherent(port->dev, s->buf_len_rx * 2,
1726 &dma[0], GFP_KERNEL); 1726 &dma[0], GFP_KERNEL);
1727 1727
1728 if (!buf[0]) { 1728 if (!buf[0]) {
1729 dev_warn(port->dev, 1729 dev_warn(port->dev,
1730 "failed to allocate dma buffer, using PIO\n"); 1730 "failed to allocate dma buffer, using PIO\n");
1731 sci_rx_dma_release(s, true); 1731 sci_rx_dma_release(s, true);
1732 return; 1732 return;
1733 } 1733 }
1734 1734
1735 buf[1] = buf[0] + s->buf_len_rx; 1735 buf[1] = buf[0] + s->buf_len_rx;
1736 dma[1] = dma[0] + s->buf_len_rx; 1736 dma[1] = dma[0] + s->buf_len_rx;
1737 1737
1738 for (i = 0; i < 2; i++) { 1738 for (i = 0; i < 2; i++) {
1739 struct scatterlist *sg = &s->sg_rx[i]; 1739 struct scatterlist *sg = &s->sg_rx[i];
1740 1740
1741 sg_init_table(sg, 1); 1741 sg_init_table(sg, 1);
1742 sg_set_page(sg, virt_to_page(buf[i]), s->buf_len_rx, 1742 sg_set_page(sg, virt_to_page(buf[i]), s->buf_len_rx,
1743 (int)buf[i] & ~PAGE_MASK); 1743 (int)buf[i] & ~PAGE_MASK);
1744 sg_dma_address(sg) = dma[i]; 1744 sg_dma_address(sg) = dma[i];
1745 } 1745 }
1746 1746
1747 INIT_WORK(&s->work_rx, work_fn_rx); 1747 INIT_WORK(&s->work_rx, work_fn_rx);
1748 setup_timer(&s->rx_timer, rx_timer_fn, (unsigned long)s); 1748 setup_timer(&s->rx_timer, rx_timer_fn, (unsigned long)s);
1749 1749
1750 sci_submit_rx(s); 1750 sci_submit_rx(s);
1751 } 1751 }
1752 } 1752 }
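
Channel allocation in sci_request_dma() above follows the filter-based dmaengine idiom: build a capability mask, then let the filter callback accept a candidate channel (here also binding the shdma slave data through chan->private). Reduced to its core, with an illustrative wrapper name (the real calls pass filter() together with &s->param_tx or &s->param_rx):

#include <linux/dmaengine.h>

static struct dma_chan *request_slave_chan(dma_filter_fn fn, void *fn_param)
{
        dma_cap_mask_t mask;

        dma_cap_zero(mask);
        dma_cap_set(DMA_SLAVE, mask);   /* only slave-capable channels qualify */

        /* fn() may veto each candidate channel or attach private slave data */
        return dma_request_channel(mask, fn, fn_param);
}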
1753 1753
1754 static void sci_free_dma(struct uart_port *port) 1754 static void sci_free_dma(struct uart_port *port)
1755 { 1755 {
1756 struct sci_port *s = to_sci_port(port); 1756 struct sci_port *s = to_sci_port(port);
1757 1757
1758 if (s->chan_tx) 1758 if (s->chan_tx)
1759 sci_tx_dma_release(s, false); 1759 sci_tx_dma_release(s, false);
1760 if (s->chan_rx) 1760 if (s->chan_rx)
1761 sci_rx_dma_release(s, false); 1761 sci_rx_dma_release(s, false);
1762 } 1762 }
1763 #else 1763 #else
1764 static inline void sci_request_dma(struct uart_port *port) 1764 static inline void sci_request_dma(struct uart_port *port)
1765 { 1765 {
1766 } 1766 }
1767 1767
1768 static inline void sci_free_dma(struct uart_port *port) 1768 static inline void sci_free_dma(struct uart_port *port)
1769 { 1769 {
1770 } 1770 }
1771 #endif 1771 #endif
1772 1772
1773 static int sci_startup(struct uart_port *port) 1773 static int sci_startup(struct uart_port *port)
1774 { 1774 {
1775 struct sci_port *s = to_sci_port(port); 1775 struct sci_port *s = to_sci_port(port);
1776 unsigned long flags; 1776 unsigned long flags;
1777 int ret; 1777 int ret;
1778 1778
1779 dev_dbg(port->dev, "%s(%d)\n", __func__, port->line); 1779 dev_dbg(port->dev, "%s(%d)\n", __func__, port->line);
1780 1780
1781 ret = sci_request_irq(s); 1781 ret = sci_request_irq(s);
1782 if (unlikely(ret < 0)) 1782 if (unlikely(ret < 0))
1783 return ret; 1783 return ret;
1784 1784
1785 sci_request_dma(port); 1785 sci_request_dma(port);
1786 1786
1787 spin_lock_irqsave(&port->lock, flags); 1787 spin_lock_irqsave(&port->lock, flags);
1788 sci_start_tx(port); 1788 sci_start_tx(port);
1789 sci_start_rx(port); 1789 sci_start_rx(port);
1790 spin_unlock_irqrestore(&port->lock, flags); 1790 spin_unlock_irqrestore(&port->lock, flags);
1791 1791
1792 return 0; 1792 return 0;
1793 } 1793 }
1794 1794
1795 static void sci_shutdown(struct uart_port *port) 1795 static void sci_shutdown(struct uart_port *port)
1796 { 1796 {
1797 struct sci_port *s = to_sci_port(port); 1797 struct sci_port *s = to_sci_port(port);
1798 unsigned long flags; 1798 unsigned long flags;
1799 1799
1800 dev_dbg(port->dev, "%s(%d)\n", __func__, port->line); 1800 dev_dbg(port->dev, "%s(%d)\n", __func__, port->line);
1801 1801
1802 spin_lock_irqsave(&port->lock, flags); 1802 spin_lock_irqsave(&port->lock, flags);
1803 sci_stop_rx(port); 1803 sci_stop_rx(port);
1804 sci_stop_tx(port); 1804 sci_stop_tx(port);
1805 spin_unlock_irqrestore(&port->lock, flags); 1805 spin_unlock_irqrestore(&port->lock, flags);
1806 1806
1807 sci_free_dma(port); 1807 sci_free_dma(port);
1808 sci_free_irq(s); 1808 sci_free_irq(s);
1809 } 1809 }
1810 1810
1811 static unsigned int sci_scbrr_calc(unsigned int algo_id, unsigned int bps, 1811 static unsigned int sci_scbrr_calc(unsigned int algo_id, unsigned int bps,
1812 unsigned long freq) 1812 unsigned long freq)
1813 { 1813 {
1814 switch (algo_id) { 1814 switch (algo_id) {
1815 case SCBRR_ALGO_1: 1815 case SCBRR_ALGO_1:
1816 return ((freq + 16 * bps) / (16 * bps) - 1); 1816 return ((freq + 16 * bps) / (16 * bps) - 1);
1817 case SCBRR_ALGO_2: 1817 case SCBRR_ALGO_2:
1818 return ((freq + 16 * bps) / (32 * bps) - 1); 1818 return ((freq + 16 * bps) / (32 * bps) - 1);
1819 case SCBRR_ALGO_3: 1819 case SCBRR_ALGO_3:
1820 return (((freq * 2) + 16 * bps) / (16 * bps) - 1); 1820 return (((freq * 2) + 16 * bps) / (16 * bps) - 1);
1821 case SCBRR_ALGO_4: 1821 case SCBRR_ALGO_4:
1822 return (((freq * 2) + 16 * bps) / (32 * bps) - 1); 1822 return (((freq * 2) + 16 * bps) / (32 * bps) - 1);
1823 case SCBRR_ALGO_5: 1823 case SCBRR_ALGO_5:
1824 return (((freq * 1000 / 32) / bps) - 1); 1824 return (((freq * 1000 / 32) / bps) - 1);
1825 } 1825 }
1826 1826
1827 /* Warn, but use a safe default */ 1827 /* Warn, but use a safe default */
1828 WARN_ON(1); 1828 WARN_ON(1);
1829 1829
1830 return ((freq + 16 * bps) / (32 * bps) - 1); 1830 return ((freq + 16 * bps) / (32 * bps) - 1);
1831 } 1831 }
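As a quick sanity check of the SCBRR_ALGO_2 branch above, here is a hedged worked example; the 48 MHz clock and 115200 baud figures are assumptions chosen for illustration, not values taken from any particular board.

/* Sketch only: SCBRR_ALGO_2 with an assumed 48 MHz clock, 115200 baud.   */
unsigned int brr = sci_scbrr_calc(SCBRR_ALGO_2, 115200, 48000000);
/* (48000000 + 16 * 115200) / (32 * 115200) - 1 = 13 - 1 = 12             */
/* actual rate: 48000000 / (32 * (12 + 1)) ~= 115385 baud (~0.16% error)  */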
1832 1832
1833 /* calculate sample rate, BRR, and clock select for HSCIF */ 1833 /* calculate sample rate, BRR, and clock select for HSCIF */
1834 static void sci_baud_calc_hscif(unsigned int bps, unsigned long freq, 1834 static void sci_baud_calc_hscif(unsigned int bps, unsigned long freq,
1835 int *brr, unsigned int *srr, 1835 int *brr, unsigned int *srr,
1836 unsigned int *cks) 1836 unsigned int *cks)
1837 { 1837 {
1838 int sr, c, br, err; 1838 int sr, c, br, err;
1839 int min_err = 1000; /* 100% */ 1839 int min_err = 1000; /* 100% */
1840 1840
1841 /* Find the combination of sample rate and clock select with the 1841 /* Find the combination of sample rate and clock select with the
1842 smallest deviation from the desired baud rate. */ 1842 smallest deviation from the desired baud rate. */
1843 for (sr = 8; sr <= 32; sr++) { 1843 for (sr = 8; sr <= 32; sr++) {
1844 for (c = 0; c <= 3; c++) { 1844 for (c = 0; c <= 3; c++) {
1845 /* integerized formulas from HSCIF documentation */ 1845 /* integerized formulas from HSCIF documentation */
1846 br = freq / (sr * (1 << (2 * c + 1)) * bps) - 1; 1846 br = freq / (sr * (1 << (2 * c + 1)) * bps) - 1;
1847 if (br < 0 || br > 255) 1847 if (br < 0 || br > 255)
1848 continue; 1848 continue;
1849 err = freq / ((br + 1) * bps * sr * 1849 err = freq / ((br + 1) * bps * sr *
1850 (1 << (2 * c + 1)) / 1000) - 1000; 1850 (1 << (2 * c + 1)) / 1000) - 1000;
1851 if (min_err > err) { 1851 if (min_err > err) {
1852 min_err = err; 1852 min_err = err;
1853 *brr = br; 1853 *brr = br;
1854 *srr = sr - 1; 1854 *srr = sr - 1;
1855 *cks = c; 1855 *cks = c;
1856 } 1856 }
1857 } 1857 }
1858 } 1858 }
1859 1859
1860 if (min_err == 1000) { 1860 if (min_err == 1000) {
1861 WARN_ON(1); 1861 WARN_ON(1);
1862 /* use defaults */ 1862 /* use defaults */
1863 *brr = 255; 1863 *brr = 255;
1864 *srr = 15; 1864 *srr = 15;
1865 *cks = 0; 1865 *cks = 0;
1866 } 1866 }
1867 } 1867 }
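A hedged worked example of the search above; the 44.2368 MHz clock is an assumed value picked because it divides 115200 evenly, not a documented HSCIF frequency.

/* Sketch only: assumed 44.2368 MHz clock, 115200 baud target.            */
int brr;
unsigned int srr, cks;

sci_baud_calc_hscif(115200, 44236800, &brr, &srr, &cks);
/* The very first combination tried (sr = 8, c = 0) divides evenly:       */
/* 44236800 / (8 * 2 * 115200) - 1 = 23, so brr = 23, srr = 7, cks = 0.   */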
1868 1868
1869 static void sci_reset(struct uart_port *port) 1869 static void sci_reset(struct uart_port *port)
1870 { 1870 {
1871 struct plat_sci_reg *reg; 1871 struct plat_sci_reg *reg;
1872 unsigned int status; 1872 unsigned int status;
1873 1873
1874 do { 1874 do {
1875 status = serial_port_in(port, SCxSR); 1875 status = serial_port_in(port, SCxSR);
1876 } while (!(status & SCxSR_TEND(port))); 1876 } while (!(status & SCxSR_TEND(port)));
1877 1877
1878 serial_port_out(port, SCSCR, 0x00); /* TE=0, RE=0, CKE1=0 */ 1878 serial_port_out(port, SCSCR, 0x00); /* TE=0, RE=0, CKE1=0 */
1879 1879
1880 reg = sci_getreg(port, SCFCR); 1880 reg = sci_getreg(port, SCFCR);
1881 if (reg->size) 1881 if (reg->size)
1882 serial_port_out(port, SCFCR, SCFCR_RFRST | SCFCR_TFRST); 1882 serial_port_out(port, SCFCR, SCFCR_RFRST | SCFCR_TFRST);
1883 } 1883 }
1884 1884
1885 static void sci_set_termios(struct uart_port *port, struct ktermios *termios, 1885 static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
1886 struct ktermios *old) 1886 struct ktermios *old)
1887 { 1887 {
1888 struct sci_port *s = to_sci_port(port); 1888 struct sci_port *s = to_sci_port(port);
1889 struct plat_sci_reg *reg; 1889 struct plat_sci_reg *reg;
1890 unsigned int baud, smr_val, max_baud, cks = 0; 1890 unsigned int baud, smr_val, max_baud, cks = 0;
1891 int t = -1; 1891 int t = -1;
1892 unsigned int srr = 15; 1892 unsigned int srr = 15;
1893 1893
1894 /* 1894 /*
1895 * earlyprintk comes here early on with port->uartclk set to zero. 1895 * earlyprintk comes here early on with port->uartclk set to zero.
1896 * the clock framework is not up and running at this point so here 1896 * the clock framework is not up and running at this point so here
1897 * we assume that 115200 is the maximum baud rate. please note that 1897 * we assume that 115200 is the maximum baud rate. please note that
1898 * the baud rate is not programmed during earlyprintk - it is assumed 1898 * the baud rate is not programmed during earlyprintk - it is assumed
1899 * that the previous boot loader has enabled required clocks and 1899 * that the previous boot loader has enabled required clocks and
1900 * set up the baud rate generator hardware for us already. 1900 * set up the baud rate generator hardware for us already.
1901 */ 1901 */
1902 max_baud = port->uartclk ? port->uartclk / 16 : 115200; 1902 max_baud = port->uartclk ? port->uartclk / 16 : 115200;
1903 1903
1904 baud = uart_get_baud_rate(port, termios, old, 0, max_baud); 1904 baud = uart_get_baud_rate(port, termios, old, 0, max_baud);
1905 if (likely(baud && port->uartclk)) { 1905 if (likely(baud && port->uartclk)) {
1906 if (s->cfg->scbrr_algo_id == SCBRR_ALGO_6) { 1906 if (s->cfg->scbrr_algo_id == SCBRR_ALGO_6) {
1907 sci_baud_calc_hscif(baud, port->uartclk, &t, &srr, 1907 sci_baud_calc_hscif(baud, port->uartclk, &t, &srr,
1908 &cks); 1908 &cks);
1909 } else { 1909 } else {
1910 t = sci_scbrr_calc(s->cfg->scbrr_algo_id, baud, 1910 t = sci_scbrr_calc(s->cfg->scbrr_algo_id, baud,
1911 port->uartclk); 1911 port->uartclk);
1912 for (cks = 0; t >= 256 && cks <= 3; cks++) 1912 for (cks = 0; t >= 256 && cks <= 3; cks++)
1913 t >>= 2; 1913 t >>= 2;
1914 } 1914 }
1915 } 1915 }
1916 1916
1917 sci_port_enable(s); 1917 sci_port_enable(s);
1918 1918
1919 sci_reset(port); 1919 sci_reset(port);
1920 1920
1921 smr_val = serial_port_in(port, SCSMR) & 3; 1921 smr_val = serial_port_in(port, SCSMR) & 3;
1922 1922
1923 if ((termios->c_cflag & CSIZE) == CS7) 1923 if ((termios->c_cflag & CSIZE) == CS7)
1924 smr_val |= 0x40; 1924 smr_val |= 0x40;
1925 if (termios->c_cflag & PARENB) 1925 if (termios->c_cflag & PARENB)
1926 smr_val |= 0x20; 1926 smr_val |= 0x20;
1927 if (termios->c_cflag & PARODD) 1927 if (termios->c_cflag & PARODD)
1928 smr_val |= 0x30; 1928 smr_val |= 0x30;
1929 if (termios->c_cflag & CSTOPB) 1929 if (termios->c_cflag & CSTOPB)
1930 smr_val |= 0x08; 1930 smr_val |= 0x08;
1931 1931
1932 uart_update_timeout(port, termios->c_cflag, baud); 1932 uart_update_timeout(port, termios->c_cflag, baud);
1933 1933
1934 dev_dbg(port->dev, "%s: SMR %x, cks %x, t %x, SCSCR %x\n", 1934 dev_dbg(port->dev, "%s: SMR %x, cks %x, t %x, SCSCR %x\n",
1935 __func__, smr_val, cks, t, s->cfg->scscr); 1935 __func__, smr_val, cks, t, s->cfg->scscr);
1936 1936
1937 if (t >= 0) { 1937 if (t >= 0) {
1938 serial_port_out(port, SCSMR, (smr_val & ~3) | cks); 1938 serial_port_out(port, SCSMR, (smr_val & ~3) | cks);
1939 serial_port_out(port, SCBRR, t); 1939 serial_port_out(port, SCBRR, t);
1940 reg = sci_getreg(port, HSSRR); 1940 reg = sci_getreg(port, HSSRR);
1941 if (reg->size) 1941 if (reg->size)
1942 serial_port_out(port, HSSRR, srr | HSCIF_SRE); 1942 serial_port_out(port, HSSRR, srr | HSCIF_SRE);
1943 udelay((1000000+(baud-1)) / baud); /* Wait one bit interval */ 1943 udelay((1000000+(baud-1)) / baud); /* Wait one bit interval */
1944 } else 1944 } else
1945 serial_port_out(port, SCSMR, smr_val); 1945 serial_port_out(port, SCSMR, smr_val);
1946 1946
1947 sci_init_pins(port, termios->c_cflag); 1947 sci_init_pins(port, termios->c_cflag);
1948 1948
1949 reg = sci_getreg(port, SCFCR); 1949 reg = sci_getreg(port, SCFCR);
1950 if (reg->size) { 1950 if (reg->size) {
1951 unsigned short ctrl = serial_port_in(port, SCFCR); 1951 unsigned short ctrl = serial_port_in(port, SCFCR);
1952 1952
1953 if (s->cfg->capabilities & SCIx_HAVE_RTSCTS) { 1953 if (s->cfg->capabilities & SCIx_HAVE_RTSCTS) {
1954 if (termios->c_cflag & CRTSCTS) 1954 if (termios->c_cflag & CRTSCTS)
1955 ctrl |= SCFCR_MCE; 1955 ctrl |= SCFCR_MCE;
1956 else 1956 else
1957 ctrl &= ~SCFCR_MCE; 1957 ctrl &= ~SCFCR_MCE;
1958 } 1958 }
1959 1959
1960 /* 1960 /*
1961 * As we've done a sci_reset() above, ensure we don't 1961 * As we've done a sci_reset() above, ensure we don't
1962 * interfere with the FIFOs while toggling MCE. As the 1962 * interfere with the FIFOs while toggling MCE. As the
1963 * reset values could still be set, simply mask them out. 1963 * reset values could still be set, simply mask them out.
1964 */ 1964 */
1965 ctrl &= ~(SCFCR_RFRST | SCFCR_TFRST); 1965 ctrl &= ~(SCFCR_RFRST | SCFCR_TFRST);
1966 1966
1967 serial_port_out(port, SCFCR, ctrl); 1967 serial_port_out(port, SCFCR, ctrl);
1968 } 1968 }
1969 1969
1970 serial_port_out(port, SCSCR, s->cfg->scscr); 1970 serial_port_out(port, SCSCR, s->cfg->scscr);
1971 1971
1972 #ifdef CONFIG_SERIAL_SH_SCI_DMA 1972 #ifdef CONFIG_SERIAL_SH_SCI_DMA
1973 /* 1973 /*
1974 * Calculate delay for 1.5 DMA buffers: see 1974 * Calculate delay for 1.5 DMA buffers: see
1975 * drivers/serial/serial_core.c::uart_update_timeout(). With 10 bits 1975 * drivers/serial/serial_core.c::uart_update_timeout(). With 10 bits
1976 * (CS8), 250Hz, 115200 baud and 64 bytes FIFO, the above function 1976 * (CS8), 250Hz, 115200 baud and 64 bytes FIFO, the above function
1977 * calculates 1 jiffy for the data plus 5 jiffies for the "slop(e)." 1977 * calculates 1 jiffy for the data plus 5 jiffies for the "slop(e)."
1978 * Then below we calculate 3 jiffies (12ms) for 1.5 DMA buffers (3 FIFO 1978 * Then below we calculate 3 jiffies (12ms) for 1.5 DMA buffers (3 FIFO
1979 * sizes), but it has been found experimentally that this is not 1979 * sizes), but it has been found experimentally that this is not
1980 * enough: the driver too often needlessly runs on a DMA timeout. 20ms 1980 * enough: the driver too often needlessly runs on a DMA timeout. 20ms
1981 * as a minimum seems to work reliably. 1981 * as a minimum seems to work reliably.
1982 */ 1982 */
1983 if (s->chan_rx) { 1983 if (s->chan_rx) {
1984 s->rx_timeout = (port->timeout - HZ / 50) * s->buf_len_rx * 3 / 1984 s->rx_timeout = (port->timeout - HZ / 50) * s->buf_len_rx * 3 /
1985 port->fifosize / 2; 1985 port->fifosize / 2;
1986 dev_dbg(port->dev, 1986 dev_dbg(port->dev,
1987 "DMA Rx t-out %ums, tty t-out %u jiffies\n", 1987 "DMA Rx t-out %ums, tty t-out %u jiffies\n",
1988 s->rx_timeout * 1000 / HZ, port->timeout); 1988 s->rx_timeout * 1000 / HZ, port->timeout);
1989 if (s->rx_timeout < msecs_to_jiffies(20)) 1989 if (s->rx_timeout < msecs_to_jiffies(20))
1990 s->rx_timeout = msecs_to_jiffies(20); 1990 s->rx_timeout = msecs_to_jiffies(20);
1991 } 1991 }
1992 #endif 1992 #endif
1993 1993
1994 if ((termios->c_cflag & CREAD) != 0) 1994 if ((termios->c_cflag & CREAD) != 0)
1995 sci_start_rx(port); 1995 sci_start_rx(port);
1996 1996
1997 sci_port_disable(s); 1997 sci_port_disable(s);
1998 } 1998 }
1999 1999
2000 static void sci_pm(struct uart_port *port, unsigned int state, 2000 static void sci_pm(struct uart_port *port, unsigned int state,
2001 unsigned int oldstate) 2001 unsigned int oldstate)
2002 { 2002 {
2003 struct sci_port *sci_port = to_sci_port(port); 2003 struct sci_port *sci_port = to_sci_port(port);
2004 2004
2005 switch (state) { 2005 switch (state) {
2006 case 3: 2006 case 3:
2007 sci_port_disable(sci_port); 2007 sci_port_disable(sci_port);
2008 break; 2008 break;
2009 default: 2009 default:
2010 sci_port_enable(sci_port); 2010 sci_port_enable(sci_port);
2011 break; 2011 break;
2012 } 2012 }
2013 } 2013 }
2014 2014
2015 static const char *sci_type(struct uart_port *port) 2015 static const char *sci_type(struct uart_port *port)
2016 { 2016 {
2017 switch (port->type) { 2017 switch (port->type) {
2018 case PORT_IRDA: 2018 case PORT_IRDA:
2019 return "irda"; 2019 return "irda";
2020 case PORT_SCI: 2020 case PORT_SCI:
2021 return "sci"; 2021 return "sci";
2022 case PORT_SCIF: 2022 case PORT_SCIF:
2023 return "scif"; 2023 return "scif";
2024 case PORT_SCIFA: 2024 case PORT_SCIFA:
2025 return "scifa"; 2025 return "scifa";
2026 case PORT_SCIFB: 2026 case PORT_SCIFB:
2027 return "scifb"; 2027 return "scifb";
2028 case PORT_HSCIF: 2028 case PORT_HSCIF:
2029 return "hscif"; 2029 return "hscif";
2030 } 2030 }
2031 2031
2032 return NULL; 2032 return NULL;
2033 } 2033 }
2034 2034
2035 static inline unsigned long sci_port_size(struct uart_port *port) 2035 static inline unsigned long sci_port_size(struct uart_port *port)
2036 { 2036 {
2037 /* 2037 /*
2038 * Pick an arbitrary size that encapsulates all of the base 2038 * Pick an arbitrary size that encapsulates all of the base
2039 * registers by default. This can be optimized later, or derived 2039 * registers by default. This can be optimized later, or derived
2040 * from platform resource data at such a time that ports begin to 2040 * from platform resource data at such a time that ports begin to
2041 * behave more erratically. 2041 * behave more erratically.
2042 */ 2042 */
2043 if (port->type == PORT_HSCIF) 2043 if (port->type == PORT_HSCIF)
2044 return 96; 2044 return 96;
2045 else 2045 else
2046 return 64; 2046 return 64;
2047 } 2047 }
2048 2048
2049 static int sci_remap_port(struct uart_port *port) 2049 static int sci_remap_port(struct uart_port *port)
2050 { 2050 {
2051 unsigned long size = sci_port_size(port); 2051 unsigned long size = sci_port_size(port);
2052 2052
2053 /* 2053 /*
2054 * Nothing to do if there's already an established membase. 2054 * Nothing to do if there's already an established membase.
2055 */ 2055 */
2056 if (port->membase) 2056 if (port->membase)
2057 return 0; 2057 return 0;
2058 2058
2059 if (port->flags & UPF_IOREMAP) { 2059 if (port->flags & UPF_IOREMAP) {
2060 port->membase = ioremap_nocache(port->mapbase, size); 2060 port->membase = ioremap_nocache(port->mapbase, size);
2061 if (unlikely(!port->membase)) { 2061 if (unlikely(!port->membase)) {
2062 dev_err(port->dev, "can't remap port#%d\n", port->line); 2062 dev_err(port->dev, "can't remap port#%d\n", port->line);
2063 return -ENXIO; 2063 return -ENXIO;
2064 } 2064 }
2065 } else { 2065 } else {
2066 /* 2066 /*
2067 * For the simple (and majority of) cases where we don't 2067 * For the simple (and majority of) cases where we don't
2068 * need to do any remapping, just cast the cookie 2068 * need to do any remapping, just cast the cookie
2069 * directly. 2069 * directly.
2070 */ 2070 */
2071 port->membase = (void __iomem *)port->mapbase; 2071 port->membase = (void __iomem *)port->mapbase;
2072 } 2072 }
2073 2073
2074 return 0; 2074 return 0;
2075 } 2075 }
2076 2076
2077 static void sci_release_port(struct uart_port *port) 2077 static void sci_release_port(struct uart_port *port)
2078 { 2078 {
2079 if (port->flags & UPF_IOREMAP) { 2079 if (port->flags & UPF_IOREMAP) {
2080 iounmap(port->membase); 2080 iounmap(port->membase);
2081 port->membase = NULL; 2081 port->membase = NULL;
2082 } 2082 }
2083 2083
2084 release_mem_region(port->mapbase, sci_port_size(port)); 2084 release_mem_region(port->mapbase, sci_port_size(port));
2085 } 2085 }
2086 2086
2087 static int sci_request_port(struct uart_port *port) 2087 static int sci_request_port(struct uart_port *port)
2088 { 2088 {
2089 unsigned long size = sci_port_size(port); 2089 unsigned long size = sci_port_size(port);
2090 struct resource *res; 2090 struct resource *res;
2091 int ret; 2091 int ret;
2092 2092
2093 res = request_mem_region(port->mapbase, size, dev_name(port->dev)); 2093 res = request_mem_region(port->mapbase, size, dev_name(port->dev));
2094 if (unlikely(res == NULL)) 2094 if (unlikely(res == NULL))
2095 return -EBUSY; 2095 return -EBUSY;
2096 2096
2097 ret = sci_remap_port(port); 2097 ret = sci_remap_port(port);
2098 if (unlikely(ret != 0)) { 2098 if (unlikely(ret != 0)) {
2099 release_resource(res); 2099 release_resource(res);
2100 return ret; 2100 return ret;
2101 } 2101 }
2102 2102
2103 return 0; 2103 return 0;
2104 } 2104 }
2105 2105
2106 static void sci_config_port(struct uart_port *port, int flags) 2106 static void sci_config_port(struct uart_port *port, int flags)
2107 { 2107 {
2108 if (flags & UART_CONFIG_TYPE) { 2108 if (flags & UART_CONFIG_TYPE) {
2109 struct sci_port *sport = to_sci_port(port); 2109 struct sci_port *sport = to_sci_port(port);
2110 2110
2111 port->type = sport->cfg->type; 2111 port->type = sport->cfg->type;
2112 sci_request_port(port); 2112 sci_request_port(port);
2113 } 2113 }
2114 } 2114 }
2115 2115
2116 static int sci_verify_port(struct uart_port *port, struct serial_struct *ser) 2116 static int sci_verify_port(struct uart_port *port, struct serial_struct *ser)
2117 { 2117 {
2118 struct sci_port *s = to_sci_port(port); 2118 struct sci_port *s = to_sci_port(port);
2119 2119
2120 if (ser->irq != s->cfg->irqs[SCIx_TXI_IRQ] || ser->irq > nr_irqs) 2120 if (ser->irq != s->cfg->irqs[SCIx_TXI_IRQ] || ser->irq > nr_irqs)
2121 return -EINVAL; 2121 return -EINVAL;
2122 if (ser->baud_base < 2400) 2122 if (ser->baud_base < 2400)
2123 /* No paper tape reader for Mitch.. */ 2123 /* No paper tape reader for Mitch.. */
2124 return -EINVAL; 2124 return -EINVAL;
2125 2125
2126 return 0; 2126 return 0;
2127 } 2127 }
2128 2128
2129 static struct uart_ops sci_uart_ops = { 2129 static struct uart_ops sci_uart_ops = {
2130 .tx_empty = sci_tx_empty, 2130 .tx_empty = sci_tx_empty,
2131 .set_mctrl = sci_set_mctrl, 2131 .set_mctrl = sci_set_mctrl,
2132 .get_mctrl = sci_get_mctrl, 2132 .get_mctrl = sci_get_mctrl,
2133 .start_tx = sci_start_tx, 2133 .start_tx = sci_start_tx,
2134 .stop_tx = sci_stop_tx, 2134 .stop_tx = sci_stop_tx,
2135 .stop_rx = sci_stop_rx, 2135 .stop_rx = sci_stop_rx,
2136 .enable_ms = sci_enable_ms, 2136 .enable_ms = sci_enable_ms,
2137 .break_ctl = sci_break_ctl, 2137 .break_ctl = sci_break_ctl,
2138 .startup = sci_startup, 2138 .startup = sci_startup,
2139 .shutdown = sci_shutdown, 2139 .shutdown = sci_shutdown,
2140 .set_termios = sci_set_termios, 2140 .set_termios = sci_set_termios,
2141 .pm = sci_pm, 2141 .pm = sci_pm,
2142 .type = sci_type, 2142 .type = sci_type,
2143 .release_port = sci_release_port, 2143 .release_port = sci_release_port,
2144 .request_port = sci_request_port, 2144 .request_port = sci_request_port,
2145 .config_port = sci_config_port, 2145 .config_port = sci_config_port,
2146 .verify_port = sci_verify_port, 2146 .verify_port = sci_verify_port,
2147 #ifdef CONFIG_CONSOLE_POLL 2147 #ifdef CONFIG_CONSOLE_POLL
2148 .poll_get_char = sci_poll_get_char, 2148 .poll_get_char = sci_poll_get_char,
2149 .poll_put_char = sci_poll_put_char, 2149 .poll_put_char = sci_poll_put_char,
2150 #endif 2150 #endif
2151 }; 2151 };
2152 2152
2153 static int sci_init_single(struct platform_device *dev, 2153 static int sci_init_single(struct platform_device *dev,
2154 struct sci_port *sci_port, 2154 struct sci_port *sci_port,
2155 unsigned int index, 2155 unsigned int index,
2156 struct plat_sci_port *p) 2156 struct plat_sci_port *p)
2157 { 2157 {
2158 struct uart_port *port = &sci_port->port; 2158 struct uart_port *port = &sci_port->port;
2159 int ret; 2159 int ret;
2160 2160
2161 sci_port->cfg = p; 2161 sci_port->cfg = p;
2162 2162
2163 port->ops = &sci_uart_ops; 2163 port->ops = &sci_uart_ops;
2164 port->iotype = UPIO_MEM; 2164 port->iotype = UPIO_MEM;
2165 port->line = index; 2165 port->line = index;
2166 2166
2167 switch (p->type) { 2167 switch (p->type) {
2168 case PORT_SCIFB: 2168 case PORT_SCIFB:
2169 port->fifosize = 256; 2169 port->fifosize = 256;
2170 break; 2170 break;
2171 case PORT_HSCIF: 2171 case PORT_HSCIF:
2172 port->fifosize = 128; 2172 port->fifosize = 128;
2173 break; 2173 break;
2174 case PORT_SCIFA: 2174 case PORT_SCIFA:
2175 port->fifosize = 64; 2175 port->fifosize = 64;
2176 break; 2176 break;
2177 case PORT_SCIF: 2177 case PORT_SCIF:
2178 port->fifosize = 16; 2178 port->fifosize = 16;
2179 break; 2179 break;
2180 default: 2180 default:
2181 port->fifosize = 1; 2181 port->fifosize = 1;
2182 break; 2182 break;
2183 } 2183 }
2184 2184
2185 if (p->regtype == SCIx_PROBE_REGTYPE) { 2185 if (p->regtype == SCIx_PROBE_REGTYPE) {
2186 ret = sci_probe_regmap(p); 2186 ret = sci_probe_regmap(p);
2187 if (unlikely(ret)) 2187 if (unlikely(ret))
2188 return ret; 2188 return ret;
2189 } 2189 }
2190 2190
2191 if (dev) { 2191 if (dev) {
2192 sci_port->iclk = clk_get(&dev->dev, "sci_ick"); 2192 sci_port->iclk = clk_get(&dev->dev, "sci_ick");
2193 if (IS_ERR(sci_port->iclk)) { 2193 if (IS_ERR(sci_port->iclk)) {
2194 sci_port->iclk = clk_get(&dev->dev, "peripheral_clk"); 2194 sci_port->iclk = clk_get(&dev->dev, "peripheral_clk");
2195 if (IS_ERR(sci_port->iclk)) { 2195 if (IS_ERR(sci_port->iclk)) {
2196 dev_err(&dev->dev, "can't get iclk\n"); 2196 dev_err(&dev->dev, "can't get iclk\n");
2197 return PTR_ERR(sci_port->iclk); 2197 return PTR_ERR(sci_port->iclk);
2198 } 2198 }
2199 } 2199 }
2200 2200
2201 /* 2201 /*
2202 * The function clock is optional, ignore it if we can't 2202 * The function clock is optional, ignore it if we can't
2203 * find it. 2203 * find it.
2204 */ 2204 */
2205 sci_port->fclk = clk_get(&dev->dev, "sci_fck"); 2205 sci_port->fclk = clk_get(&dev->dev, "sci_fck");
2206 if (IS_ERR(sci_port->fclk)) 2206 if (IS_ERR(sci_port->fclk))
2207 sci_port->fclk = NULL; 2207 sci_port->fclk = NULL;
2208 2208
2209 port->dev = &dev->dev; 2209 port->dev = &dev->dev;
2210 2210
2211 sci_init_gpios(sci_port); 2211 sci_init_gpios(sci_port);
2212 2212
2213 pm_runtime_enable(&dev->dev); 2213 pm_runtime_enable(&dev->dev);
2214 } 2214 }
2215 2215
2216 sci_port->break_timer.data = (unsigned long)sci_port; 2216 sci_port->break_timer.data = (unsigned long)sci_port;
2217 sci_port->break_timer.function = sci_break_timer; 2217 sci_port->break_timer.function = sci_break_timer;
2218 init_timer(&sci_port->break_timer); 2218 init_timer(&sci_port->break_timer);
2219 2219
2220 /* 2220 /*
2221 * Establish some sensible defaults for the error detection. 2221 * Establish some sensible defaults for the error detection.
2222 */ 2222 */
2223 if (!p->error_mask) 2223 if (!p->error_mask)
2224 p->error_mask = (p->type == PORT_SCI) ? 2224 p->error_mask = (p->type == PORT_SCI) ?
2225 SCI_DEFAULT_ERROR_MASK : SCIF_DEFAULT_ERROR_MASK; 2225 SCI_DEFAULT_ERROR_MASK : SCIF_DEFAULT_ERROR_MASK;
2226 2226
2227 /* 2227 /*
2228 * Establish sensible defaults for the overrun detection, unless 2228 * Establish sensible defaults for the overrun detection, unless
2229 * the part has explicitly disabled support for it. 2229 * the part has explicitly disabled support for it.
2230 */ 2230 */
2231 if (p->overrun_bit != SCIx_NOT_SUPPORTED) { 2231 if (p->overrun_bit != SCIx_NOT_SUPPORTED) {
2232 if (p->type == PORT_SCI) 2232 if (p->type == PORT_SCI)
2233 p->overrun_bit = 5; 2233 p->overrun_bit = 5;
2234 else if (p->scbrr_algo_id == SCBRR_ALGO_4) 2234 else if (p->scbrr_algo_id == SCBRR_ALGO_4)
2235 p->overrun_bit = 9; 2235 p->overrun_bit = 9;
2236 else 2236 else
2237 p->overrun_bit = 0; 2237 p->overrun_bit = 0;
2238 2238
2239 /* 2239 /*
2240 * Make the error mask inclusive of overrun detection, if 2240 * Make the error mask inclusive of overrun detection, if
2241 * supported. 2241 * supported.
2242 */ 2242 */
2243 p->error_mask |= (1 << p->overrun_bit); 2243 p->error_mask |= (1 << p->overrun_bit);
2244 } 2244 }
2245 2245
2246 port->mapbase = p->mapbase; 2246 port->mapbase = p->mapbase;
2247 port->type = p->type; 2247 port->type = p->type;
2248 port->flags = p->flags; 2248 port->flags = p->flags;
2249 port->regshift = p->regshift; 2249 port->regshift = p->regshift;
2250 2250
2251 /* 2251 /*
2252 * The UART port needs an IRQ value, so we peg this to the RX IRQ 2252 * The UART port needs an IRQ value, so we peg this to the RX IRQ
2253 * for the multi-IRQ ports, which is where we are primarily 2253 * for the multi-IRQ ports, which is where we are primarily
2254 * concerned with the shutdown path synchronization. 2254 * concerned with the shutdown path synchronization.
2255 * 2255 *
2256 * For the muxed case there's nothing more to do. 2256 * For the muxed case there's nothing more to do.
2257 */ 2257 */
2258 port->irq = p->irqs[SCIx_RXI_IRQ]; 2258 port->irq = p->irqs[SCIx_RXI_IRQ];
2259 port->irqflags = 0; 2259 port->irqflags = 0;
2260 2260
2261 port->serial_in = sci_serial_in; 2261 port->serial_in = sci_serial_in;
2262 port->serial_out = sci_serial_out; 2262 port->serial_out = sci_serial_out;
2263 2263
2264 if (p->dma_slave_tx > 0 && p->dma_slave_rx > 0) 2264 if (p->dma_slave_tx > 0 && p->dma_slave_rx > 0)
2265 dev_dbg(port->dev, "DMA tx %d, rx %d\n", 2265 dev_dbg(port->dev, "DMA tx %d, rx %d\n",
2266 p->dma_slave_tx, p->dma_slave_rx); 2266 p->dma_slave_tx, p->dma_slave_rx);
2267 2267
2268 return 0; 2268 return 0;
2269 } 2269 }
2270 2270
2271 static void sci_cleanup_single(struct sci_port *port) 2271 static void sci_cleanup_single(struct sci_port *port)
2272 { 2272 {
2273 sci_free_gpios(port); 2273 sci_free_gpios(port);
2274 2274
2275 clk_put(port->iclk); 2275 clk_put(port->iclk);
2276 clk_put(port->fclk); 2276 clk_put(port->fclk);
2277 2277
2278 pm_runtime_disable(port->port.dev); 2278 pm_runtime_disable(port->port.dev);
2279 } 2279 }
2280 2280
2281 #ifdef CONFIG_SERIAL_SH_SCI_CONSOLE 2281 #ifdef CONFIG_SERIAL_SH_SCI_CONSOLE
2282 static void serial_console_putchar(struct uart_port *port, int ch) 2282 static void serial_console_putchar(struct uart_port *port, int ch)
2283 { 2283 {
2284 sci_poll_put_char(port, ch); 2284 sci_poll_put_char(port, ch);
2285 } 2285 }
2286 2286
2287 /* 2287 /*
2288 * Print a string to the serial port trying not to disturb 2288 * Print a string to the serial port trying not to disturb
2289 * any possible real use of the port... 2289 * any possible real use of the port...
2290 */ 2290 */
2291 static void serial_console_write(struct console *co, const char *s, 2291 static void serial_console_write(struct console *co, const char *s,
2292 unsigned count) 2292 unsigned count)
2293 { 2293 {
2294 struct sci_port *sci_port = &sci_ports[co->index]; 2294 struct sci_port *sci_port = &sci_ports[co->index];
2295 struct uart_port *port = &sci_port->port; 2295 struct uart_port *port = &sci_port->port;
2296 unsigned short bits, ctrl; 2296 unsigned short bits, ctrl;
2297 unsigned long flags; 2297 unsigned long flags;
2298 int locked = 1; 2298 int locked = 1;
2299 2299
2300 local_irq_save(flags); 2300 local_irq_save(flags);
2301 if (port->sysrq) 2301 if (port->sysrq)
2302 locked = 0; 2302 locked = 0;
2303 else if (oops_in_progress) 2303 else if (oops_in_progress)
2304 locked = spin_trylock(&port->lock); 2304 locked = spin_trylock(&port->lock);
2305 else 2305 else
2306 spin_lock(&port->lock); 2306 spin_lock(&port->lock);
2307 2307
2308 /* first save the SCSCR then disable the interrupts */ 2308 /* first save the SCSCR then disable the interrupts */
2309 ctrl = serial_port_in(port, SCSCR); 2309 ctrl = serial_port_in(port, SCSCR);
2310 serial_port_out(port, SCSCR, sci_port->cfg->scscr); 2310 serial_port_out(port, SCSCR, sci_port->cfg->scscr);
2311 2311
2312 uart_console_write(port, s, count, serial_console_putchar); 2312 uart_console_write(port, s, count, serial_console_putchar);
2313 2313
2314 /* wait until fifo is empty and last bit has been transmitted */ 2314 /* wait until fifo is empty and last bit has been transmitted */
2315 bits = SCxSR_TDxE(port) | SCxSR_TEND(port); 2315 bits = SCxSR_TDxE(port) | SCxSR_TEND(port);
2316 while ((serial_port_in(port, SCxSR) & bits) != bits) 2316 while ((serial_port_in(port, SCxSR) & bits) != bits)
2317 cpu_relax(); 2317 cpu_relax();
2318 2318
2319 /* restore the SCSCR */ 2319 /* restore the SCSCR */
2320 serial_port_out(port, SCSCR, ctrl); 2320 serial_port_out(port, SCSCR, ctrl);
2321 2321
2322 if (locked) 2322 if (locked)
2323 spin_unlock(&port->lock); 2323 spin_unlock(&port->lock);
2324 local_irq_restore(flags); 2324 local_irq_restore(flags);
2325 } 2325 }
2326 2326
2327 static int serial_console_setup(struct console *co, char *options) 2327 static int serial_console_setup(struct console *co, char *options)
2328 { 2328 {
2329 struct sci_port *sci_port; 2329 struct sci_port *sci_port;
2330 struct uart_port *port; 2330 struct uart_port *port;
2331 int baud = 115200; 2331 int baud = 115200;
2332 int bits = 8; 2332 int bits = 8;
2333 int parity = 'n'; 2333 int parity = 'n';
2334 int flow = 'n'; 2334 int flow = 'n';
2335 int ret; 2335 int ret;
2336 2336
2337 /* 2337 /*
2338 * Refuse to handle any bogus ports. 2338 * Refuse to handle any bogus ports.
2339 */ 2339 */
2340 if (co->index < 0 || co->index >= SCI_NPORTS) 2340 if (co->index < 0 || co->index >= SCI_NPORTS)
2341 return -ENODEV; 2341 return -ENODEV;
2342 2342
2343 sci_port = &sci_ports[co->index]; 2343 sci_port = &sci_ports[co->index];
2344 port = &sci_port->port; 2344 port = &sci_port->port;
2345 2345
2346 /* 2346 /*
2347 * Refuse to handle uninitialized ports. 2347 * Refuse to handle uninitialized ports.
2348 */ 2348 */
2349 if (!port->ops) 2349 if (!port->ops)
2350 return -ENODEV; 2350 return -ENODEV;
2351 2351
2352 ret = sci_remap_port(port); 2352 ret = sci_remap_port(port);
2353 if (unlikely(ret != 0)) 2353 if (unlikely(ret != 0))
2354 return ret; 2354 return ret;
2355 2355
2356 if (options) 2356 if (options)
2357 uart_parse_options(options, &baud, &parity, &bits, &flow); 2357 uart_parse_options(options, &baud, &parity, &bits, &flow);
2358 2358
2359 return uart_set_options(port, co, baud, parity, bits, flow); 2359 return uart_set_options(port, co, baud, parity, bits, flow);
2360 } 2360 }
2361 2361
2362 static struct console serial_console = { 2362 static struct console serial_console = {
2363 .name = "ttySC", 2363 .name = "ttySC",
2364 .device = uart_console_device, 2364 .device = uart_console_device,
2365 .write = serial_console_write, 2365 .write = serial_console_write,
2366 .setup = serial_console_setup, 2366 .setup = serial_console_setup,
2367 .flags = CON_PRINTBUFFER, 2367 .flags = CON_PRINTBUFFER,
2368 .index = -1, 2368 .index = -1,
2369 .data = &sci_uart_driver, 2369 .data = &sci_uart_driver,
2370 }; 2370 };
2371 2371
2372 static struct console early_serial_console = { 2372 static struct console early_serial_console = {
2373 .name = "early_ttySC", 2373 .name = "early_ttySC",
2374 .write = serial_console_write, 2374 .write = serial_console_write,
2375 .flags = CON_PRINTBUFFER, 2375 .flags = CON_PRINTBUFFER,
2376 .index = -1, 2376 .index = -1,
2377 }; 2377 };
2378 2378
2379 static char early_serial_buf[32]; 2379 static char early_serial_buf[32];
2380 2380
2381 static int sci_probe_earlyprintk(struct platform_device *pdev) 2381 static int sci_probe_earlyprintk(struct platform_device *pdev)
2382 { 2382 {
2383 struct plat_sci_port *cfg = dev_get_platdata(&pdev->dev); 2383 struct plat_sci_port *cfg = dev_get_platdata(&pdev->dev);
2384 2384
2385 if (early_serial_console.data) 2385 if (early_serial_console.data)
2386 return -EEXIST; 2386 return -EEXIST;
2387 2387
2388 early_serial_console.index = pdev->id; 2388 early_serial_console.index = pdev->id;
2389 2389
2390 sci_init_single(NULL, &sci_ports[pdev->id], pdev->id, cfg); 2390 sci_init_single(NULL, &sci_ports[pdev->id], pdev->id, cfg);
2391 2391
2392 serial_console_setup(&early_serial_console, early_serial_buf); 2392 serial_console_setup(&early_serial_console, early_serial_buf);
2393 2393
2394 if (!strstr(early_serial_buf, "keep")) 2394 if (!strstr(early_serial_buf, "keep"))
2395 early_serial_console.flags |= CON_BOOT; 2395 early_serial_console.flags |= CON_BOOT;
2396 2396
2397 register_console(&early_serial_console); 2397 register_console(&early_serial_console);
2398 return 0; 2398 return 0;
2399 } 2399 }
2400 2400
2401 #define SCI_CONSOLE (&serial_console) 2401 #define SCI_CONSOLE (&serial_console)
2402 2402
2403 #else 2403 #else
2404 static inline int sci_probe_earlyprintk(struct platform_device *pdev) 2404 static inline int sci_probe_earlyprintk(struct platform_device *pdev)
2405 { 2405 {
2406 return -EINVAL; 2406 return -EINVAL;
2407 } 2407 }
2408 2408
2409 #define SCI_CONSOLE NULL 2409 #define SCI_CONSOLE NULL
2410 2410
2411 #endif /* CONFIG_SERIAL_SH_SCI_CONSOLE */ 2411 #endif /* CONFIG_SERIAL_SH_SCI_CONSOLE */
2412 2412
2413 static char banner[] __initdata = 2413 static char banner[] __initdata =
2414 KERN_INFO "SuperH (H)SCI(F) driver initialized\n"; 2414 KERN_INFO "SuperH (H)SCI(F) driver initialized\n";
2415 2415
2416 static struct uart_driver sci_uart_driver = { 2416 static struct uart_driver sci_uart_driver = {
2417 .owner = THIS_MODULE, 2417 .owner = THIS_MODULE,
2418 .driver_name = "sci", 2418 .driver_name = "sci",
2419 .dev_name = "ttySC", 2419 .dev_name = "ttySC",
2420 .major = SCI_MAJOR, 2420 .major = SCI_MAJOR,
2421 .minor = SCI_MINOR_START, 2421 .minor = SCI_MINOR_START,
2422 .nr = SCI_NPORTS, 2422 .nr = SCI_NPORTS,
2423 .cons = SCI_CONSOLE, 2423 .cons = SCI_CONSOLE,
2424 }; 2424 };
2425 2425
2426 static int sci_remove(struct platform_device *dev) 2426 static int sci_remove(struct platform_device *dev)
2427 { 2427 {
2428 struct sci_port *port = platform_get_drvdata(dev); 2428 struct sci_port *port = platform_get_drvdata(dev);
2429 2429
2430 cpufreq_unregister_notifier(&port->freq_transition, 2430 cpufreq_unregister_notifier(&port->freq_transition,
2431 CPUFREQ_TRANSITION_NOTIFIER); 2431 CPUFREQ_TRANSITION_NOTIFIER);
2432 2432
2433 uart_remove_one_port(&sci_uart_driver, &port->port); 2433 uart_remove_one_port(&sci_uart_driver, &port->port);
2434 2434
2435 sci_cleanup_single(port); 2435 sci_cleanup_single(port);
2436 2436
2437 return 0; 2437 return 0;
2438 } 2438 }
2439 2439
2440 static int sci_probe_single(struct platform_device *dev, 2440 static int sci_probe_single(struct platform_device *dev,
2441 unsigned int index, 2441 unsigned int index,
2442 struct plat_sci_port *p, 2442 struct plat_sci_port *p,
2443 struct sci_port *sciport) 2443 struct sci_port *sciport)
2444 { 2444 {
2445 int ret; 2445 int ret;
2446 2446
2447 /* Sanity check */ 2447 /* Sanity check */
2448 if (unlikely(index >= SCI_NPORTS)) { 2448 if (unlikely(index >= SCI_NPORTS)) {
2449 dev_notice(&dev->dev, "Attempting to register port " 2449 dev_notice(&dev->dev, "Attempting to register port "
2450 "%d when only %d are available.\n", 2450 "%d when only %d are available.\n",
2451 index+1, SCI_NPORTS); 2451 index+1, SCI_NPORTS);
2452 dev_notice(&dev->dev, "Consider bumping " 2452 dev_notice(&dev->dev, "Consider bumping "
2453 "CONFIG_SERIAL_SH_SCI_NR_UARTS!\n"); 2453 "CONFIG_SERIAL_SH_SCI_NR_UARTS!\n");
2454 return -EINVAL; 2454 return -EINVAL;
2455 } 2455 }
2456 2456
2457 ret = sci_init_single(dev, sciport, index, p); 2457 ret = sci_init_single(dev, sciport, index, p);
2458 if (ret) 2458 if (ret)
2459 return ret; 2459 return ret;
2460 2460
2461 ret = uart_add_one_port(&sci_uart_driver, &sciport->port); 2461 ret = uart_add_one_port(&sci_uart_driver, &sciport->port);
2462 if (ret) { 2462 if (ret) {
2463 sci_cleanup_single(sciport); 2463 sci_cleanup_single(sciport);
2464 return ret; 2464 return ret;
2465 } 2465 }
2466 2466
2467 return 0; 2467 return 0;
2468 } 2468 }
2469 2469
2470 static int sci_probe(struct platform_device *dev) 2470 static int sci_probe(struct platform_device *dev)
2471 { 2471 {
2472 struct plat_sci_port *p = dev_get_platdata(&dev->dev); 2472 struct plat_sci_port *p = dev_get_platdata(&dev->dev);
2473 struct sci_port *sp = &sci_ports[dev->id]; 2473 struct sci_port *sp = &sci_ports[dev->id];
2474 int ret; 2474 int ret;
2475 2475
2476 /* 2476 /*
2477 * If we've come here via earlyprintk initialization, head off to 2477 * If we've come here via earlyprintk initialization, head off to
2478 * the special early probe. We don't have sufficient device state 2478 * the special early probe. We don't have sufficient device state
2479 * to make it beyond this yet. 2479 * to make it beyond this yet.
2480 */ 2480 */
2481 if (is_early_platform_device(dev)) 2481 if (is_early_platform_device(dev))
2482 return sci_probe_earlyprintk(dev); 2482 return sci_probe_earlyprintk(dev);
2483 2483
2484 platform_set_drvdata(dev, sp); 2484 platform_set_drvdata(dev, sp);
2485 2485
2486 ret = sci_probe_single(dev, dev->id, p, sp); 2486 ret = sci_probe_single(dev, dev->id, p, sp);
2487 if (ret) 2487 if (ret)
2488 return ret; 2488 return ret;
2489 2489
2490 sp->freq_transition.notifier_call = sci_notifier; 2490 sp->freq_transition.notifier_call = sci_notifier;
2491 2491
2492 ret = cpufreq_register_notifier(&sp->freq_transition, 2492 ret = cpufreq_register_notifier(&sp->freq_transition,
2493 CPUFREQ_TRANSITION_NOTIFIER); 2493 CPUFREQ_TRANSITION_NOTIFIER);
2494 if (unlikely(ret < 0)) { 2494 if (unlikely(ret < 0)) {
2495 sci_cleanup_single(sp); 2495 sci_cleanup_single(sp);
2496 return ret; 2496 return ret;
2497 } 2497 }
2498 2498
2499 #ifdef CONFIG_SH_STANDARD_BIOS 2499 #ifdef CONFIG_SH_STANDARD_BIOS
2500 sh_bios_gdb_detach(); 2500 sh_bios_gdb_detach();
2501 #endif 2501 #endif
2502 2502
2503 return 0; 2503 return 0;
2504 } 2504 }
2505 2505
2506 static int sci_suspend(struct device *dev) 2506 static int sci_suspend(struct device *dev)
2507 { 2507 {
2508 struct sci_port *sport = dev_get_drvdata(dev); 2508 struct sci_port *sport = dev_get_drvdata(dev);
2509 2509
2510 if (sport) 2510 if (sport)
2511 uart_suspend_port(&sci_uart_driver, &sport->port); 2511 uart_suspend_port(&sci_uart_driver, &sport->port);
2512 2512
2513 return 0; 2513 return 0;
2514 } 2514 }
2515 2515
2516 static int sci_resume(struct device *dev) 2516 static int sci_resume(struct device *dev)
2517 { 2517 {
2518 struct sci_port *sport = dev_get_drvdata(dev); 2518 struct sci_port *sport = dev_get_drvdata(dev);
2519 2519
2520 if (sport) 2520 if (sport)
2521 uart_resume_port(&sci_uart_driver, &sport->port); 2521 uart_resume_port(&sci_uart_driver, &sport->port);
2522 2522
2523 return 0; 2523 return 0;
2524 } 2524 }
2525 2525
2526 static const struct dev_pm_ops sci_dev_pm_ops = { 2526 static const struct dev_pm_ops sci_dev_pm_ops = {
2527 .suspend = sci_suspend, 2527 .suspend = sci_suspend,
2528 .resume = sci_resume, 2528 .resume = sci_resume,
2529 }; 2529 };
2530 2530
2531 static struct platform_driver sci_driver = { 2531 static struct platform_driver sci_driver = {
2532 .probe = sci_probe, 2532 .probe = sci_probe,
2533 .remove = sci_remove, 2533 .remove = sci_remove,
2534 .driver = { 2534 .driver = {
2535 .name = "sh-sci", 2535 .name = "sh-sci",
2536 .owner = THIS_MODULE, 2536 .owner = THIS_MODULE,
2537 .pm = &sci_dev_pm_ops, 2537 .pm = &sci_dev_pm_ops,
2538 }, 2538 },
2539 }; 2539 };
2540 2540
2541 static int __init sci_init(void) 2541 static int __init sci_init(void)
2542 { 2542 {
2543 int ret; 2543 int ret;
2544 2544
2545 printk(banner); 2545 printk(banner);
2546 2546
2547 ret = uart_register_driver(&sci_uart_driver); 2547 ret = uart_register_driver(&sci_uart_driver);
2548 if (likely(ret == 0)) { 2548 if (likely(ret == 0)) {
2549 ret = platform_driver_register(&sci_driver); 2549 ret = platform_driver_register(&sci_driver);
2550 if (unlikely(ret)) 2550 if (unlikely(ret))
2551 uart_unregister_driver(&sci_uart_driver); 2551 uart_unregister_driver(&sci_uart_driver);
2552 } 2552 }
2553 2553
2554 return ret; 2554 return ret;
2555 } 2555 }
2556 2556
2557 static void __exit sci_exit(void) 2557 static void __exit sci_exit(void)
2558 { 2558 {
2559 platform_driver_unregister(&sci_driver); 2559 platform_driver_unregister(&sci_driver);
2560 uart_unregister_driver(&sci_uart_driver); 2560 uart_unregister_driver(&sci_uart_driver);
2561 } 2561 }
2562 2562
2563 #ifdef CONFIG_SERIAL_SH_SCI_CONSOLE 2563 #ifdef CONFIG_SERIAL_SH_SCI_CONSOLE
2564 early_platform_init_buffer("earlyprintk", &sci_driver, 2564 early_platform_init_buffer("earlyprintk", &sci_driver,
2565 early_serial_buf, ARRAY_SIZE(early_serial_buf)); 2565 early_serial_buf, ARRAY_SIZE(early_serial_buf));
2566 #endif 2566 #endif
2567 module_init(sci_init); 2567 module_init(sci_init);
2568 module_exit(sci_exit); 2568 module_exit(sci_exit);
2569 2569
2570 MODULE_LICENSE("GPL"); 2570 MODULE_LICENSE("GPL");
2571 MODULE_ALIAS("platform:sh-sci"); 2571 MODULE_ALIAS("platform:sh-sci");
2572 MODULE_AUTHOR("Paul Mundt"); 2572 MODULE_AUTHOR("Paul Mundt");
2573 MODULE_DESCRIPTION("SuperH (H)SCI(F) serial driver"); 2573 MODULE_DESCRIPTION("SuperH (H)SCI(F) serial driver");
2574 2574
include/linux/dmaengine.h
1 /* 1 /*
2 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved. 2 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
3 * 3 *
4 * This program is free software; you can redistribute it and/or modify it 4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License as published by the Free 5 * under the terms of the GNU General Public License as published by the Free
6 * Software Foundation; either version 2 of the License, or (at your option) 6 * Software Foundation; either version 2 of the License, or (at your option)
7 * any later version. 7 * any later version.
8 * 8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT 9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details. 12 * more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License along with 14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc., 59 15 * this program; if not, write to the Free Software Foundation, Inc., 59
16 * Temple Place - Suite 330, Boston, MA 02111-1307, USA. 16 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17 * 17 *
18 * The full GNU General Public License is included in this distribution in the 18 * The full GNU General Public License is included in this distribution in the
19 * file called COPYING. 19 * file called COPYING.
20 */ 20 */
21 #ifndef LINUX_DMAENGINE_H 21 #ifndef LINUX_DMAENGINE_H
22 #define LINUX_DMAENGINE_H 22 #define LINUX_DMAENGINE_H
23 23
24 #include <linux/device.h> 24 #include <linux/device.h>
25 #include <linux/uio.h> 25 #include <linux/uio.h>
26 #include <linux/bug.h> 26 #include <linux/bug.h>
27 #include <linux/scatterlist.h> 27 #include <linux/scatterlist.h>
28 #include <linux/bitmap.h> 28 #include <linux/bitmap.h>
29 #include <linux/types.h> 29 #include <linux/types.h>
30 #include <asm/page.h> 30 #include <asm/page.h>
31 31
32 /** 32 /**
33 * typedef dma_cookie_t - an opaque DMA cookie 33 * typedef dma_cookie_t - an opaque DMA cookie
34 * 34 *
35 * if dma_cookie_t is >0 it's a DMA request cookie, <0 it's an error code 35 * if dma_cookie_t is >0 it's a DMA request cookie, <0 it's an error code
36 */ 36 */
37 typedef s32 dma_cookie_t; 37 typedef s32 dma_cookie_t;
38 #define DMA_MIN_COOKIE 1 38 #define DMA_MIN_COOKIE 1
39 #define DMA_MAX_COOKIE INT_MAX 39 #define DMA_MAX_COOKIE INT_MAX
40 40
41 static inline int dma_submit_error(dma_cookie_t cookie) 41 static inline int dma_submit_error(dma_cookie_t cookie)
42 { 42 {
43 return cookie < 0 ? cookie : 0; 43 return cookie < 0 ? cookie : 0;
44 } 44 }
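A minimal usage sketch of the helper above; "desc", "chan" and "my_callback" are placeholders for a descriptor and channel obtained elsewhere, not names defined in this header.

/* Sketch: submit a prepared descriptor and check the returned cookie.  */
dma_cookie_t cookie;

desc->callback = my_callback;          /* assumed client callback        */
cookie = dmaengine_submit(desc);       /* hand the descriptor to the driver */
if (dma_submit_error(cookie))
	pr_err("descriptor submission failed\n");
else
	dma_async_issue_pending(chan); /* flush the pending queue        */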
45 45
46 /** 46 /**
47 * enum dma_status - DMA transaction status 47 * enum dma_status - DMA transaction status
48 * @DMA_SUCCESS: transaction completed successfully 48 * @DMA_COMPLETE: transaction completed
49 * @DMA_IN_PROGRESS: transaction not yet processed 49 * @DMA_IN_PROGRESS: transaction not yet processed
50 * @DMA_PAUSED: transaction is paused 50 * @DMA_PAUSED: transaction is paused
51 * @DMA_ERROR: transaction failed 51 * @DMA_ERROR: transaction failed
52 */ 52 */
53 enum dma_status { 53 enum dma_status {
54 DMA_SUCCESS, 54 DMA_COMPLETE,
55 DMA_IN_PROGRESS, 55 DMA_IN_PROGRESS,
56 DMA_PAUSED, 56 DMA_PAUSED,
57 DMA_ERROR, 57 DMA_ERROR,
58 }; 58 };
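Since this is the status value the rest of this merge renames from DMA_SUCCESS, a short polling sketch may help; "chan" and "cookie" are assumed to come from an earlier prep/submit sequence.

/* Sketch: poll a transaction for the renamed completion status. */
enum dma_status status = dmaengine_tx_status(chan, cookie, NULL);

if (status == DMA_COMPLETE) {
	/* descriptor finished; destination data is valid */
} else if (status == DMA_ERROR) {
	/* transaction failed; fall back or retry */
}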
59 59
60 /** 60 /**
61 * enum dma_transaction_type - DMA transaction types/indexes 61 * enum dma_transaction_type - DMA transaction types/indexes
62 * 62 *
63 * Note: The DMA_ASYNC_TX capability is not to be set by drivers. It is 63 * Note: The DMA_ASYNC_TX capability is not to be set by drivers. It is
64 * automatically set as dma devices are registered. 64 * automatically set as dma devices are registered.
65 */ 65 */
66 enum dma_transaction_type { 66 enum dma_transaction_type {
67 DMA_MEMCPY, 67 DMA_MEMCPY,
68 DMA_XOR, 68 DMA_XOR,
69 DMA_PQ, 69 DMA_PQ,
70 DMA_XOR_VAL, 70 DMA_XOR_VAL,
71 DMA_PQ_VAL, 71 DMA_PQ_VAL,
72 DMA_INTERRUPT, 72 DMA_INTERRUPT,
73 DMA_SG, 73 DMA_SG,
74 DMA_PRIVATE, 74 DMA_PRIVATE,
75 DMA_ASYNC_TX, 75 DMA_ASYNC_TX,
76 DMA_SLAVE, 76 DMA_SLAVE,
77 DMA_CYCLIC, 77 DMA_CYCLIC,
78 DMA_INTERLEAVE, 78 DMA_INTERLEAVE,
79 /* last transaction type for creation of the capabilities mask */ 79 /* last transaction type for creation of the capabilities mask */
80 DMA_TX_TYPE_END, 80 DMA_TX_TYPE_END,
81 }; 81 };
82 82
83 /** 83 /**
84 * enum dma_transfer_direction - dma transfer mode and direction indicator 84 * enum dma_transfer_direction - dma transfer mode and direction indicator
85 * @DMA_MEM_TO_MEM: Async/Memcpy mode 85 * @DMA_MEM_TO_MEM: Async/Memcpy mode
86 * @DMA_MEM_TO_DEV: Slave mode & From Memory to Device 86 * @DMA_MEM_TO_DEV: Slave mode & From Memory to Device
87 * @DMA_DEV_TO_MEM: Slave mode & From Device to Memory 87 * @DMA_DEV_TO_MEM: Slave mode & From Device to Memory
88 * @DMA_DEV_TO_DEV: Slave mode & From Device to Device 88 * @DMA_DEV_TO_DEV: Slave mode & From Device to Device
89 */ 89 */
90 enum dma_transfer_direction { 90 enum dma_transfer_direction {
91 DMA_MEM_TO_MEM, 91 DMA_MEM_TO_MEM,
92 DMA_MEM_TO_DEV, 92 DMA_MEM_TO_DEV,
93 DMA_DEV_TO_MEM, 93 DMA_DEV_TO_MEM,
94 DMA_DEV_TO_DEV, 94 DMA_DEV_TO_DEV,
95 DMA_TRANS_NONE, 95 DMA_TRANS_NONE,
96 }; 96 };
97 97
98 /** 98 /**
99 * Interleaved Transfer Request 99 * Interleaved Transfer Request
100 * ---------------------------- 100 * ----------------------------
101 * A chunk is a collection of contiguous bytes to be transferred. 101 * A chunk is a collection of contiguous bytes to be transferred.
102 * The gap (in bytes) between two chunks is called the inter-chunk gap (ICG). 102 * The gap (in bytes) between two chunks is called the inter-chunk gap (ICG).
103 * ICGs may or may not change between chunks. 103 * ICGs may or may not change between chunks.
104 * A FRAME is the smallest series of contiguous {chunk,icg} pairs, 104 * A FRAME is the smallest series of contiguous {chunk,icg} pairs,
105 * that when repeated an integral number of times, specifies the transfer. 105 * that when repeated an integral number of times, specifies the transfer.
106 * A transfer template is a specification of a Frame, the number of times 106 * A transfer template is a specification of a Frame, the number of times
107 * it is to be repeated and other per-transfer attributes. 107 * it is to be repeated and other per-transfer attributes.
108 * 108 *
109 * Practically, a client driver would have ready a template for each 109 * Practically, a client driver would have ready a template for each
110 * type of transfer it is going to need during its lifetime and 110 * type of transfer it is going to need during its lifetime and
111 * set only 'src_start' and 'dst_start' before submitting the requests. 111 * set only 'src_start' and 'dst_start' before submitting the requests.
112 * 112 *
113 * 113 *
114 * | Frame-1 | Frame-2 | ~ | Frame-'numf' | 114 * | Frame-1 | Frame-2 | ~ | Frame-'numf' |
115 * |====....==.===...=...|====....==.===...=...| ~ |====....==.===...=...| 115 * |====....==.===...=...|====....==.===...=...| ~ |====....==.===...=...|
116 * 116 *
117 * == Chunk size 117 * == Chunk size
118 * ... ICG 118 * ... ICG
119 */ 119 */
120 120
121 /** 121 /**
122 * struct data_chunk - Element of scatter-gather list that makes a frame. 122 * struct data_chunk - Element of scatter-gather list that makes a frame.
123 * @size: Number of bytes to read from source. 123 * @size: Number of bytes to read from source.
124 * size_dst := fn(op, size_src), so doesn't mean much for destination. 124 * size_dst := fn(op, size_src), so doesn't mean much for destination.
125 * @icg: Number of bytes to jump after last src/dst address of this 125 * @icg: Number of bytes to jump after last src/dst address of this
126 * chunk and before first src/dst address for next chunk. 126 * chunk and before first src/dst address for next chunk.
127 * Ignored for dst(assumed 0), if dst_inc is true and dst_sgl is false. 127 * Ignored for dst(assumed 0), if dst_inc is true and dst_sgl is false.
128 * Ignored for src(assumed 0), if src_inc is true and src_sgl is false. 128 * Ignored for src(assumed 0), if src_inc is true and src_sgl is false.
129 */ 129 */
130 struct data_chunk { 130 struct data_chunk {
131 size_t size; 131 size_t size;
132 size_t icg; 132 size_t icg;
133 }; 133 };
134 134
135 /** 135 /**
136 * struct dma_interleaved_template - Template to convey DMAC the transfer pattern 136 * struct dma_interleaved_template - Template to convey DMAC the transfer pattern
137 * and attributes. 137 * and attributes.
138 * @src_start: Bus address of source for the first chunk. 138 * @src_start: Bus address of source for the first chunk.
139 * @dst_start: Bus address of destination for the first chunk. 139 * @dst_start: Bus address of destination for the first chunk.
140 * @dir: Specifies the type of Source and Destination. 140 * @dir: Specifies the type of Source and Destination.
141 * @src_inc: If the source address increments after reading from it. 141 * @src_inc: If the source address increments after reading from it.
142 * @dst_inc: If the destination address increments after writing to it. 142 * @dst_inc: If the destination address increments after writing to it.
143 * @src_sgl: If the 'icg' of sgl[] applies to Source (scattered read). 143 * @src_sgl: If the 'icg' of sgl[] applies to Source (scattered read).
144 * Otherwise, source is read contiguously (icg ignored). 144 * Otherwise, source is read contiguously (icg ignored).
145 * Ignored if src_inc is false. 145 * Ignored if src_inc is false.
146 * @dst_sgl: If the 'icg' of sgl[] applies to Destination (scattered write). 146 * @dst_sgl: If the 'icg' of sgl[] applies to Destination (scattered write).
147 * Otherwise, destination is filled contiguously (icg ignored). 147 * Otherwise, destination is filled contiguously (icg ignored).
148 * Ignored if dst_inc is false. 148 * Ignored if dst_inc is false.
149 * @numf: Number of frames in this template. 149 * @numf: Number of frames in this template.
150 * @frame_size: Number of chunks in a frame i.e, size of sgl[]. 150 * @frame_size: Number of chunks in a frame i.e, size of sgl[].
151 * @sgl: Array of {chunk,icg} pairs that make up a frame. 151 * @sgl: Array of {chunk,icg} pairs that make up a frame.
152 */ 152 */
153 struct dma_interleaved_template { 153 struct dma_interleaved_template {
154 dma_addr_t src_start; 154 dma_addr_t src_start;
155 dma_addr_t dst_start; 155 dma_addr_t dst_start;
156 enum dma_transfer_direction dir; 156 enum dma_transfer_direction dir;
157 bool src_inc; 157 bool src_inc;
158 bool dst_inc; 158 bool dst_inc;
159 bool src_sgl; 159 bool src_sgl;
160 bool dst_sgl; 160 bool dst_sgl;
161 size_t numf; 161 size_t numf;
162 size_t frame_size; 162 size_t frame_size;
163 struct data_chunk sgl[0]; 163 struct data_chunk sgl[0];
164 }; 164 };
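A hedged sketch of filling the template for a single-chunk frame; every numeric value and the "chan", "src_dma" and "dst_dma" names are assumptions for illustration only.

/* Sketch: 16 frames of one 64-byte chunk each, 192-byte gap on the source side. */
struct dma_async_tx_descriptor *desc;
struct dma_interleaved_template *xt;

xt = kzalloc(sizeof(*xt) + sizeof(struct data_chunk), GFP_KERNEL);
if (xt) {
	xt->src_start = src_dma;	/* assumed bus addresses           */
	xt->dst_start = dst_dma;
	xt->dir = DMA_MEM_TO_DEV;
	xt->src_inc = true;
	xt->dst_inc = false;		/* device FIFO: address stays put  */
	xt->src_sgl = true;		/* icg applies to the source side  */
	xt->dst_sgl = false;
	xt->numf = 16;
	xt->frame_size = 1;		/* one chunk per frame, i.e. sgl[] length */
	xt->sgl[0].size = 64;
	xt->sgl[0].icg = 192;
	desc = dmaengine_prep_interleaved_dma(chan, xt, DMA_PREP_INTERRUPT);
}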
165 165
166 /** 166 /**
167 * enum dma_ctrl_flags - DMA flags to augment operation preparation, 167 * enum dma_ctrl_flags - DMA flags to augment operation preparation,
168 * control completion, and communicate status. 168 * control completion, and communicate status.
169 * @DMA_PREP_INTERRUPT - trigger an interrupt (callback) upon completion of 169 * @DMA_PREP_INTERRUPT - trigger an interrupt (callback) upon completion of
170 * this transaction 170 * this transaction
171 * @DMA_CTRL_ACK - if clear, the descriptor cannot be reused until the client 171 * @DMA_CTRL_ACK - if clear, the descriptor cannot be reused until the client
172 * acknowledges receipt, i.e. has had a chance to establish any dependency 172 * acknowledges receipt, i.e. has had a chance to establish any dependency
173 * chains 173 * chains
174 * @DMA_COMPL_SKIP_SRC_UNMAP - set to disable dma-unmapping the source buffer(s) 174 * @DMA_COMPL_SKIP_SRC_UNMAP - set to disable dma-unmapping the source buffer(s)
175 * @DMA_COMPL_SKIP_DEST_UNMAP - set to disable dma-unmapping the destination(s) 175 * @DMA_COMPL_SKIP_DEST_UNMAP - set to disable dma-unmapping the destination(s)
176 * @DMA_COMPL_SRC_UNMAP_SINGLE - set to do the source dma-unmapping as single 176 * @DMA_COMPL_SRC_UNMAP_SINGLE - set to do the source dma-unmapping as single
177 * (if not set, do the source dma-unmapping as page) 177 * (if not set, do the source dma-unmapping as page)
178 * @DMA_COMPL_DEST_UNMAP_SINGLE - set to do the destination dma-unmapping as single 178 * @DMA_COMPL_DEST_UNMAP_SINGLE - set to do the destination dma-unmapping as single
179 * (if not set, do the destination dma-unmapping as page) 179 * (if not set, do the destination dma-unmapping as page)
180 * @DMA_PREP_PQ_DISABLE_P - prevent generation of P while generating Q 180 * @DMA_PREP_PQ_DISABLE_P - prevent generation of P while generating Q
181 * @DMA_PREP_PQ_DISABLE_Q - prevent generation of Q while generating P 181 * @DMA_PREP_PQ_DISABLE_Q - prevent generation of Q while generating P
182 * @DMA_PREP_CONTINUE - indicate to a driver that it is reusing buffers as 182 * @DMA_PREP_CONTINUE - indicate to a driver that it is reusing buffers as
183 * sources that were the result of a previous operation, in the case of a PQ 183 * sources that were the result of a previous operation, in the case of a PQ
184 * operation it continues the calculation with new sources 184 * operation it continues the calculation with new sources
185 * @DMA_PREP_FENCE - tell the driver that subsequent operations depend 185 * @DMA_PREP_FENCE - tell the driver that subsequent operations depend
186 * on the result of this operation 186 * on the result of this operation
187 */ 187 */
188 enum dma_ctrl_flags { 188 enum dma_ctrl_flags {
189 DMA_PREP_INTERRUPT = (1 << 0), 189 DMA_PREP_INTERRUPT = (1 << 0),
190 DMA_CTRL_ACK = (1 << 1), 190 DMA_CTRL_ACK = (1 << 1),
191 DMA_COMPL_SKIP_SRC_UNMAP = (1 << 2), 191 DMA_COMPL_SKIP_SRC_UNMAP = (1 << 2),
192 DMA_COMPL_SKIP_DEST_UNMAP = (1 << 3), 192 DMA_COMPL_SKIP_DEST_UNMAP = (1 << 3),
193 DMA_COMPL_SRC_UNMAP_SINGLE = (1 << 4), 193 DMA_COMPL_SRC_UNMAP_SINGLE = (1 << 4),
194 DMA_COMPL_DEST_UNMAP_SINGLE = (1 << 5), 194 DMA_COMPL_DEST_UNMAP_SINGLE = (1 << 5),
195 DMA_PREP_PQ_DISABLE_P = (1 << 6), 195 DMA_PREP_PQ_DISABLE_P = (1 << 6),
196 DMA_PREP_PQ_DISABLE_Q = (1 << 7), 196 DMA_PREP_PQ_DISABLE_Q = (1 << 7),
197 DMA_PREP_CONTINUE = (1 << 8), 197 DMA_PREP_CONTINUE = (1 << 8),
198 DMA_PREP_FENCE = (1 << 9), 198 DMA_PREP_FENCE = (1 << 9),
199 }; 199 };
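For illustration only, a hedged sketch of combining the two most common flags when preparing a memcpy descriptor through the raw device operation; "chan", "dst_dma", "src_dma" and "len" are assumed placeholders.

/* Sketch: ask for a completion interrupt and pre-ack the descriptor. */
unsigned long flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
struct dma_async_tx_descriptor *desc;

desc = chan->device->device_prep_dma_memcpy(chan, dst_dma, src_dma,
					    len, flags);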
200 200
201 /** 201 /**
202 * enum dma_ctrl_cmd - DMA operations that can optionally be exercised 202 * enum dma_ctrl_cmd - DMA operations that can optionally be exercised
203 * on a running channel. 203 * on a running channel.
204 * @DMA_TERMINATE_ALL: terminate all ongoing transfers 204 * @DMA_TERMINATE_ALL: terminate all ongoing transfers
205 * @DMA_PAUSE: pause ongoing transfers 205 * @DMA_PAUSE: pause ongoing transfers
206 * @DMA_RESUME: resume paused transfer 206 * @DMA_RESUME: resume paused transfer
207 * @DMA_SLAVE_CONFIG: this command is only implemented by DMA controllers 207 * @DMA_SLAVE_CONFIG: this command is only implemented by DMA controllers
208 * that need to reconfigure the slave channels at runtime (as opposed to 208 * that need to reconfigure the slave channels at runtime (as opposed to
209 * passing configuration data in statically from the platform). An additional 209 * passing configuration data in statically from the platform). An additional
210 * argument of struct dma_slave_config must be passed in with this 210 * argument of struct dma_slave_config must be passed in with this
211 * command. 211 * command.
212 * @FSLDMA_EXTERNAL_START: this command will put the Freescale DMA controller 212 * @FSLDMA_EXTERNAL_START: this command will put the Freescale DMA controller
213 * into external start mode. 213 * into external start mode.
214 */ 214 */
215 enum dma_ctrl_cmd { 215 enum dma_ctrl_cmd {
216 DMA_TERMINATE_ALL, 216 DMA_TERMINATE_ALL,
217 DMA_PAUSE, 217 DMA_PAUSE,
218 DMA_RESUME, 218 DMA_RESUME,
219 DMA_SLAVE_CONFIG, 219 DMA_SLAVE_CONFIG,
220 FSLDMA_EXTERNAL_START, 220 FSLDMA_EXTERNAL_START,
221 }; 221 };
222 222
223 /** 223 /**
224 * enum sum_check_bits - bit position of pq_check_flags 224 * enum sum_check_bits - bit position of pq_check_flags
225 */ 225 */
226 enum sum_check_bits { 226 enum sum_check_bits {
227 SUM_CHECK_P = 0, 227 SUM_CHECK_P = 0,
228 SUM_CHECK_Q = 1, 228 SUM_CHECK_Q = 1,
229 }; 229 };
230 230
231 /** 231 /**
232 * enum pq_check_flags - result of async_{xor,pq}_zero_sum operations 232 * enum pq_check_flags - result of async_{xor,pq}_zero_sum operations
233 * @SUM_CHECK_P_RESULT - 1 if xor zero sum error, 0 otherwise 233 * @SUM_CHECK_P_RESULT - 1 if xor zero sum error, 0 otherwise
234 * @SUM_CHECK_Q_RESULT - 1 if reed-solomon zero sum error, 0 otherwise 234 * @SUM_CHECK_Q_RESULT - 1 if reed-solomon zero sum error, 0 otherwise
235 */ 235 */
236 enum sum_check_flags { 236 enum sum_check_flags {
237 SUM_CHECK_P_RESULT = (1 << SUM_CHECK_P), 237 SUM_CHECK_P_RESULT = (1 << SUM_CHECK_P),
238 SUM_CHECK_Q_RESULT = (1 << SUM_CHECK_Q), 238 SUM_CHECK_Q_RESULT = (1 << SUM_CHECK_Q),
239 }; 239 };
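For illustration only (not part of this header), a caller that passed a result pointer to device_prep_dma_xor_val() or device_prep_dma_pq_val() might interpret the completed value like this:

/* Hypothetical check, run after the validate descriptor has completed. */
static bool example_stripe_is_clean(enum sum_check_flags res)
{
        /* any set bit means the corresponding syndrome did not verify */
        return !(res & (SUM_CHECK_P_RESULT | SUM_CHECK_Q_RESULT));
}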
240 240
241 241
242 /** 242 /**
243 * dma_cap_mask_t - capabilities bitmap modeled after cpumask_t. 243 * dma_cap_mask_t - capabilities bitmap modeled after cpumask_t.
244 * See linux/cpumask.h 244 * See linux/cpumask.h
245 */ 245 */
246 typedef struct { DECLARE_BITMAP(bits, DMA_TX_TYPE_END); } dma_cap_mask_t; 246 typedef struct { DECLARE_BITMAP(bits, DMA_TX_TYPE_END); } dma_cap_mask_t;
247 247
248 /** 248 /**
249 * struct dma_chan_percpu - the per-CPU part of struct dma_chan 249 * struct dma_chan_percpu - the per-CPU part of struct dma_chan
250 * @memcpy_count: transaction counter 250 * @memcpy_count: transaction counter
251 * @bytes_transferred: byte counter 251 * @bytes_transferred: byte counter
252 */ 252 */
253 253
254 struct dma_chan_percpu { 254 struct dma_chan_percpu {
255 /* stats */ 255 /* stats */
256 unsigned long memcpy_count; 256 unsigned long memcpy_count;
257 unsigned long bytes_transferred; 257 unsigned long bytes_transferred;
258 }; 258 };
259 259
260 /** 260 /**
261 * struct dma_chan - devices supply DMA channels, clients use them 261 * struct dma_chan - devices supply DMA channels, clients use them
262 * @device: ptr to the dma device who supplies this channel, always !%NULL 262 * @device: ptr to the dma device who supplies this channel, always !%NULL
263 * @cookie: last cookie value returned to client 263 * @cookie: last cookie value returned to client
264 * @completed_cookie: last completed cookie for this channel 264 * @completed_cookie: last completed cookie for this channel
265 * @chan_id: channel ID for sysfs 265 * @chan_id: channel ID for sysfs
266 * @dev: class device for sysfs 266 * @dev: class device for sysfs
267 * @device_node: used to add this to the device chan list 267 * @device_node: used to add this to the device chan list
268 * @local: per-cpu pointer to a struct dma_chan_percpu 268 * @local: per-cpu pointer to a struct dma_chan_percpu
269 * @client_count: how many clients are using this channel 269 * @client_count: how many clients are using this channel
270 * @table_count: number of appearances in the mem-to-mem allocation table 270 * @table_count: number of appearances in the mem-to-mem allocation table
271 * @private: private data for certain client-channel associations 271 * @private: private data for certain client-channel associations
272 */ 272 */
273 struct dma_chan { 273 struct dma_chan {
274 struct dma_device *device; 274 struct dma_device *device;
275 dma_cookie_t cookie; 275 dma_cookie_t cookie;
276 dma_cookie_t completed_cookie; 276 dma_cookie_t completed_cookie;
277 277
278 /* sysfs */ 278 /* sysfs */
279 int chan_id; 279 int chan_id;
280 struct dma_chan_dev *dev; 280 struct dma_chan_dev *dev;
281 281
282 struct list_head device_node; 282 struct list_head device_node;
283 struct dma_chan_percpu __percpu *local; 283 struct dma_chan_percpu __percpu *local;
284 int client_count; 284 int client_count;
285 int table_count; 285 int table_count;
286 void *private; 286 void *private;
287 }; 287 };
288 288
289 /** 289 /**
290 * struct dma_chan_dev - relate sysfs device node to backing channel device 290 * struct dma_chan_dev - relate sysfs device node to backing channel device
291 * @chan - driver channel device 291 * @chan - driver channel device
292 * @device - sysfs device 292 * @device - sysfs device
293 * @dev_id - parent dma_device dev_id 293 * @dev_id - parent dma_device dev_id
294 * @idr_ref - reference count to gate release of dma_device dev_id 294 * @idr_ref - reference count to gate release of dma_device dev_id
295 */ 295 */
296 struct dma_chan_dev { 296 struct dma_chan_dev {
297 struct dma_chan *chan; 297 struct dma_chan *chan;
298 struct device device; 298 struct device device;
299 int dev_id; 299 int dev_id;
300 atomic_t *idr_ref; 300 atomic_t *idr_ref;
301 }; 301 };
302 302
303 /** 303 /**
304 * enum dma_slave_buswidth - defines bus width of the DMA slave 304 * enum dma_slave_buswidth - defines bus width of the DMA slave
305 * device, source or target buses 305 * device, source or target buses
306 */ 306 */
307 enum dma_slave_buswidth { 307 enum dma_slave_buswidth {
308 DMA_SLAVE_BUSWIDTH_UNDEFINED = 0, 308 DMA_SLAVE_BUSWIDTH_UNDEFINED = 0,
309 DMA_SLAVE_BUSWIDTH_1_BYTE = 1, 309 DMA_SLAVE_BUSWIDTH_1_BYTE = 1,
310 DMA_SLAVE_BUSWIDTH_2_BYTES = 2, 310 DMA_SLAVE_BUSWIDTH_2_BYTES = 2,
311 DMA_SLAVE_BUSWIDTH_4_BYTES = 4, 311 DMA_SLAVE_BUSWIDTH_4_BYTES = 4,
312 DMA_SLAVE_BUSWIDTH_8_BYTES = 8, 312 DMA_SLAVE_BUSWIDTH_8_BYTES = 8,
313 }; 313 };
314 314
315 /** 315 /**
316 * struct dma_slave_config - dma slave channel runtime config 316 * struct dma_slave_config - dma slave channel runtime config
317 * @direction: whether the data shall go in or out on this slave 317 * @direction: whether the data shall go in or out on this slave
318 * channel, right now. DMA_MEM_TO_DEV and DMA_DEV_TO_MEM are 318 * channel, right now. DMA_MEM_TO_DEV and DMA_DEV_TO_MEM are
319 * the legal values; a bidirectional setting is not acceptable since we 319 * the legal values; a bidirectional setting is not acceptable since we
320 * need to differentiate source and target addresses. 320 * need to differentiate source and target addresses.
321 * @src_addr: this is the physical address where DMA slave data 321 * @src_addr: this is the physical address where DMA slave data
322 * should be read (RX), if the source is memory this argument is 322 * should be read (RX), if the source is memory this argument is
323 * ignored. 323 * ignored.
324 * @dst_addr: this is the physical address where DMA slave data 324 * @dst_addr: this is the physical address where DMA slave data
325 * should be written (TX), if the destination is memory this argument 325 * should be written (TX), if the destination is memory this argument
326 * is ignored. 326 * is ignored.
327 * @src_addr_width: this is the width in bytes of the source (RX) 327 * @src_addr_width: this is the width in bytes of the source (RX)
328 * register where DMA data shall be read. If the source 328 * register where DMA data shall be read. If the source
329 * is memory this may be ignored depending on architecture. 329 * is memory this may be ignored depending on architecture.
330 * Legal values: 1, 2, 4, 8. 330 * Legal values: 1, 2, 4, 8.
331 * @dst_addr_width: same as src_addr_width but for destination 331 * @dst_addr_width: same as src_addr_width but for destination
332 * target (TX) mutatis mutandis. 332 * target (TX) mutatis mutandis.
333 * @src_maxburst: the maximum number of words (note: words, as in 333 * @src_maxburst: the maximum number of words (note: words, as in
334 * units of the src_addr_width member, not bytes) that can be sent 334 * units of the src_addr_width member, not bytes) that can be sent
335 * in one burst to the device. Typically something like half the 335 * in one burst to the device. Typically something like half the
336 * FIFO depth on I/O peripherals so you don't overflow it. This 336 * FIFO depth on I/O peripherals so you don't overflow it. This
337 * may or may not be applicable on memory sources. 337 * may or may not be applicable on memory sources.
338 * @dst_maxburst: same as src_maxburst but for destination target 338 * @dst_maxburst: same as src_maxburst but for destination target
339 * mutatis mutandis. 339 * mutatis mutandis.
340 * @device_fc: Flow Controller Settings. Only valid for slave channels. Fill 340 * @device_fc: Flow Controller Settings. Only valid for slave channels. Fill
341 * with 'true' if peripheral should be flow controller. Direction will be 341 * with 'true' if peripheral should be flow controller. Direction will be
342 * selected at Runtime. 342 * selected at Runtime.
343 * @slave_id: Slave requester id. Only valid for slave channels. The dma 343 * @slave_id: Slave requester id. Only valid for slave channels. The dma
344 * slave peripheral will have a unique id as dma requester which needs to 344 * slave peripheral will have a unique id as dma requester which needs to
345 * be passed in as the slave config. 345 * be passed in as the slave config.
346 * 346 *
347 * This struct is passed in as configuration data to a DMA engine 347 * This struct is passed in as configuration data to a DMA engine
348 * in order to set up a certain channel for DMA transport at runtime. 348 * in order to set up a certain channel for DMA transport at runtime.
349 * The DMA device/engine has to provide support for an additional 349 * The DMA device/engine has to provide support for an additional
350 * command in the channel config interface, DMA_SLAVE_CONFIG 350 * command in the channel config interface, DMA_SLAVE_CONFIG
351 * and this struct will then be passed in as an argument to the 351 * and this struct will then be passed in as an argument to the
352 * DMA engine device_control() function. 352 * DMA engine device_control() function.
353 * 353 *
354 * The rationale for adding configuration information to this struct 354 * The rationale for adding configuration information to this struct
355 * is as follows: if it is likely that most DMA slave controllers in 355 * is as follows: if it is likely that most DMA slave controllers in
356 * the world will support the configuration option, then make it 356 * the world will support the configuration option, then make it
357 * generic. If not: if it is fixed so that it can be sent in statically 357 * generic. If not: if it is fixed so that it can be sent in statically
358 * from the platform data, then prefer to do that. Else, if it is neither 358 * from the platform data, then prefer to do that. Else, if it is neither
359 * fixed at runtime, nor generic enough (such as bus mastership on 359 * fixed at runtime, nor generic enough (such as bus mastership on
360 * some CPU family and whatnot) then create a custom slave config 360 * some CPU family and whatnot) then create a custom slave config
361 * struct and pass that, then make this config a member of that 361 * struct and pass that, then make this config a member of that
362 * struct, if applicable. 362 * struct, if applicable.
363 */ 363 */
364 struct dma_slave_config { 364 struct dma_slave_config {
365 enum dma_transfer_direction direction; 365 enum dma_transfer_direction direction;
366 dma_addr_t src_addr; 366 dma_addr_t src_addr;
367 dma_addr_t dst_addr; 367 dma_addr_t dst_addr;
368 enum dma_slave_buswidth src_addr_width; 368 enum dma_slave_buswidth src_addr_width;
369 enum dma_slave_buswidth dst_addr_width; 369 enum dma_slave_buswidth dst_addr_width;
370 u32 src_maxburst; 370 u32 src_maxburst;
371 u32 dst_maxburst; 371 u32 dst_maxburst;
372 bool device_fc; 372 bool device_fc;
373 unsigned int slave_id; 373 unsigned int slave_id;
374 }; 374 };
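As a hedged usage sketch (the peripheral, FIFO address and burst size are invented), a client would fill this struct and hand it to dmaengine_slave_config(), defined further down in this header:

#include <linux/dmaengine.h>

static int example_setup_tx(struct dma_chan *chan, dma_addr_t fifo_addr)
{
        struct dma_slave_config cfg = {
                .direction      = DMA_MEM_TO_DEV,
                .dst_addr       = fifo_addr,            /* peripheral TX FIFO */
                .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
                .dst_maxburst   = 8,    /* e.g. half of a 16-word FIFO */
                .device_fc      = false,
        };

        return dmaengine_slave_config(chan, &cfg);
}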
375 375
376 /* struct dma_slave_caps - expose capabilities of a slave channel only 376 /* struct dma_slave_caps - expose capabilities of a slave channel only
377 * 377 *
378 * @src_addr_widths: bit mask of src addr widths the channel supports 378 * @src_addr_widths: bit mask of src addr widths the channel supports
379 * @dstn_addr_widths: bit mask of dstn addr widths the channel supports 379 * @dstn_addr_widths: bit mask of dstn addr widths the channel supports
380 * @directions: bit mask of slave directions the channel supports; 380 * @directions: bit mask of slave directions the channel supports;
381 * since enum dma_transfer_direction is not defined as bits for each 381 * since enum dma_transfer_direction is not defined as bits for each
382 * type of direction, the dma controller should fill (1 << <TYPE>) and the 382 * type of direction, the dma controller should fill (1 << <TYPE>) and the
383 * same encoding should be used when checking the mask 383 * same encoding should be used when checking the mask
384 * @cmd_pause: true, if pause and thereby resume is supported 384 * @cmd_pause: true, if pause and thereby resume is supported
385 * @cmd_terminate: true, if terminate cmd is supported 385 * @cmd_terminate: true, if terminate cmd is supported
386 */ 386 */
387 struct dma_slave_caps { 387 struct dma_slave_caps {
388 u32 src_addr_widths; 388 u32 src_addr_widths;
389 u32 dstn_addr_widths; 389 u32 dstn_addr_widths;
390 u32 directions; 390 u32 directions;
391 bool cmd_pause; 391 bool cmd_pause;
392 bool cmd_terminate; 392 bool cmd_terminate;
393 }; 393 };
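A short, hypothetical sketch of probing these capabilities through dma_get_slave_caps() (declared below) before relying on dmaengine_pause():

static bool example_channel_can_pause(struct dma_chan *chan)
{
        struct dma_slave_caps caps;

        if (dma_get_slave_caps(chan, &caps) < 0)
                return false;

        return caps.cmd_pause;
}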
394 394
395 static inline const char *dma_chan_name(struct dma_chan *chan) 395 static inline const char *dma_chan_name(struct dma_chan *chan)
396 { 396 {
397 return dev_name(&chan->dev->device); 397 return dev_name(&chan->dev->device);
398 } 398 }
399 399
400 void dma_chan_cleanup(struct kref *kref); 400 void dma_chan_cleanup(struct kref *kref);
401 401
402 /** 402 /**
403 * typedef dma_filter_fn - callback filter for dma_request_channel 403 * typedef dma_filter_fn - callback filter for dma_request_channel
404 * @chan: channel to be reviewed 404 * @chan: channel to be reviewed
405 * @filter_param: opaque parameter passed through dma_request_channel 405 * @filter_param: opaque parameter passed through dma_request_channel
406 * 406 *
407 * When this optional parameter is specified in a call to dma_request_channel a 407 * When this optional parameter is specified in a call to dma_request_channel a
408 * suitable channel is passed to this routine for further dispositioning before 408 * suitable channel is passed to this routine for further dispositioning before
409 * being returned, where 'suitable' indicates a non-busy channel that 409 * being returned, where 'suitable' indicates a non-busy channel that
410 * satisfies the given capability mask. It returns 'true' to indicate that the 410 * satisfies the given capability mask. It returns 'true' to indicate that the
411 * channel is suitable. 411 * channel is suitable.
412 */ 412 */
413 typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param); 413 typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param);
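An illustrative (hypothetical) filter and request pair; the filter is invoked synchronously from __dma_request_channel(), so passing a pointer to a local variable is safe here:

#include <linux/dmaengine.h>

/* Accept only the channel whose sysfs chan_id matches the caller's request. */
static bool example_filter(struct dma_chan *chan, void *filter_param)
{
        int wanted = *(int *)filter_param;

        return chan->chan_id == wanted;
}

static struct dma_chan *example_request(int wanted)
{
        dma_cap_mask_t mask;

        dma_cap_zero(mask);
        dma_cap_set(DMA_SLAVE, mask);

        return dma_request_channel(mask, example_filter, &wanted);
}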
414 414
415 typedef void (*dma_async_tx_callback)(void *dma_async_param); 415 typedef void (*dma_async_tx_callback)(void *dma_async_param);
416 /** 416 /**
417 * struct dma_async_tx_descriptor - async transaction descriptor 417 * struct dma_async_tx_descriptor - async transaction descriptor
418 * ---dma generic offload fields--- 418 * ---dma generic offload fields---
419 * @cookie: tracking cookie for this transaction, set to -EBUSY if 419 * @cookie: tracking cookie for this transaction, set to -EBUSY if
420 * this tx is sitting on a dependency list 420 * this tx is sitting on a dependency list
421 * @flags: flags to augment operation preparation, control completion, and 421 * @flags: flags to augment operation preparation, control completion, and
422 * communicate status 422 * communicate status
423 * @phys: physical address of the descriptor 423 * @phys: physical address of the descriptor
424 * @chan: target channel for this operation 424 * @chan: target channel for this operation
425 * @tx_submit: set the prepared descriptor(s) to be executed by the engine 425 * @tx_submit: set the prepared descriptor(s) to be executed by the engine
426 * @callback: routine to call after this operation is complete 426 * @callback: routine to call after this operation is complete
427 * @callback_param: general parameter to pass to the callback routine 427 * @callback_param: general parameter to pass to the callback routine
428 * ---async_tx api specific fields--- 428 * ---async_tx api specific fields---
429 * @next: at completion submit this descriptor 429 * @next: at completion submit this descriptor
430 * @parent: pointer to the next level up in the dependency chain 430 * @parent: pointer to the next level up in the dependency chain
431 * @lock: protect the parent and next pointers 431 * @lock: protect the parent and next pointers
432 */ 432 */
433 struct dma_async_tx_descriptor { 433 struct dma_async_tx_descriptor {
434 dma_cookie_t cookie; 434 dma_cookie_t cookie;
435 enum dma_ctrl_flags flags; /* not a 'long' to pack with cookie */ 435 enum dma_ctrl_flags flags; /* not a 'long' to pack with cookie */
436 dma_addr_t phys; 436 dma_addr_t phys;
437 struct dma_chan *chan; 437 struct dma_chan *chan;
438 dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx); 438 dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx);
439 dma_async_tx_callback callback; 439 dma_async_tx_callback callback;
440 void *callback_param; 440 void *callback_param;
441 #ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH 441 #ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
442 struct dma_async_tx_descriptor *next; 442 struct dma_async_tx_descriptor *next;
443 struct dma_async_tx_descriptor *parent; 443 struct dma_async_tx_descriptor *parent;
444 spinlock_t lock; 444 spinlock_t lock;
445 #endif 445 #endif
446 }; 446 };
447 447
448 #ifndef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH 448 #ifndef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
449 static inline void txd_lock(struct dma_async_tx_descriptor *txd) 449 static inline void txd_lock(struct dma_async_tx_descriptor *txd)
450 { 450 {
451 } 451 }
452 static inline void txd_unlock(struct dma_async_tx_descriptor *txd) 452 static inline void txd_unlock(struct dma_async_tx_descriptor *txd)
453 { 453 {
454 } 454 }
455 static inline void txd_chain(struct dma_async_tx_descriptor *txd, struct dma_async_tx_descriptor *next) 455 static inline void txd_chain(struct dma_async_tx_descriptor *txd, struct dma_async_tx_descriptor *next)
456 { 456 {
457 BUG(); 457 BUG();
458 } 458 }
459 static inline void txd_clear_parent(struct dma_async_tx_descriptor *txd) 459 static inline void txd_clear_parent(struct dma_async_tx_descriptor *txd)
460 { 460 {
461 } 461 }
462 static inline void txd_clear_next(struct dma_async_tx_descriptor *txd) 462 static inline void txd_clear_next(struct dma_async_tx_descriptor *txd)
463 { 463 {
464 } 464 }
465 static inline struct dma_async_tx_descriptor *txd_next(struct dma_async_tx_descriptor *txd) 465 static inline struct dma_async_tx_descriptor *txd_next(struct dma_async_tx_descriptor *txd)
466 { 466 {
467 return NULL; 467 return NULL;
468 } 468 }
469 static inline struct dma_async_tx_descriptor *txd_parent(struct dma_async_tx_descriptor *txd) 469 static inline struct dma_async_tx_descriptor *txd_parent(struct dma_async_tx_descriptor *txd)
470 { 470 {
471 return NULL; 471 return NULL;
472 } 472 }
473 473
474 #else 474 #else
475 static inline void txd_lock(struct dma_async_tx_descriptor *txd) 475 static inline void txd_lock(struct dma_async_tx_descriptor *txd)
476 { 476 {
477 spin_lock_bh(&txd->lock); 477 spin_lock_bh(&txd->lock);
478 } 478 }
479 static inline void txd_unlock(struct dma_async_tx_descriptor *txd) 479 static inline void txd_unlock(struct dma_async_tx_descriptor *txd)
480 { 480 {
481 spin_unlock_bh(&txd->lock); 481 spin_unlock_bh(&txd->lock);
482 } 482 }
483 static inline void txd_chain(struct dma_async_tx_descriptor *txd, struct dma_async_tx_descriptor *next) 483 static inline void txd_chain(struct dma_async_tx_descriptor *txd, struct dma_async_tx_descriptor *next)
484 { 484 {
485 txd->next = next; 485 txd->next = next;
486 next->parent = txd; 486 next->parent = txd;
487 } 487 }
488 static inline void txd_clear_parent(struct dma_async_tx_descriptor *txd) 488 static inline void txd_clear_parent(struct dma_async_tx_descriptor *txd)
489 { 489 {
490 txd->parent = NULL; 490 txd->parent = NULL;
491 } 491 }
492 static inline void txd_clear_next(struct dma_async_tx_descriptor *txd) 492 static inline void txd_clear_next(struct dma_async_tx_descriptor *txd)
493 { 493 {
494 txd->next = NULL; 494 txd->next = NULL;
495 } 495 }
496 static inline struct dma_async_tx_descriptor *txd_parent(struct dma_async_tx_descriptor *txd) 496 static inline struct dma_async_tx_descriptor *txd_parent(struct dma_async_tx_descriptor *txd)
497 { 497 {
498 return txd->parent; 498 return txd->parent;
499 } 499 }
500 static inline struct dma_async_tx_descriptor *txd_next(struct dma_async_tx_descriptor *txd) 500 static inline struct dma_async_tx_descriptor *txd_next(struct dma_async_tx_descriptor *txd)
501 { 501 {
502 return txd->next; 502 return txd->next;
503 } 503 }
504 #endif 504 #endif
505 505
506 /** 506 /**
507 * struct dma_tx_state - filled in to report the status of 507 * struct dma_tx_state - filled in to report the status of
508 * a transfer. 508 * a transfer.
509 * @last: last completed DMA cookie 509 * @last: last completed DMA cookie
510 * @used: last issued DMA cookie (i.e. the one in progress) 510 * @used: last issued DMA cookie (i.e. the one in progress)
511 * @residue: the remaining number of bytes left to transmit 511 * @residue: the remaining number of bytes left to transmit
512 * on the selected transfer for states DMA_IN_PROGRESS and 512 * on the selected transfer for states DMA_IN_PROGRESS and
513 * DMA_PAUSED if this is implemented in the driver, else 0 513 * DMA_PAUSED if this is implemented in the driver, else 0
514 */ 514 */
515 struct dma_tx_state { 515 struct dma_tx_state {
516 dma_cookie_t last; 516 dma_cookie_t last;
517 dma_cookie_t used; 517 dma_cookie_t used;
518 u32 residue; 518 u32 residue;
519 }; 519 };
520 520
521 /** 521 /**
522 * struct dma_device - info on the entity supplying DMA services 522 * struct dma_device - info on the entity supplying DMA services
523 * @chancnt: how many DMA channels are supported 523 * @chancnt: how many DMA channels are supported
524 * @privatecnt: how many DMA channels are requested by dma_request_channel 524 * @privatecnt: how many DMA channels are requested by dma_request_channel
525 * @channels: the list of struct dma_chan 525 * @channels: the list of struct dma_chan
526 * @global_node: list_head for global dma_device_list 526 * @global_node: list_head for global dma_device_list
527 * @cap_mask: one or more dma_capability flags 527 * @cap_mask: one or more dma_capability flags
528 * @max_xor: maximum number of xor sources, 0 if no capability 528 * @max_xor: maximum number of xor sources, 0 if no capability
529 * @max_pq: maximum number of PQ sources and PQ-continue capability 529 * @max_pq: maximum number of PQ sources and PQ-continue capability
530 * @copy_align: alignment shift for memcpy operations 530 * @copy_align: alignment shift for memcpy operations
531 * @xor_align: alignment shift for xor operations 531 * @xor_align: alignment shift for xor operations
532 * @pq_align: alignment shift for pq operations 532 * @pq_align: alignment shift for pq operations
533 * @fill_align: alignment shift for memset operations 533 * @fill_align: alignment shift for memset operations
534 * @dev_id: unique device ID 534 * @dev_id: unique device ID
535 * @dev: struct device reference for dma mapping api 535 * @dev: struct device reference for dma mapping api
536 * @device_alloc_chan_resources: allocate resources and return the 536 * @device_alloc_chan_resources: allocate resources and return the
537 * number of allocated descriptors 537 * number of allocated descriptors
538 * @device_free_chan_resources: release DMA channel's resources 538 * @device_free_chan_resources: release DMA channel's resources
539 * @device_prep_dma_memcpy: prepares a memcpy operation 539 * @device_prep_dma_memcpy: prepares a memcpy operation
540 * @device_prep_dma_xor: prepares a xor operation 540 * @device_prep_dma_xor: prepares a xor operation
541 * @device_prep_dma_xor_val: prepares a xor validation operation 541 * @device_prep_dma_xor_val: prepares a xor validation operation
542 * @device_prep_dma_pq: prepares a pq operation 542 * @device_prep_dma_pq: prepares a pq operation
543 * @device_prep_dma_pq_val: prepares a pqzero_sum operation 543 * @device_prep_dma_pq_val: prepares a pqzero_sum operation
544 * @device_prep_dma_interrupt: prepares an end of chain interrupt operation 544 * @device_prep_dma_interrupt: prepares an end of chain interrupt operation
545 * @device_prep_slave_sg: prepares a slave dma operation 545 * @device_prep_slave_sg: prepares a slave dma operation
546 * @device_prep_dma_cyclic: prepare a cyclic dma operation suitable for audio. 546 * @device_prep_dma_cyclic: prepare a cyclic dma operation suitable for audio.
547 * The function takes a buffer of size buf_len. The callback function will 547 * The function takes a buffer of size buf_len. The callback function will
548 * be called after period_len bytes have been transferred. 548 * be called after period_len bytes have been transferred.
549 * @device_prep_interleaved_dma: Transfer expression in a generic way. 549 * @device_prep_interleaved_dma: Transfer expression in a generic way.
550 * @device_control: manipulate all pending operations on a channel, returns 550 * @device_control: manipulate all pending operations on a channel, returns
551 * zero or error code 551 * zero or error code
552 * @device_tx_status: poll for transaction completion, the optional 552 * @device_tx_status: poll for transaction completion, the optional
553 * txstate parameter can be supplied with a pointer to get a 553 * txstate parameter can be supplied with a pointer to get a
554 * struct with auxiliary transfer status information, otherwise the call 554 * struct with auxiliary transfer status information, otherwise the call
555 * will just return a simple status code 555 * will just return a simple status code
556 * @device_issue_pending: push pending transactions to hardware 556 * @device_issue_pending: push pending transactions to hardware
557 * @device_slave_caps: return the slave channel capabilities 557 * @device_slave_caps: return the slave channel capabilities
558 */ 558 */
559 struct dma_device { 559 struct dma_device {
560 560
561 unsigned int chancnt; 561 unsigned int chancnt;
562 unsigned int privatecnt; 562 unsigned int privatecnt;
563 struct list_head channels; 563 struct list_head channels;
564 struct list_head global_node; 564 struct list_head global_node;
565 dma_cap_mask_t cap_mask; 565 dma_cap_mask_t cap_mask;
566 unsigned short max_xor; 566 unsigned short max_xor;
567 unsigned short max_pq; 567 unsigned short max_pq;
568 u8 copy_align; 568 u8 copy_align;
569 u8 xor_align; 569 u8 xor_align;
570 u8 pq_align; 570 u8 pq_align;
571 u8 fill_align; 571 u8 fill_align;
572 #define DMA_HAS_PQ_CONTINUE (1 << 15) 572 #define DMA_HAS_PQ_CONTINUE (1 << 15)
573 573
574 int dev_id; 574 int dev_id;
575 struct device *dev; 575 struct device *dev;
576 576
577 int (*device_alloc_chan_resources)(struct dma_chan *chan); 577 int (*device_alloc_chan_resources)(struct dma_chan *chan);
578 void (*device_free_chan_resources)(struct dma_chan *chan); 578 void (*device_free_chan_resources)(struct dma_chan *chan);
579 579
580 struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)( 580 struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)(
581 struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, 581 struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
582 size_t len, unsigned long flags); 582 size_t len, unsigned long flags);
583 struct dma_async_tx_descriptor *(*device_prep_dma_xor)( 583 struct dma_async_tx_descriptor *(*device_prep_dma_xor)(
584 struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src, 584 struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
585 unsigned int src_cnt, size_t len, unsigned long flags); 585 unsigned int src_cnt, size_t len, unsigned long flags);
586 struct dma_async_tx_descriptor *(*device_prep_dma_xor_val)( 586 struct dma_async_tx_descriptor *(*device_prep_dma_xor_val)(
587 struct dma_chan *chan, dma_addr_t *src, unsigned int src_cnt, 587 struct dma_chan *chan, dma_addr_t *src, unsigned int src_cnt,
588 size_t len, enum sum_check_flags *result, unsigned long flags); 588 size_t len, enum sum_check_flags *result, unsigned long flags);
589 struct dma_async_tx_descriptor *(*device_prep_dma_pq)( 589 struct dma_async_tx_descriptor *(*device_prep_dma_pq)(
590 struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src, 590 struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
591 unsigned int src_cnt, const unsigned char *scf, 591 unsigned int src_cnt, const unsigned char *scf,
592 size_t len, unsigned long flags); 592 size_t len, unsigned long flags);
593 struct dma_async_tx_descriptor *(*device_prep_dma_pq_val)( 593 struct dma_async_tx_descriptor *(*device_prep_dma_pq_val)(
594 struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src, 594 struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
595 unsigned int src_cnt, const unsigned char *scf, size_t len, 595 unsigned int src_cnt, const unsigned char *scf, size_t len,
596 enum sum_check_flags *pqres, unsigned long flags); 596 enum sum_check_flags *pqres, unsigned long flags);
597 struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)( 597 struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)(
598 struct dma_chan *chan, unsigned long flags); 598 struct dma_chan *chan, unsigned long flags);
599 struct dma_async_tx_descriptor *(*device_prep_dma_sg)( 599 struct dma_async_tx_descriptor *(*device_prep_dma_sg)(
600 struct dma_chan *chan, 600 struct dma_chan *chan,
601 struct scatterlist *dst_sg, unsigned int dst_nents, 601 struct scatterlist *dst_sg, unsigned int dst_nents,
602 struct scatterlist *src_sg, unsigned int src_nents, 602 struct scatterlist *src_sg, unsigned int src_nents,
603 unsigned long flags); 603 unsigned long flags);
604 604
605 struct dma_async_tx_descriptor *(*device_prep_slave_sg)( 605 struct dma_async_tx_descriptor *(*device_prep_slave_sg)(
606 struct dma_chan *chan, struct scatterlist *sgl, 606 struct dma_chan *chan, struct scatterlist *sgl,
607 unsigned int sg_len, enum dma_transfer_direction direction, 607 unsigned int sg_len, enum dma_transfer_direction direction,
608 unsigned long flags, void *context); 608 unsigned long flags, void *context);
609 struct dma_async_tx_descriptor *(*device_prep_dma_cyclic)( 609 struct dma_async_tx_descriptor *(*device_prep_dma_cyclic)(
610 struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, 610 struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
611 size_t period_len, enum dma_transfer_direction direction, 611 size_t period_len, enum dma_transfer_direction direction,
612 unsigned long flags, void *context); 612 unsigned long flags, void *context);
613 struct dma_async_tx_descriptor *(*device_prep_interleaved_dma)( 613 struct dma_async_tx_descriptor *(*device_prep_interleaved_dma)(
614 struct dma_chan *chan, struct dma_interleaved_template *xt, 614 struct dma_chan *chan, struct dma_interleaved_template *xt,
615 unsigned long flags); 615 unsigned long flags);
616 int (*device_control)(struct dma_chan *chan, enum dma_ctrl_cmd cmd, 616 int (*device_control)(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
617 unsigned long arg); 617 unsigned long arg);
618 618
619 enum dma_status (*device_tx_status)(struct dma_chan *chan, 619 enum dma_status (*device_tx_status)(struct dma_chan *chan,
620 dma_cookie_t cookie, 620 dma_cookie_t cookie,
621 struct dma_tx_state *txstate); 621 struct dma_tx_state *txstate);
622 void (*device_issue_pending)(struct dma_chan *chan); 622 void (*device_issue_pending)(struct dma_chan *chan);
623 int (*device_slave_caps)(struct dma_chan *chan, struct dma_slave_caps *caps); 623 int (*device_slave_caps)(struct dma_chan *chan, struct dma_slave_caps *caps);
624 }; 624 };
625 625
626 static inline int dmaengine_device_control(struct dma_chan *chan, 626 static inline int dmaengine_device_control(struct dma_chan *chan,
627 enum dma_ctrl_cmd cmd, 627 enum dma_ctrl_cmd cmd,
628 unsigned long arg) 628 unsigned long arg)
629 { 629 {
630 if (chan->device->device_control) 630 if (chan->device->device_control)
631 return chan->device->device_control(chan, cmd, arg); 631 return chan->device->device_control(chan, cmd, arg);
632 632
633 return -ENOSYS; 633 return -ENOSYS;
634 } 634 }
635 635
636 static inline int dmaengine_slave_config(struct dma_chan *chan, 636 static inline int dmaengine_slave_config(struct dma_chan *chan,
637 struct dma_slave_config *config) 637 struct dma_slave_config *config)
638 { 638 {
639 return dmaengine_device_control(chan, DMA_SLAVE_CONFIG, 639 return dmaengine_device_control(chan, DMA_SLAVE_CONFIG,
640 (unsigned long)config); 640 (unsigned long)config);
641 } 641 }
642 642
643 static inline bool is_slave_direction(enum dma_transfer_direction direction) 643 static inline bool is_slave_direction(enum dma_transfer_direction direction)
644 { 644 {
645 return (direction == DMA_MEM_TO_DEV) || (direction == DMA_DEV_TO_MEM); 645 return (direction == DMA_MEM_TO_DEV) || (direction == DMA_DEV_TO_MEM);
646 } 646 }
647 647
648 static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_single( 648 static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_single(
649 struct dma_chan *chan, dma_addr_t buf, size_t len, 649 struct dma_chan *chan, dma_addr_t buf, size_t len,
650 enum dma_transfer_direction dir, unsigned long flags) 650 enum dma_transfer_direction dir, unsigned long flags)
651 { 651 {
652 struct scatterlist sg; 652 struct scatterlist sg;
653 sg_init_table(&sg, 1); 653 sg_init_table(&sg, 1);
654 sg_dma_address(&sg) = buf; 654 sg_dma_address(&sg) = buf;
655 sg_dma_len(&sg) = len; 655 sg_dma_len(&sg) = len;
656 656
657 return chan->device->device_prep_slave_sg(chan, &sg, 1, 657 return chan->device->device_prep_slave_sg(chan, &sg, 1,
658 dir, flags, NULL); 658 dir, flags, NULL);
659 } 659 }
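Tying the pieces together, a hedged end-to-end sketch (the device pointer, buffer and callback names are hypothetical, not part of this diff): map a buffer, prepare a single slave transfer, attach a completion callback, submit, and kick the engine:

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/completion.h>

static void example_done(void *param)
{
        /* runs from the DMA driver's completion context */
        complete((struct completion *)param);
}

static int example_send(struct dma_chan *chan, struct device *dev,
                        void *buf, size_t len, struct completion *done)
{
        struct dma_async_tx_descriptor *desc;
        dma_addr_t dma_buf;
        dma_cookie_t cookie;

        dma_buf = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, dma_buf))
                return -ENOMEM;

        desc = dmaengine_prep_slave_single(chan, dma_buf, len, DMA_MEM_TO_DEV,
                                           DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!desc) {
                dma_unmap_single(dev, dma_buf, len, DMA_TO_DEVICE);
                return -EBUSY;
        }

        desc->callback = example_done;
        desc->callback_param = done;

        cookie = dmaengine_submit(desc);
        if (cookie < 0) {
                dma_unmap_single(dev, dma_buf, len, DMA_TO_DEVICE);
                return cookie;
        }

        dma_async_issue_pending(chan);
        return 0;
}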
660 660
661 static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_sg( 661 static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_sg(
662 struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len, 662 struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
663 enum dma_transfer_direction dir, unsigned long flags) 663 enum dma_transfer_direction dir, unsigned long flags)
664 { 664 {
665 return chan->device->device_prep_slave_sg(chan, sgl, sg_len, 665 return chan->device->device_prep_slave_sg(chan, sgl, sg_len,
666 dir, flags, NULL); 666 dir, flags, NULL);
667 } 667 }
668 668
669 #ifdef CONFIG_RAPIDIO_DMA_ENGINE 669 #ifdef CONFIG_RAPIDIO_DMA_ENGINE
670 struct rio_dma_ext; 670 struct rio_dma_ext;
671 static inline struct dma_async_tx_descriptor *dmaengine_prep_rio_sg( 671 static inline struct dma_async_tx_descriptor *dmaengine_prep_rio_sg(
672 struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len, 672 struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
673 enum dma_transfer_direction dir, unsigned long flags, 673 enum dma_transfer_direction dir, unsigned long flags,
674 struct rio_dma_ext *rio_ext) 674 struct rio_dma_ext *rio_ext)
675 { 675 {
676 return chan->device->device_prep_slave_sg(chan, sgl, sg_len, 676 return chan->device->device_prep_slave_sg(chan, sgl, sg_len,
677 dir, flags, rio_ext); 677 dir, flags, rio_ext);
678 } 678 }
679 #endif 679 #endif
680 680
681 static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_cyclic( 681 static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_cyclic(
682 struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, 682 struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
683 size_t period_len, enum dma_transfer_direction dir, 683 size_t period_len, enum dma_transfer_direction dir,
684 unsigned long flags) 684 unsigned long flags)
685 { 685 {
686 return chan->device->device_prep_dma_cyclic(chan, buf_addr, buf_len, 686 return chan->device->device_prep_dma_cyclic(chan, buf_addr, buf_len,
687 period_len, dir, flags, NULL); 687 period_len, dir, flags, NULL);
688 } 688 }
689 689
690 static inline struct dma_async_tx_descriptor *dmaengine_prep_interleaved_dma( 690 static inline struct dma_async_tx_descriptor *dmaengine_prep_interleaved_dma(
691 struct dma_chan *chan, struct dma_interleaved_template *xt, 691 struct dma_chan *chan, struct dma_interleaved_template *xt,
692 unsigned long flags) 692 unsigned long flags)
693 { 693 {
694 return chan->device->device_prep_interleaved_dma(chan, xt, flags); 694 return chan->device->device_prep_interleaved_dma(chan, xt, flags);
695 } 695 }
696 696
697 static inline int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps) 697 static inline int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
698 { 698 {
699 if (!chan || !caps) 699 if (!chan || !caps)
700 return -EINVAL; 700 return -EINVAL;
701 701
702 /* check if the channel supports slave transactions */ 702 /* check if the channel supports slave transactions */
703 if (!test_bit(DMA_SLAVE, chan->device->cap_mask.bits)) 703 if (!test_bit(DMA_SLAVE, chan->device->cap_mask.bits))
704 return -ENXIO; 704 return -ENXIO;
705 705
706 if (chan->device->device_slave_caps) 706 if (chan->device->device_slave_caps)
707 return chan->device->device_slave_caps(chan, caps); 707 return chan->device->device_slave_caps(chan, caps);
708 708
709 return -ENXIO; 709 return -ENXIO;
710 } 710 }
711 711
712 static inline int dmaengine_terminate_all(struct dma_chan *chan) 712 static inline int dmaengine_terminate_all(struct dma_chan *chan)
713 { 713 {
714 return dmaengine_device_control(chan, DMA_TERMINATE_ALL, 0); 714 return dmaengine_device_control(chan, DMA_TERMINATE_ALL, 0);
715 } 715 }
716 716
717 static inline int dmaengine_pause(struct dma_chan *chan) 717 static inline int dmaengine_pause(struct dma_chan *chan)
718 { 718 {
719 return dmaengine_device_control(chan, DMA_PAUSE, 0); 719 return dmaengine_device_control(chan, DMA_PAUSE, 0);
720 } 720 }
721 721
722 static inline int dmaengine_resume(struct dma_chan *chan) 722 static inline int dmaengine_resume(struct dma_chan *chan)
723 { 723 {
724 return dmaengine_device_control(chan, DMA_RESUME, 0); 724 return dmaengine_device_control(chan, DMA_RESUME, 0);
725 } 725 }
726 726
727 static inline enum dma_status dmaengine_tx_status(struct dma_chan *chan, 727 static inline enum dma_status dmaengine_tx_status(struct dma_chan *chan,
728 dma_cookie_t cookie, struct dma_tx_state *state) 728 dma_cookie_t cookie, struct dma_tx_state *state)
729 { 729 {
730 return chan->device->device_tx_status(chan, cookie, state); 730 return chan->device->device_tx_status(chan, cookie, state);
731 } 731 }
732 732
733 static inline dma_cookie_t dmaengine_submit(struct dma_async_tx_descriptor *desc) 733 static inline dma_cookie_t dmaengine_submit(struct dma_async_tx_descriptor *desc)
734 { 734 {
735 return desc->tx_submit(desc); 735 return desc->tx_submit(desc);
736 } 736 }
737 737
738 static inline bool dmaengine_check_align(u8 align, size_t off1, size_t off2, size_t len) 738 static inline bool dmaengine_check_align(u8 align, size_t off1, size_t off2, size_t len)
739 { 739 {
740 size_t mask; 740 size_t mask;
741 741
742 if (!align) 742 if (!align)
743 return true; 743 return true;
744 mask = (1 << align) - 1; 744 mask = (1 << align) - 1;
745 if (mask & (off1 | off2 | len)) 745 if (mask & (off1 | off2 | len))
746 return false; 746 return false;
747 return true; 747 return true;
748 } 748 }
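The *_align fields are alignment shifts, not byte counts. A small hypothetical illustration of the shift semantics checked by dmaengine_check_align():

static void example_alignment_checks(void)
{
        /* align == 2 -> mask = (1 << 2) - 1 = 0x3 -> 4-byte alignment needed */
        bool a = dmaengine_check_align(2, 0, 8, 64);    /* true: all multiples of 4 */
        bool b = dmaengine_check_align(2, 2, 8, 64);    /* false: off1 & 0x3 != 0 */
        bool c = dmaengine_check_align(0, 1, 3, 5);     /* true: no constraint */

        (void)a; (void)b; (void)c;
}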
749 749
750 static inline bool is_dma_copy_aligned(struct dma_device *dev, size_t off1, 750 static inline bool is_dma_copy_aligned(struct dma_device *dev, size_t off1,
751 size_t off2, size_t len) 751 size_t off2, size_t len)
752 { 752 {
753 return dmaengine_check_align(dev->copy_align, off1, off2, len); 753 return dmaengine_check_align(dev->copy_align, off1, off2, len);
754 } 754 }
755 755
756 static inline bool is_dma_xor_aligned(struct dma_device *dev, size_t off1, 756 static inline bool is_dma_xor_aligned(struct dma_device *dev, size_t off1,
757 size_t off2, size_t len) 757 size_t off2, size_t len)
758 { 758 {
759 return dmaengine_check_align(dev->xor_align, off1, off2, len); 759 return dmaengine_check_align(dev->xor_align, off1, off2, len);
760 } 760 }
761 761
762 static inline bool is_dma_pq_aligned(struct dma_device *dev, size_t off1, 762 static inline bool is_dma_pq_aligned(struct dma_device *dev, size_t off1,
763 size_t off2, size_t len) 763 size_t off2, size_t len)
764 { 764 {
765 return dmaengine_check_align(dev->pq_align, off1, off2, len); 765 return dmaengine_check_align(dev->pq_align, off1, off2, len);
766 } 766 }
767 767
768 static inline bool is_dma_fill_aligned(struct dma_device *dev, size_t off1, 768 static inline bool is_dma_fill_aligned(struct dma_device *dev, size_t off1,
769 size_t off2, size_t len) 769 size_t off2, size_t len)
770 { 770 {
771 return dmaengine_check_align(dev->fill_align, off1, off2, len); 771 return dmaengine_check_align(dev->fill_align, off1, off2, len);
772 } 772 }
773 773
774 static inline void 774 static inline void
775 dma_set_maxpq(struct dma_device *dma, int maxpq, int has_pq_continue) 775 dma_set_maxpq(struct dma_device *dma, int maxpq, int has_pq_continue)
776 { 776 {
777 dma->max_pq = maxpq; 777 dma->max_pq = maxpq;
778 if (has_pq_continue) 778 if (has_pq_continue)
779 dma->max_pq |= DMA_HAS_PQ_CONTINUE; 779 dma->max_pq |= DMA_HAS_PQ_CONTINUE;
780 } 780 }
781 781
782 static inline bool dmaf_continue(enum dma_ctrl_flags flags) 782 static inline bool dmaf_continue(enum dma_ctrl_flags flags)
783 { 783 {
784 return (flags & DMA_PREP_CONTINUE) == DMA_PREP_CONTINUE; 784 return (flags & DMA_PREP_CONTINUE) == DMA_PREP_CONTINUE;
785 } 785 }
786 786
787 static inline bool dmaf_p_disabled_continue(enum dma_ctrl_flags flags) 787 static inline bool dmaf_p_disabled_continue(enum dma_ctrl_flags flags)
788 { 788 {
789 enum dma_ctrl_flags mask = DMA_PREP_CONTINUE | DMA_PREP_PQ_DISABLE_P; 789 enum dma_ctrl_flags mask = DMA_PREP_CONTINUE | DMA_PREP_PQ_DISABLE_P;
790 790
791 return (flags & mask) == mask; 791 return (flags & mask) == mask;
792 } 792 }
793 793
794 static inline bool dma_dev_has_pq_continue(struct dma_device *dma) 794 static inline bool dma_dev_has_pq_continue(struct dma_device *dma)
795 { 795 {
796 return (dma->max_pq & DMA_HAS_PQ_CONTINUE) == DMA_HAS_PQ_CONTINUE; 796 return (dma->max_pq & DMA_HAS_PQ_CONTINUE) == DMA_HAS_PQ_CONTINUE;
797 } 797 }
798 798
799 static inline unsigned short dma_dev_to_maxpq(struct dma_device *dma) 799 static inline unsigned short dma_dev_to_maxpq(struct dma_device *dma)
800 { 800 {
801 return dma->max_pq & ~DMA_HAS_PQ_CONTINUE; 801 return dma->max_pq & ~DMA_HAS_PQ_CONTINUE;
802 } 802 }
803 803
804 /* dma_maxpq - reduce maxpq in the face of continued operations 804 /* dma_maxpq - reduce maxpq in the face of continued operations
805 * @dma - dma device with PQ capability 805 * @dma - dma device with PQ capability
806 * @flags - to check if DMA_PREP_CONTINUE and DMA_PREP_PQ_DISABLE_P are set 806 * @flags - to check if DMA_PREP_CONTINUE and DMA_PREP_PQ_DISABLE_P are set
807 * 807 *
808 * When an engine does not support native continuation we need 3 extra 808 * When an engine does not support native continuation we need 3 extra
809 * source slots to reuse P and Q with the following coefficients: 809 * source slots to reuse P and Q with the following coefficients:
810 * 1/ {00} * P : remove P from Q', but use it as a source for P' 810 * 1/ {00} * P : remove P from Q', but use it as a source for P'
811 * 2/ {01} * Q : use Q to continue Q' calculation 811 * 2/ {01} * Q : use Q to continue Q' calculation
812 * 3/ {00} * Q : subtract Q from P' to cancel (2) 812 * 3/ {00} * Q : subtract Q from P' to cancel (2)
813 * 813 *
814 * In the case where P is disabled we only need 1 extra source: 814 * In the case where P is disabled we only need 1 extra source:
815 * 1/ {01} * Q : use Q to continue Q' calculation 815 * 1/ {01} * Q : use Q to continue Q' calculation
816 */ 816 */
817 static inline int dma_maxpq(struct dma_device *dma, enum dma_ctrl_flags flags) 817 static inline int dma_maxpq(struct dma_device *dma, enum dma_ctrl_flags flags)
818 { 818 {
819 if (dma_dev_has_pq_continue(dma) || !dmaf_continue(flags)) 819 if (dma_dev_has_pq_continue(dma) || !dmaf_continue(flags))
820 return dma_dev_to_maxpq(dma); 820 return dma_dev_to_maxpq(dma);
821 else if (dmaf_p_disabled_continue(flags)) 821 else if (dmaf_p_disabled_continue(flags))
822 return dma_dev_to_maxpq(dma) - 1; 822 return dma_dev_to_maxpq(dma) - 1;
823 else if (dmaf_continue(flags)) 823 else if (dmaf_continue(flags))
824 return dma_dev_to_maxpq(dma) - 3; 824 return dma_dev_to_maxpq(dma) - 3;
825 BUG(); 825 BUG();
826 } 826 }
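A hedged worked example of the accounting above, assuming a device registered with dma_set_maxpq(dma, 8, 0), i.e. 8 PQ sources and no native continuation support (with native continuation all three cases would return 8):

#include <linux/dmaengine.h>

static void example_maxpq(struct dma_device *dma)
{
        dma_set_maxpq(dma, 8, 0);       /* 8 PQ sources, no native continuation */

        WARN_ON(dma_maxpq(dma, 0) != 8);
        WARN_ON(dma_maxpq(dma, DMA_PREP_CONTINUE) != 5);        /* 8 - 3 */
        WARN_ON(dma_maxpq(dma, DMA_PREP_CONTINUE |
                               DMA_PREP_PQ_DISABLE_P) != 7);    /* 8 - 1 */
}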
827 827
828 /* --- public DMA engine API --- */ 828 /* --- public DMA engine API --- */
829 829
830 #ifdef CONFIG_DMA_ENGINE 830 #ifdef CONFIG_DMA_ENGINE
831 void dmaengine_get(void); 831 void dmaengine_get(void);
832 void dmaengine_put(void); 832 void dmaengine_put(void);
833 #else 833 #else
834 static inline void dmaengine_get(void) 834 static inline void dmaengine_get(void)
835 { 835 {
836 } 836 }
837 static inline void dmaengine_put(void) 837 static inline void dmaengine_put(void)
838 { 838 {
839 } 839 }
840 #endif 840 #endif
841 841
842 #ifdef CONFIG_NET_DMA 842 #ifdef CONFIG_NET_DMA
843 #define net_dmaengine_get() dmaengine_get() 843 #define net_dmaengine_get() dmaengine_get()
844 #define net_dmaengine_put() dmaengine_put() 844 #define net_dmaengine_put() dmaengine_put()
845 #else 845 #else
846 static inline void net_dmaengine_get(void) 846 static inline void net_dmaengine_get(void)
847 { 847 {
848 } 848 }
849 static inline void net_dmaengine_put(void) 849 static inline void net_dmaengine_put(void)
850 { 850 {
851 } 851 }
852 #endif 852 #endif
853 853
854 #ifdef CONFIG_ASYNC_TX_DMA 854 #ifdef CONFIG_ASYNC_TX_DMA
855 #define async_dmaengine_get() dmaengine_get() 855 #define async_dmaengine_get() dmaengine_get()
856 #define async_dmaengine_put() dmaengine_put() 856 #define async_dmaengine_put() dmaengine_put()
857 #ifndef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH 857 #ifndef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
858 #define async_dma_find_channel(type) dma_find_channel(DMA_ASYNC_TX) 858 #define async_dma_find_channel(type) dma_find_channel(DMA_ASYNC_TX)
859 #else 859 #else
860 #define async_dma_find_channel(type) dma_find_channel(type) 860 #define async_dma_find_channel(type) dma_find_channel(type)
861 #endif /* CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH */ 861 #endif /* CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH */
862 #else 862 #else
863 static inline void async_dmaengine_get(void) 863 static inline void async_dmaengine_get(void)
864 { 864 {
865 } 865 }
866 static inline void async_dmaengine_put(void) 866 static inline void async_dmaengine_put(void)
867 { 867 {
868 } 868 }
869 static inline struct dma_chan * 869 static inline struct dma_chan *
870 async_dma_find_channel(enum dma_transaction_type type) 870 async_dma_find_channel(enum dma_transaction_type type)
871 { 871 {
872 return NULL; 872 return NULL;
873 } 873 }
874 #endif /* CONFIG_ASYNC_TX_DMA */ 874 #endif /* CONFIG_ASYNC_TX_DMA */
875 875
876 dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan, 876 dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan,
877 void *dest, void *src, size_t len); 877 void *dest, void *src, size_t len);
878 dma_cookie_t dma_async_memcpy_buf_to_pg(struct dma_chan *chan, 878 dma_cookie_t dma_async_memcpy_buf_to_pg(struct dma_chan *chan,
879 struct page *page, unsigned int offset, void *kdata, size_t len); 879 struct page *page, unsigned int offset, void *kdata, size_t len);
880 dma_cookie_t dma_async_memcpy_pg_to_pg(struct dma_chan *chan, 880 dma_cookie_t dma_async_memcpy_pg_to_pg(struct dma_chan *chan,
881 struct page *dest_pg, unsigned int dest_off, struct page *src_pg, 881 struct page *dest_pg, unsigned int dest_off, struct page *src_pg,
882 unsigned int src_off, size_t len); 882 unsigned int src_off, size_t len);
883 void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx, 883 void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
884 struct dma_chan *chan); 884 struct dma_chan *chan);
885 885
886 static inline void async_tx_ack(struct dma_async_tx_descriptor *tx) 886 static inline void async_tx_ack(struct dma_async_tx_descriptor *tx)
887 { 887 {
888 tx->flags |= DMA_CTRL_ACK; 888 tx->flags |= DMA_CTRL_ACK;
889 } 889 }
890 890
891 static inline void async_tx_clear_ack(struct dma_async_tx_descriptor *tx) 891 static inline void async_tx_clear_ack(struct dma_async_tx_descriptor *tx)
892 { 892 {
893 tx->flags &= ~DMA_CTRL_ACK; 893 tx->flags &= ~DMA_CTRL_ACK;
894 } 894 }
895 895
896 static inline bool async_tx_test_ack(struct dma_async_tx_descriptor *tx) 896 static inline bool async_tx_test_ack(struct dma_async_tx_descriptor *tx)
897 { 897 {
898 return (tx->flags & DMA_CTRL_ACK) == DMA_CTRL_ACK; 898 return (tx->flags & DMA_CTRL_ACK) == DMA_CTRL_ACK;
899 } 899 }
900 900
901 #define dma_cap_set(tx, mask) __dma_cap_set((tx), &(mask)) 901 #define dma_cap_set(tx, mask) __dma_cap_set((tx), &(mask))
902 static inline void 902 static inline void
903 __dma_cap_set(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp) 903 __dma_cap_set(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
904 { 904 {
905 set_bit(tx_type, dstp->bits); 905 set_bit(tx_type, dstp->bits);
906 } 906 }
907 907
908 #define dma_cap_clear(tx, mask) __dma_cap_clear((tx), &(mask)) 908 #define dma_cap_clear(tx, mask) __dma_cap_clear((tx), &(mask))
909 static inline void 909 static inline void
910 __dma_cap_clear(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp) 910 __dma_cap_clear(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
911 { 911 {
912 clear_bit(tx_type, dstp->bits); 912 clear_bit(tx_type, dstp->bits);
913 } 913 }
914 914
915 #define dma_cap_zero(mask) __dma_cap_zero(&(mask)) 915 #define dma_cap_zero(mask) __dma_cap_zero(&(mask))
916 static inline void __dma_cap_zero(dma_cap_mask_t *dstp) 916 static inline void __dma_cap_zero(dma_cap_mask_t *dstp)
917 { 917 {
918 bitmap_zero(dstp->bits, DMA_TX_TYPE_END); 918 bitmap_zero(dstp->bits, DMA_TX_TYPE_END);
919 } 919 }
920 920
921 #define dma_has_cap(tx, mask) __dma_has_cap((tx), &(mask)) 921 #define dma_has_cap(tx, mask) __dma_has_cap((tx), &(mask))
922 static inline int 922 static inline int
923 __dma_has_cap(enum dma_transaction_type tx_type, dma_cap_mask_t *srcp) 923 __dma_has_cap(enum dma_transaction_type tx_type, dma_cap_mask_t *srcp)
924 { 924 {
925 return test_bit(tx_type, srcp->bits); 925 return test_bit(tx_type, srcp->bits);
926 } 926 }
927 927
928 #define for_each_dma_cap_mask(cap, mask) \ 928 #define for_each_dma_cap_mask(cap, mask) \
929 for_each_set_bit(cap, mask.bits, DMA_TX_TYPE_END) 929 for_each_set_bit(cap, mask.bits, DMA_TX_TYPE_END)
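A brief hypothetical use of the capability-mask helpers above, walking the capabilities advertised by a device and checking one bit explicitly:

#include <linux/dmaengine.h>

static void example_inspect_caps(struct dma_device *dev)
{
        unsigned int cap;

        for_each_dma_cap_mask(cap, dev->cap_mask)
                dev_info(dev->dev, "capability %u supported\n", cap);

        if (dma_has_cap(DMA_SLAVE, dev->cap_mask))
                dev_info(dev->dev, "slave transfers supported\n");
}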
930 930
931 /** 931 /**
932 * dma_async_issue_pending - flush pending transactions to HW 932 * dma_async_issue_pending - flush pending transactions to HW
933 * @chan: target DMA channel 933 * @chan: target DMA channel
934 * 934 *
935 * This allows drivers to push copies to HW in batches, 935 * This allows drivers to push copies to HW in batches,
936 * reducing MMIO writes where possible. 936 * reducing MMIO writes where possible.
937 */ 937 */
938 static inline void dma_async_issue_pending(struct dma_chan *chan) 938 static inline void dma_async_issue_pending(struct dma_chan *chan)
939 { 939 {
940 chan->device->device_issue_pending(chan); 940 chan->device->device_issue_pending(chan);
941 } 941 }
942 942
943 /** 943 /**
944 * dma_async_is_tx_complete - poll for transaction completion 944 * dma_async_is_tx_complete - poll for transaction completion
945 * @chan: DMA channel 945 * @chan: DMA channel
946 * @cookie: transaction identifier to check status of 946 * @cookie: transaction identifier to check status of
947 * @last: returns last completed cookie, can be NULL 947 * @last: returns last completed cookie, can be NULL
948 * @used: returns last issued cookie, can be NULL 948 * @used: returns last issued cookie, can be NULL
949 * 949 *
950 * If @last and @used are passed in, upon return they reflect the driver 950 * If @last and @used are passed in, upon return they reflect the driver
951 * internal state and can be used with dma_async_is_complete() to check 951 * internal state and can be used with dma_async_is_complete() to check
952 * the status of multiple cookies without re-checking hardware state. 952 * the status of multiple cookies without re-checking hardware state.
953 */ 953 */
954 static inline enum dma_status dma_async_is_tx_complete(struct dma_chan *chan, 954 static inline enum dma_status dma_async_is_tx_complete(struct dma_chan *chan,
955 dma_cookie_t cookie, dma_cookie_t *last, dma_cookie_t *used) 955 dma_cookie_t cookie, dma_cookie_t *last, dma_cookie_t *used)
956 { 956 {
957 struct dma_tx_state state; 957 struct dma_tx_state state;
958 enum dma_status status; 958 enum dma_status status;
959 959
960 status = chan->device->device_tx_status(chan, cookie, &state); 960 status = chan->device->device_tx_status(chan, cookie, &state);
961 if (last) 961 if (last)
962 *last = state.last; 962 *last = state.last;
963 if (used) 963 if (used)
964 *used = state.used; 964 *used = state.used;
965 return status; 965 return status;
966 } 966 }
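A hedged busy-poll sketch built on the helper above (the polling policy is invented; real clients normally rely on the completion callback instead):

static enum dma_status example_poll(struct dma_chan *chan, dma_cookie_t cookie)
{
        enum dma_status status;

        do {
                status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
                cpu_relax();
        } while (status == DMA_IN_PROGRESS);

        return status;  /* DMA_COMPLETE, DMA_PAUSED or DMA_ERROR */
}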
967 967
968 /** 968 /**
969 * dma_async_is_complete - test a cookie against chan state 969 * dma_async_is_complete - test a cookie against chan state
970 * @cookie: transaction identifier to test status of 970 * @cookie: transaction identifier to test status of
971 * @last_complete: last known completed transaction 971 * @last_complete: last known completed transaction
972 * @last_used: last cookie value handed out 972 * @last_used: last cookie value handed out
973 * 973 *
974 * dma_async_is_complete() is used in dma_async_is_tx_complete(); 974 * dma_async_is_complete() is used in dma_async_is_tx_complete();
975 * the test logic is separated for lightweight testing of multiple cookies 975 * the test logic is separated for lightweight testing of multiple cookies
976 */ 976 */
977 static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie, 977 static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie,
978 dma_cookie_t last_complete, dma_cookie_t last_used) 978 dma_cookie_t last_complete, dma_cookie_t last_used)
979 { 979 {
980 if (last_complete <= last_used) { 980 if (last_complete <= last_used) {
981 if ((cookie <= last_complete) || (cookie > last_used)) 981 if ((cookie <= last_complete) || (cookie > last_used))
982 return DMA_SUCCESS; 982 return DMA_COMPLETE;
983 } else { 983 } else {
984 if ((cookie <= last_complete) && (cookie > last_used)) 984 if ((cookie <= last_complete) && (cookie > last_used))
985 return DMA_SUCCESS; 985 return DMA_COMPLETE;
986 } 986 }
987 return DMA_IN_PROGRESS; 987 return DMA_IN_PROGRESS;
988 } 988 }
989 989
990 static inline void 990 static inline void
991 dma_set_tx_state(struct dma_tx_state *st, dma_cookie_t last, dma_cookie_t used, u32 residue) 991 dma_set_tx_state(struct dma_tx_state *st, dma_cookie_t last, dma_cookie_t used, u32 residue)
992 { 992 {
993 if (st) { 993 if (st) {
994 st->last = last; 994 st->last = last;
995 st->used = used; 995 st->used = used;
996 st->residue = residue; 996 st->residue = residue;
997 } 997 }
998 } 998 }
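On the provider side, a hedged sketch of a driver's device_tx_status() callback built only from the helpers in this header (the residue is a placeholder, and a real driver would take its channel lock around the cookie reads and query hardware for the remaining byte count):

static enum dma_status example_tx_status(struct dma_chan *chan,
                                         dma_cookie_t cookie,
                                         struct dma_tx_state *txstate)
{
        dma_cookie_t last = chan->completed_cookie;
        dma_cookie_t used = chan->cookie;
        enum dma_status status = dma_async_is_complete(cookie, last, used);

        /* placeholder residue: a real driver reads the hardware counter */
        dma_set_tx_state(txstate, last, used, 0);

        return status;
}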
999 999
1000 #ifdef CONFIG_DMA_ENGINE 1000 #ifdef CONFIG_DMA_ENGINE
1001 struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type); 1001 struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type);
1002 enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie); 1002 enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie);
1003 enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx); 1003 enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx);
1004 void dma_issue_pending_all(void); 1004 void dma_issue_pending_all(void);
1005 struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask, 1005 struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
1006 dma_filter_fn fn, void *fn_param); 1006 dma_filter_fn fn, void *fn_param);
1007 struct dma_chan *dma_request_slave_channel(struct device *dev, const char *name); 1007 struct dma_chan *dma_request_slave_channel(struct device *dev, const char *name);
1008 void dma_release_channel(struct dma_chan *chan); 1008 void dma_release_channel(struct dma_chan *chan);
1009 #else 1009 #else
1010 static inline struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type) 1010 static inline struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
1011 { 1011 {
1012 return NULL; 1012 return NULL;
1013 } 1013 }
1014 static inline enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie) 1014 static inline enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
1015 { 1015 {
1016 return DMA_SUCCESS; 1016 return DMA_COMPLETE;
1017 } 1017 }
1018 static inline enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx) 1018 static inline enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
1019 { 1019 {
1020 return DMA_SUCCESS; 1020 return DMA_COMPLETE;
1021 } 1021 }
1022 static inline void dma_issue_pending_all(void) 1022 static inline void dma_issue_pending_all(void)
1023 { 1023 {
1024 } 1024 }
1025 static inline struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask, 1025 static inline struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
1026 dma_filter_fn fn, void *fn_param) 1026 dma_filter_fn fn, void *fn_param)
1027 { 1027 {
1028 return NULL; 1028 return NULL;
1029 } 1029 }
1030 static inline struct dma_chan *dma_request_slave_channel(struct device *dev, 1030 static inline struct dma_chan *dma_request_slave_channel(struct device *dev,
1031 const char *name) 1031 const char *name)
1032 { 1032 {
1033 return NULL; 1033 return NULL;
1034 } 1034 }
1035 static inline void dma_release_channel(struct dma_chan *chan) 1035 static inline void dma_release_channel(struct dma_chan *chan)
1036 { 1036 {
1037 } 1037 }
1038 #endif 1038 #endif
1039 1039
1040 /* --- DMA device --- */ 1040 /* --- DMA device --- */
1041 1041
1042 int dma_async_device_register(struct dma_device *device); 1042 int dma_async_device_register(struct dma_device *device);
1043 void dma_async_device_unregister(struct dma_device *device); 1043 void dma_async_device_unregister(struct dma_device *device);
1044 void dma_run_dependencies(struct dma_async_tx_descriptor *tx); 1044 void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
1045 struct dma_chan *dma_get_slave_channel(struct dma_chan *chan); 1045 struct dma_chan *dma_get_slave_channel(struct dma_chan *chan);
1046 struct dma_chan *net_dma_find_channel(void); 1046 struct dma_chan *net_dma_find_channel(void);
1047 #define dma_request_channel(mask, x, y) __dma_request_channel(&(mask), x, y) 1047 #define dma_request_channel(mask, x, y) __dma_request_channel(&(mask), x, y)
1048 #define dma_request_slave_channel_compat(mask, x, y, dev, name) \ 1048 #define dma_request_slave_channel_compat(mask, x, y, dev, name) \
1049 __dma_request_slave_channel_compat(&(mask), x, y, dev, name) 1049 __dma_request_slave_channel_compat(&(mask), x, y, dev, name)
1050 1050
1051 static inline struct dma_chan 1051 static inline struct dma_chan
1052 *__dma_request_slave_channel_compat(const dma_cap_mask_t *mask, 1052 *__dma_request_slave_channel_compat(const dma_cap_mask_t *mask,
1053 dma_filter_fn fn, void *fn_param, 1053 dma_filter_fn fn, void *fn_param,
1054 struct device *dev, char *name) 1054 struct device *dev, char *name)
1055 { 1055 {
1056 struct dma_chan *chan; 1056 struct dma_chan *chan;
1057 1057
1058 chan = dma_request_slave_channel(dev, name); 1058 chan = dma_request_slave_channel(dev, name);
1059 if (chan) 1059 if (chan)
1060 return chan; 1060 return chan;
1061 1061
1062 return __dma_request_channel(mask, fn, fn_param); 1062 return __dma_request_channel(mask, fn, fn_param);
1063 } 1063 }
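
[Editorial note] The compat helper above expresses the preferred lookup order: ask for the named slave channel bound to the device (e.g. by its firmware description), and only fall back to the mask-plus-filter search used on legacy platforms. A hedged sketch of how a driver probe path might use the dma_request_slave_channel_compat() wrapper; foo_filter(), foo_request_tx() and the request-id parameter are placeholders:

	/* Hypothetical filter for platforms without a slave-channel binding. */
	static bool foo_filter(struct dma_chan *chan, void *param)
	{
		return chan->chan_id == *(int *)param;
	}

	static struct dma_chan *foo_request_tx(struct device *dev, int *req_id)
	{
		dma_cap_mask_t mask;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		/* Try the "tx" binding first, then the filter-based lookup. */
		return dma_request_slave_channel_compat(mask, foo_filter, req_id,
							dev, "tx");
	}
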
1064 1064
1065 /* --- Helper iov-locking functions --- */ 1065 /* --- Helper iov-locking functions --- */
1066 1066
1067 struct dma_page_list { 1067 struct dma_page_list {
1068 char __user *base_address; 1068 char __user *base_address;
1069 int nr_pages; 1069 int nr_pages;
1070 struct page **pages; 1070 struct page **pages;
1071 }; 1071 };
1072 1072
1073 struct dma_pinned_list { 1073 struct dma_pinned_list {
1074 int nr_iovecs; 1074 int nr_iovecs;
1075 struct dma_page_list page_list[0]; 1075 struct dma_page_list page_list[0];
1076 }; 1076 };
1077 1077
1078 struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len); 1078 struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len);
1079 void dma_unpin_iovec_pages(struct dma_pinned_list* pinned_list); 1079 void dma_unpin_iovec_pages(struct dma_pinned_list* pinned_list);
1080 1080
1081 dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov, 1081 dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
1082 struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len); 1082 struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len);
1083 dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov, 1083 dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
1084 struct dma_pinned_list *pinned_list, struct page *page, 1084 struct dma_pinned_list *pinned_list, struct page *page,
1085 unsigned int offset, size_t len); 1085 unsigned int offset, size_t len);
1086 1086
1087 #endif /* DMAENGINE_H */ 1087 #endif /* DMAENGINE_H */
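
[Editorial note] The iov-locking declarations above are the NET_DMA-era interface: user pages are pinned once, DMA copies are issued against the pinned list, and the pages are released once the copies are known to be done. A simplified, hypothetical call sequence (error handling trimmed; completion is waited for synchronously only to keep the sketch short, real users track it asynchronously):

	static int foo_copy_to_user_iovec(struct dma_chan *chan, struct iovec *iov,
					  unsigned char *kdata, size_t len)
	{
		struct dma_pinned_list *pinned;
		dma_cookie_t cookie;

		/* Pin the user pages backing the iovec once up front. */
		pinned = dma_pin_iovec_pages(iov, len);
		if (!pinned)
			return -ENOMEM;

		/* Issue the async copy of the kernel buffer into the iovec. */
		cookie = dma_memcpy_to_iovec(chan, iov, pinned, kdata, len);
		if (cookie > 0)
			dma_sync_wait(chan, cookie);

		/* Only unpin after the transfer is known to be complete. */
		dma_unpin_iovec_pages(pinned);
		return cookie > 0 ? 0 : (int)cookie;
	}
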
1088 1088
1 /* 1 /*
2 * INET An implementation of the TCP/IP protocol suite for the LINUX 2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket 3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level. 4 * interface as the means of communication with the user level.
5 * 5 *
6 * Implementation of the Transmission Control Protocol(TCP). 6 * Implementation of the Transmission Control Protocol(TCP).
7 * 7 *
8 * Authors: Ross Biro 8 * Authors: Ross Biro
9 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> 9 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
10 * Mark Evans, <evansmp@uhura.aston.ac.uk> 10 * Mark Evans, <evansmp@uhura.aston.ac.uk>
11 * Corey Minyard <wf-rch!minyard@relay.EU.net> 11 * Corey Minyard <wf-rch!minyard@relay.EU.net>
12 * Florian La Roche, <flla@stud.uni-sb.de> 12 * Florian La Roche, <flla@stud.uni-sb.de>
13 * Charles Hedrick, <hedrick@klinzhai.rutgers.edu> 13 * Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
14 * Linus Torvalds, <torvalds@cs.helsinki.fi> 14 * Linus Torvalds, <torvalds@cs.helsinki.fi>
15 * Alan Cox, <gw4pts@gw4pts.ampr.org> 15 * Alan Cox, <gw4pts@gw4pts.ampr.org>
16 * Matthew Dillon, <dillon@apollo.west.oic.com> 16 * Matthew Dillon, <dillon@apollo.west.oic.com>
17 * Arnt Gulbrandsen, <agulbra@nvg.unit.no> 17 * Arnt Gulbrandsen, <agulbra@nvg.unit.no>
18 * Jorge Cwik, <jorge@laser.satlink.net> 18 * Jorge Cwik, <jorge@laser.satlink.net>
19 * 19 *
20 * Fixes: 20 * Fixes:
21 * Alan Cox : Numerous verify_area() calls 21 * Alan Cox : Numerous verify_area() calls
22 * Alan Cox : Set the ACK bit on a reset 22 * Alan Cox : Set the ACK bit on a reset
23 * Alan Cox : Stopped it crashing if it closed while 23 * Alan Cox : Stopped it crashing if it closed while
24 * sk->inuse=1 and was trying to connect 24 * sk->inuse=1 and was trying to connect
25 * (tcp_err()). 25 * (tcp_err()).
26 * Alan Cox : All icmp error handling was broken 26 * Alan Cox : All icmp error handling was broken
27 * pointers passed where wrong and the 27 * pointers passed where wrong and the
28 * socket was looked up backwards. Nobody 28 * socket was looked up backwards. Nobody
29 * tested any icmp error code obviously. 29 * tested any icmp error code obviously.
30 * Alan Cox : tcp_err() now handled properly. It 30 * Alan Cox : tcp_err() now handled properly. It
31 * wakes people on errors. poll 31 * wakes people on errors. poll
32 * behaves and the icmp error race 32 * behaves and the icmp error race
33 * has gone by moving it into sock.c 33 * has gone by moving it into sock.c
34 * Alan Cox : tcp_send_reset() fixed to work for 34 * Alan Cox : tcp_send_reset() fixed to work for
35 * everything not just packets for 35 * everything not just packets for
36 * unknown sockets. 36 * unknown sockets.
37 * Alan Cox : tcp option processing. 37 * Alan Cox : tcp option processing.
38 * Alan Cox : Reset tweaked (still not 100%) [Had 38 * Alan Cox : Reset tweaked (still not 100%) [Had
39 * syn rule wrong] 39 * syn rule wrong]
40 * Herp Rosmanith : More reset fixes 40 * Herp Rosmanith : More reset fixes
41 * Alan Cox : No longer acks invalid rst frames. 41 * Alan Cox : No longer acks invalid rst frames.
42 * Acking any kind of RST is right out. 42 * Acking any kind of RST is right out.
43 * Alan Cox : Sets an ignore me flag on an rst 43 * Alan Cox : Sets an ignore me flag on an rst
44 * receive otherwise odd bits of prattle 44 * receive otherwise odd bits of prattle
45 * escape still 45 * escape still
46 * Alan Cox : Fixed another acking RST frame bug. 46 * Alan Cox : Fixed another acking RST frame bug.
47 * Should stop LAN workplace lockups. 47 * Should stop LAN workplace lockups.
48 * Alan Cox : Some tidyups using the new skb list 48 * Alan Cox : Some tidyups using the new skb list
49 * facilities 49 * facilities
50 * Alan Cox : sk->keepopen now seems to work 50 * Alan Cox : sk->keepopen now seems to work
51 * Alan Cox : Pulls options out correctly on accepts 51 * Alan Cox : Pulls options out correctly on accepts
52 * Alan Cox : Fixed assorted sk->rqueue->next errors 52 * Alan Cox : Fixed assorted sk->rqueue->next errors
53 * Alan Cox : PSH doesn't end a TCP read. Switched a 53 * Alan Cox : PSH doesn't end a TCP read. Switched a
54 * bit to skb ops. 54 * bit to skb ops.
55 * Alan Cox : Tidied tcp_data to avoid a potential 55 * Alan Cox : Tidied tcp_data to avoid a potential
56 * nasty. 56 * nasty.
57 * Alan Cox : Added some better commenting, as the 57 * Alan Cox : Added some better commenting, as the
58 * tcp is hard to follow 58 * tcp is hard to follow
59 * Alan Cox : Removed incorrect check for 20 * psh 59 * Alan Cox : Removed incorrect check for 20 * psh
60 * Michael O'Reilly : ack < copied bug fix. 60 * Michael O'Reilly : ack < copied bug fix.
61 * Johannes Stille : Misc tcp fixes (not all in yet). 61 * Johannes Stille : Misc tcp fixes (not all in yet).
62 * Alan Cox : FIN with no memory -> CRASH 62 * Alan Cox : FIN with no memory -> CRASH
63 * Alan Cox : Added socket option proto entries. 63 * Alan Cox : Added socket option proto entries.
64 * Also added awareness of them to accept. 64 * Also added awareness of them to accept.
65 * Alan Cox : Added TCP options (SOL_TCP) 65 * Alan Cox : Added TCP options (SOL_TCP)
66 * Alan Cox : Switched wakeup calls to callbacks, 66 * Alan Cox : Switched wakeup calls to callbacks,
67 * so the kernel can layer network 67 * so the kernel can layer network
68 * sockets. 68 * sockets.
69 * Alan Cox : Use ip_tos/ip_ttl settings. 69 * Alan Cox : Use ip_tos/ip_ttl settings.
70 * Alan Cox : Handle FIN (more) properly (we hope). 70 * Alan Cox : Handle FIN (more) properly (we hope).
71 * Alan Cox : RST frames sent on unsynchronised 71 * Alan Cox : RST frames sent on unsynchronised
72 * state ack error. 72 * state ack error.
73 * Alan Cox : Put in missing check for SYN bit. 73 * Alan Cox : Put in missing check for SYN bit.
74 * Alan Cox : Added tcp_select_window() aka NET2E 74 * Alan Cox : Added tcp_select_window() aka NET2E
75 * window non shrink trick. 75 * window non shrink trick.
76 * Alan Cox : Added a couple of small NET2E timer 76 * Alan Cox : Added a couple of small NET2E timer
77 * fixes 77 * fixes
78 * Charles Hedrick : TCP fixes 78 * Charles Hedrick : TCP fixes
79 * Toomas Tamm : TCP window fixes 79 * Toomas Tamm : TCP window fixes
80 * Alan Cox : Small URG fix to rlogin ^C ack fight 80 * Alan Cox : Small URG fix to rlogin ^C ack fight
81 * Charles Hedrick : Rewrote most of it to actually work 81 * Charles Hedrick : Rewrote most of it to actually work
82 * Linus : Rewrote tcp_read() and URG handling 82 * Linus : Rewrote tcp_read() and URG handling
83 * completely 83 * completely
84 * Gerhard Koerting: Fixed some missing timer handling 84 * Gerhard Koerting: Fixed some missing timer handling
85 * Matthew Dillon : Reworked TCP machine states as per RFC 85 * Matthew Dillon : Reworked TCP machine states as per RFC
86 * Gerhard Koerting: PC/TCP workarounds 86 * Gerhard Koerting: PC/TCP workarounds
87 * Adam Caldwell : Assorted timer/timing errors 87 * Adam Caldwell : Assorted timer/timing errors
88 * Matthew Dillon : Fixed another RST bug 88 * Matthew Dillon : Fixed another RST bug
89 * Alan Cox : Move to kernel side addressing changes. 89 * Alan Cox : Move to kernel side addressing changes.
90 * Alan Cox : Beginning work on TCP fastpathing 90 * Alan Cox : Beginning work on TCP fastpathing
91 * (not yet usable) 91 * (not yet usable)
92 * Arnt Gulbrandsen: Turbocharged tcp_check() routine. 92 * Arnt Gulbrandsen: Turbocharged tcp_check() routine.
93 * Alan Cox : TCP fast path debugging 93 * Alan Cox : TCP fast path debugging
94 * Alan Cox : Window clamping 94 * Alan Cox : Window clamping
95 * Michael Riepe : Bug in tcp_check() 95 * Michael Riepe : Bug in tcp_check()
96 * Matt Dillon : More TCP improvements and RST bug fixes 96 * Matt Dillon : More TCP improvements and RST bug fixes
97 * Matt Dillon : Yet more small nasties removed from the 97 * Matt Dillon : Yet more small nasties removed from the
98 * TCP code (Be very nice to this man if 98 * TCP code (Be very nice to this man if
99 * tcp finally works 100%) 8) 99 * tcp finally works 100%) 8)
100 * Alan Cox : BSD accept semantics. 100 * Alan Cox : BSD accept semantics.
101 * Alan Cox : Reset on closedown bug. 101 * Alan Cox : Reset on closedown bug.
102 * Peter De Schrijver : ENOTCONN check missing in tcp_sendto(). 102 * Peter De Schrijver : ENOTCONN check missing in tcp_sendto().
103 * Michael Pall : Handle poll() after URG properly in 103 * Michael Pall : Handle poll() after URG properly in
104 * all cases. 104 * all cases.
105 * Michael Pall : Undo the last fix in tcp_read_urg() 105 * Michael Pall : Undo the last fix in tcp_read_urg()
106 * (multi URG PUSH broke rlogin). 106 * (multi URG PUSH broke rlogin).
107 * Michael Pall : Fix the multi URG PUSH problem in 107 * Michael Pall : Fix the multi URG PUSH problem in
108 * tcp_readable(), poll() after URG 108 * tcp_readable(), poll() after URG
109 * works now. 109 * works now.
110 * Michael Pall : recv(...,MSG_OOB) never blocks in the 110 * Michael Pall : recv(...,MSG_OOB) never blocks in the
111 * BSD api. 111 * BSD api.
112 * Alan Cox : Changed the semantics of sk->socket to 112 * Alan Cox : Changed the semantics of sk->socket to
113 * fix a race and a signal problem with 113 * fix a race and a signal problem with
114 * accept() and async I/O. 114 * accept() and async I/O.
115 * Alan Cox : Relaxed the rules on tcp_sendto(). 115 * Alan Cox : Relaxed the rules on tcp_sendto().
116 * Yury Shevchuk : Really fixed accept() blocking problem. 116 * Yury Shevchuk : Really fixed accept() blocking problem.
117 * Craig I. Hagan : Allow for BSD compatible TIME_WAIT for 117 * Craig I. Hagan : Allow for BSD compatible TIME_WAIT for
118 * clients/servers which listen in on 118 * clients/servers which listen in on
119 * fixed ports. 119 * fixed ports.
120 * Alan Cox : Cleaned the above up and shrank it to 120 * Alan Cox : Cleaned the above up and shrank it to
121 * a sensible code size. 121 * a sensible code size.
122 * Alan Cox : Self connect lockup fix. 122 * Alan Cox : Self connect lockup fix.
123 * Alan Cox : No connect to multicast. 123 * Alan Cox : No connect to multicast.
124 * Ross Biro : Close unaccepted children on master 124 * Ross Biro : Close unaccepted children on master
125 * socket close. 125 * socket close.
126 * Alan Cox : Reset tracing code. 126 * Alan Cox : Reset tracing code.
127 * Alan Cox : Spurious resets on shutdown. 127 * Alan Cox : Spurious resets on shutdown.
128 * Alan Cox : Giant 15 minute/60 second timer error 128 * Alan Cox : Giant 15 minute/60 second timer error
129 * Alan Cox : Small whoops in polling before an 129 * Alan Cox : Small whoops in polling before an
130 * accept. 130 * accept.
131 * Alan Cox : Kept the state trace facility since 131 * Alan Cox : Kept the state trace facility since
132 * it's handy for debugging. 132 * it's handy for debugging.
133 * Alan Cox : More reset handler fixes. 133 * Alan Cox : More reset handler fixes.
134 * Alan Cox : Started rewriting the code based on 134 * Alan Cox : Started rewriting the code based on
135 * the RFC's for other useful protocol 135 * the RFC's for other useful protocol
136 * references see: Comer, KA9Q NOS, and 136 * references see: Comer, KA9Q NOS, and
137 * for a reference on the difference 137 * for a reference on the difference
138 * between specifications and how BSD 138 * between specifications and how BSD
139 * works see the 4.4lite source. 139 * works see the 4.4lite source.
140 * A.N.Kuznetsov : Don't time wait on completion of tidy 140 * A.N.Kuznetsov : Don't time wait on completion of tidy
141 * close. 141 * close.
142 * Linus Torvalds : Fin/Shutdown & copied_seq changes. 142 * Linus Torvalds : Fin/Shutdown & copied_seq changes.
143 * Linus Torvalds : Fixed BSD port reuse to work first syn 143 * Linus Torvalds : Fixed BSD port reuse to work first syn
144 * Alan Cox : Reimplemented timers as per the RFC 144 * Alan Cox : Reimplemented timers as per the RFC
145 * and using multiple timers for sanity. 145 * and using multiple timers for sanity.
146 * Alan Cox : Small bug fixes, and a lot of new 146 * Alan Cox : Small bug fixes, and a lot of new
147 * comments. 147 * comments.
148 * Alan Cox : Fixed dual reader crash by locking 148 * Alan Cox : Fixed dual reader crash by locking
149 * the buffers (much like datagram.c) 149 * the buffers (much like datagram.c)
150 * Alan Cox : Fixed stuck sockets in probe. A probe 150 * Alan Cox : Fixed stuck sockets in probe. A probe
151 * now gets fed up of retrying without 151 * now gets fed up of retrying without
152 * (even a no space) answer. 152 * (even a no space) answer.
153 * Alan Cox : Extracted closing code better 153 * Alan Cox : Extracted closing code better
154 * Alan Cox : Fixed the closing state machine to 154 * Alan Cox : Fixed the closing state machine to
155 * resemble the RFC. 155 * resemble the RFC.
156 * Alan Cox : More 'per spec' fixes. 156 * Alan Cox : More 'per spec' fixes.
157 * Jorge Cwik : Even faster checksumming. 157 * Jorge Cwik : Even faster checksumming.
158 * Alan Cox : tcp_data() doesn't ack illegal PSH 158 * Alan Cox : tcp_data() doesn't ack illegal PSH
159 * only frames. At least one pc tcp stack 159 * only frames. At least one pc tcp stack
160 * generates them. 160 * generates them.
161 * Alan Cox : Cache last socket. 161 * Alan Cox : Cache last socket.
162 * Alan Cox : Per route irtt. 162 * Alan Cox : Per route irtt.
163 * Matt Day : poll()->select() match BSD precisely on error 163 * Matt Day : poll()->select() match BSD precisely on error
164 * Alan Cox : New buffers 164 * Alan Cox : New buffers
165 * Marc Tamsky : Various sk->prot->retransmits and 165 * Marc Tamsky : Various sk->prot->retransmits and
166 * sk->retransmits misupdating fixed. 166 * sk->retransmits misupdating fixed.
167 * Fixed tcp_write_timeout: stuck close, 167 * Fixed tcp_write_timeout: stuck close,
168 * and TCP syn retries gets used now. 168 * and TCP syn retries gets used now.
169 * Mark Yarvis : In tcp_read_wakeup(), don't send an 169 * Mark Yarvis : In tcp_read_wakeup(), don't send an
170 * ack if state is TCP_CLOSED. 170 * ack if state is TCP_CLOSED.
171 * Alan Cox : Look up device on a retransmit - routes may 171 * Alan Cox : Look up device on a retransmit - routes may
172 * change. Doesn't yet cope with MSS shrink right 172 * change. Doesn't yet cope with MSS shrink right
173 * but it's a start! 173 * but it's a start!
174 * Marc Tamsky : Closing in closing fixes. 174 * Marc Tamsky : Closing in closing fixes.
175 * Mike Shaver : RFC1122 verifications. 175 * Mike Shaver : RFC1122 verifications.
176 * Alan Cox : rcv_saddr errors. 176 * Alan Cox : rcv_saddr errors.
177 * Alan Cox : Block double connect(). 177 * Alan Cox : Block double connect().
178 * Alan Cox : Small hooks for enSKIP. 178 * Alan Cox : Small hooks for enSKIP.
179 * Alexey Kuznetsov: Path MTU discovery. 179 * Alexey Kuznetsov: Path MTU discovery.
180 * Alan Cox : Support soft errors. 180 * Alan Cox : Support soft errors.
181 * Alan Cox : Fix MTU discovery pathological case 181 * Alan Cox : Fix MTU discovery pathological case
182 * when the remote claims no mtu! 182 * when the remote claims no mtu!
183 * Marc Tamsky : TCP_CLOSE fix. 183 * Marc Tamsky : TCP_CLOSE fix.
184 * Colin (G3TNE) : Send a reset on syn ack replies in 184 * Colin (G3TNE) : Send a reset on syn ack replies in
185 * window but wrong (fixes NT lpd problems) 185 * window but wrong (fixes NT lpd problems)
186 * Pedro Roque : Better TCP window handling, delayed ack. 186 * Pedro Roque : Better TCP window handling, delayed ack.
187 * Joerg Reuter : No modification of locked buffers in 187 * Joerg Reuter : No modification of locked buffers in
188 * tcp_do_retransmit() 188 * tcp_do_retransmit()
189 * Eric Schenk : Changed receiver side silly window 189 * Eric Schenk : Changed receiver side silly window
190 * avoidance algorithm to BSD style 190 * avoidance algorithm to BSD style
191 * algorithm. This doubles throughput 191 * algorithm. This doubles throughput
192 * against machines running Solaris, 192 * against machines running Solaris,
193 * and seems to result in general 193 * and seems to result in general
194 * improvement. 194 * improvement.
195 * Stefan Magdalinski : adjusted tcp_readable() to fix FIONREAD 195 * Stefan Magdalinski : adjusted tcp_readable() to fix FIONREAD
196 * Willy Konynenberg : Transparent proxying support. 196 * Willy Konynenberg : Transparent proxying support.
197 * Mike McLagan : Routing by source 197 * Mike McLagan : Routing by source
198 * Keith Owens : Do proper merging with partial SKB's in 198 * Keith Owens : Do proper merging with partial SKB's in
199 * tcp_do_sendmsg to avoid burstiness. 199 * tcp_do_sendmsg to avoid burstiness.
200 * Eric Schenk : Fix fast close down bug with 200 * Eric Schenk : Fix fast close down bug with
201 * shutdown() followed by close(). 201 * shutdown() followed by close().
202 * Andi Kleen : Make poll agree with SIGIO 202 * Andi Kleen : Make poll agree with SIGIO
203 * Salvatore Sanfilippo : Support SO_LINGER with linger == 1 and 203 * Salvatore Sanfilippo : Support SO_LINGER with linger == 1 and
204 * lingertime == 0 (RFC 793 ABORT Call) 204 * lingertime == 0 (RFC 793 ABORT Call)
205 * Hirokazu Takahashi : Use copy_from_user() instead of 205 * Hirokazu Takahashi : Use copy_from_user() instead of
206 * csum_and_copy_from_user() if possible. 206 * csum_and_copy_from_user() if possible.
207 * 207 *
208 * This program is free software; you can redistribute it and/or 208 * This program is free software; you can redistribute it and/or
209 * modify it under the terms of the GNU General Public License 209 * modify it under the terms of the GNU General Public License
210 * as published by the Free Software Foundation; either version 210 * as published by the Free Software Foundation; either version
211 * 2 of the License, or(at your option) any later version. 211 * 2 of the License, or(at your option) any later version.
212 * 212 *
213 * Description of States: 213 * Description of States:
214 * 214 *
215 * TCP_SYN_SENT sent a connection request, waiting for ack 215 * TCP_SYN_SENT sent a connection request, waiting for ack
216 * 216 *
217 * TCP_SYN_RECV received a connection request, sent ack, 217 * TCP_SYN_RECV received a connection request, sent ack,
218 * waiting for final ack in three-way handshake. 218 * waiting for final ack in three-way handshake.
219 * 219 *
220 * TCP_ESTABLISHED connection established 220 * TCP_ESTABLISHED connection established
221 * 221 *
222 * TCP_FIN_WAIT1 our side has shutdown, waiting to complete 222 * TCP_FIN_WAIT1 our side has shutdown, waiting to complete
223 * transmission of remaining buffered data 223 * transmission of remaining buffered data
224 * 224 *
225 * TCP_FIN_WAIT2 all buffered data sent, waiting for remote 225 * TCP_FIN_WAIT2 all buffered data sent, waiting for remote
226 * to shutdown 226 * to shutdown
227 * 227 *
228 * TCP_CLOSING both sides have shutdown but we still have 228 * TCP_CLOSING both sides have shutdown but we still have
229 * data we have to finish sending 229 * data we have to finish sending
230 * 230 *
231 * TCP_TIME_WAIT timeout to catch resent junk before entering 231 * TCP_TIME_WAIT timeout to catch resent junk before entering
232 * closed, can only be entered from FIN_WAIT2 232 * closed, can only be entered from FIN_WAIT2
233 * or CLOSING. Required because the other end 233 * or CLOSING. Required because the other end
234 * may not have gotten our last ACK causing it 234 * may not have gotten our last ACK causing it
235 * to retransmit the data packet (which we ignore) 235 * to retransmit the data packet (which we ignore)
236 * 236 *
237 * TCP_CLOSE_WAIT remote side has shutdown and is waiting for 237 * TCP_CLOSE_WAIT remote side has shutdown and is waiting for
238 * us to finish writing our data and to shutdown 238 * us to finish writing our data and to shutdown
239 * (we have to close() to move on to LAST_ACK) 239 * (we have to close() to move on to LAST_ACK)
240 * 240 *
241 * TCP_LAST_ACK our side has shutdown after remote has 241 * TCP_LAST_ACK our side has shutdown after remote has
242 * shutdown. There may still be data in our 242 * shutdown. There may still be data in our
243 * buffer that we have to finish sending 243 * buffer that we have to finish sending
244 * 244 *
245 * TCP_CLOSE socket is finished 245 * TCP_CLOSE socket is finished
246 */ 246 */
247 247
248 #define pr_fmt(fmt) "TCP: " fmt 248 #define pr_fmt(fmt) "TCP: " fmt
249 249
250 #include <linux/kernel.h> 250 #include <linux/kernel.h>
251 #include <linux/module.h> 251 #include <linux/module.h>
252 #include <linux/types.h> 252 #include <linux/types.h>
253 #include <linux/fcntl.h> 253 #include <linux/fcntl.h>
254 #include <linux/poll.h> 254 #include <linux/poll.h>
255 #include <linux/init.h> 255 #include <linux/init.h>
256 #include <linux/fs.h> 256 #include <linux/fs.h>
257 #include <linux/skbuff.h> 257 #include <linux/skbuff.h>
258 #include <linux/scatterlist.h> 258 #include <linux/scatterlist.h>
259 #include <linux/splice.h> 259 #include <linux/splice.h>
260 #include <linux/net.h> 260 #include <linux/net.h>
261 #include <linux/socket.h> 261 #include <linux/socket.h>
262 #include <linux/random.h> 262 #include <linux/random.h>
263 #include <linux/bootmem.h> 263 #include <linux/bootmem.h>
264 #include <linux/highmem.h> 264 #include <linux/highmem.h>
265 #include <linux/swap.h> 265 #include <linux/swap.h>
266 #include <linux/cache.h> 266 #include <linux/cache.h>
267 #include <linux/err.h> 267 #include <linux/err.h>
268 #include <linux/crypto.h> 268 #include <linux/crypto.h>
269 #include <linux/time.h> 269 #include <linux/time.h>
270 #include <linux/slab.h> 270 #include <linux/slab.h>
271 271
272 #include <net/icmp.h> 272 #include <net/icmp.h>
273 #include <net/inet_common.h> 273 #include <net/inet_common.h>
274 #include <net/tcp.h> 274 #include <net/tcp.h>
275 #include <net/xfrm.h> 275 #include <net/xfrm.h>
276 #include <net/ip.h> 276 #include <net/ip.h>
277 #include <net/netdma.h> 277 #include <net/netdma.h>
278 #include <net/sock.h> 278 #include <net/sock.h>
279 279
280 #include <asm/uaccess.h> 280 #include <asm/uaccess.h>
281 #include <asm/ioctls.h> 281 #include <asm/ioctls.h>
282 #include <net/busy_poll.h> 282 #include <net/busy_poll.h>
283 283
284 int sysctl_tcp_fin_timeout __read_mostly = TCP_FIN_TIMEOUT; 284 int sysctl_tcp_fin_timeout __read_mostly = TCP_FIN_TIMEOUT;
285 285
286 int sysctl_tcp_min_tso_segs __read_mostly = 2; 286 int sysctl_tcp_min_tso_segs __read_mostly = 2;
287 287
288 struct percpu_counter tcp_orphan_count; 288 struct percpu_counter tcp_orphan_count;
289 EXPORT_SYMBOL_GPL(tcp_orphan_count); 289 EXPORT_SYMBOL_GPL(tcp_orphan_count);
290 290
291 int sysctl_tcp_wmem[3] __read_mostly; 291 int sysctl_tcp_wmem[3] __read_mostly;
292 int sysctl_tcp_rmem[3] __read_mostly; 292 int sysctl_tcp_rmem[3] __read_mostly;
293 293
294 EXPORT_SYMBOL(sysctl_tcp_rmem); 294 EXPORT_SYMBOL(sysctl_tcp_rmem);
295 EXPORT_SYMBOL(sysctl_tcp_wmem); 295 EXPORT_SYMBOL(sysctl_tcp_wmem);
296 296
297 atomic_long_t tcp_memory_allocated; /* Current allocated memory. */ 297 atomic_long_t tcp_memory_allocated; /* Current allocated memory. */
298 EXPORT_SYMBOL(tcp_memory_allocated); 298 EXPORT_SYMBOL(tcp_memory_allocated);
299 299
300 /* 300 /*
301 * Current number of TCP sockets. 301 * Current number of TCP sockets.
302 */ 302 */
303 struct percpu_counter tcp_sockets_allocated; 303 struct percpu_counter tcp_sockets_allocated;
304 EXPORT_SYMBOL(tcp_sockets_allocated); 304 EXPORT_SYMBOL(tcp_sockets_allocated);
305 305
306 /* 306 /*
307 * TCP splice context 307 * TCP splice context
308 */ 308 */
309 struct tcp_splice_state { 309 struct tcp_splice_state {
310 struct pipe_inode_info *pipe; 310 struct pipe_inode_info *pipe;
311 size_t len; 311 size_t len;
312 unsigned int flags; 312 unsigned int flags;
313 }; 313 };
314 314
315 /* 315 /*
316 * Pressure flag: try to collapse. 316 * Pressure flag: try to collapse.
317 * Technical note: it is used by multiple contexts non atomically. 317 * Technical note: it is used by multiple contexts non atomically.
318 * All the __sk_mem_schedule() is of this nature: accounting 318 * All the __sk_mem_schedule() is of this nature: accounting
319 * is strict, actions are advisory and have some latency. 319 * is strict, actions are advisory and have some latency.
320 */ 320 */
321 int tcp_memory_pressure __read_mostly; 321 int tcp_memory_pressure __read_mostly;
322 EXPORT_SYMBOL(tcp_memory_pressure); 322 EXPORT_SYMBOL(tcp_memory_pressure);
323 323
324 void tcp_enter_memory_pressure(struct sock *sk) 324 void tcp_enter_memory_pressure(struct sock *sk)
325 { 325 {
326 if (!tcp_memory_pressure) { 326 if (!tcp_memory_pressure) {
327 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMEMORYPRESSURES); 327 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMEMORYPRESSURES);
328 tcp_memory_pressure = 1; 328 tcp_memory_pressure = 1;
329 } 329 }
330 } 330 }
331 EXPORT_SYMBOL(tcp_enter_memory_pressure); 331 EXPORT_SYMBOL(tcp_enter_memory_pressure);
332 332
333 /* Convert seconds to retransmits based on initial and max timeout */ 333 /* Convert seconds to retransmits based on initial and max timeout */
334 static u8 secs_to_retrans(int seconds, int timeout, int rto_max) 334 static u8 secs_to_retrans(int seconds, int timeout, int rto_max)
335 { 335 {
336 u8 res = 0; 336 u8 res = 0;
337 337
338 if (seconds > 0) { 338 if (seconds > 0) {
339 int period = timeout; 339 int period = timeout;
340 340
341 res = 1; 341 res = 1;
342 while (seconds > period && res < 255) { 342 while (seconds > period && res < 255) {
343 res++; 343 res++;
344 timeout <<= 1; 344 timeout <<= 1;
345 if (timeout > rto_max) 345 if (timeout > rto_max)
346 timeout = rto_max; 346 timeout = rto_max;
347 period += timeout; 347 period += timeout;
348 } 348 }
349 } 349 }
350 return res; 350 return res;
351 } 351 }
352 352
353 /* Convert retransmits to seconds based on initial and max timeout */ 353 /* Convert retransmits to seconds based on initial and max timeout */
354 static int retrans_to_secs(u8 retrans, int timeout, int rto_max) 354 static int retrans_to_secs(u8 retrans, int timeout, int rto_max)
355 { 355 {
356 int period = 0; 356 int period = 0;
357 357
358 if (retrans > 0) { 358 if (retrans > 0) {
359 period = timeout; 359 period = timeout;
360 while (--retrans) { 360 while (--retrans) {
361 timeout <<= 1; 361 timeout <<= 1;
362 if (timeout > rto_max) 362 if (timeout > rto_max)
363 timeout = rto_max; 363 timeout = rto_max;
364 period += timeout; 364 period += timeout;
365 } 365 }
366 } 366 }
367 return period; 367 return period;
368 } 368 }
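
[Editorial note] Both conversions above walk the same exponential backoff: each retransmission doubles the timeout until it is clamped at rto_max, and the individual periods are summed. As a hedged worked example (in whatever units @timeout uses), with an initial timeout of 200 and an rto_max large enough not to clamp, retrans_to_secs(4, 200, ...) accumulates 200 + 400 + 800 + 1600 = 3000, and secs_to_retrans(3000, 200, ...) inverts that walk and answers 4, i.e. how many doublings fit inside the requested duration.
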
369 369
370 /* Address-family independent initialization for a tcp_sock. 370 /* Address-family independent initialization for a tcp_sock.
371 * 371 *
372 * NOTE: A lot of things set to zero explicitly by call to 372 * NOTE: A lot of things set to zero explicitly by call to
373 * sk_alloc() so need not be done here. 373 * sk_alloc() so need not be done here.
374 */ 374 */
375 void tcp_init_sock(struct sock *sk) 375 void tcp_init_sock(struct sock *sk)
376 { 376 {
377 struct inet_connection_sock *icsk = inet_csk(sk); 377 struct inet_connection_sock *icsk = inet_csk(sk);
378 struct tcp_sock *tp = tcp_sk(sk); 378 struct tcp_sock *tp = tcp_sk(sk);
379 379
380 skb_queue_head_init(&tp->out_of_order_queue); 380 skb_queue_head_init(&tp->out_of_order_queue);
381 tcp_init_xmit_timers(sk); 381 tcp_init_xmit_timers(sk);
382 tcp_prequeue_init(tp); 382 tcp_prequeue_init(tp);
383 INIT_LIST_HEAD(&tp->tsq_node); 383 INIT_LIST_HEAD(&tp->tsq_node);
384 384
385 icsk->icsk_rto = TCP_TIMEOUT_INIT; 385 icsk->icsk_rto = TCP_TIMEOUT_INIT;
386 tp->mdev = TCP_TIMEOUT_INIT; 386 tp->mdev = TCP_TIMEOUT_INIT;
387 387
388 /* So many TCP implementations out there (incorrectly) count the 388 /* So many TCP implementations out there (incorrectly) count the
389 * initial SYN frame in their delayed-ACK and congestion control 389 * initial SYN frame in their delayed-ACK and congestion control
390 * algorithms that we must have the following bandaid to talk 390 * algorithms that we must have the following bandaid to talk
391 * efficiently to them. -DaveM 391 * efficiently to them. -DaveM
392 */ 392 */
393 tp->snd_cwnd = TCP_INIT_CWND; 393 tp->snd_cwnd = TCP_INIT_CWND;
394 394
395 /* See draft-stevens-tcpca-spec-01 for discussion of the 395 /* See draft-stevens-tcpca-spec-01 for discussion of the
396 * initialization of these values. 396 * initialization of these values.
397 */ 397 */
398 tp->snd_ssthresh = TCP_INFINITE_SSTHRESH; 398 tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
399 tp->snd_cwnd_clamp = ~0; 399 tp->snd_cwnd_clamp = ~0;
400 tp->mss_cache = TCP_MSS_DEFAULT; 400 tp->mss_cache = TCP_MSS_DEFAULT;
401 401
402 tp->reordering = sysctl_tcp_reordering; 402 tp->reordering = sysctl_tcp_reordering;
403 tcp_enable_early_retrans(tp); 403 tcp_enable_early_retrans(tp);
404 icsk->icsk_ca_ops = &tcp_init_congestion_ops; 404 icsk->icsk_ca_ops = &tcp_init_congestion_ops;
405 405
406 tp->tsoffset = 0; 406 tp->tsoffset = 0;
407 407
408 sk->sk_state = TCP_CLOSE; 408 sk->sk_state = TCP_CLOSE;
409 409
410 sk->sk_write_space = sk_stream_write_space; 410 sk->sk_write_space = sk_stream_write_space;
411 sock_set_flag(sk, SOCK_USE_WRITE_QUEUE); 411 sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
412 412
413 icsk->icsk_sync_mss = tcp_sync_mss; 413 icsk->icsk_sync_mss = tcp_sync_mss;
414 414
415 sk->sk_sndbuf = sysctl_tcp_wmem[1]; 415 sk->sk_sndbuf = sysctl_tcp_wmem[1];
416 sk->sk_rcvbuf = sysctl_tcp_rmem[1]; 416 sk->sk_rcvbuf = sysctl_tcp_rmem[1];
417 417
418 local_bh_disable(); 418 local_bh_disable();
419 sock_update_memcg(sk); 419 sock_update_memcg(sk);
420 sk_sockets_allocated_inc(sk); 420 sk_sockets_allocated_inc(sk);
421 local_bh_enable(); 421 local_bh_enable();
422 } 422 }
423 EXPORT_SYMBOL(tcp_init_sock); 423 EXPORT_SYMBOL(tcp_init_sock);
424 424
425 /* 425 /*
426 * Wait for a TCP event. 426 * Wait for a TCP event.
427 * 427 *
428 * Note that we don't need to lock the socket, as the upper poll layers 428 * Note that we don't need to lock the socket, as the upper poll layers
429 * take care of normal races (between the test and the event) and we don't 429 * take care of normal races (between the test and the event) and we don't
430 * go look at any of the socket buffers directly. 430 * go look at any of the socket buffers directly.
431 */ 431 */
432 unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait) 432 unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
433 { 433 {
434 unsigned int mask; 434 unsigned int mask;
435 struct sock *sk = sock->sk; 435 struct sock *sk = sock->sk;
436 const struct tcp_sock *tp = tcp_sk(sk); 436 const struct tcp_sock *tp = tcp_sk(sk);
437 437
438 sock_rps_record_flow(sk); 438 sock_rps_record_flow(sk);
439 439
440 sock_poll_wait(file, sk_sleep(sk), wait); 440 sock_poll_wait(file, sk_sleep(sk), wait);
441 if (sk->sk_state == TCP_LISTEN) 441 if (sk->sk_state == TCP_LISTEN)
442 return inet_csk_listen_poll(sk); 442 return inet_csk_listen_poll(sk);
443 443
444 /* Socket is not locked. We are protected from async events 444 /* Socket is not locked. We are protected from async events
445 * by poll logic and correct handling of state changes 445 * by poll logic and correct handling of state changes
446 * made by other threads is impossible in any case. 446 * made by other threads is impossible in any case.
447 */ 447 */
448 448
449 mask = 0; 449 mask = 0;
450 450
451 /* 451 /*
452 * POLLHUP is certainly not done right. But poll() doesn't 452 * POLLHUP is certainly not done right. But poll() doesn't
453 * have a notion of HUP in just one direction, and for a 453 * have a notion of HUP in just one direction, and for a
454 * socket the read side is more interesting. 454 * socket the read side is more interesting.
455 * 455 *
456 * Some poll() documentation says that POLLHUP is incompatible 456 * Some poll() documentation says that POLLHUP is incompatible
457 * with the POLLOUT/POLLWR flags, so somebody should check this 457 * with the POLLOUT/POLLWR flags, so somebody should check this
458 * all. But careful, it tends to be safer to return too many 458 * all. But careful, it tends to be safer to return too many
459 * bits than too few, and you can easily break real applications 459 * bits than too few, and you can easily break real applications
460 * if you don't tell them that something has hung up! 460 * if you don't tell them that something has hung up!
461 * 461 *
462 * Check-me. 462 * Check-me.
463 * 463 *
464 * Check number 1. POLLHUP is _UNMASKABLE_ event (see UNIX98 and 464 * Check number 1. POLLHUP is _UNMASKABLE_ event (see UNIX98 and
465 * our fs/select.c). It means that after we received EOF, 465 * our fs/select.c). It means that after we received EOF,
466 * poll always returns immediately, making impossible poll() on write() 466 * poll always returns immediately, making impossible poll() on write()
467 * in state CLOSE_WAIT. One solution is evident --- to set POLLHUP 467 * in state CLOSE_WAIT. One solution is evident --- to set POLLHUP
468 * if and only if shutdown has been made in both directions. 468 * if and only if shutdown has been made in both directions.
469 * Actually, it is interesting to look how Solaris and DUX 469 * Actually, it is interesting to look how Solaris and DUX
470 * solve this dilemma. I would prefer, if POLLHUP were maskable, 470 * solve this dilemma. I would prefer, if POLLHUP were maskable,
471 * then we could set it on SND_SHUTDOWN. BTW examples given 471 * then we could set it on SND_SHUTDOWN. BTW examples given
472 * in Stevens' books assume exactly this behaviour, it explains 472 * in Stevens' books assume exactly this behaviour, it explains
473 * why POLLHUP is incompatible with POLLOUT. --ANK 473 * why POLLHUP is incompatible with POLLOUT. --ANK
474 * 474 *
475 * NOTE. Check for TCP_CLOSE is added. The goal is to prevent 475 * NOTE. Check for TCP_CLOSE is added. The goal is to prevent
476 * blocking on fresh not-connected or disconnected socket. --ANK 476 * blocking on fresh not-connected or disconnected socket. --ANK
477 */ 477 */
478 if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == TCP_CLOSE) 478 if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == TCP_CLOSE)
479 mask |= POLLHUP; 479 mask |= POLLHUP;
480 if (sk->sk_shutdown & RCV_SHUTDOWN) 480 if (sk->sk_shutdown & RCV_SHUTDOWN)
481 mask |= POLLIN | POLLRDNORM | POLLRDHUP; 481 mask |= POLLIN | POLLRDNORM | POLLRDHUP;
482 482
483 /* Connected or passive Fast Open socket? */ 483 /* Connected or passive Fast Open socket? */
484 if (sk->sk_state != TCP_SYN_SENT && 484 if (sk->sk_state != TCP_SYN_SENT &&
485 (sk->sk_state != TCP_SYN_RECV || tp->fastopen_rsk != NULL)) { 485 (sk->sk_state != TCP_SYN_RECV || tp->fastopen_rsk != NULL)) {
486 int target = sock_rcvlowat(sk, 0, INT_MAX); 486 int target = sock_rcvlowat(sk, 0, INT_MAX);
487 487
488 if (tp->urg_seq == tp->copied_seq && 488 if (tp->urg_seq == tp->copied_seq &&
489 !sock_flag(sk, SOCK_URGINLINE) && 489 !sock_flag(sk, SOCK_URGINLINE) &&
490 tp->urg_data) 490 tp->urg_data)
491 target++; 491 target++;
492 492
493 /* Potential race condition. If read of tp below will 493 /* Potential race condition. If read of tp below will
494 * escape above sk->sk_state, we can be illegally awakened 494 * escape above sk->sk_state, we can be illegally awakened
495 * in SYN_* states. */ 495 * in SYN_* states. */
496 if (tp->rcv_nxt - tp->copied_seq >= target) 496 if (tp->rcv_nxt - tp->copied_seq >= target)
497 mask |= POLLIN | POLLRDNORM; 497 mask |= POLLIN | POLLRDNORM;
498 498
499 if (!(sk->sk_shutdown & SEND_SHUTDOWN)) { 499 if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
500 if (sk_stream_is_writeable(sk)) { 500 if (sk_stream_is_writeable(sk)) {
501 mask |= POLLOUT | POLLWRNORM; 501 mask |= POLLOUT | POLLWRNORM;
502 } else { /* send SIGIO later */ 502 } else { /* send SIGIO later */
503 set_bit(SOCK_ASYNC_NOSPACE, 503 set_bit(SOCK_ASYNC_NOSPACE,
504 &sk->sk_socket->flags); 504 &sk->sk_socket->flags);
505 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); 505 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
506 506
507 /* Race breaker. If space is freed after 507 /* Race breaker. If space is freed after
508 * wspace test but before the flags are set, 508 * wspace test but before the flags are set,
509 * IO signal will be lost. 509 * IO signal will be lost.
510 */ 510 */
511 if (sk_stream_is_writeable(sk)) 511 if (sk_stream_is_writeable(sk))
512 mask |= POLLOUT | POLLWRNORM; 512 mask |= POLLOUT | POLLWRNORM;
513 } 513 }
514 } else 514 } else
515 mask |= POLLOUT | POLLWRNORM; 515 mask |= POLLOUT | POLLWRNORM;
516 516
517 if (tp->urg_data & TCP_URG_VALID) 517 if (tp->urg_data & TCP_URG_VALID)
518 mask |= POLLPRI; 518 mask |= POLLPRI;
519 } 519 }
520 /* This barrier is coupled with smp_wmb() in tcp_reset() */ 520 /* This barrier is coupled with smp_wmb() in tcp_reset() */
521 smp_rmb(); 521 smp_rmb();
522 if (sk->sk_err) 522 if (sk->sk_err)
523 mask |= POLLERR; 523 mask |= POLLERR;
524 524
525 return mask; 525 return mask;
526 } 526 }
527 EXPORT_SYMBOL(tcp_poll); 527 EXPORT_SYMBOL(tcp_poll);
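
[Editorial note] The long comment in tcp_poll() about POLLHUP being reported only when both directions are shut down is observable directly from userspace. A hedged sketch of the corresponding poll(2) check; report_hup_state() is illustrative and not part of this commit:

	/*
	 * Hypothetical userspace probe: a half-closed peer shows up as
	 * POLLIN|POLLRDHUP, while POLLHUP only appears once the socket is
	 * shut down in both directions (or is in TCP_CLOSE).
	 */
	#define _GNU_SOURCE
	#include <poll.h>
	#include <stdio.h>

	static void report_hup_state(int fd)
	{
		struct pollfd pfd = {
			.fd = fd,
			.events = POLLIN | POLLOUT | POLLRDHUP,
		};

		if (poll(&pfd, 1, 0) < 0)
			return;
		if (pfd.revents & POLLRDHUP)
			printf("read side hung up (peer sent FIN)\n");
		if (pfd.revents & POLLHUP)
			printf("hung up in both directions\n");
	}
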
528 528
529 int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg) 529 int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
530 { 530 {
531 struct tcp_sock *tp = tcp_sk(sk); 531 struct tcp_sock *tp = tcp_sk(sk);
532 int answ; 532 int answ;
533 bool slow; 533 bool slow;
534 534
535 switch (cmd) { 535 switch (cmd) {
536 case SIOCINQ: 536 case SIOCINQ:
537 if (sk->sk_state == TCP_LISTEN) 537 if (sk->sk_state == TCP_LISTEN)
538 return -EINVAL; 538 return -EINVAL;
539 539
540 slow = lock_sock_fast(sk); 540 slow = lock_sock_fast(sk);
541 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) 541 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
542 answ = 0; 542 answ = 0;
543 else if (sock_flag(sk, SOCK_URGINLINE) || 543 else if (sock_flag(sk, SOCK_URGINLINE) ||
544 !tp->urg_data || 544 !tp->urg_data ||
545 before(tp->urg_seq, tp->copied_seq) || 545 before(tp->urg_seq, tp->copied_seq) ||
546 !before(tp->urg_seq, tp->rcv_nxt)) { 546 !before(tp->urg_seq, tp->rcv_nxt)) {
547 547
548 answ = tp->rcv_nxt - tp->copied_seq; 548 answ = tp->rcv_nxt - tp->copied_seq;
549 549
550 /* Subtract 1, if FIN was received */ 550 /* Subtract 1, if FIN was received */
551 if (answ && sock_flag(sk, SOCK_DONE)) 551 if (answ && sock_flag(sk, SOCK_DONE))
552 answ--; 552 answ--;
553 } else 553 } else
554 answ = tp->urg_seq - tp->copied_seq; 554 answ = tp->urg_seq - tp->copied_seq;
555 unlock_sock_fast(sk, slow); 555 unlock_sock_fast(sk, slow);
556 break; 556 break;
557 case SIOCATMARK: 557 case SIOCATMARK:
558 answ = tp->urg_data && tp->urg_seq == tp->copied_seq; 558 answ = tp->urg_data && tp->urg_seq == tp->copied_seq;
559 break; 559 break;
560 case SIOCOUTQ: 560 case SIOCOUTQ:
561 if (sk->sk_state == TCP_LISTEN) 561 if (sk->sk_state == TCP_LISTEN)
562 return -EINVAL; 562 return -EINVAL;
563 563
564 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) 564 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
565 answ = 0; 565 answ = 0;
566 else 566 else
567 answ = tp->write_seq - tp->snd_una; 567 answ = tp->write_seq - tp->snd_una;
568 break; 568 break;
569 case SIOCOUTQNSD: 569 case SIOCOUTQNSD:
570 if (sk->sk_state == TCP_LISTEN) 570 if (sk->sk_state == TCP_LISTEN)
571 return -EINVAL; 571 return -EINVAL;
572 572
573 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) 573 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
574 answ = 0; 574 answ = 0;
575 else 575 else
576 answ = tp->write_seq - tp->snd_nxt; 576 answ = tp->write_seq - tp->snd_nxt;
577 break; 577 break;
578 default: 578 default:
579 return -ENOIOCTLCMD; 579 return -ENOIOCTLCMD;
580 } 580 }
581 581
582 return put_user(answ, (int __user *)arg); 582 return put_user(answ, (int __user *)arg);
583 } 583 }
584 EXPORT_SYMBOL(tcp_ioctl); 584 EXPORT_SYMBOL(tcp_ioctl);
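
[Editorial note] The three queue queries handled above are reachable from userspace through the standard socket ioctls; the values printed by the hedged sketch below correspond one-to-one to the answ computations in tcp_ioctl() (dump_tcp_queues() is illustrative only):

	/*
	 * Hypothetical userspace dump of the TCP queue ioctls:
	 *   SIOCINQ     - bytes readable (rcv_nxt - copied_seq, minus FIN)
	 *   SIOCOUTQ    - bytes written but not yet acked (write_seq - snd_una)
	 *   SIOCOUTQNSD - bytes queued but not yet sent (write_seq - snd_nxt)
	 */
	#include <sys/ioctl.h>
	#include <linux/sockios.h>
	#include <stdio.h>

	static void dump_tcp_queues(int sock)
	{
		int inq = 0, outq = 0, notsent = 0;

		ioctl(sock, SIOCINQ, &inq);
		ioctl(sock, SIOCOUTQ, &outq);
		ioctl(sock, SIOCOUTQNSD, &notsent);

		printf("inq=%d outq=%d notsent=%d\n", inq, outq, notsent);
	}
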
585 585
586 static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb) 586 static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
587 { 587 {
588 TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH; 588 TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
589 tp->pushed_seq = tp->write_seq; 589 tp->pushed_seq = tp->write_seq;
590 } 590 }
591 591
592 static inline bool forced_push(const struct tcp_sock *tp) 592 static inline bool forced_push(const struct tcp_sock *tp)
593 { 593 {
594 return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1)); 594 return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));
595 } 595 }
596 596
597 static inline void skb_entail(struct sock *sk, struct sk_buff *skb) 597 static inline void skb_entail(struct sock *sk, struct sk_buff *skb)
598 { 598 {
599 struct tcp_sock *tp = tcp_sk(sk); 599 struct tcp_sock *tp = tcp_sk(sk);
600 struct tcp_skb_cb *tcb = TCP_SKB_CB(skb); 600 struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
601 601
602 skb->csum = 0; 602 skb->csum = 0;
603 tcb->seq = tcb->end_seq = tp->write_seq; 603 tcb->seq = tcb->end_seq = tp->write_seq;
604 tcb->tcp_flags = TCPHDR_ACK; 604 tcb->tcp_flags = TCPHDR_ACK;
605 tcb->sacked = 0; 605 tcb->sacked = 0;
606 skb_header_release(skb); 606 skb_header_release(skb);
607 tcp_add_write_queue_tail(sk, skb); 607 tcp_add_write_queue_tail(sk, skb);
608 sk->sk_wmem_queued += skb->truesize; 608 sk->sk_wmem_queued += skb->truesize;
609 sk_mem_charge(sk, skb->truesize); 609 sk_mem_charge(sk, skb->truesize);
610 if (tp->nonagle & TCP_NAGLE_PUSH) 610 if (tp->nonagle & TCP_NAGLE_PUSH)
611 tp->nonagle &= ~TCP_NAGLE_PUSH; 611 tp->nonagle &= ~TCP_NAGLE_PUSH;
612 } 612 }
613 613
614 static inline void tcp_mark_urg(struct tcp_sock *tp, int flags) 614 static inline void tcp_mark_urg(struct tcp_sock *tp, int flags)
615 { 615 {
616 if (flags & MSG_OOB) 616 if (flags & MSG_OOB)
617 tp->snd_up = tp->write_seq; 617 tp->snd_up = tp->write_seq;
618 } 618 }
619 619
620 static inline void tcp_push(struct sock *sk, int flags, int mss_now, 620 static inline void tcp_push(struct sock *sk, int flags, int mss_now,
621 int nonagle) 621 int nonagle)
622 { 622 {
623 if (tcp_send_head(sk)) { 623 if (tcp_send_head(sk)) {
624 struct tcp_sock *tp = tcp_sk(sk); 624 struct tcp_sock *tp = tcp_sk(sk);
625 625
626 if (!(flags & MSG_MORE) || forced_push(tp)) 626 if (!(flags & MSG_MORE) || forced_push(tp))
627 tcp_mark_push(tp, tcp_write_queue_tail(sk)); 627 tcp_mark_push(tp, tcp_write_queue_tail(sk));
628 628
629 tcp_mark_urg(tp, flags); 629 tcp_mark_urg(tp, flags);
630 __tcp_push_pending_frames(sk, mss_now, 630 __tcp_push_pending_frames(sk, mss_now,
631 (flags & MSG_MORE) ? TCP_NAGLE_CORK : nonagle); 631 (flags & MSG_MORE) ? TCP_NAGLE_CORK : nonagle);
632 } 632 }
633 } 633 }
634 634
635 static int tcp_splice_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb, 635 static int tcp_splice_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
636 unsigned int offset, size_t len) 636 unsigned int offset, size_t len)
637 { 637 {
638 struct tcp_splice_state *tss = rd_desc->arg.data; 638 struct tcp_splice_state *tss = rd_desc->arg.data;
639 int ret; 639 int ret;
640 640
641 ret = skb_splice_bits(skb, offset, tss->pipe, min(rd_desc->count, len), 641 ret = skb_splice_bits(skb, offset, tss->pipe, min(rd_desc->count, len),
642 tss->flags); 642 tss->flags);
643 if (ret > 0) 643 if (ret > 0)
644 rd_desc->count -= ret; 644 rd_desc->count -= ret;
645 return ret; 645 return ret;
646 } 646 }
647 647
648 static int __tcp_splice_read(struct sock *sk, struct tcp_splice_state *tss) 648 static int __tcp_splice_read(struct sock *sk, struct tcp_splice_state *tss)
649 { 649 {
650 /* Store TCP splice context information in read_descriptor_t. */ 650 /* Store TCP splice context information in read_descriptor_t. */
651 read_descriptor_t rd_desc = { 651 read_descriptor_t rd_desc = {
652 .arg.data = tss, 652 .arg.data = tss,
653 .count = tss->len, 653 .count = tss->len,
654 }; 654 };
655 655
656 return tcp_read_sock(sk, &rd_desc, tcp_splice_data_recv); 656 return tcp_read_sock(sk, &rd_desc, tcp_splice_data_recv);
657 } 657 }
658 658
659 /** 659 /**
660 * tcp_splice_read - splice data from TCP socket to a pipe 660 * tcp_splice_read - splice data from TCP socket to a pipe
661 * @sock: socket to splice from 661 * @sock: socket to splice from
662 * @ppos: position (not valid) 662 * @ppos: position (not valid)
663 * @pipe: pipe to splice to 663 * @pipe: pipe to splice to
664 * @len: number of bytes to splice 664 * @len: number of bytes to splice
665 * @flags: splice modifier flags 665 * @flags: splice modifier flags
666 * 666 *
667 * Description: 667 * Description:
668 * Will read pages from given socket and fill them into a pipe. 668 * Will read pages from given socket and fill them into a pipe.
669 * 669 *
670 **/ 670 **/
671 ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos, 671 ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
672 struct pipe_inode_info *pipe, size_t len, 672 struct pipe_inode_info *pipe, size_t len,
673 unsigned int flags) 673 unsigned int flags)
674 { 674 {
675 struct sock *sk = sock->sk; 675 struct sock *sk = sock->sk;
676 struct tcp_splice_state tss = { 676 struct tcp_splice_state tss = {
677 .pipe = pipe, 677 .pipe = pipe,
678 .len = len, 678 .len = len,
679 .flags = flags, 679 .flags = flags,
680 }; 680 };
681 long timeo; 681 long timeo;
682 ssize_t spliced; 682 ssize_t spliced;
683 int ret; 683 int ret;
684 684
685 sock_rps_record_flow(sk); 685 sock_rps_record_flow(sk);
686 /* 686 /*
687 * We can't seek on a socket input 687 * We can't seek on a socket input
688 */ 688 */
689 if (unlikely(*ppos)) 689 if (unlikely(*ppos))
690 return -ESPIPE; 690 return -ESPIPE;
691 691
692 ret = spliced = 0; 692 ret = spliced = 0;
693 693
694 lock_sock(sk); 694 lock_sock(sk);
695 695
696 timeo = sock_rcvtimeo(sk, sock->file->f_flags & O_NONBLOCK); 696 timeo = sock_rcvtimeo(sk, sock->file->f_flags & O_NONBLOCK);
697 while (tss.len) { 697 while (tss.len) {
698 ret = __tcp_splice_read(sk, &tss); 698 ret = __tcp_splice_read(sk, &tss);
699 if (ret < 0) 699 if (ret < 0)
700 break; 700 break;
701 else if (!ret) { 701 else if (!ret) {
702 if (spliced) 702 if (spliced)
703 break; 703 break;
704 if (sock_flag(sk, SOCK_DONE)) 704 if (sock_flag(sk, SOCK_DONE))
705 break; 705 break;
706 if (sk->sk_err) { 706 if (sk->sk_err) {
707 ret = sock_error(sk); 707 ret = sock_error(sk);
708 break; 708 break;
709 } 709 }
710 if (sk->sk_shutdown & RCV_SHUTDOWN) 710 if (sk->sk_shutdown & RCV_SHUTDOWN)
711 break; 711 break;
712 if (sk->sk_state == TCP_CLOSE) { 712 if (sk->sk_state == TCP_CLOSE) {
713 /* 713 /*
714 * This occurs when user tries to read 714 * This occurs when user tries to read
715 * from never connected socket. 715 * from never connected socket.
716 */ 716 */
717 if (!sock_flag(sk, SOCK_DONE)) 717 if (!sock_flag(sk, SOCK_DONE))
718 ret = -ENOTCONN; 718 ret = -ENOTCONN;
719 break; 719 break;
720 } 720 }
721 if (!timeo) { 721 if (!timeo) {
722 ret = -EAGAIN; 722 ret = -EAGAIN;
723 break; 723 break;
724 } 724 }
725 sk_wait_data(sk, &timeo); 725 sk_wait_data(sk, &timeo);
726 if (signal_pending(current)) { 726 if (signal_pending(current)) {
727 ret = sock_intr_errno(timeo); 727 ret = sock_intr_errno(timeo);
728 break; 728 break;
729 } 729 }
730 continue; 730 continue;
731 } 731 }
732 tss.len -= ret; 732 tss.len -= ret;
733 spliced += ret; 733 spliced += ret;
734 734
735 if (!timeo) 735 if (!timeo)
736 break; 736 break;
737 release_sock(sk); 737 release_sock(sk);
738 lock_sock(sk); 738 lock_sock(sk);
739 739
740 if (sk->sk_err || sk->sk_state == TCP_CLOSE || 740 if (sk->sk_err || sk->sk_state == TCP_CLOSE ||
741 (sk->sk_shutdown & RCV_SHUTDOWN) || 741 (sk->sk_shutdown & RCV_SHUTDOWN) ||
742 signal_pending(current)) 742 signal_pending(current))
743 break; 743 break;
744 } 744 }
745 745
746 release_sock(sk); 746 release_sock(sk);
747 747
748 if (spliced) 748 if (spliced)
749 return spliced; 749 return spliced;
750 750
751 return ret; 751 return ret;
752 } 752 }
753 EXPORT_SYMBOL(tcp_splice_read); 753 EXPORT_SYMBOL(tcp_splice_read);
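
[Editorial note] tcp_splice_read() is the kernel half of splice(2) on a connected TCP socket; the received data is handed to the pipe as page references rather than copied through a user buffer. A hedged userspace sketch of one relay step (relay_once() is illustrative, not from this commit):

	/*
	 * Hypothetical userspace counterpart: splice up to 64 KiB from a
	 * connected TCP socket into a freshly created pipe, then on to
	 * another file descriptor.
	 */
	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <unistd.h>

	static ssize_t relay_once(int tcp_fd, int out_fd)
	{
		int pfd[2];
		ssize_t n;

		if (pipe(pfd) < 0)
			return -1;

		n = splice(tcp_fd, NULL, pfd[1], NULL, 65536, SPLICE_F_MOVE);
		if (n > 0)
			n = splice(pfd[0], NULL, out_fd, NULL, n, SPLICE_F_MOVE);

		close(pfd[0]);
		close(pfd[1]);
		return n;
	}
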
754 754
755 struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp) 755 struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
756 { 756 {
757 struct sk_buff *skb; 757 struct sk_buff *skb;
758 758
759 /* The TCP header must be at least 32-bit aligned. */ 759 /* The TCP header must be at least 32-bit aligned. */
760 size = ALIGN(size, 4); 760 size = ALIGN(size, 4);
761 761
762 skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp); 762 skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
763 if (skb) { 763 if (skb) {
764 if (sk_wmem_schedule(sk, skb->truesize)) { 764 if (sk_wmem_schedule(sk, skb->truesize)) {
765 skb_reserve(skb, sk->sk_prot->max_header); 765 skb_reserve(skb, sk->sk_prot->max_header);
766 /* 766 /*
767 * Make sure that we have exactly size bytes 767 * Make sure that we have exactly size bytes
768 * available to the caller, no more, no less. 768 * available to the caller, no more, no less.
769 */ 769 */
770 skb->reserved_tailroom = skb->end - skb->tail - size; 770 skb->reserved_tailroom = skb->end - skb->tail - size;
771 return skb; 771 return skb;
772 } 772 }
773 __kfree_skb(skb); 773 __kfree_skb(skb);
774 } else { 774 } else {
775 sk->sk_prot->enter_memory_pressure(sk); 775 sk->sk_prot->enter_memory_pressure(sk);
776 sk_stream_moderate_sndbuf(sk); 776 sk_stream_moderate_sndbuf(sk);
777 } 777 }
778 return NULL; 778 return NULL;
779 } 779 }
780 780
781 static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now, 781 static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now,
782 int large_allowed) 782 int large_allowed)
783 { 783 {
784 struct tcp_sock *tp = tcp_sk(sk); 784 struct tcp_sock *tp = tcp_sk(sk);
785 u32 xmit_size_goal, old_size_goal; 785 u32 xmit_size_goal, old_size_goal;
786 786
787 xmit_size_goal = mss_now; 787 xmit_size_goal = mss_now;
788 788
789 if (large_allowed && sk_can_gso(sk)) { 789 if (large_allowed && sk_can_gso(sk)) {
790 u32 gso_size, hlen; 790 u32 gso_size, hlen;
791 791
792 /* Maybe we should/could use sk->sk_prot->max_header here ? */ 792 /* Maybe we should/could use sk->sk_prot->max_header here ? */
793 hlen = inet_csk(sk)->icsk_af_ops->net_header_len + 793 hlen = inet_csk(sk)->icsk_af_ops->net_header_len +
794 inet_csk(sk)->icsk_ext_hdr_len + 794 inet_csk(sk)->icsk_ext_hdr_len +
795 tp->tcp_header_len; 795 tp->tcp_header_len;
796 796
797 /* Goal is to send at least one packet per ms, 797 /* Goal is to send at least one packet per ms,
798 * not one big TSO packet every 100 ms. 798 * not one big TSO packet every 100 ms.
799 * This preserves ACK clocking and is consistent 799 * This preserves ACK clocking and is consistent
800 * with tcp_tso_should_defer() heuristic. 800 * with tcp_tso_should_defer() heuristic.
801 */ 801 */
802 gso_size = sk->sk_pacing_rate / (2 * MSEC_PER_SEC); 802 gso_size = sk->sk_pacing_rate / (2 * MSEC_PER_SEC);
803 gso_size = max_t(u32, gso_size, 803 gso_size = max_t(u32, gso_size,
804 sysctl_tcp_min_tso_segs * mss_now); 804 sysctl_tcp_min_tso_segs * mss_now);
805 805
806 xmit_size_goal = min_t(u32, gso_size, 806 xmit_size_goal = min_t(u32, gso_size,
807 sk->sk_gso_max_size - 1 - hlen); 807 sk->sk_gso_max_size - 1 - hlen);
808 808
809 /* TSQ : try to have at least two segments in flight 809 /* TSQ : try to have at least two segments in flight
810 * (one in NIC TX ring, another in Qdisc) 810 * (one in NIC TX ring, another in Qdisc)
811 */ 811 */
812 xmit_size_goal = min_t(u32, xmit_size_goal, 812 xmit_size_goal = min_t(u32, xmit_size_goal,
813 sysctl_tcp_limit_output_bytes >> 1); 813 sysctl_tcp_limit_output_bytes >> 1);
814 814
815 xmit_size_goal = tcp_bound_to_half_wnd(tp, xmit_size_goal); 815 xmit_size_goal = tcp_bound_to_half_wnd(tp, xmit_size_goal);
816 816
817 /* We try hard to avoid divides here */ 817 /* We try hard to avoid divides here */
818 old_size_goal = tp->xmit_size_goal_segs * mss_now; 818 old_size_goal = tp->xmit_size_goal_segs * mss_now;
819 819
820 if (likely(old_size_goal <= xmit_size_goal && 820 if (likely(old_size_goal <= xmit_size_goal &&
821 old_size_goal + mss_now > xmit_size_goal)) { 821 old_size_goal + mss_now > xmit_size_goal)) {
822 xmit_size_goal = old_size_goal; 822 xmit_size_goal = old_size_goal;
823 } else { 823 } else {
824 tp->xmit_size_goal_segs = 824 tp->xmit_size_goal_segs =
825 min_t(u16, xmit_size_goal / mss_now, 825 min_t(u16, xmit_size_goal / mss_now,
826 sk->sk_gso_max_segs); 826 sk->sk_gso_max_segs);
827 xmit_size_goal = tp->xmit_size_goal_segs * mss_now; 827 xmit_size_goal = tp->xmit_size_goal_segs * mss_now;
828 } 828 }
829 } 829 }
830 830
831 return max(xmit_size_goal, mss_now); 831 return max(xmit_size_goal, mss_now);
832 } 832 }
833 833
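The pacing-based sizing in tcp_xmit_size_goal() above aims for roughly one TSO burst per millisecond (half of what one millisecond of pacing allows), floored at sysctl_tcp_min_tso_segs full-size segments. A hedged stand-alone sketch of just that computation; the function name, constants and sample numbers are illustrative, not the kernel implementation:

#include <stdint.h>
#include <stdio.h>

#define MSEC_PER_SEC 1000UL

/* Bytes to batch into one TSO burst for a pacing rate in bytes/sec:
 * half a millisecond's worth of pacing, but never fewer than
 * min_tso_segs full-size segments. */
static uint32_t tso_goal_bytes(uint64_t pacing_rate, uint32_t mss,
                               uint32_t min_tso_segs)
{
        uint32_t gso_size = (uint32_t)(pacing_rate / (2 * MSEC_PER_SEC));

        if (gso_size < min_tso_segs * mss)
                gso_size = min_tso_segs * mss;
        return gso_size;
}

int main(void)
{
        /* ~125 MB/s pacing (about 1 Gbit/s), 1448-byte MSS, 2-segment floor. */
        printf("size goal: %u bytes\n", tso_goal_bytes(125000000ULL, 1448, 2));
        return 0;
}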
834 static int tcp_send_mss(struct sock *sk, int *size_goal, int flags) 834 static int tcp_send_mss(struct sock *sk, int *size_goal, int flags)
835 { 835 {
836 int mss_now; 836 int mss_now;
837 837
838 mss_now = tcp_current_mss(sk); 838 mss_now = tcp_current_mss(sk);
839 *size_goal = tcp_xmit_size_goal(sk, mss_now, !(flags & MSG_OOB)); 839 *size_goal = tcp_xmit_size_goal(sk, mss_now, !(flags & MSG_OOB));
840 840
841 return mss_now; 841 return mss_now;
842 } 842 }
843 843
844 static ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset, 844 static ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset,
845 size_t size, int flags) 845 size_t size, int flags)
846 { 846 {
847 struct tcp_sock *tp = tcp_sk(sk); 847 struct tcp_sock *tp = tcp_sk(sk);
848 int mss_now, size_goal; 848 int mss_now, size_goal;
849 int err; 849 int err;
850 ssize_t copied; 850 ssize_t copied;
851 long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT); 851 long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
852 852
853 /* Wait for a connection to finish. One exception is TCP Fast Open 853 /* Wait for a connection to finish. One exception is TCP Fast Open
854 * (passive side) where data is allowed to be sent before a connection 854 * (passive side) where data is allowed to be sent before a connection
855 * is fully established. 855 * is fully established.
856 */ 856 */
857 if (((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) && 857 if (((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) &&
858 !tcp_passive_fastopen(sk)) { 858 !tcp_passive_fastopen(sk)) {
859 if ((err = sk_stream_wait_connect(sk, &timeo)) != 0) 859 if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
860 goto out_err; 860 goto out_err;
861 } 861 }
862 862
863 clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); 863 clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
864 864
865 mss_now = tcp_send_mss(sk, &size_goal, flags); 865 mss_now = tcp_send_mss(sk, &size_goal, flags);
866 copied = 0; 866 copied = 0;
867 867
868 err = -EPIPE; 868 err = -EPIPE;
869 if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) 869 if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
870 goto out_err; 870 goto out_err;
871 871
872 while (size > 0) { 872 while (size > 0) {
873 struct sk_buff *skb = tcp_write_queue_tail(sk); 873 struct sk_buff *skb = tcp_write_queue_tail(sk);
874 int copy, i; 874 int copy, i;
875 bool can_coalesce; 875 bool can_coalesce;
876 876
877 if (!tcp_send_head(sk) || (copy = size_goal - skb->len) <= 0) { 877 if (!tcp_send_head(sk) || (copy = size_goal - skb->len) <= 0) {
878 new_segment: 878 new_segment:
879 if (!sk_stream_memory_free(sk)) 879 if (!sk_stream_memory_free(sk))
880 goto wait_for_sndbuf; 880 goto wait_for_sndbuf;
881 881
882 skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation); 882 skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation);
883 if (!skb) 883 if (!skb)
884 goto wait_for_memory; 884 goto wait_for_memory;
885 885
886 skb_entail(sk, skb); 886 skb_entail(sk, skb);
887 copy = size_goal; 887 copy = size_goal;
888 } 888 }
889 889
890 if (copy > size) 890 if (copy > size)
891 copy = size; 891 copy = size;
892 892
893 i = skb_shinfo(skb)->nr_frags; 893 i = skb_shinfo(skb)->nr_frags;
894 can_coalesce = skb_can_coalesce(skb, i, page, offset); 894 can_coalesce = skb_can_coalesce(skb, i, page, offset);
895 if (!can_coalesce && i >= MAX_SKB_FRAGS) { 895 if (!can_coalesce && i >= MAX_SKB_FRAGS) {
896 tcp_mark_push(tp, skb); 896 tcp_mark_push(tp, skb);
897 goto new_segment; 897 goto new_segment;
898 } 898 }
899 if (!sk_wmem_schedule(sk, copy)) 899 if (!sk_wmem_schedule(sk, copy))
900 goto wait_for_memory; 900 goto wait_for_memory;
901 901
902 if (can_coalesce) { 902 if (can_coalesce) {
903 skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy); 903 skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
904 } else { 904 } else {
905 get_page(page); 905 get_page(page);
906 skb_fill_page_desc(skb, i, page, offset, copy); 906 skb_fill_page_desc(skb, i, page, offset, copy);
907 } 907 }
908 skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG; 908 skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
909 909
910 skb->len += copy; 910 skb->len += copy;
911 skb->data_len += copy; 911 skb->data_len += copy;
912 skb->truesize += copy; 912 skb->truesize += copy;
913 sk->sk_wmem_queued += copy; 913 sk->sk_wmem_queued += copy;
914 sk_mem_charge(sk, copy); 914 sk_mem_charge(sk, copy);
915 skb->ip_summed = CHECKSUM_PARTIAL; 915 skb->ip_summed = CHECKSUM_PARTIAL;
916 tp->write_seq += copy; 916 tp->write_seq += copy;
917 TCP_SKB_CB(skb)->end_seq += copy; 917 TCP_SKB_CB(skb)->end_seq += copy;
918 skb_shinfo(skb)->gso_segs = 0; 918 skb_shinfo(skb)->gso_segs = 0;
919 919
920 if (!copied) 920 if (!copied)
921 TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH; 921 TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH;
922 922
923 copied += copy; 923 copied += copy;
924 offset += copy; 924 offset += copy;
925 if (!(size -= copy)) 925 if (!(size -= copy))
926 goto out; 926 goto out;
927 927
928 if (skb->len < size_goal || (flags & MSG_OOB)) 928 if (skb->len < size_goal || (flags & MSG_OOB))
929 continue; 929 continue;
930 930
931 if (forced_push(tp)) { 931 if (forced_push(tp)) {
932 tcp_mark_push(tp, skb); 932 tcp_mark_push(tp, skb);
933 __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH); 933 __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
934 } else if (skb == tcp_send_head(sk)) 934 } else if (skb == tcp_send_head(sk))
935 tcp_push_one(sk, mss_now); 935 tcp_push_one(sk, mss_now);
936 continue; 936 continue;
937 937
938 wait_for_sndbuf: 938 wait_for_sndbuf:
939 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); 939 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
940 wait_for_memory: 940 wait_for_memory:
941 tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH); 941 tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);
942 942
943 if ((err = sk_stream_wait_memory(sk, &timeo)) != 0) 943 if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
944 goto do_error; 944 goto do_error;
945 945
946 mss_now = tcp_send_mss(sk, &size_goal, flags); 946 mss_now = tcp_send_mss(sk, &size_goal, flags);
947 } 947 }
948 948
949 out: 949 out:
950 if (copied && !(flags & MSG_SENDPAGE_NOTLAST)) 950 if (copied && !(flags & MSG_SENDPAGE_NOTLAST))
951 tcp_push(sk, flags, mss_now, tp->nonagle); 951 tcp_push(sk, flags, mss_now, tp->nonagle);
952 return copied; 952 return copied;
953 953
954 do_error: 954 do_error:
955 if (copied) 955 if (copied)
956 goto out; 956 goto out;
957 out_err: 957 out_err:
958 return sk_stream_error(sk, flags, err); 958 return sk_stream_error(sk, flags, err);
959 } 959 }
960 960
961 int tcp_sendpage(struct sock *sk, struct page *page, int offset, 961 int tcp_sendpage(struct sock *sk, struct page *page, int offset,
962 size_t size, int flags) 962 size_t size, int flags)
963 { 963 {
964 ssize_t res; 964 ssize_t res;
965 965
966 if (!(sk->sk_route_caps & NETIF_F_SG) || 966 if (!(sk->sk_route_caps & NETIF_F_SG) ||
967 !(sk->sk_route_caps & NETIF_F_ALL_CSUM)) 967 !(sk->sk_route_caps & NETIF_F_ALL_CSUM))
968 return sock_no_sendpage(sk->sk_socket, page, offset, size, 968 return sock_no_sendpage(sk->sk_socket, page, offset, size,
969 flags); 969 flags);
970 970
971 lock_sock(sk); 971 lock_sock(sk);
972 res = do_tcp_sendpages(sk, page, offset, size, flags); 972 res = do_tcp_sendpages(sk, page, offset, size, flags);
973 release_sock(sk); 973 release_sock(sk);
974 return res; 974 return res;
975 } 975 }
976 EXPORT_SYMBOL(tcp_sendpage); 976 EXPORT_SYMBOL(tcp_sendpage);
977 977
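tcp_sendpage() above is what a userspace sendfile(2) on a TCP socket typically ends up in, falling back to an ordinary copying path when the route lacks scatter-gather or checksum offload. A minimal, hedged caller sketch; sock_fd and file_fd are assumed to be an already-connected TCP socket and an open regular file:

#include <stdio.h>
#include <sys/sendfile.h>
#include <sys/types.h>

static int send_whole_file(int sock_fd, int file_fd, size_t file_size)
{
        off_t off = 0;

        while ((size_t)off < file_size) {
                /* sendfile() advances "off" by the number of bytes sent. */
                ssize_t n = sendfile(sock_fd, file_fd, &off,
                                     file_size - (size_t)off);
                if (n < 0) {
                        perror("sendfile");
                        return -1;
                }
                if (n == 0)        /* unexpected EOF on the source file */
                        break;
        }
        return 0;
}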
978 static inline int select_size(const struct sock *sk, bool sg) 978 static inline int select_size(const struct sock *sk, bool sg)
979 { 979 {
980 const struct tcp_sock *tp = tcp_sk(sk); 980 const struct tcp_sock *tp = tcp_sk(sk);
981 int tmp = tp->mss_cache; 981 int tmp = tp->mss_cache;
982 982
983 if (sg) { 983 if (sg) {
984 if (sk_can_gso(sk)) { 984 if (sk_can_gso(sk)) {
985 /* Small frames won't use a full page: 985 /* Small frames won't use a full page:
985 /* Small frames won't use a full page: 985 /* Small frames won't use a full page:
986 * Payload will immediately follow tcp header. 986 * Payload will immediately follow tcp header.
987 */ 987 */
988 tmp = SKB_WITH_OVERHEAD(2048 - MAX_TCP_HEADER); 988 tmp = SKB_WITH_OVERHEAD(2048 - MAX_TCP_HEADER);
989 } else { 989 } else {
990 int pgbreak = SKB_MAX_HEAD(MAX_TCP_HEADER); 990 int pgbreak = SKB_MAX_HEAD(MAX_TCP_HEADER);
991 991
992 if (tmp >= pgbreak && 992 if (tmp >= pgbreak &&
993 tmp <= pgbreak + (MAX_SKB_FRAGS - 1) * PAGE_SIZE) 993 tmp <= pgbreak + (MAX_SKB_FRAGS - 1) * PAGE_SIZE)
994 tmp = pgbreak; 994 tmp = pgbreak;
995 } 995 }
996 } 996 }
997 997
998 return tmp; 998 return tmp;
999 } 999 }
1000 1000
1001 void tcp_free_fastopen_req(struct tcp_sock *tp) 1001 void tcp_free_fastopen_req(struct tcp_sock *tp)
1002 { 1002 {
1003 if (tp->fastopen_req != NULL) { 1003 if (tp->fastopen_req != NULL) {
1004 kfree(tp->fastopen_req); 1004 kfree(tp->fastopen_req);
1005 tp->fastopen_req = NULL; 1005 tp->fastopen_req = NULL;
1006 } 1006 }
1007 } 1007 }
1008 1008
1009 static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg, int *size) 1009 static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg, int *size)
1010 { 1010 {
1011 struct tcp_sock *tp = tcp_sk(sk); 1011 struct tcp_sock *tp = tcp_sk(sk);
1012 int err, flags; 1012 int err, flags;
1013 1013
1014 if (!(sysctl_tcp_fastopen & TFO_CLIENT_ENABLE)) 1014 if (!(sysctl_tcp_fastopen & TFO_CLIENT_ENABLE))
1015 return -EOPNOTSUPP; 1015 return -EOPNOTSUPP;
1016 if (tp->fastopen_req != NULL) 1016 if (tp->fastopen_req != NULL)
1017 return -EALREADY; /* Another Fast Open is in progress */ 1017 return -EALREADY; /* Another Fast Open is in progress */
1018 1018
1019 tp->fastopen_req = kzalloc(sizeof(struct tcp_fastopen_request), 1019 tp->fastopen_req = kzalloc(sizeof(struct tcp_fastopen_request),
1020 sk->sk_allocation); 1020 sk->sk_allocation);
1021 if (unlikely(tp->fastopen_req == NULL)) 1021 if (unlikely(tp->fastopen_req == NULL))
1022 return -ENOBUFS; 1022 return -ENOBUFS;
1023 tp->fastopen_req->data = msg; 1023 tp->fastopen_req->data = msg;
1024 1024
1025 flags = (msg->msg_flags & MSG_DONTWAIT) ? O_NONBLOCK : 0; 1025 flags = (msg->msg_flags & MSG_DONTWAIT) ? O_NONBLOCK : 0;
1026 err = __inet_stream_connect(sk->sk_socket, msg->msg_name, 1026 err = __inet_stream_connect(sk->sk_socket, msg->msg_name,
1027 msg->msg_namelen, flags); 1027 msg->msg_namelen, flags);
1028 *size = tp->fastopen_req->copied; 1028 *size = tp->fastopen_req->copied;
1029 tcp_free_fastopen_req(tp); 1029 tcp_free_fastopen_req(tp);
1030 return err; 1030 return err;
1031 } 1031 }
1032 1032
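tcp_sendmsg_fastopen() above is reached when a client skips connect() and instead sends with MSG_FASTOPEN, letting the first data ride on the SYN (gated by the client bit of net.ipv4.tcp_fastopen, as the TFO_CLIENT_ENABLE check shows). A minimal userspace sketch of that calling pattern; fastopen_send and its parameters are illustrative:

#include <netinet/in.h>
#include <stddef.h>
#include <sys/socket.h>
#include <unistd.h>

#ifndef MSG_FASTOPEN
#define MSG_FASTOPEN 0x20000000        /* older libc headers may lack it */
#endif

/* Create a TCP socket and send the first bytes in the SYN via Fast Open;
 * returns the (connecting) socket, or -1 on error. */
static int fastopen_send(const struct sockaddr_in *dst,
                         const void *buf, size_t len)
{
        int fd = socket(AF_INET, SOCK_STREAM, 0);

        if (fd < 0)
                return -1;
        /* sendto() with MSG_FASTOPEN replaces the usual connect()+send(). */
        if (sendto(fd, buf, len, MSG_FASTOPEN,
                   (const struct sockaddr *)dst, sizeof(*dst)) < 0) {
                close(fd);
                return -1;
        }
        return fd;
}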
1033 int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, 1033 int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
1034 size_t size) 1034 size_t size)
1035 { 1035 {
1036 struct iovec *iov; 1036 struct iovec *iov;
1037 struct tcp_sock *tp = tcp_sk(sk); 1037 struct tcp_sock *tp = tcp_sk(sk);
1038 struct sk_buff *skb; 1038 struct sk_buff *skb;
1039 int iovlen, flags, err, copied = 0; 1039 int iovlen, flags, err, copied = 0;
1040 int mss_now = 0, size_goal, copied_syn = 0, offset = 0; 1040 int mss_now = 0, size_goal, copied_syn = 0, offset = 0;
1041 bool sg; 1041 bool sg;
1042 long timeo; 1042 long timeo;
1043 1043
1044 lock_sock(sk); 1044 lock_sock(sk);
1045 1045
1046 flags = msg->msg_flags; 1046 flags = msg->msg_flags;
1047 if (flags & MSG_FASTOPEN) { 1047 if (flags & MSG_FASTOPEN) {
1048 err = tcp_sendmsg_fastopen(sk, msg, &copied_syn); 1048 err = tcp_sendmsg_fastopen(sk, msg, &copied_syn);
1049 if (err == -EINPROGRESS && copied_syn > 0) 1049 if (err == -EINPROGRESS && copied_syn > 0)
1050 goto out; 1050 goto out;
1051 else if (err) 1051 else if (err)
1052 goto out_err; 1052 goto out_err;
1053 offset = copied_syn; 1053 offset = copied_syn;
1054 } 1054 }
1055 1055
1056 timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT); 1056 timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
1057 1057
1058 /* Wait for a connection to finish. One exception is TCP Fast Open 1058 /* Wait for a connection to finish. One exception is TCP Fast Open
1059 * (passive side) where data is allowed to be sent before a connection 1059 * (passive side) where data is allowed to be sent before a connection
1060 * is fully established. 1060 * is fully established.
1061 */ 1061 */
1062 if (((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) && 1062 if (((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) &&
1063 !tcp_passive_fastopen(sk)) { 1063 !tcp_passive_fastopen(sk)) {
1064 if ((err = sk_stream_wait_connect(sk, &timeo)) != 0) 1064 if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
1065 goto do_error; 1065 goto do_error;
1066 } 1066 }
1067 1067
1068 if (unlikely(tp->repair)) { 1068 if (unlikely(tp->repair)) {
1069 if (tp->repair_queue == TCP_RECV_QUEUE) { 1069 if (tp->repair_queue == TCP_RECV_QUEUE) {
1070 copied = tcp_send_rcvq(sk, msg, size); 1070 copied = tcp_send_rcvq(sk, msg, size);
1071 goto out; 1071 goto out;
1072 } 1072 }
1073 1073
1074 err = -EINVAL; 1074 err = -EINVAL;
1075 if (tp->repair_queue == TCP_NO_QUEUE) 1075 if (tp->repair_queue == TCP_NO_QUEUE)
1076 goto out_err; 1076 goto out_err;
1077 1077
1078 /* 'common' sending to sendq */ 1078 /* 'common' sending to sendq */
1079 } 1079 }
1080 1080
1081 /* This should be in poll */ 1081 /* This should be in poll */
1082 clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); 1082 clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
1083 1083
1084 mss_now = tcp_send_mss(sk, &size_goal, flags); 1084 mss_now = tcp_send_mss(sk, &size_goal, flags);
1085 1085
1086 /* Ok commence sending. */ 1086 /* Ok commence sending. */
1087 iovlen = msg->msg_iovlen; 1087 iovlen = msg->msg_iovlen;
1088 iov = msg->msg_iov; 1088 iov = msg->msg_iov;
1089 copied = 0; 1089 copied = 0;
1090 1090
1091 err = -EPIPE; 1091 err = -EPIPE;
1092 if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) 1092 if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
1093 goto out_err; 1093 goto out_err;
1094 1094
1095 sg = !!(sk->sk_route_caps & NETIF_F_SG); 1095 sg = !!(sk->sk_route_caps & NETIF_F_SG);
1096 1096
1097 while (--iovlen >= 0) { 1097 while (--iovlen >= 0) {
1098 size_t seglen = iov->iov_len; 1098 size_t seglen = iov->iov_len;
1099 unsigned char __user *from = iov->iov_base; 1099 unsigned char __user *from = iov->iov_base;
1100 1100
1101 iov++; 1101 iov++;
1102 if (unlikely(offset > 0)) { /* Skip bytes copied in SYN */ 1102 if (unlikely(offset > 0)) { /* Skip bytes copied in SYN */
1103 if (offset >= seglen) { 1103 if (offset >= seglen) {
1104 offset -= seglen; 1104 offset -= seglen;
1105 continue; 1105 continue;
1106 } 1106 }
1107 seglen -= offset; 1107 seglen -= offset;
1108 from += offset; 1108 from += offset;
1109 offset = 0; 1109 offset = 0;
1110 } 1110 }
1111 1111
1112 while (seglen > 0) { 1112 while (seglen > 0) {
1113 int copy = 0; 1113 int copy = 0;
1114 int max = size_goal; 1114 int max = size_goal;
1115 1115
1116 skb = tcp_write_queue_tail(sk); 1116 skb = tcp_write_queue_tail(sk);
1117 if (tcp_send_head(sk)) { 1117 if (tcp_send_head(sk)) {
1118 if (skb->ip_summed == CHECKSUM_NONE) 1118 if (skb->ip_summed == CHECKSUM_NONE)
1119 max = mss_now; 1119 max = mss_now;
1120 copy = max - skb->len; 1120 copy = max - skb->len;
1121 } 1121 }
1122 1122
1123 if (copy <= 0) { 1123 if (copy <= 0) {
1124 new_segment: 1124 new_segment:
1125 /* Allocate new segment. If the interface is SG, 1125 /* Allocate new segment. If the interface is SG,
1126 * allocate skb fitting to single page. 1126 * allocate skb fitting to single page.
1127 */ 1127 */
1128 if (!sk_stream_memory_free(sk)) 1128 if (!sk_stream_memory_free(sk))
1129 goto wait_for_sndbuf; 1129 goto wait_for_sndbuf;
1130 1130
1131 skb = sk_stream_alloc_skb(sk, 1131 skb = sk_stream_alloc_skb(sk,
1132 select_size(sk, sg), 1132 select_size(sk, sg),
1133 sk->sk_allocation); 1133 sk->sk_allocation);
1134 if (!skb) 1134 if (!skb)
1135 goto wait_for_memory; 1135 goto wait_for_memory;
1136 1136
1137 /* 1137 /*
1138 * All packets are restored as if they have 1138 * All packets are restored as if they have
1139 * already been sent. 1139 * already been sent.
1140 */ 1140 */
1141 if (tp->repair) 1141 if (tp->repair)
1142 TCP_SKB_CB(skb)->when = tcp_time_stamp; 1142 TCP_SKB_CB(skb)->when = tcp_time_stamp;
1143 1143
1144 /* 1144 /*
1145 * Check whether we can use HW checksum. 1145 * Check whether we can use HW checksum.
1146 */ 1146 */
1147 if (sk->sk_route_caps & NETIF_F_ALL_CSUM) 1147 if (sk->sk_route_caps & NETIF_F_ALL_CSUM)
1148 skb->ip_summed = CHECKSUM_PARTIAL; 1148 skb->ip_summed = CHECKSUM_PARTIAL;
1149 1149
1150 skb_entail(sk, skb); 1150 skb_entail(sk, skb);
1151 copy = size_goal; 1151 copy = size_goal;
1152 max = size_goal; 1152 max = size_goal;
1153 } 1153 }
1154 1154
1155 /* Try to append data to the end of skb. */ 1155 /* Try to append data to the end of skb. */
1156 if (copy > seglen) 1156 if (copy > seglen)
1157 copy = seglen; 1157 copy = seglen;
1158 1158
1159 /* Where to copy to? */ 1159 /* Where to copy to? */
1160 if (skb_availroom(skb) > 0) { 1160 if (skb_availroom(skb) > 0) {
1161 /* We have some space in skb head. Superb! */ 1161 /* We have some space in skb head. Superb! */
1162 copy = min_t(int, copy, skb_availroom(skb)); 1162 copy = min_t(int, copy, skb_availroom(skb));
1163 err = skb_add_data_nocache(sk, skb, from, copy); 1163 err = skb_add_data_nocache(sk, skb, from, copy);
1164 if (err) 1164 if (err)
1165 goto do_fault; 1165 goto do_fault;
1166 } else { 1166 } else {
1167 bool merge = true; 1167 bool merge = true;
1168 int i = skb_shinfo(skb)->nr_frags; 1168 int i = skb_shinfo(skb)->nr_frags;
1169 struct page_frag *pfrag = sk_page_frag(sk); 1169 struct page_frag *pfrag = sk_page_frag(sk);
1170 1170
1171 if (!sk_page_frag_refill(sk, pfrag)) 1171 if (!sk_page_frag_refill(sk, pfrag))
1172 goto wait_for_memory; 1172 goto wait_for_memory;
1173 1173
1174 if (!skb_can_coalesce(skb, i, pfrag->page, 1174 if (!skb_can_coalesce(skb, i, pfrag->page,
1175 pfrag->offset)) { 1175 pfrag->offset)) {
1176 if (i == MAX_SKB_FRAGS || !sg) { 1176 if (i == MAX_SKB_FRAGS || !sg) {
1177 tcp_mark_push(tp, skb); 1177 tcp_mark_push(tp, skb);
1178 goto new_segment; 1178 goto new_segment;
1179 } 1179 }
1180 merge = false; 1180 merge = false;
1181 } 1181 }
1182 1182
1183 copy = min_t(int, copy, pfrag->size - pfrag->offset); 1183 copy = min_t(int, copy, pfrag->size - pfrag->offset);
1184 1184
1185 if (!sk_wmem_schedule(sk, copy)) 1185 if (!sk_wmem_schedule(sk, copy))
1186 goto wait_for_memory; 1186 goto wait_for_memory;
1187 1187
1188 err = skb_copy_to_page_nocache(sk, from, skb, 1188 err = skb_copy_to_page_nocache(sk, from, skb,
1189 pfrag->page, 1189 pfrag->page,
1190 pfrag->offset, 1190 pfrag->offset,
1191 copy); 1191 copy);
1192 if (err) 1192 if (err)
1193 goto do_error; 1193 goto do_error;
1194 1194
1195 /* Update the skb. */ 1195 /* Update the skb. */
1196 if (merge) { 1196 if (merge) {
1197 skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy); 1197 skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
1198 } else { 1198 } else {
1199 skb_fill_page_desc(skb, i, pfrag->page, 1199 skb_fill_page_desc(skb, i, pfrag->page,
1200 pfrag->offset, copy); 1200 pfrag->offset, copy);
1201 get_page(pfrag->page); 1201 get_page(pfrag->page);
1202 } 1202 }
1203 pfrag->offset += copy; 1203 pfrag->offset += copy;
1204 } 1204 }
1205 1205
1206 if (!copied) 1206 if (!copied)
1207 TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH; 1207 TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH;
1208 1208
1209 tp->write_seq += copy; 1209 tp->write_seq += copy;
1210 TCP_SKB_CB(skb)->end_seq += copy; 1210 TCP_SKB_CB(skb)->end_seq += copy;
1211 skb_shinfo(skb)->gso_segs = 0; 1211 skb_shinfo(skb)->gso_segs = 0;
1212 1212
1213 from += copy; 1213 from += copy;
1214 copied += copy; 1214 copied += copy;
1215 if ((seglen -= copy) == 0 && iovlen == 0) 1215 if ((seglen -= copy) == 0 && iovlen == 0)
1216 goto out; 1216 goto out;
1217 1217
1218 if (skb->len < max || (flags & MSG_OOB) || unlikely(tp->repair)) 1218 if (skb->len < max || (flags & MSG_OOB) || unlikely(tp->repair))
1219 continue; 1219 continue;
1220 1220
1221 if (forced_push(tp)) { 1221 if (forced_push(tp)) {
1222 tcp_mark_push(tp, skb); 1222 tcp_mark_push(tp, skb);
1223 __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH); 1223 __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
1224 } else if (skb == tcp_send_head(sk)) 1224 } else if (skb == tcp_send_head(sk))
1225 tcp_push_one(sk, mss_now); 1225 tcp_push_one(sk, mss_now);
1226 continue; 1226 continue;
1227 1227
1228 wait_for_sndbuf: 1228 wait_for_sndbuf:
1229 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); 1229 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1230 wait_for_memory: 1230 wait_for_memory:
1231 if (copied) 1231 if (copied)
1232 tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH); 1232 tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);
1233 1233
1234 if ((err = sk_stream_wait_memory(sk, &timeo)) != 0) 1234 if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
1235 goto do_error; 1235 goto do_error;
1236 1236
1237 mss_now = tcp_send_mss(sk, &size_goal, flags); 1237 mss_now = tcp_send_mss(sk, &size_goal, flags);
1238 } 1238 }
1239 } 1239 }
1240 1240
1241 out: 1241 out:
1242 if (copied) 1242 if (copied)
1243 tcp_push(sk, flags, mss_now, tp->nonagle); 1243 tcp_push(sk, flags, mss_now, tp->nonagle);
1244 release_sock(sk); 1244 release_sock(sk);
1245 return copied + copied_syn; 1245 return copied + copied_syn;
1246 1246
1247 do_fault: 1247 do_fault:
1248 if (!skb->len) { 1248 if (!skb->len) {
1249 tcp_unlink_write_queue(skb, sk); 1249 tcp_unlink_write_queue(skb, sk);
1250 /* It is the one place in all of TCP, except connection 1250 /* It is the one place in all of TCP, except connection
1251 * reset, where we can be unlinking the send_head. 1251 * reset, where we can be unlinking the send_head.
1252 */ 1252 */
1253 tcp_check_send_head(sk, skb); 1253 tcp_check_send_head(sk, skb);
1254 sk_wmem_free_skb(sk, skb); 1254 sk_wmem_free_skb(sk, skb);
1255 } 1255 }
1256 1256
1257 do_error: 1257 do_error:
1258 if (copied + copied_syn) 1258 if (copied + copied_syn)
1259 goto out; 1259 goto out;
1260 out_err: 1260 out_err:
1261 err = sk_stream_error(sk, flags, err); 1261 err = sk_stream_error(sk, flags, err);
1262 release_sock(sk); 1262 release_sock(sk);
1263 return err; 1263 return err;
1264 } 1264 }
1265 EXPORT_SYMBOL(tcp_sendmsg); 1265 EXPORT_SYMBOL(tcp_sendmsg);
1266 1266
1267 /* 1267 /*
1268 * Handle reading urgent data. BSD has very simple semantics for 1268 * Handle reading urgent data. BSD has very simple semantics for
1269 * this, no blocking and very strange errors 8) 1269 * this, no blocking and very strange errors 8)
1270 */ 1270 */
1271 1271
1272 static int tcp_recv_urg(struct sock *sk, struct msghdr *msg, int len, int flags) 1272 static int tcp_recv_urg(struct sock *sk, struct msghdr *msg, int len, int flags)
1273 { 1273 {
1274 struct tcp_sock *tp = tcp_sk(sk); 1274 struct tcp_sock *tp = tcp_sk(sk);
1275 1275
1276 /* No URG data to read. */ 1276 /* No URG data to read. */
1277 if (sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data || 1277 if (sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data ||
1278 tp->urg_data == TCP_URG_READ) 1278 tp->urg_data == TCP_URG_READ)
1279 return -EINVAL; /* Yes this is right ! */ 1279 return -EINVAL; /* Yes this is right ! */
1280 1280
1281 if (sk->sk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DONE)) 1281 if (sk->sk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DONE))
1282 return -ENOTCONN; 1282 return -ENOTCONN;
1283 1283
1284 if (tp->urg_data & TCP_URG_VALID) { 1284 if (tp->urg_data & TCP_URG_VALID) {
1285 int err = 0; 1285 int err = 0;
1286 char c = tp->urg_data; 1286 char c = tp->urg_data;
1287 1287
1288 if (!(flags & MSG_PEEK)) 1288 if (!(flags & MSG_PEEK))
1289 tp->urg_data = TCP_URG_READ; 1289 tp->urg_data = TCP_URG_READ;
1290 1290
1291 /* Read urgent data. */ 1291 /* Read urgent data. */
1292 msg->msg_flags |= MSG_OOB; 1292 msg->msg_flags |= MSG_OOB;
1293 1293
1294 if (len > 0) { 1294 if (len > 0) {
1295 if (!(flags & MSG_TRUNC)) 1295 if (!(flags & MSG_TRUNC))
1296 err = memcpy_toiovec(msg->msg_iov, &c, 1); 1296 err = memcpy_toiovec(msg->msg_iov, &c, 1);
1297 len = 1; 1297 len = 1;
1298 } else 1298 } else
1299 msg->msg_flags |= MSG_TRUNC; 1299 msg->msg_flags |= MSG_TRUNC;
1300 1300
1301 return err ? -EFAULT : len; 1301 return err ? -EFAULT : len;
1302 } 1302 }
1303 1303
1304 if (sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN)) 1304 if (sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN))
1305 return 0; 1305 return 0;
1306 1306
1307 /* Fixed the recv(..., MSG_OOB) behaviour. BSD docs and 1307 /* Fixed the recv(..., MSG_OOB) behaviour. BSD docs and
1308 * the available implementations agree in this case: 1308 * the available implementations agree in this case:
1309 * this call should never block, independent of the 1309 * this call should never block, independent of the
1310 * blocking state of the socket. 1310 * blocking state of the socket.
1311 * Mike <pall@rz.uni-karlsruhe.de> 1311 * Mike <pall@rz.uni-karlsruhe.de>
1312 */ 1312 */
1313 return -EAGAIN; 1313 return -EAGAIN;
1314 } 1314 }
1315 1315
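From userspace, the error cases in tcp_recv_urg() above are what recv(..., MSG_OOB) reports when SO_OOBINLINE is off: EINVAL when there is no urgent byte (or it was already consumed or is inline), EAGAIN when one is announced but not yet readable, and it never blocks. A small hedged caller sketch:

#include <stdio.h>
#include <sys/socket.h>
#include <sys/types.h>

/* Try to read the single urgent byte out of band; never blocks. */
static int read_oob_byte(int sock_fd)
{
        char c;
        ssize_t n = recv(sock_fd, &c, 1, MSG_OOB);

        if (n == 1) {
                printf("urgent byte: 0x%02x\n",
                       (unsigned int)(unsigned char)c);
                return 0;
        }
        /* EINVAL: no urgent data (or inline / already read);
         * EAGAIN: urgent data announced but not yet readable. */
        if (n < 0)
                perror("recv(MSG_OOB)");
        return -1;
}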
1316 static int tcp_peek_sndq(struct sock *sk, struct msghdr *msg, int len) 1316 static int tcp_peek_sndq(struct sock *sk, struct msghdr *msg, int len)
1317 { 1317 {
1318 struct sk_buff *skb; 1318 struct sk_buff *skb;
1319 int copied = 0, err = 0; 1319 int copied = 0, err = 0;
1320 1320
1321 /* XXX -- need to support SO_PEEK_OFF */ 1321 /* XXX -- need to support SO_PEEK_OFF */
1322 1322
1323 skb_queue_walk(&sk->sk_write_queue, skb) { 1323 skb_queue_walk(&sk->sk_write_queue, skb) {
1324 err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, skb->len); 1324 err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, skb->len);
1325 if (err) 1325 if (err)
1326 break; 1326 break;
1327 1327
1328 copied += skb->len; 1328 copied += skb->len;
1329 } 1329 }
1330 1330
1331 return err ?: copied; 1331 return err ?: copied;
1332 } 1332 }
1333 1333
1334 /* Clean up the receive buffer for full frames taken by the user, 1334 /* Clean up the receive buffer for full frames taken by the user,
1335 * then send an ACK if necessary. COPIED is the number of bytes 1335 * then send an ACK if necessary. COPIED is the number of bytes
1336 * tcp_recvmsg has given to the user so far, it speeds up the 1336 * tcp_recvmsg has given to the user so far, it speeds up the
1337 * calculation of whether or not we must ACK for the sake of 1337 * calculation of whether or not we must ACK for the sake of
1338 * a window update. 1338 * a window update.
1339 */ 1339 */
1340 void tcp_cleanup_rbuf(struct sock *sk, int copied) 1340 void tcp_cleanup_rbuf(struct sock *sk, int copied)
1341 { 1341 {
1342 struct tcp_sock *tp = tcp_sk(sk); 1342 struct tcp_sock *tp = tcp_sk(sk);
1343 bool time_to_ack = false; 1343 bool time_to_ack = false;
1344 1344
1345 struct sk_buff *skb = skb_peek(&sk->sk_receive_queue); 1345 struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
1346 1346
1347 WARN(skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq), 1347 WARN(skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq),
1348 "cleanup rbuf bug: copied %X seq %X rcvnxt %X\n", 1348 "cleanup rbuf bug: copied %X seq %X rcvnxt %X\n",
1349 tp->copied_seq, TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt); 1349 tp->copied_seq, TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt);
1350 1350
1351 if (inet_csk_ack_scheduled(sk)) { 1351 if (inet_csk_ack_scheduled(sk)) {
1352 const struct inet_connection_sock *icsk = inet_csk(sk); 1352 const struct inet_connection_sock *icsk = inet_csk(sk);
1353 /* Delayed ACKs frequently hit locked sockets during bulk 1353 /* Delayed ACKs frequently hit locked sockets during bulk
1354 * receive. */ 1354 * receive. */
1355 if (icsk->icsk_ack.blocked || 1355 if (icsk->icsk_ack.blocked ||
1356 /* Once-per-two-segments ACK was not sent by tcp_input.c */ 1356 /* Once-per-two-segments ACK was not sent by tcp_input.c */
1357 tp->rcv_nxt - tp->rcv_wup > icsk->icsk_ack.rcv_mss || 1357 tp->rcv_nxt - tp->rcv_wup > icsk->icsk_ack.rcv_mss ||
1358 /* 1358 /*
1359 * If this read emptied read buffer, we send ACK, if 1359 * If this read emptied read buffer, we send ACK, if
1360 * connection is not bidirectional, user drained 1360 * connection is not bidirectional, user drained
1361 * receive buffer and there was a small segment 1361 * receive buffer and there was a small segment
1362 * in queue. 1362 * in queue.
1363 */ 1363 */
1364 (copied > 0 && 1364 (copied > 0 &&
1365 ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED2) || 1365 ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED2) ||
1366 ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED) && 1366 ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED) &&
1367 !icsk->icsk_ack.pingpong)) && 1367 !icsk->icsk_ack.pingpong)) &&
1368 !atomic_read(&sk->sk_rmem_alloc))) 1368 !atomic_read(&sk->sk_rmem_alloc)))
1369 time_to_ack = true; 1369 time_to_ack = true;
1370 } 1370 }
1371 1371
1372 /* We send an ACK if we can now advertise a non-zero window 1372 /* We send an ACK if we can now advertise a non-zero window
1373 * which has been raised "significantly". 1373 * which has been raised "significantly".
1374 * 1374 *
1375 * Even if window raised up to infinity, do not send window open ACK 1375 * Even if window raised up to infinity, do not send window open ACK
1376 * in states, where we will not receive more. It is useless. 1376 * in states, where we will not receive more. It is useless.
1377 */ 1377 */
1378 if (copied > 0 && !time_to_ack && !(sk->sk_shutdown & RCV_SHUTDOWN)) { 1378 if (copied > 0 && !time_to_ack && !(sk->sk_shutdown & RCV_SHUTDOWN)) {
1379 __u32 rcv_window_now = tcp_receive_window(tp); 1379 __u32 rcv_window_now = tcp_receive_window(tp);
1380 1380
1381 /* Optimize, __tcp_select_window() is not cheap. */ 1381 /* Optimize, __tcp_select_window() is not cheap. */
1382 if (2*rcv_window_now <= tp->window_clamp) { 1382 if (2*rcv_window_now <= tp->window_clamp) {
1383 __u32 new_window = __tcp_select_window(sk); 1383 __u32 new_window = __tcp_select_window(sk);
1384 1384
1385 /* Send ACK now, if this read freed lots of space 1385 /* Send ACK now, if this read freed lots of space
1386 * in our buffer. Certainly, new_window is new window. 1386 * in our buffer. Certainly, new_window is new window.
1387 * We can advertise it now, if it is not less than current one. 1387 * We can advertise it now, if it is not less than current one.
1388 * "Lots" means "at least twice" here. 1388 * "Lots" means "at least twice" here.
1389 */ 1389 */
1390 if (new_window && new_window >= 2 * rcv_window_now) 1390 if (new_window && new_window >= 2 * rcv_window_now)
1391 time_to_ack = true; 1391 time_to_ack = true;
1392 } 1392 }
1393 } 1393 }
1394 if (time_to_ack) 1394 if (time_to_ack)
1395 tcp_send_ack(sk); 1395 tcp_send_ack(sk);
1396 } 1396 }
1397 1397
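The window-update heuristic at the end of tcp_cleanup_rbuf() above only schedules an ACK when the freshly computed window is at least double the one currently advertised, after a cheap pre-check against the clamp so __tcp_select_window() is not called needlessly. Extracted as a pure predicate for illustration; the names and sample numbers are not from the kernel:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Should a read-side window update be ACKed right away?  Only when the
 * current advertised window is at most half the clamp and the new window
 * is non-zero and at least twice the current one. */
static bool window_update_worth_acking(uint32_t rcv_window_now,
                                       uint32_t new_window,
                                       uint32_t window_clamp)
{
        if (2 * rcv_window_now > window_clamp)
                return false;
        return new_window && new_window >= 2 * rcv_window_now;
}

int main(void)
{
        printf("%d\n", window_update_worth_acking(8192, 32768, 65535));
        return 0;
}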
1398 static void tcp_prequeue_process(struct sock *sk) 1398 static void tcp_prequeue_process(struct sock *sk)
1399 { 1399 {
1400 struct sk_buff *skb; 1400 struct sk_buff *skb;
1401 struct tcp_sock *tp = tcp_sk(sk); 1401 struct tcp_sock *tp = tcp_sk(sk);
1402 1402
1403 NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPPREQUEUED); 1403 NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPPREQUEUED);
1404 1404
1405 /* RX process wants to run with disabled BHs, though it is not 1405 /* RX process wants to run with disabled BHs, though it is not
1406 * necessary */ 1406 * necessary */
1407 local_bh_disable(); 1407 local_bh_disable();
1408 while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) 1408 while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
1409 sk_backlog_rcv(sk, skb); 1409 sk_backlog_rcv(sk, skb);
1410 local_bh_enable(); 1410 local_bh_enable();
1411 1411
1412 /* Clear memory counter. */ 1412 /* Clear memory counter. */
1413 tp->ucopy.memory = 0; 1413 tp->ucopy.memory = 0;
1414 } 1414 }
1415 1415
1416 #ifdef CONFIG_NET_DMA 1416 #ifdef CONFIG_NET_DMA
1417 static void tcp_service_net_dma(struct sock *sk, bool wait) 1417 static void tcp_service_net_dma(struct sock *sk, bool wait)
1418 { 1418 {
1419 dma_cookie_t done, used; 1419 dma_cookie_t done, used;
1420 dma_cookie_t last_issued; 1420 dma_cookie_t last_issued;
1421 struct tcp_sock *tp = tcp_sk(sk); 1421 struct tcp_sock *tp = tcp_sk(sk);
1422 1422
1423 if (!tp->ucopy.dma_chan) 1423 if (!tp->ucopy.dma_chan)
1424 return; 1424 return;
1425 1425
1426 last_issued = tp->ucopy.dma_cookie; 1426 last_issued = tp->ucopy.dma_cookie;
1427 dma_async_issue_pending(tp->ucopy.dma_chan); 1427 dma_async_issue_pending(tp->ucopy.dma_chan);
1428 1428
1429 do { 1429 do {
1430 if (dma_async_is_tx_complete(tp->ucopy.dma_chan, 1430 if (dma_async_is_tx_complete(tp->ucopy.dma_chan,
1431 last_issued, &done, 1431 last_issued, &done,
1432 &used) == DMA_SUCCESS) { 1432 &used) == DMA_COMPLETE) {
1433 /* Safe to free early-copied skbs now */ 1433 /* Safe to free early-copied skbs now */
1434 __skb_queue_purge(&sk->sk_async_wait_queue); 1434 __skb_queue_purge(&sk->sk_async_wait_queue);
1435 break; 1435 break;
1436 } else { 1436 } else {
1437 struct sk_buff *skb; 1437 struct sk_buff *skb;
1438 while ((skb = skb_peek(&sk->sk_async_wait_queue)) && 1438 while ((skb = skb_peek(&sk->sk_async_wait_queue)) &&
1439 (dma_async_is_complete(skb->dma_cookie, done, 1439 (dma_async_is_complete(skb->dma_cookie, done,
1440 used) == DMA_SUCCESS)) { 1440 used) == DMA_COMPLETE)) {
1441 __skb_dequeue(&sk->sk_async_wait_queue); 1441 __skb_dequeue(&sk->sk_async_wait_queue);
1442 kfree_skb(skb); 1442 kfree_skb(skb);
1443 } 1443 }
1444 } 1444 }
1445 } while (wait); 1445 } while (wait);
1446 } 1446 }
1447 #endif 1447 #endif
1448 1448
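The only functional change in the hunk above is the dmaengine status rename DMA_SUCCESS -> DMA_COMPLETE; the drain loop itself keeps polling until the last issued cookie reports completion, freeing early-copied skbs along the way. A deliberately simplified stand-alone model of that polling shape; the toy_* names are invented for illustration and are not the dmaengine API:

#include <stdio.h>

enum toy_dma_status { TOY_DMA_COMPLETE, TOY_DMA_IN_PROGRESS };

/* Pretend completion check: every cookie up to "done" has finished. */
static enum toy_dma_status toy_is_complete(int cookie, int done)
{
        return cookie <= done ? TOY_DMA_COMPLETE : TOY_DMA_IN_PROGRESS;
}

int main(void)
{
        int last_issued = 5;
        int done = 3;

        /* Like the wait==true path above: keep draining until the last
         * issued descriptor reports completion. */
        while (toy_is_complete(last_issued, done) != TOY_DMA_COMPLETE)
                done++;        /* stand-in for hardware making progress */

        printf("cookie %d complete\n", last_issued);
        return 0;
}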
1449 static struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off) 1449 static struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
1450 { 1450 {
1451 struct sk_buff *skb; 1451 struct sk_buff *skb;
1452 u32 offset; 1452 u32 offset;
1453 1453
1454 while ((skb = skb_peek(&sk->sk_receive_queue)) != NULL) { 1454 while ((skb = skb_peek(&sk->sk_receive_queue)) != NULL) {
1455 offset = seq - TCP_SKB_CB(skb)->seq; 1455 offset = seq - TCP_SKB_CB(skb)->seq;
1456 if (tcp_hdr(skb)->syn) 1456 if (tcp_hdr(skb)->syn)
1457 offset--; 1457 offset--;
1458 if (offset < skb->len || tcp_hdr(skb)->fin) { 1458 if (offset < skb->len || tcp_hdr(skb)->fin) {
1459 *off = offset; 1459 *off = offset;
1460 return skb; 1460 return skb;
1461 } 1461 }
1462 /* This looks weird, but this can happen if TCP collapsing 1462 /* This looks weird, but this can happen if TCP collapsing
1463 * split a fat GRO packet, while we released socket lock 1463 * split a fat GRO packet, while we released socket lock
1464 * in skb_splice_bits() 1464 * in skb_splice_bits()
1465 */ 1465 */
1466 sk_eat_skb(sk, skb, false); 1466 sk_eat_skb(sk, skb, false);
1467 } 1467 }
1468 return NULL; 1468 return NULL;
1469 } 1469 }
1470 1470
1471 /* 1471 /*
1472 * This routine provides an alternative to tcp_recvmsg() for routines 1472 * This routine provides an alternative to tcp_recvmsg() for routines
1473 * that would like to handle copying from skbuffs directly in 'sendfile' 1473 * that would like to handle copying from skbuffs directly in 'sendfile'
1474 * fashion. 1474 * fashion.
1475 * Note: 1475 * Note:
1476 * - It is assumed that the socket was locked by the caller. 1476 * - It is assumed that the socket was locked by the caller.
1477 * - The routine does not block. 1477 * - The routine does not block.
1478 * - At present, there is no support for reading OOB data 1478 * - At present, there is no support for reading OOB data
1479 * or for 'peeking' the socket using this routine 1479 * or for 'peeking' the socket using this routine
1480 * (although both would be easy to implement). 1480 * (although both would be easy to implement).
1481 */ 1481 */
1482 int tcp_read_sock(struct sock *sk, read_descriptor_t *desc, 1482 int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
1483 sk_read_actor_t recv_actor) 1483 sk_read_actor_t recv_actor)
1484 { 1484 {
1485 struct sk_buff *skb; 1485 struct sk_buff *skb;
1486 struct tcp_sock *tp = tcp_sk(sk); 1486 struct tcp_sock *tp = tcp_sk(sk);
1487 u32 seq = tp->copied_seq; 1487 u32 seq = tp->copied_seq;
1488 u32 offset; 1488 u32 offset;
1489 int copied = 0; 1489 int copied = 0;
1490 1490
1491 if (sk->sk_state == TCP_LISTEN) 1491 if (sk->sk_state == TCP_LISTEN)
1492 return -ENOTCONN; 1492 return -ENOTCONN;
1493 while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) { 1493 while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) {
1494 if (offset < skb->len) { 1494 if (offset < skb->len) {
1495 int used; 1495 int used;
1496 size_t len; 1496 size_t len;
1497 1497
1498 len = skb->len - offset; 1498 len = skb->len - offset;
1499 /* Stop reading if we hit a patch of urgent data */ 1499 /* Stop reading if we hit a patch of urgent data */
1500 if (tp->urg_data) { 1500 if (tp->urg_data) {
1501 u32 urg_offset = tp->urg_seq - seq; 1501 u32 urg_offset = tp->urg_seq - seq;
1502 if (urg_offset < len) 1502 if (urg_offset < len)
1503 len = urg_offset; 1503 len = urg_offset;
1504 if (!len) 1504 if (!len)
1505 break; 1505 break;
1506 } 1506 }
1507 used = recv_actor(desc, skb, offset, len); 1507 used = recv_actor(desc, skb, offset, len);
1508 if (used <= 0) { 1508 if (used <= 0) {
1509 if (!copied) 1509 if (!copied)
1510 copied = used; 1510 copied = used;
1511 break; 1511 break;
1512 } else if (used <= len) { 1512 } else if (used <= len) {
1513 seq += used; 1513 seq += used;
1514 copied += used; 1514 copied += used;
1515 offset += used; 1515 offset += used;
1516 } 1516 }
1517 /* If recv_actor drops the lock (e.g. TCP splice 1517 /* If recv_actor drops the lock (e.g. TCP splice
1518 * receive) the skb pointer might be invalid when 1518 * receive) the skb pointer might be invalid when
1519 * getting here: tcp_collapse might have deleted it 1519 * getting here: tcp_collapse might have deleted it
1520 * while aggregating skbs from the socket queue. 1520 * while aggregating skbs from the socket queue.
1521 */ 1521 */
1522 skb = tcp_recv_skb(sk, seq - 1, &offset); 1522 skb = tcp_recv_skb(sk, seq - 1, &offset);
1523 if (!skb) 1523 if (!skb)
1524 break; 1524 break;
1525 /* TCP coalescing might have appended data to the skb. 1525 /* TCP coalescing might have appended data to the skb.
1526 * Try to splice more frags 1526 * Try to splice more frags
1527 */ 1527 */
1528 if (offset + 1 != skb->len) 1528 if (offset + 1 != skb->len)
1529 continue; 1529 continue;
1530 } 1530 }
1531 if (tcp_hdr(skb)->fin) { 1531 if (tcp_hdr(skb)->fin) {
1532 sk_eat_skb(sk, skb, false); 1532 sk_eat_skb(sk, skb, false);
1533 ++seq; 1533 ++seq;
1534 break; 1534 break;
1535 } 1535 }
1536 sk_eat_skb(sk, skb, false); 1536 sk_eat_skb(sk, skb, false);
1537 if (!desc->count) 1537 if (!desc->count)
1538 break; 1538 break;
1539 tp->copied_seq = seq; 1539 tp->copied_seq = seq;
1540 } 1540 }
1541 tp->copied_seq = seq; 1541 tp->copied_seq = seq;
1542 1542
1543 tcp_rcv_space_adjust(sk); 1543 tcp_rcv_space_adjust(sk);
1544 1544
1545 /* Clean up data we have read: This will do ACK frames. */ 1545 /* Clean up data we have read: This will do ACK frames. */
1546 if (copied > 0) { 1546 if (copied > 0) {
1547 tcp_recv_skb(sk, seq, &offset); 1547 tcp_recv_skb(sk, seq, &offset);
1548 tcp_cleanup_rbuf(sk, copied); 1548 tcp_cleanup_rbuf(sk, copied);
1549 } 1549 }
1550 return copied; 1550 return copied;
1551 } 1551 }
1552 EXPORT_SYMBOL(tcp_read_sock); 1552 EXPORT_SYMBOL(tcp_read_sock);
1553 1553
1554 /* 1554 /*
1555 * This routine copies from a sock struct into the user buffer. 1555 * This routine copies from a sock struct into the user buffer.
1556 * 1556 *
1557 * Technical note: in 2.3 we work on _locked_ socket, so that 1557 * Technical note: in 2.3 we work on _locked_ socket, so that
1558 * tricks with *seq access order and skb->users are not required. 1558 * tricks with *seq access order and skb->users are not required.
1559 * Probably, code can be easily improved even more. 1559 * Probably, code can be easily improved even more.
1560 */ 1560 */
1561 1561
1562 int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, 1562 int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
1563 size_t len, int nonblock, int flags, int *addr_len) 1563 size_t len, int nonblock, int flags, int *addr_len)
1564 { 1564 {
1565 struct tcp_sock *tp = tcp_sk(sk); 1565 struct tcp_sock *tp = tcp_sk(sk);
1566 int copied = 0; 1566 int copied = 0;
1567 u32 peek_seq; 1567 u32 peek_seq;
1568 u32 *seq; 1568 u32 *seq;
1569 unsigned long used; 1569 unsigned long used;
1570 int err; 1570 int err;
1571 int target; /* Read at least this many bytes */ 1571 int target; /* Read at least this many bytes */
1572 long timeo; 1572 long timeo;
1573 struct task_struct *user_recv = NULL; 1573 struct task_struct *user_recv = NULL;
1574 bool copied_early = false; 1574 bool copied_early = false;
1575 struct sk_buff *skb; 1575 struct sk_buff *skb;
1576 u32 urg_hole = 0; 1576 u32 urg_hole = 0;
1577 1577
1578 if (sk_can_busy_loop(sk) && skb_queue_empty(&sk->sk_receive_queue) && 1578 if (sk_can_busy_loop(sk) && skb_queue_empty(&sk->sk_receive_queue) &&
1579 (sk->sk_state == TCP_ESTABLISHED)) 1579 (sk->sk_state == TCP_ESTABLISHED))
1580 sk_busy_loop(sk, nonblock); 1580 sk_busy_loop(sk, nonblock);
1581 1581
1582 lock_sock(sk); 1582 lock_sock(sk);
1583 1583
1584 err = -ENOTCONN; 1584 err = -ENOTCONN;
1585 if (sk->sk_state == TCP_LISTEN) 1585 if (sk->sk_state == TCP_LISTEN)
1586 goto out; 1586 goto out;
1587 1587
1588 timeo = sock_rcvtimeo(sk, nonblock); 1588 timeo = sock_rcvtimeo(sk, nonblock);
1589 1589
1590 /* Urgent data needs to be handled specially. */ 1590 /* Urgent data needs to be handled specially. */
1591 if (flags & MSG_OOB) 1591 if (flags & MSG_OOB)
1592 goto recv_urg; 1592 goto recv_urg;
1593 1593
1594 if (unlikely(tp->repair)) { 1594 if (unlikely(tp->repair)) {
1595 err = -EPERM; 1595 err = -EPERM;
1596 if (!(flags & MSG_PEEK)) 1596 if (!(flags & MSG_PEEK))
1597 goto out; 1597 goto out;
1598 1598
1599 if (tp->repair_queue == TCP_SEND_QUEUE) 1599 if (tp->repair_queue == TCP_SEND_QUEUE)
1600 goto recv_sndq; 1600 goto recv_sndq;
1601 1601
1602 err = -EINVAL; 1602 err = -EINVAL;
1603 if (tp->repair_queue == TCP_NO_QUEUE) 1603 if (tp->repair_queue == TCP_NO_QUEUE)
1604 goto out; 1604 goto out;
1605 1605
1606 /* 'common' recv queue MSG_PEEK-ing */ 1606 /* 'common' recv queue MSG_PEEK-ing */
1607 } 1607 }
1608 1608
1609 seq = &tp->copied_seq; 1609 seq = &tp->copied_seq;
1610 if (flags & MSG_PEEK) { 1610 if (flags & MSG_PEEK) {
1611 peek_seq = tp->copied_seq; 1611 peek_seq = tp->copied_seq;
1612 seq = &peek_seq; 1612 seq = &peek_seq;
1613 } 1613 }
1614 1614
1615 target = sock_rcvlowat(sk, flags & MSG_WAITALL, len); 1615 target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
1616 1616
1617 #ifdef CONFIG_NET_DMA 1617 #ifdef CONFIG_NET_DMA
1618 tp->ucopy.dma_chan = NULL; 1618 tp->ucopy.dma_chan = NULL;
1619 preempt_disable(); 1619 preempt_disable();
1620 skb = skb_peek_tail(&sk->sk_receive_queue); 1620 skb = skb_peek_tail(&sk->sk_receive_queue);
1621 { 1621 {
1622 int available = 0; 1622 int available = 0;
1623 1623
1624 if (skb) 1624 if (skb)
1625 available = TCP_SKB_CB(skb)->seq + skb->len - (*seq); 1625 available = TCP_SKB_CB(skb)->seq + skb->len - (*seq);
1626 if ((available < target) && 1626 if ((available < target) &&
1627 (len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) && 1627 (len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) &&
1628 !sysctl_tcp_low_latency && 1628 !sysctl_tcp_low_latency &&
1629 net_dma_find_channel()) { 1629 net_dma_find_channel()) {
1630 preempt_enable_no_resched(); 1630 preempt_enable_no_resched();
1631 tp->ucopy.pinned_list = 1631 tp->ucopy.pinned_list =
1632 dma_pin_iovec_pages(msg->msg_iov, len); 1632 dma_pin_iovec_pages(msg->msg_iov, len);
1633 } else { 1633 } else {
1634 preempt_enable_no_resched(); 1634 preempt_enable_no_resched();
1635 } 1635 }
1636 } 1636 }
1637 #endif 1637 #endif
1638 1638
1639 do { 1639 do {
1640 u32 offset; 1640 u32 offset;
1641 1641
1642 /* Are we at urgent data? Stop if we have read anything or have SIGURG pending. */ 1642 /* Are we at urgent data? Stop if we have read anything or have SIGURG pending. */
1643 if (tp->urg_data && tp->urg_seq == *seq) { 1643 if (tp->urg_data && tp->urg_seq == *seq) {
1644 if (copied) 1644 if (copied)
1645 break; 1645 break;
1646 if (signal_pending(current)) { 1646 if (signal_pending(current)) {
1647 copied = timeo ? sock_intr_errno(timeo) : -EAGAIN; 1647 copied = timeo ? sock_intr_errno(timeo) : -EAGAIN;
1648 break; 1648 break;
1649 } 1649 }
1650 } 1650 }
1651 1651
1652 /* Next get a buffer. */ 1652 /* Next get a buffer. */
1653 1653
1654 skb_queue_walk(&sk->sk_receive_queue, skb) { 1654 skb_queue_walk(&sk->sk_receive_queue, skb) {
1655 /* Now that we have two receive queues this 1655 /* Now that we have two receive queues this
1656 * shouldn't happen. 1656 * shouldn't happen.
1657 */ 1657 */
1658 if (WARN(before(*seq, TCP_SKB_CB(skb)->seq), 1658 if (WARN(before(*seq, TCP_SKB_CB(skb)->seq),
1659 "recvmsg bug: copied %X seq %X rcvnxt %X fl %X\n", 1659 "recvmsg bug: copied %X seq %X rcvnxt %X fl %X\n",
1660 *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt, 1660 *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt,
1661 flags)) 1661 flags))
1662 break; 1662 break;
1663 1663
1664 offset = *seq - TCP_SKB_CB(skb)->seq; 1664 offset = *seq - TCP_SKB_CB(skb)->seq;
1665 if (tcp_hdr(skb)->syn) 1665 if (tcp_hdr(skb)->syn)
1666 offset--; 1666 offset--;
1667 if (offset < skb->len) 1667 if (offset < skb->len)
1668 goto found_ok_skb; 1668 goto found_ok_skb;
1669 if (tcp_hdr(skb)->fin) 1669 if (tcp_hdr(skb)->fin)
1670 goto found_fin_ok; 1670 goto found_fin_ok;
1671 WARN(!(flags & MSG_PEEK), 1671 WARN(!(flags & MSG_PEEK),
1672 "recvmsg bug 2: copied %X seq %X rcvnxt %X fl %X\n", 1672 "recvmsg bug 2: copied %X seq %X rcvnxt %X fl %X\n",
1673 *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt, flags); 1673 *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt, flags);
1674 } 1674 }
1675 1675
1676 /* Well, if we have backlog, try to process it now. */ 1676 /* Well, if we have backlog, try to process it now. */
1677 1677
1678 if (copied >= target && !sk->sk_backlog.tail) 1678 if (copied >= target && !sk->sk_backlog.tail)
1679 break; 1679 break;
1680 1680
1681 if (copied) { 1681 if (copied) {
1682 if (sk->sk_err || 1682 if (sk->sk_err ||
1683 sk->sk_state == TCP_CLOSE || 1683 sk->sk_state == TCP_CLOSE ||
1684 (sk->sk_shutdown & RCV_SHUTDOWN) || 1684 (sk->sk_shutdown & RCV_SHUTDOWN) ||
1685 !timeo || 1685 !timeo ||
1686 signal_pending(current)) 1686 signal_pending(current))
1687 break; 1687 break;
1688 } else { 1688 } else {
1689 if (sock_flag(sk, SOCK_DONE)) 1689 if (sock_flag(sk, SOCK_DONE))
1690 break; 1690 break;
1691 1691
1692 if (sk->sk_err) { 1692 if (sk->sk_err) {
1693 copied = sock_error(sk); 1693 copied = sock_error(sk);
1694 break; 1694 break;
1695 } 1695 }
1696 1696
1697 if (sk->sk_shutdown & RCV_SHUTDOWN) 1697 if (sk->sk_shutdown & RCV_SHUTDOWN)
1698 break; 1698 break;
1699 1699
1700 if (sk->sk_state == TCP_CLOSE) { 1700 if (sk->sk_state == TCP_CLOSE) {
1701 if (!sock_flag(sk, SOCK_DONE)) { 1701 if (!sock_flag(sk, SOCK_DONE)) {
1702 /* This occurs when user tries to read 1702 /* This occurs when user tries to read
1703 * from never connected socket. 1703 * from never connected socket.
1704 */ 1704 */
1705 copied = -ENOTCONN; 1705 copied = -ENOTCONN;
1706 break; 1706 break;
1707 } 1707 }
1708 break; 1708 break;
1709 } 1709 }
1710 1710
1711 if (!timeo) { 1711 if (!timeo) {
1712 copied = -EAGAIN; 1712 copied = -EAGAIN;
1713 break; 1713 break;
1714 } 1714 }
1715 1715
1716 if (signal_pending(current)) { 1716 if (signal_pending(current)) {
1717 copied = sock_intr_errno(timeo); 1717 copied = sock_intr_errno(timeo);
1718 break; 1718 break;
1719 } 1719 }
1720 } 1720 }
1721 1721
1722 tcp_cleanup_rbuf(sk, copied); 1722 tcp_cleanup_rbuf(sk, copied);
1723 1723
1724 if (!sysctl_tcp_low_latency && tp->ucopy.task == user_recv) { 1724 if (!sysctl_tcp_low_latency && tp->ucopy.task == user_recv) {
1725 /* Install new reader */ 1725 /* Install new reader */
1726 if (!user_recv && !(flags & (MSG_TRUNC | MSG_PEEK))) { 1726 if (!user_recv && !(flags & (MSG_TRUNC | MSG_PEEK))) {
1727 user_recv = current; 1727 user_recv = current;
1728 tp->ucopy.task = user_recv; 1728 tp->ucopy.task = user_recv;
1729 tp->ucopy.iov = msg->msg_iov; 1729 tp->ucopy.iov = msg->msg_iov;
1730 } 1730 }
1731 1731
1732 tp->ucopy.len = len; 1732 tp->ucopy.len = len;
1733 1733
1734 WARN_ON(tp->copied_seq != tp->rcv_nxt && 1734 WARN_ON(tp->copied_seq != tp->rcv_nxt &&
1735 !(flags & (MSG_PEEK | MSG_TRUNC))); 1735 !(flags & (MSG_PEEK | MSG_TRUNC)));
1736 1736
1737 /* Ugly... If prequeue is not empty, we have to 1737 /* Ugly... If prequeue is not empty, we have to
1738 * process it before releasing socket, otherwise 1738 * process it before releasing socket, otherwise
1739 * order will be broken at second iteration. 1739 * order will be broken at second iteration.
1740 * More elegant solution is required!!! 1740 * More elegant solution is required!!!
1741 * 1741 *
1742 * Look: we have the following (pseudo)queues: 1742 * Look: we have the following (pseudo)queues:
1743 * 1743 *
1744 * 1. packets in flight 1744 * 1. packets in flight
1745 * 2. backlog 1745 * 2. backlog
1746 * 3. prequeue 1746 * 3. prequeue
1747 * 4. receive_queue 1747 * 4. receive_queue
1748 * 1748 *
1749 * Each queue can be processed only if the next ones 1749 * Each queue can be processed only if the next ones
1750 * are empty. At this point we have empty receive_queue. 1750 * are empty. At this point we have empty receive_queue.
1751 * But prequeue _can_ be not empty after 2nd iteration, 1751 * But prequeue _can_ be not empty after 2nd iteration,
1752 * when we jumped to start of loop because backlog 1752 * when we jumped to start of loop because backlog
1753 * processing added something to receive_queue. 1753 * processing added something to receive_queue.
1754 * We cannot release_sock(), because backlog contains 1754 * We cannot release_sock(), because backlog contains
1755 * packets arrived _after_ prequeued ones. 1755 * packets arrived _after_ prequeued ones.
1756 * 1756 *
1757 * Shortly, algorithm is clear --- to process all 1757 * Shortly, algorithm is clear --- to process all
1758 * the queues in order. We could make it more directly, 1758 * the queues in order. We could make it more directly,
1759 * requeueing packets from backlog to prequeue, if 1759 * requeueing packets from backlog to prequeue, if
1760 * it is not empty. It is more elegant, but eats cycles, 1760 * it is not empty. It is more elegant, but eats cycles,
1761 * unfortunately. 1761 * unfortunately.
1762 */ 1762 */
1763 if (!skb_queue_empty(&tp->ucopy.prequeue)) 1763 if (!skb_queue_empty(&tp->ucopy.prequeue))
1764 goto do_prequeue; 1764 goto do_prequeue;
1765 1765
1766 /* __ Set realtime policy in scheduler __ */ 1766 /* __ Set realtime policy in scheduler __ */
1767 } 1767 }
1768 1768
1769 #ifdef CONFIG_NET_DMA 1769 #ifdef CONFIG_NET_DMA
1770 if (tp->ucopy.dma_chan) { 1770 if (tp->ucopy.dma_chan) {
1771 if (tp->rcv_wnd == 0 && 1771 if (tp->rcv_wnd == 0 &&
1772 !skb_queue_empty(&sk->sk_async_wait_queue)) { 1772 !skb_queue_empty(&sk->sk_async_wait_queue)) {
1773 tcp_service_net_dma(sk, true); 1773 tcp_service_net_dma(sk, true);
1774 tcp_cleanup_rbuf(sk, copied); 1774 tcp_cleanup_rbuf(sk, copied);
1775 } else 1775 } else
1776 dma_async_issue_pending(tp->ucopy.dma_chan); 1776 dma_async_issue_pending(tp->ucopy.dma_chan);
1777 } 1777 }
1778 #endif 1778 #endif
1779 if (copied >= target) { 1779 if (copied >= target) {
1780 /* Do not sleep, just process backlog. */ 1780 /* Do not sleep, just process backlog. */
1781 release_sock(sk); 1781 release_sock(sk);
1782 lock_sock(sk); 1782 lock_sock(sk);
1783 } else 1783 } else
1784 sk_wait_data(sk, &timeo); 1784 sk_wait_data(sk, &timeo);
1785 1785
1786 #ifdef CONFIG_NET_DMA 1786 #ifdef CONFIG_NET_DMA
1787 tcp_service_net_dma(sk, false); /* Don't block */ 1787 tcp_service_net_dma(sk, false); /* Don't block */
1788 tp->ucopy.wakeup = 0; 1788 tp->ucopy.wakeup = 0;
1789 #endif 1789 #endif
1790 1790
1791 if (user_recv) { 1791 if (user_recv) {
1792 int chunk; 1792 int chunk;
1793 1793
1794 /* __ Restore normal policy in scheduler __ */ 1794 /* __ Restore normal policy in scheduler __ */
1795 1795
1796 if ((chunk = len - tp->ucopy.len) != 0) { 1796 if ((chunk = len - tp->ucopy.len) != 0) {
1797 NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG, chunk); 1797 NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG, chunk);
1798 len -= chunk; 1798 len -= chunk;
1799 copied += chunk; 1799 copied += chunk;
1800 } 1800 }
1801 1801
1802 if (tp->rcv_nxt == tp->copied_seq && 1802 if (tp->rcv_nxt == tp->copied_seq &&
1803 !skb_queue_empty(&tp->ucopy.prequeue)) { 1803 !skb_queue_empty(&tp->ucopy.prequeue)) {
1804 do_prequeue: 1804 do_prequeue:
1805 tcp_prequeue_process(sk); 1805 tcp_prequeue_process(sk);
1806 1806
1807 if ((chunk = len - tp->ucopy.len) != 0) { 1807 if ((chunk = len - tp->ucopy.len) != 0) {
1808 NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk); 1808 NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
1809 len -= chunk; 1809 len -= chunk;
1810 copied += chunk; 1810 copied += chunk;
1811 } 1811 }
1812 } 1812 }
1813 } 1813 }
1814 if ((flags & MSG_PEEK) && 1814 if ((flags & MSG_PEEK) &&
1815 (peek_seq - copied - urg_hole != tp->copied_seq)) { 1815 (peek_seq - copied - urg_hole != tp->copied_seq)) {
1816 net_dbg_ratelimited("TCP(%s:%d): Application bug, race in MSG_PEEK\n", 1816 net_dbg_ratelimited("TCP(%s:%d): Application bug, race in MSG_PEEK\n",
1817 current->comm, 1817 current->comm,
1818 task_pid_nr(current)); 1818 task_pid_nr(current));
1819 peek_seq = tp->copied_seq; 1819 peek_seq = tp->copied_seq;
1820 } 1820 }
1821 continue; 1821 continue;
1822 1822
1823 found_ok_skb: 1823 found_ok_skb:
1824 /* Ok so how much can we use? */ 1824 /* Ok so how much can we use? */
1825 used = skb->len - offset; 1825 used = skb->len - offset;
1826 if (len < used) 1826 if (len < used)
1827 used = len; 1827 used = len;
1828 1828
1829 /* Do we have urgent data here? */ 1829 /* Do we have urgent data here? */
1830 if (tp->urg_data) { 1830 if (tp->urg_data) {
1831 u32 urg_offset = tp->urg_seq - *seq; 1831 u32 urg_offset = tp->urg_seq - *seq;
1832 if (urg_offset < used) { 1832 if (urg_offset < used) {
1833 if (!urg_offset) { 1833 if (!urg_offset) {
1834 if (!sock_flag(sk, SOCK_URGINLINE)) { 1834 if (!sock_flag(sk, SOCK_URGINLINE)) {
1835 ++*seq; 1835 ++*seq;
1836 urg_hole++; 1836 urg_hole++;
1837 offset++; 1837 offset++;
1838 used--; 1838 used--;
1839 if (!used) 1839 if (!used)
1840 goto skip_copy; 1840 goto skip_copy;
1841 } 1841 }
1842 } else 1842 } else
1843 used = urg_offset; 1843 used = urg_offset;
1844 } 1844 }
1845 } 1845 }
1846 1846
1847 if (!(flags & MSG_TRUNC)) { 1847 if (!(flags & MSG_TRUNC)) {
1848 #ifdef CONFIG_NET_DMA 1848 #ifdef CONFIG_NET_DMA
1849 if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list) 1849 if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
1850 tp->ucopy.dma_chan = net_dma_find_channel(); 1850 tp->ucopy.dma_chan = net_dma_find_channel();
1851 1851
1852 if (tp->ucopy.dma_chan) { 1852 if (tp->ucopy.dma_chan) {
1853 tp->ucopy.dma_cookie = dma_skb_copy_datagram_iovec( 1853 tp->ucopy.dma_cookie = dma_skb_copy_datagram_iovec(
1854 tp->ucopy.dma_chan, skb, offset, 1854 tp->ucopy.dma_chan, skb, offset,
1855 msg->msg_iov, used, 1855 msg->msg_iov, used,
1856 tp->ucopy.pinned_list); 1856 tp->ucopy.pinned_list);
1857 1857
1858 if (tp->ucopy.dma_cookie < 0) { 1858 if (tp->ucopy.dma_cookie < 0) {
1859 1859
1860 pr_alert("%s: dma_cookie < 0\n", 1860 pr_alert("%s: dma_cookie < 0\n",
1861 __func__); 1861 __func__);
1862 1862
1863 /* Exception. Bailout! */ 1863 /* Exception. Bailout! */
1864 if (!copied) 1864 if (!copied)
1865 copied = -EFAULT; 1865 copied = -EFAULT;
1866 break; 1866 break;
1867 } 1867 }
1868 1868
1869 dma_async_issue_pending(tp->ucopy.dma_chan); 1869 dma_async_issue_pending(tp->ucopy.dma_chan);
1870 1870
1871 if ((offset + used) == skb->len) 1871 if ((offset + used) == skb->len)
1872 copied_early = true; 1872 copied_early = true;
1873 1873
1874 } else 1874 } else
1875 #endif 1875 #endif
1876 { 1876 {
1877 err = skb_copy_datagram_iovec(skb, offset, 1877 err = skb_copy_datagram_iovec(skb, offset,
1878 msg->msg_iov, used); 1878 msg->msg_iov, used);
1879 if (err) { 1879 if (err) {
1880 /* Exception. Bailout! */ 1880 /* Exception. Bailout! */
1881 if (!copied) 1881 if (!copied)
1882 copied = -EFAULT; 1882 copied = -EFAULT;
1883 break; 1883 break;
1884 } 1884 }
1885 } 1885 }
1886 } 1886 }
1887 1887
1888 *seq += used; 1888 *seq += used;
1889 copied += used; 1889 copied += used;
1890 len -= used; 1890 len -= used;
1891 1891
1892 tcp_rcv_space_adjust(sk); 1892 tcp_rcv_space_adjust(sk);
1893 1893
1894 skip_copy: 1894 skip_copy:
1895 if (tp->urg_data && after(tp->copied_seq, tp->urg_seq)) { 1895 if (tp->urg_data && after(tp->copied_seq, tp->urg_seq)) {
1896 tp->urg_data = 0; 1896 tp->urg_data = 0;
1897 tcp_fast_path_check(sk); 1897 tcp_fast_path_check(sk);
1898 } 1898 }
1899 if (used + offset < skb->len) 1899 if (used + offset < skb->len)
1900 continue; 1900 continue;
1901 1901
1902 if (tcp_hdr(skb)->fin) 1902 if (tcp_hdr(skb)->fin)
1903 goto found_fin_ok; 1903 goto found_fin_ok;
1904 if (!(flags & MSG_PEEK)) { 1904 if (!(flags & MSG_PEEK)) {
1905 sk_eat_skb(sk, skb, copied_early); 1905 sk_eat_skb(sk, skb, copied_early);
1906 copied_early = false; 1906 copied_early = false;
1907 } 1907 }
1908 continue; 1908 continue;
1909 1909
1910 found_fin_ok: 1910 found_fin_ok:
1911 /* Process the FIN. */ 1911 /* Process the FIN. */
1912 ++*seq; 1912 ++*seq;
1913 if (!(flags & MSG_PEEK)) { 1913 if (!(flags & MSG_PEEK)) {
1914 sk_eat_skb(sk, skb, copied_early); 1914 sk_eat_skb(sk, skb, copied_early);
1915 copied_early = false; 1915 copied_early = false;
1916 } 1916 }
1917 break; 1917 break;
1918 } while (len > 0); 1918 } while (len > 0);
1919 1919
1920 if (user_recv) { 1920 if (user_recv) {
1921 if (!skb_queue_empty(&tp->ucopy.prequeue)) { 1921 if (!skb_queue_empty(&tp->ucopy.prequeue)) {
1922 int chunk; 1922 int chunk;
1923 1923
1924 tp->ucopy.len = copied > 0 ? len : 0; 1924 tp->ucopy.len = copied > 0 ? len : 0;
1925 1925
1926 tcp_prequeue_process(sk); 1926 tcp_prequeue_process(sk);
1927 1927
1928 if (copied > 0 && (chunk = len - tp->ucopy.len) != 0) { 1928 if (copied > 0 && (chunk = len - tp->ucopy.len) != 0) {
1929 NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk); 1929 NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
1930 len -= chunk; 1930 len -= chunk;
1931 copied += chunk; 1931 copied += chunk;
1932 } 1932 }
1933 } 1933 }
1934 1934
1935 tp->ucopy.task = NULL; 1935 tp->ucopy.task = NULL;
1936 tp->ucopy.len = 0; 1936 tp->ucopy.len = 0;
1937 } 1937 }
1938 1938
1939 #ifdef CONFIG_NET_DMA 1939 #ifdef CONFIG_NET_DMA
1940 tcp_service_net_dma(sk, true); /* Wait for queue to drain */ 1940 tcp_service_net_dma(sk, true); /* Wait for queue to drain */
1941 tp->ucopy.dma_chan = NULL; 1941 tp->ucopy.dma_chan = NULL;
1942 1942
1943 if (tp->ucopy.pinned_list) { 1943 if (tp->ucopy.pinned_list) {
1944 dma_unpin_iovec_pages(tp->ucopy.pinned_list); 1944 dma_unpin_iovec_pages(tp->ucopy.pinned_list);
1945 tp->ucopy.pinned_list = NULL; 1945 tp->ucopy.pinned_list = NULL;
1946 } 1946 }
1947 #endif 1947 #endif
1948 1948
1949 /* According to UNIX98, msg_name/msg_namelen are ignored 1949 /* According to UNIX98, msg_name/msg_namelen are ignored
1950 * on a connected socket. I was just happy when I found this 8) --ANK 1950 * on a connected socket. I was just happy when I found this 8) --ANK
1951 */ 1951 */
1952 1952
1953 /* Clean up data we have read: This will do ACK frames. */ 1953 /* Clean up data we have read: This will do ACK frames. */
1954 tcp_cleanup_rbuf(sk, copied); 1954 tcp_cleanup_rbuf(sk, copied);
1955 1955
1956 release_sock(sk); 1956 release_sock(sk);
1957 return copied; 1957 return copied;
1958 1958
1959 out: 1959 out:
1960 release_sock(sk); 1960 release_sock(sk);
1961 return err; 1961 return err;
1962 1962
1963 recv_urg: 1963 recv_urg:
1964 err = tcp_recv_urg(sk, msg, len, flags); 1964 err = tcp_recv_urg(sk, msg, len, flags);
1965 goto out; 1965 goto out;
1966 1966
1967 recv_sndq: 1967 recv_sndq:
1968 err = tcp_peek_sndq(sk, msg, len); 1968 err = tcp_peek_sndq(sk, msg, len);
1969 goto out; 1969 goto out;
1970 } 1970 }
1971 EXPORT_SYMBOL(tcp_recvmsg); 1971 EXPORT_SYMBOL(tcp_recvmsg);
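The MSG_PEEK rewind above exists because peeking from several threads at once makes copied_seq/peek_seq drift apart; the ratelimited message calls that out as an application bug. As a point of reference, a minimal single-threaded userspace sketch of the intended usage (not part of this commit; "fd" is assumed to be an already-connected TCP socket):

/* Peek at queued data without consuming it, then read it for real.
 * The peeked bytes stay on sk_receive_queue until the plain recv().
 */
#include <stdio.h>
#include <sys/socket.h>

static void peek_then_read(int fd)
{
	char buf[2048];
	ssize_t peeked, got;

	peeked = recv(fd, buf, sizeof(buf), MSG_PEEK);	/* data stays queued */
	if (peeked > 0)
		printf("peeked %zd bytes\n", peeked);

	got = recv(fd, buf, sizeof(buf), 0);		/* now actually consume it */
	if (got > 0)
		printf("consumed %zd bytes\n", got);
}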
1972 1972
1973 void tcp_set_state(struct sock *sk, int state) 1973 void tcp_set_state(struct sock *sk, int state)
1974 { 1974 {
1975 int oldstate = sk->sk_state; 1975 int oldstate = sk->sk_state;
1976 1976
1977 switch (state) { 1977 switch (state) {
1978 case TCP_ESTABLISHED: 1978 case TCP_ESTABLISHED:
1979 if (oldstate != TCP_ESTABLISHED) 1979 if (oldstate != TCP_ESTABLISHED)
1980 TCP_INC_STATS(sock_net(sk), TCP_MIB_CURRESTAB); 1980 TCP_INC_STATS(sock_net(sk), TCP_MIB_CURRESTAB);
1981 break; 1981 break;
1982 1982
1983 case TCP_CLOSE: 1983 case TCP_CLOSE:
1984 if (oldstate == TCP_CLOSE_WAIT || oldstate == TCP_ESTABLISHED) 1984 if (oldstate == TCP_CLOSE_WAIT || oldstate == TCP_ESTABLISHED)
1985 TCP_INC_STATS(sock_net(sk), TCP_MIB_ESTABRESETS); 1985 TCP_INC_STATS(sock_net(sk), TCP_MIB_ESTABRESETS);
1986 1986
1987 sk->sk_prot->unhash(sk); 1987 sk->sk_prot->unhash(sk);
1988 if (inet_csk(sk)->icsk_bind_hash && 1988 if (inet_csk(sk)->icsk_bind_hash &&
1989 !(sk->sk_userlocks & SOCK_BINDPORT_LOCK)) 1989 !(sk->sk_userlocks & SOCK_BINDPORT_LOCK))
1990 inet_put_port(sk); 1990 inet_put_port(sk);
1991 /* fall through */ 1991 /* fall through */
1992 default: 1992 default:
1993 if (oldstate == TCP_ESTABLISHED) 1993 if (oldstate == TCP_ESTABLISHED)
1994 TCP_DEC_STATS(sock_net(sk), TCP_MIB_CURRESTAB); 1994 TCP_DEC_STATS(sock_net(sk), TCP_MIB_CURRESTAB);
1995 } 1995 }
1996 1996
1997 /* Change state AFTER socket is unhashed to avoid closed 1997 /* Change state AFTER socket is unhashed to avoid closed
1998 * socket sitting in hash tables. 1998 * socket sitting in hash tables.
1999 */ 1999 */
2000 sk->sk_state = state; 2000 sk->sk_state = state;
2001 2001
2002 #ifdef STATE_TRACE 2002 #ifdef STATE_TRACE
2003 SOCK_DEBUG(sk, "TCP sk=%p, State %s -> %s\n", sk, statename[oldstate], statename[state]); 2003 SOCK_DEBUG(sk, "TCP sk=%p, State %s -> %s\n", sk, statename[oldstate], statename[state]);
2004 #endif 2004 #endif
2005 } 2005 }
2006 EXPORT_SYMBOL_GPL(tcp_set_state); 2006 EXPORT_SYMBOL_GPL(tcp_set_state);
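For readers tracing the MIB accounting, a small illustrative userspace model (an assumption-laden sketch, not kernel code) of how tcp_set_state() keeps TCP_MIB_CURRESTAB consistent: the counter only moves on transitions into or out of ESTABLISHED, so passing through intermediate states can never double-count.

enum model_state { M_CLOSE, M_SYN_SENT, M_ESTABLISHED, M_FIN_WAIT1 };

static int curr_estab;	/* stand-in for TCP_MIB_CURRESTAB */

static void model_set_state(enum model_state *cur, enum model_state next)
{
	if (next == M_ESTABLISHED && *cur != M_ESTABLISHED)
		curr_estab++;			/* entering ESTABLISHED */
	else if (next != M_ESTABLISHED && *cur == M_ESTABLISHED)
		curr_estab--;			/* leaving ESTABLISHED */
	*cur = next;
}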
2007 2007
2008 /* 2008 /*
2009 * State processing on a close. This implements the state shift for 2009 * State processing on a close. This implements the state shift for
2010 * sending our FIN frame. Note that we only send a FIN for some 2010 * sending our FIN frame. Note that we only send a FIN for some
2011 * states. A shutdown() may have already sent the FIN, or we may be 2011 * states. A shutdown() may have already sent the FIN, or we may be
2012 * closed. 2012 * closed.
2013 */ 2013 */
2014 2014
2015 static const unsigned char new_state[16] = { 2015 static const unsigned char new_state[16] = {
2016 /* current state: new state: action: */ 2016 /* current state: new state: action: */
2017 /* (Invalid) */ TCP_CLOSE, 2017 /* (Invalid) */ TCP_CLOSE,
2018 /* TCP_ESTABLISHED */ TCP_FIN_WAIT1 | TCP_ACTION_FIN, 2018 /* TCP_ESTABLISHED */ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
2019 /* TCP_SYN_SENT */ TCP_CLOSE, 2019 /* TCP_SYN_SENT */ TCP_CLOSE,
2020 /* TCP_SYN_RECV */ TCP_FIN_WAIT1 | TCP_ACTION_FIN, 2020 /* TCP_SYN_RECV */ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
2021 /* TCP_FIN_WAIT1 */ TCP_FIN_WAIT1, 2021 /* TCP_FIN_WAIT1 */ TCP_FIN_WAIT1,
2022 /* TCP_FIN_WAIT2 */ TCP_FIN_WAIT2, 2022 /* TCP_FIN_WAIT2 */ TCP_FIN_WAIT2,
2023 /* TCP_TIME_WAIT */ TCP_CLOSE, 2023 /* TCP_TIME_WAIT */ TCP_CLOSE,
2024 /* TCP_CLOSE */ TCP_CLOSE, 2024 /* TCP_CLOSE */ TCP_CLOSE,
2025 /* TCP_CLOSE_WAIT */ TCP_LAST_ACK | TCP_ACTION_FIN, 2025 /* TCP_CLOSE_WAIT */ TCP_LAST_ACK | TCP_ACTION_FIN,
2026 /* TCP_LAST_ACK */ TCP_LAST_ACK, 2026 /* TCP_LAST_ACK */ TCP_LAST_ACK,
2027 /* TCP_LISTEN */ TCP_CLOSE, 2027 /* TCP_LISTEN */ TCP_CLOSE,
2028 /* TCP_CLOSING */ TCP_CLOSING, 2028 /* TCP_CLOSING */ TCP_CLOSING,
2029 }; 2029 };
2030 2030
2031 static int tcp_close_state(struct sock *sk) 2031 static int tcp_close_state(struct sock *sk)
2032 { 2032 {
2033 int next = (int)new_state[sk->sk_state]; 2033 int next = (int)new_state[sk->sk_state];
2034 int ns = next & TCP_STATE_MASK; 2034 int ns = next & TCP_STATE_MASK;
2035 2035
2036 tcp_set_state(sk, ns); 2036 tcp_set_state(sk, ns);
2037 2037
2038 return next & TCP_ACTION_FIN; 2038 return next & TCP_ACTION_FIN;
2039 } 2039 }
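A standalone sketch of the same lookup, showing how the next state and the "send a FIN" action are packed into one byte. The mask and action-bit values below are illustrative only; the kernel's actual TCP_STATE_MASK / TCP_ACTION_FIN constants live in include/net/tcp.h and may differ.

#include <stdio.h>

#define STATE_MASK	0xF
#define ACTION_FIN	(1 << 4)	/* illustrative, not the kernel's value */

enum { ST_INVALID, ST_ESTABLISHED, ST_SYN_SENT, ST_SYN_RECV, ST_FIN_WAIT1,
       ST_FIN_WAIT2, ST_TIME_WAIT, ST_CLOSE, ST_CLOSE_WAIT, ST_LAST_ACK,
       ST_LISTEN, ST_CLOSING };

static const unsigned char close_transition[] = {
	[ST_INVALID]     = ST_CLOSE,
	[ST_ESTABLISHED] = ST_FIN_WAIT1 | ACTION_FIN,
	[ST_SYN_SENT]    = ST_CLOSE,
	[ST_SYN_RECV]    = ST_FIN_WAIT1 | ACTION_FIN,
	[ST_FIN_WAIT1]   = ST_FIN_WAIT1,
	[ST_FIN_WAIT2]   = ST_FIN_WAIT2,
	[ST_TIME_WAIT]   = ST_CLOSE,
	[ST_CLOSE]       = ST_CLOSE,
	[ST_CLOSE_WAIT]  = ST_LAST_ACK | ACTION_FIN,
	[ST_LAST_ACK]    = ST_LAST_ACK,
	[ST_LISTEN]      = ST_CLOSE,
	[ST_CLOSING]     = ST_CLOSING,
};

int main(void)
{
	int next = close_transition[ST_ESTABLISHED];

	printf("close in ESTABLISHED -> state %d, send FIN: %s\n",
	       next & STATE_MASK, (next & ACTION_FIN) ? "yes" : "no");
	return 0;
}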
2040 2040
2041 /* 2041 /*
2042 * Shutdown the sending side of a connection. Much like close except 2042 * Shutdown the sending side of a connection. Much like close except
2043 * that we don't do a receive shutdown or sock_set_flag(sk, SOCK_DEAD). 2043 * that we don't do a receive shutdown or sock_set_flag(sk, SOCK_DEAD).
2044 */ 2044 */
2045 2045
2046 void tcp_shutdown(struct sock *sk, int how) 2046 void tcp_shutdown(struct sock *sk, int how)
2047 { 2047 {
2048 /* We need to grab some memory, and put together a FIN, 2048 /* We need to grab some memory, and put together a FIN,
2049 * and then put it into the queue to be sent. 2049 * and then put it into the queue to be sent.
2050 * Tim MacKenzie(tym@dibbler.cs.monash.edu.au) 4 Dec '92. 2050 * Tim MacKenzie(tym@dibbler.cs.monash.edu.au) 4 Dec '92.
2051 */ 2051 */
2052 if (!(how & SEND_SHUTDOWN)) 2052 if (!(how & SEND_SHUTDOWN))
2053 return; 2053 return;
2054 2054
2055 /* If we've already sent a FIN, or it's a closed state, skip this. */ 2055 /* If we've already sent a FIN, or it's a closed state, skip this. */
2056 if ((1 << sk->sk_state) & 2056 if ((1 << sk->sk_state) &
2057 (TCPF_ESTABLISHED | TCPF_SYN_SENT | 2057 (TCPF_ESTABLISHED | TCPF_SYN_SENT |
2058 TCPF_SYN_RECV | TCPF_CLOSE_WAIT)) { 2058 TCPF_SYN_RECV | TCPF_CLOSE_WAIT)) {
2059 /* Clear out any half completed packets. FIN if needed. */ 2059 /* Clear out any half completed packets. FIN if needed. */
2060 if (tcp_close_state(sk)) 2060 if (tcp_close_state(sk))
2061 tcp_send_fin(sk); 2061 tcp_send_fin(sk);
2062 } 2062 }
2063 } 2063 }
2064 EXPORT_SYMBOL(tcp_shutdown); 2064 EXPORT_SYMBOL(tcp_shutdown);
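From userspace, the SEND_SHUTDOWN path above is reached through shutdown(fd, SHUT_WR). A minimal half-close sketch (assuming "fd" is a connected TCP socket): the kernel queues a FIN, but the read side stays usable until the peer closes its end.

#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

static void half_close(int fd)
{
	char buf[1024];
	ssize_t n;

	if (shutdown(fd, SHUT_WR) < 0) {	/* send FIN, keep read side open */
		perror("shutdown");
		return;
	}

	/* Drain whatever the peer still has to say, then really close. */
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, (size_t)n, stdout);

	close(fd);
}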
2065 2065
2066 bool tcp_check_oom(struct sock *sk, int shift) 2066 bool tcp_check_oom(struct sock *sk, int shift)
2067 { 2067 {
2068 bool too_many_orphans, out_of_socket_memory; 2068 bool too_many_orphans, out_of_socket_memory;
2069 2069
2070 too_many_orphans = tcp_too_many_orphans(sk, shift); 2070 too_many_orphans = tcp_too_many_orphans(sk, shift);
2071 out_of_socket_memory = tcp_out_of_memory(sk); 2071 out_of_socket_memory = tcp_out_of_memory(sk);
2072 2072
2073 if (too_many_orphans) 2073 if (too_many_orphans)
2074 net_info_ratelimited("too many orphaned sockets\n"); 2074 net_info_ratelimited("too many orphaned sockets\n");
2075 if (out_of_socket_memory) 2075 if (out_of_socket_memory)
2076 net_info_ratelimited("out of memory -- consider tuning tcp_mem\n"); 2076 net_info_ratelimited("out of memory -- consider tuning tcp_mem\n");
2077 return too_many_orphans || out_of_socket_memory; 2077 return too_many_orphans || out_of_socket_memory;
2078 } 2078 }
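When the second message above fires, the limits worth inspecting are the three tcp_mem thresholds (low/pressure/high, counted in pages). A throwaway reader, purely for illustration:

#include <stdio.h>

static void show_tcp_mem(void)
{
	char line[128];
	FILE *f = fopen("/proc/sys/net/ipv4/tcp_mem", "r");

	if (f && fgets(line, sizeof(line), f))
		printf("tcp_mem (pages): %s", line);
	if (f)
		fclose(f);
}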
2079 2079
2080 void tcp_close(struct sock *sk, long timeout) 2080 void tcp_close(struct sock *sk, long timeout)
2081 { 2081 {
2082 struct sk_buff *skb; 2082 struct sk_buff *skb;
2083 int data_was_unread = 0; 2083 int data_was_unread = 0;
2084 int state; 2084 int state;
2085 2085
2086 lock_sock(sk); 2086 lock_sock(sk);
2087 sk->sk_shutdown = SHUTDOWN_MASK; 2087 sk->sk_shutdown = SHUTDOWN_MASK;
2088 2088
2089 if (sk->sk_state == TCP_LISTEN) { 2089 if (sk->sk_state == TCP_LISTEN) {
2090 tcp_set_state(sk, TCP_CLOSE); 2090 tcp_set_state(sk, TCP_CLOSE);
2091 2091
2092 /* Special case. */ 2092 /* Special case. */
2093 inet_csk_listen_stop(sk); 2093 inet_csk_listen_stop(sk);
2094 2094
2095 goto adjudge_to_death; 2095 goto adjudge_to_death;
2096 } 2096 }
2097 2097
2098 /* We need to flush the recv. buffs. We do this only on the 2098 /* We need to flush the recv. buffs. We do this only on the
2099 * descriptor close, not protocol-sourced closes, because the 2099 * descriptor close, not protocol-sourced closes, because the
2100 * reader process may not have drained the data yet! 2100 * reader process may not have drained the data yet!
2101 */ 2101 */
2102 while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) { 2102 while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
2103 u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq - 2103 u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq -
2104 tcp_hdr(skb)->fin; 2104 tcp_hdr(skb)->fin;
2105 data_was_unread += len; 2105 data_was_unread += len;
2106 __kfree_skb(skb); 2106 __kfree_skb(skb);
2107 } 2107 }
2108 2108
2109 sk_mem_reclaim(sk); 2109 sk_mem_reclaim(sk);
2110 2110
2111 /* If socket has been already reset (e.g. in tcp_reset()) - kill it. */ 2111 /* If socket has been already reset (e.g. in tcp_reset()) - kill it. */
2112 if (sk->sk_state == TCP_CLOSE) 2112 if (sk->sk_state == TCP_CLOSE)
2113 goto adjudge_to_death; 2113 goto adjudge_to_death;
2114 2114
2115 /* As outlined in RFC 2525, section 2.17, we send a RST here because 2115 /* As outlined in RFC 2525, section 2.17, we send a RST here because
2116 * data was lost. To witness the awful effects of the old behavior of 2116 * data was lost. To witness the awful effects of the old behavior of
2117 * always doing a FIN, run an older 2.1.x kernel or 2.0.x, start a bulk 2117 * always doing a FIN, run an older 2.1.x kernel or 2.0.x, start a bulk
2118 * GET in an FTP client, suspend the process, wait for the client to 2118 * GET in an FTP client, suspend the process, wait for the client to
2119 * advertise a zero window, then kill -9 the FTP client, wheee... 2119 * advertise a zero window, then kill -9 the FTP client, wheee...
2120 * Note: timeout is always zero in such a case. 2120 * Note: timeout is always zero in such a case.
2121 */ 2121 */
2122 if (unlikely(tcp_sk(sk)->repair)) { 2122 if (unlikely(tcp_sk(sk)->repair)) {
2123 sk->sk_prot->disconnect(sk, 0); 2123 sk->sk_prot->disconnect(sk, 0);
2124 } else if (data_was_unread) { 2124 } else if (data_was_unread) {
2125 /* Unread data was tossed, zap the connection. */ 2125 /* Unread data was tossed, zap the connection. */
2126 NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPABORTONCLOSE); 2126 NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPABORTONCLOSE);
2127 tcp_set_state(sk, TCP_CLOSE); 2127 tcp_set_state(sk, TCP_CLOSE);
2128 tcp_send_active_reset(sk, sk->sk_allocation); 2128 tcp_send_active_reset(sk, sk->sk_allocation);
2129 } else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) { 2129 } else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
2130 /* Check zero linger _after_ checking for unread data. */ 2130 /* Check zero linger _after_ checking for unread data. */
2131 sk->sk_prot->disconnect(sk, 0); 2131 sk->sk_prot->disconnect(sk, 0);
2132 NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPABORTONDATA); 2132 NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
2133 } else if (tcp_close_state(sk)) { 2133 } else if (tcp_close_state(sk)) {
2134 /* We FIN if the application ate all the data before 2134 /* We FIN if the application ate all the data before
2135 * zapping the connection. 2135 * zapping the connection.
2136 */ 2136 */
2137 2137
2138 /* RED-PEN. Formally speaking, we have broken TCP state 2138 /* RED-PEN. Formally speaking, we have broken TCP state
2139 * machine. State transitions: 2139 * machine. State transitions:
2140 * 2140 *
2141 * TCP_ESTABLISHED -> TCP_FIN_WAIT1 2141 * TCP_ESTABLISHED -> TCP_FIN_WAIT1
2142 * TCP_SYN_RECV -> TCP_FIN_WAIT1 (forget it, it's impossible) 2142 * TCP_SYN_RECV -> TCP_FIN_WAIT1 (forget it, it's impossible)
2143 * TCP_CLOSE_WAIT -> TCP_LAST_ACK 2143 * TCP_CLOSE_WAIT -> TCP_LAST_ACK
2144 * 2144 *
2145 * are legal only when FIN has been sent (i.e. in window), 2145 * are legal only when FIN has been sent (i.e. in window),
2146 * rather than queued out of window. Purists blame. 2146 * rather than queued out of window. Purists blame.
2147 * 2147 *
2148 * F.e. "RFC state" is ESTABLISHED, 2148 * F.e. "RFC state" is ESTABLISHED,
2149 * if Linux state is FIN-WAIT-1, but FIN is still not sent. 2149 * if Linux state is FIN-WAIT-1, but FIN is still not sent.
2150 * 2150 *
2151 * The visible deviations are that sometimes 2151 * The visible deviations are that sometimes
2152 * we enter time-wait state, when it is not required really 2152 * we enter time-wait state, when it is not required really
2153 * (harmless), do not send active resets, when they are 2153 * (harmless), do not send active resets, when they are
2154 * required by specs (TCP_ESTABLISHED, TCP_CLOSE_WAIT, when 2154 * required by specs (TCP_ESTABLISHED, TCP_CLOSE_WAIT, when
2155 * they look as CLOSING or LAST_ACK for Linux) 2155 * they look as CLOSING or LAST_ACK for Linux)
2156 * Probably, I missed some more holelets. 2156 * Probably, I missed some more holelets.
2157 * --ANK 2157 * --ANK
2158 * XXX (TFO) - To start off we don't support SYN+ACK+FIN 2158 * XXX (TFO) - To start off we don't support SYN+ACK+FIN
2159 * in a single packet! (May consider it later but will 2159 * in a single packet! (May consider it later but will
2160 * probably need API support or TCP_CORK SYN-ACK until 2160 * probably need API support or TCP_CORK SYN-ACK until
2161 * data is written and socket is closed.) 2161 * data is written and socket is closed.)
2162 */ 2162 */
2163 tcp_send_fin(sk); 2163 tcp_send_fin(sk);
2164 } 2164 }
2165 2165
2166 sk_stream_wait_close(sk, timeout); 2166 sk_stream_wait_close(sk, timeout);
2167 2167
2168 adjudge_to_death: 2168 adjudge_to_death:
2169 state = sk->sk_state; 2169 state = sk->sk_state;
2170 sock_hold(sk); 2170 sock_hold(sk);
2171 sock_orphan(sk); 2171 sock_orphan(sk);
2172 2172
2173 /* It is the last release_sock in its life. It will remove backlog. */ 2173 /* It is the last release_sock in its life. It will remove backlog. */
2174 release_sock(sk); 2174 release_sock(sk);
2175 2175
2176 2176
2177 /* Now socket is owned by kernel and we acquire BH lock 2177 /* Now socket is owned by kernel and we acquire BH lock
2178 to finish close. No need to check for user refs. 2178 to finish close. No need to check for user refs.
2179 */ 2179 */
2180 local_bh_disable(); 2180 local_bh_disable();
2181 bh_lock_sock(sk); 2181 bh_lock_sock(sk);
2182 WARN_ON(sock_owned_by_user(sk)); 2182 WARN_ON(sock_owned_by_user(sk));
2183 2183
2184 percpu_counter_inc(sk->sk_prot->orphan_count); 2184 percpu_counter_inc(sk->sk_prot->orphan_count);
2185 2185
2186 /* Have we already been destroyed by a softirq or backlog? */ 2186 /* Have we already been destroyed by a softirq or backlog? */
2187 if (state != TCP_CLOSE && sk->sk_state == TCP_CLOSE) 2187 if (state != TCP_CLOSE && sk->sk_state == TCP_CLOSE)
2188 goto out; 2188 goto out;
2189 2189
2190 /* This is a (useful) BSD violation of the RFC. There is a 2190 /* This is a (useful) BSD violation of the RFC. There is a
2191 * problem with TCP as specified in that the other end could 2191 * problem with TCP as specified in that the other end could
2192 * keep a socket open forever with no application left at this end. 2192 * keep a socket open forever with no application left at this end.
2193 * We use a 3 minute timeout (about the same as BSD) then kill 2193 * We use a 3 minute timeout (about the same as BSD) then kill
2194 * our end. If they send after that then tough - BUT: long enough 2194 * our end. If they send after that then tough - BUT: long enough
2195 * that we won't make the old 4*rto = almost no time - whoops 2195 * that we won't make the old 4*rto = almost no time - whoops
2196 * reset mistake. 2196 * reset mistake.
2197 * 2197 *
2198 * Nope, it was not a mistake. It is really the desired behaviour 2198 * Nope, it was not a mistake. It is really the desired behaviour
2199 * f.e. on http servers, when such sockets are useless, but 2199 * f.e. on http servers, when such sockets are useless, but
2200 * consume significant resources. Let's do it with special 2200 * consume significant resources. Let's do it with special
2201 * linger2 option. --ANK 2201 * linger2 option. --ANK
2202 */ 2202 */
2203 2203
2204 if (sk->sk_state == TCP_FIN_WAIT2) { 2204 if (sk->sk_state == TCP_FIN_WAIT2) {
2205 struct tcp_sock *tp = tcp_sk(sk); 2205 struct tcp_sock *tp = tcp_sk(sk);
2206 if (tp->linger2 < 0) { 2206 if (tp->linger2 < 0) {
2207 tcp_set_state(sk, TCP_CLOSE); 2207 tcp_set_state(sk, TCP_CLOSE);
2208 tcp_send_active_reset(sk, GFP_ATOMIC); 2208 tcp_send_active_reset(sk, GFP_ATOMIC);
2209 NET_INC_STATS_BH(sock_net(sk), 2209 NET_INC_STATS_BH(sock_net(sk),
2210 LINUX_MIB_TCPABORTONLINGER); 2210 LINUX_MIB_TCPABORTONLINGER);
2211 } else { 2211 } else {
2212 const int tmo = tcp_fin_time(sk); 2212 const int tmo = tcp_fin_time(sk);
2213 2213
2214 if (tmo > TCP_TIMEWAIT_LEN) { 2214 if (tmo > TCP_TIMEWAIT_LEN) {
2215 inet_csk_reset_keepalive_timer(sk, 2215 inet_csk_reset_keepalive_timer(sk,
2216 tmo - TCP_TIMEWAIT_LEN); 2216 tmo - TCP_TIMEWAIT_LEN);
2217 } else { 2217 } else {
2218 tcp_time_wait(sk, TCP_FIN_WAIT2, tmo); 2218 tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
2219 goto out; 2219 goto out;
2220 } 2220 }
2221 } 2221 }
2222 } 2222 }
2223 if (sk->sk_state != TCP_CLOSE) { 2223 if (sk->sk_state != TCP_CLOSE) {
2224 sk_mem_reclaim(sk); 2224 sk_mem_reclaim(sk);
2225 if (tcp_check_oom(sk, 0)) { 2225 if (tcp_check_oom(sk, 0)) {
2226 tcp_set_state(sk, TCP_CLOSE); 2226 tcp_set_state(sk, TCP_CLOSE);
2227 tcp_send_active_reset(sk, GFP_ATOMIC); 2227 tcp_send_active_reset(sk, GFP_ATOMIC);
2228 NET_INC_STATS_BH(sock_net(sk), 2228 NET_INC_STATS_BH(sock_net(sk),
2229 LINUX_MIB_TCPABORTONMEMORY); 2229 LINUX_MIB_TCPABORTONMEMORY);
2230 } 2230 }
2231 } 2231 }
2232 2232
2233 if (sk->sk_state == TCP_CLOSE) { 2233 if (sk->sk_state == TCP_CLOSE) {
2234 struct request_sock *req = tcp_sk(sk)->fastopen_rsk; 2234 struct request_sock *req = tcp_sk(sk)->fastopen_rsk;
2235 /* We could get here with a non-NULL req if the socket is 2235 /* We could get here with a non-NULL req if the socket is
2236 * aborted (e.g., closed with unread data) before 3WHS 2236 * aborted (e.g., closed with unread data) before 3WHS
2237 * finishes. 2237 * finishes.
2238 */ 2238 */
2239 if (req != NULL) 2239 if (req != NULL)
2240 reqsk_fastopen_remove(sk, req, false); 2240 reqsk_fastopen_remove(sk, req, false);
2241 inet_csk_destroy_sock(sk); 2241 inet_csk_destroy_sock(sk);
2242 } 2242 }
2243 /* Otherwise, socket is reprieved until protocol close. */ 2243 /* Otherwise, socket is reprieved until protocol close. */
2244 2244
2245 out: 2245 out:
2246 bh_unlock_sock(sk); 2246 bh_unlock_sock(sk);
2247 local_bh_enable(); 2247 local_bh_enable();
2248 sock_put(sk); 2248 sock_put(sk);
2249 } 2249 }
2250 EXPORT_SYMBOL(tcp_close); 2250 EXPORT_SYMBOL(tcp_close);
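The zero-linger branch in tcp_close() above (SOCK_LINGER set, sk_lingertime == 0) is what applications hit when they ask for an abortive close. A userspace sketch, assuming "fd" is a connected TCP socket; use with care, since any unsent data is discarded and the peer sees a RST instead of a FIN.

#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

static void abortive_close(int fd)
{
	struct linger lin = { .l_onoff = 1, .l_linger = 0 };

	if (setsockopt(fd, SOL_SOCKET, SO_LINGER, &lin, sizeof(lin)) < 0)
		perror("setsockopt(SO_LINGER)");

	close(fd);	/* connection is reset rather than closed gracefully */
}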
2251 2251
2252 /* These states need RST on ABORT according to RFC793 */ 2252 /* These states need RST on ABORT according to RFC793 */
2253 2253
2254 static inline bool tcp_need_reset(int state) 2254 static inline bool tcp_need_reset(int state)
2255 { 2255 {
2256 return (1 << state) & 2256 return (1 << state) &
2257 (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_FIN_WAIT1 | 2257 (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_FIN_WAIT1 |
2258 TCPF_FIN_WAIT2 | TCPF_SYN_RECV); 2258 TCPF_FIN_WAIT2 | TCPF_SYN_RECV);
2259 } 2259 }
2260 2260
2261 int tcp_disconnect(struct sock *sk, int flags) 2261 int tcp_disconnect(struct sock *sk, int flags)
2262 { 2262 {
2263 struct inet_sock *inet = inet_sk(sk); 2263 struct inet_sock *inet = inet_sk(sk);
2264 struct inet_connection_sock *icsk = inet_csk(sk); 2264 struct inet_connection_sock *icsk = inet_csk(sk);
2265 struct tcp_sock *tp = tcp_sk(sk); 2265 struct tcp_sock *tp = tcp_sk(sk);
2266 int err = 0; 2266 int err = 0;
2267 int old_state = sk->sk_state; 2267 int old_state = sk->sk_state;
2268 2268
2269 if (old_state != TCP_CLOSE) 2269 if (old_state != TCP_CLOSE)
2270 tcp_set_state(sk, TCP_CLOSE); 2270 tcp_set_state(sk, TCP_CLOSE);
2271 2271
2272 /* ABORT function of RFC793 */ 2272 /* ABORT function of RFC793 */
2273 if (old_state == TCP_LISTEN) { 2273 if (old_state == TCP_LISTEN) {
2274 inet_csk_listen_stop(sk); 2274 inet_csk_listen_stop(sk);
2275 } else if (unlikely(tp->repair)) { 2275 } else if (unlikely(tp->repair)) {
2276 sk->sk_err = ECONNABORTED; 2276 sk->sk_err = ECONNABORTED;
2277 } else if (tcp_need_reset(old_state) || 2277 } else if (tcp_need_reset(old_state) ||
2278 (tp->snd_nxt != tp->write_seq && 2278 (tp->snd_nxt != tp->write_seq &&
2279 (1 << old_state) & (TCPF_CLOSING | TCPF_LAST_ACK))) { 2279 (1 << old_state) & (TCPF_CLOSING | TCPF_LAST_ACK))) {
2280 /* The last check adjusts for discrepancy of Linux wrt. RFC 2280 /* The last check adjusts for discrepancy of Linux wrt. RFC
2281 * states 2281 * states
2282 */ 2282 */
2283 tcp_send_active_reset(sk, gfp_any()); 2283 tcp_send_active_reset(sk, gfp_any());
2284 sk->sk_err = ECONNRESET; 2284 sk->sk_err = ECONNRESET;
2285 } else if (old_state == TCP_SYN_SENT) 2285 } else if (old_state == TCP_SYN_SENT)
2286 sk->sk_err = ECONNRESET; 2286 sk->sk_err = ECONNRESET;
2287 2287
2288 tcp_clear_xmit_timers(sk); 2288 tcp_clear_xmit_timers(sk);
2289 __skb_queue_purge(&sk->sk_receive_queue); 2289 __skb_queue_purge(&sk->sk_receive_queue);
2290 tcp_write_queue_purge(sk); 2290 tcp_write_queue_purge(sk);
2291 __skb_queue_purge(&tp->out_of_order_queue); 2291 __skb_queue_purge(&tp->out_of_order_queue);
2292 #ifdef CONFIG_NET_DMA 2292 #ifdef CONFIG_NET_DMA
2293 __skb_queue_purge(&sk->sk_async_wait_queue); 2293 __skb_queue_purge(&sk->sk_async_wait_queue);
2294 #endif 2294 #endif
2295 2295
2296 inet->inet_dport = 0; 2296 inet->inet_dport = 0;
2297 2297
2298 if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK)) 2298 if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
2299 inet_reset_saddr(sk); 2299 inet_reset_saddr(sk);
2300 2300
2301 sk->sk_shutdown = 0; 2301 sk->sk_shutdown = 0;
2302 sock_reset_flag(sk, SOCK_DONE); 2302 sock_reset_flag(sk, SOCK_DONE);
2303 tp->srtt = 0; 2303 tp->srtt = 0;
2304 if ((tp->write_seq += tp->max_window + 2) == 0) 2304 if ((tp->write_seq += tp->max_window + 2) == 0)
2305 tp->write_seq = 1; 2305 tp->write_seq = 1;
2306 icsk->icsk_backoff = 0; 2306 icsk->icsk_backoff = 0;
2307 tp->snd_cwnd = 2; 2307 tp->snd_cwnd = 2;
2308 icsk->icsk_probes_out = 0; 2308 icsk->icsk_probes_out = 0;
2309 tp->packets_out = 0; 2309 tp->packets_out = 0;
2310 tp->snd_ssthresh = TCP_INFINITE_SSTHRESH; 2310 tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
2311 tp->snd_cwnd_cnt = 0; 2311 tp->snd_cwnd_cnt = 0;
2312 tp->window_clamp = 0; 2312 tp->window_clamp = 0;
2313 tcp_set_ca_state(sk, TCP_CA_Open); 2313 tcp_set_ca_state(sk, TCP_CA_Open);
2314 tcp_clear_retrans(tp); 2314 tcp_clear_retrans(tp);
2315 inet_csk_delack_init(sk); 2315 inet_csk_delack_init(sk);
2316 tcp_init_send_head(sk); 2316 tcp_init_send_head(sk);
2317 memset(&tp->rx_opt, 0, sizeof(tp->rx_opt)); 2317 memset(&tp->rx_opt, 0, sizeof(tp->rx_opt));
2318 __sk_dst_reset(sk); 2318 __sk_dst_reset(sk);
2319 2319
2320 WARN_ON(inet->inet_num && !icsk->icsk_bind_hash); 2320 WARN_ON(inet->inet_num && !icsk->icsk_bind_hash);
2321 2321
2322 sk->sk_error_report(sk); 2322 sk->sk_error_report(sk);
2323 return err; 2323 return err;
2324 } 2324 }
2325 EXPORT_SYMBOL(tcp_disconnect); 2325 EXPORT_SYMBOL(tcp_disconnect);
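tcp_disconnect() is normally reached when an application calls connect() with an AF_UNSPEC address on an existing TCP socket: the association is torn down (with a RST in the states listed in tcp_need_reset()) while the file descriptor stays reusable. A hedged userspace sketch:

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

static int tcp_dissolve(int fd)
{
	struct sockaddr sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_family = AF_UNSPEC;	/* "disconnect" request */

	if (connect(fd, &sa, sizeof(sa)) < 0) {
		perror("connect(AF_UNSPEC)");
		return -1;
	}
	return 0;
}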
2326 2326
2327 void tcp_sock_destruct(struct sock *sk) 2327 void tcp_sock_destruct(struct sock *sk)
2328 { 2328 {
2329 inet_sock_destruct(sk); 2329 inet_sock_destruct(sk);
2330 2330
2331 kfree(inet_csk(sk)->icsk_accept_queue.fastopenq); 2331 kfree(inet_csk(sk)->icsk_accept_queue.fastopenq);
2332 } 2332 }
2333 2333
2334 static inline bool tcp_can_repair_sock(const struct sock *sk) 2334 static inline bool tcp_can_repair_sock(const struct sock *sk)
2335 { 2335 {
2336 return ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN) && 2336 return ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN) &&
2337 ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_ESTABLISHED)); 2337 ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_ESTABLISHED));
2338 } 2338 }
2339 2339
2340 static int tcp_repair_options_est(struct tcp_sock *tp, 2340 static int tcp_repair_options_est(struct tcp_sock *tp,
2341 struct tcp_repair_opt __user *optbuf, unsigned int len) 2341 struct tcp_repair_opt __user *optbuf, unsigned int len)
2342 { 2342 {
2343 struct tcp_repair_opt opt; 2343 struct tcp_repair_opt opt;
2344 2344
2345 while (len >= sizeof(opt)) { 2345 while (len >= sizeof(opt)) {
2346 if (copy_from_user(&opt, optbuf, sizeof(opt))) 2346 if (copy_from_user(&opt, optbuf, sizeof(opt)))
2347 return -EFAULT; 2347 return -EFAULT;
2348 2348
2349 optbuf++; 2349 optbuf++;
2350 len -= sizeof(opt); 2350 len -= sizeof(opt);
2351 2351
2352 switch (opt.opt_code) { 2352 switch (opt.opt_code) {
2353 case TCPOPT_MSS: 2353 case TCPOPT_MSS:
2354 tp->rx_opt.mss_clamp = opt.opt_val; 2354 tp->rx_opt.mss_clamp = opt.opt_val;
2355 break; 2355 break;
2356 case TCPOPT_WINDOW: 2356 case TCPOPT_WINDOW:
2357 { 2357 {
2358 u16 snd_wscale = opt.opt_val & 0xFFFF; 2358 u16 snd_wscale = opt.opt_val & 0xFFFF;
2359 u16 rcv_wscale = opt.opt_val >> 16; 2359 u16 rcv_wscale = opt.opt_val >> 16;
2360 2360
2361 if (snd_wscale > 14 || rcv_wscale > 14) 2361 if (snd_wscale > 14 || rcv_wscale > 14)
2362 return -EFBIG; 2362 return -EFBIG;
2363 2363
2364 tp->rx_opt.snd_wscale = snd_wscale; 2364 tp->rx_opt.snd_wscale = snd_wscale;
2365 tp->rx_opt.rcv_wscale = rcv_wscale; 2365 tp->rx_opt.rcv_wscale = rcv_wscale;
2366 tp->rx_opt.wscale_ok = 1; 2366 tp->rx_opt.wscale_ok = 1;
2367 } 2367 }
2368 break; 2368 break;
2369 case TCPOPT_SACK_PERM: 2369 case TCPOPT_SACK_PERM:
2370 if (opt.opt_val != 0) 2370 if (opt.opt_val != 0)
2371 return -EINVAL; 2371 return -EINVAL;
2372 2372
2373 tp->rx_opt.sack_ok |= TCP_SACK_SEEN; 2373 tp->rx_opt.sack_ok |= TCP_SACK_SEEN;
2374 if (sysctl_tcp_fack) 2374 if (sysctl_tcp_fack)
2375 tcp_enable_fack(tp); 2375 tcp_enable_fack(tp);
2376 break; 2376 break;
2377 case TCPOPT_TIMESTAMP: 2377 case TCPOPT_TIMESTAMP:
2378 if (opt.opt_val != 0) 2378 if (opt.opt_val != 0)
2379 return -EINVAL; 2379 return -EINVAL;
2380 2380
2381 tp->rx_opt.tstamp_ok = 1; 2381 tp->rx_opt.tstamp_ok = 1;
2382 break; 2382 break;
2383 } 2383 }
2384 } 2384 }
2385 2385
2386 return 0; 2386 return 0;
2387 } 2387 }
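A sketch of how a checkpoint/restore tool (CRIU, for example) feeds options back in through the parser above. Assumptions: the socket is already in TCP_REPAIR mode, is ESTABLISHED, and the caller has CAP_NET_ADMIN. The option kind values (2 = MSS, 3 = window scale, 4 = SACK permitted) come from the TCP option space and are defined locally because the uapi headers do not export the TCPOPT_* names.

#include <stdio.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <linux/tcp.h>		/* TCP_REPAIR_OPTIONS, struct tcp_repair_opt */

#define OPT_MSS		2
#define OPT_WSCALE	3
#define OPT_SACK_PERM	4

static int restore_tcp_options(int fd)
{
	struct tcp_repair_opt opts[] = {
		{ .opt_code = OPT_MSS,       .opt_val = 1460 },
		/* low 16 bits: snd_wscale, high 16 bits: rcv_wscale */
		{ .opt_code = OPT_WSCALE,    .opt_val = 7 | (7 << 16) },
		{ .opt_code = OPT_SACK_PERM, .opt_val = 0 },
	};

	if (setsockopt(fd, IPPROTO_TCP, TCP_REPAIR_OPTIONS,
		       opts, sizeof(opts)) < 0) {
		perror("setsockopt(TCP_REPAIR_OPTIONS)");
		return -1;
	}
	return 0;
}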
2388 2388
2389 /* 2389 /*
2390 * Socket option code for TCP. 2390 * Socket option code for TCP.
2391 */ 2391 */
2392 static int do_tcp_setsockopt(struct sock *sk, int level, 2392 static int do_tcp_setsockopt(struct sock *sk, int level,
2393 int optname, char __user *optval, unsigned int optlen) 2393 int optname, char __user *optval, unsigned int optlen)
2394 { 2394 {
2395 struct tcp_sock *tp = tcp_sk(sk); 2395 struct tcp_sock *tp = tcp_sk(sk);
2396 struct inet_connection_sock *icsk = inet_csk(sk); 2396 struct inet_connection_sock *icsk = inet_csk(sk);
2397 int val; 2397 int val;
2398 int err = 0; 2398 int err = 0;
2399 2399
2400 /* These are data/string values, all the others are ints */ 2400 /* These are data/string values, all the others are ints */
2401 switch (optname) { 2401 switch (optname) {
2402 case TCP_CONGESTION: { 2402 case TCP_CONGESTION: {
2403 char name[TCP_CA_NAME_MAX]; 2403 char name[TCP_CA_NAME_MAX];
2404 2404
2405 if (optlen < 1) 2405 if (optlen < 1)
2406 return -EINVAL; 2406 return -EINVAL;
2407 2407
2408 val = strncpy_from_user(name, optval, 2408 val = strncpy_from_user(name, optval,
2409 min_t(long, TCP_CA_NAME_MAX-1, optlen)); 2409 min_t(long, TCP_CA_NAME_MAX-1, optlen));
2410 if (val < 0) 2410 if (val < 0)
2411 return -EFAULT; 2411 return -EFAULT;
2412 name[val] = 0; 2412 name[val] = 0;
2413 2413
2414 lock_sock(sk); 2414 lock_sock(sk);
2415 err = tcp_set_congestion_control(sk, name); 2415 err = tcp_set_congestion_control(sk, name);
2416 release_sock(sk); 2416 release_sock(sk);
2417 return err; 2417 return err;
2418 } 2418 }
2419 default: 2419 default:
2420 /* fallthru */ 2420 /* fallthru */
2421 break; 2421 break;
2422 } 2422 }
2423 2423
2424 if (optlen < sizeof(int)) 2424 if (optlen < sizeof(int))
2425 return -EINVAL; 2425 return -EINVAL;
2426 2426
2427 if (get_user(val, (int __user *)optval)) 2427 if (get_user(val, (int __user *)optval))
2428 return -EFAULT; 2428 return -EFAULT;
2429 2429
2430 lock_sock(sk); 2430 lock_sock(sk);
2431 2431
2432 switch (optname) { 2432 switch (optname) {
2433 case TCP_MAXSEG: 2433 case TCP_MAXSEG:
2434 /* Values greater than interface MTU won't take effect. However 2434 /* Values greater than interface MTU won't take effect. However
2435 * at the point when this call is done we typically don't yet 2435 * at the point when this call is done we typically don't yet
2436 * know which interface is going to be used */ 2436 * know which interface is going to be used */
2437 if (val < TCP_MIN_MSS || val > MAX_TCP_WINDOW) { 2437 if (val < TCP_MIN_MSS || val > MAX_TCP_WINDOW) {
2438 err = -EINVAL; 2438 err = -EINVAL;
2439 break; 2439 break;
2440 } 2440 }
2441 tp->rx_opt.user_mss = val; 2441 tp->rx_opt.user_mss = val;
2442 break; 2442 break;
2443 2443
2444 case TCP_NODELAY: 2444 case TCP_NODELAY:
2445 if (val) { 2445 if (val) {
2446 /* TCP_NODELAY is weaker than TCP_CORK, so that 2446 /* TCP_NODELAY is weaker than TCP_CORK, so that
2447 * this option on corked socket is remembered, but 2447 * this option on corked socket is remembered, but
2448 * it is not activated until cork is cleared. 2448 * it is not activated until cork is cleared.
2449 * 2449 *
2450 * However, when TCP_NODELAY is set we make 2450 * However, when TCP_NODELAY is set we make
2451 * an explicit push, which overrides even TCP_CORK 2451 * an explicit push, which overrides even TCP_CORK
2452 * for currently queued segments. 2452 * for currently queued segments.
2453 */ 2453 */
2454 tp->nonagle |= TCP_NAGLE_OFF|TCP_NAGLE_PUSH; 2454 tp->nonagle |= TCP_NAGLE_OFF|TCP_NAGLE_PUSH;
2455 tcp_push_pending_frames(sk); 2455 tcp_push_pending_frames(sk);
2456 } else { 2456 } else {
2457 tp->nonagle &= ~TCP_NAGLE_OFF; 2457 tp->nonagle &= ~TCP_NAGLE_OFF;
2458 } 2458 }
2459 break; 2459 break;
2460 2460
2461 case TCP_THIN_LINEAR_TIMEOUTS: 2461 case TCP_THIN_LINEAR_TIMEOUTS:
2462 if (val < 0 || val > 1) 2462 if (val < 0 || val > 1)
2463 err = -EINVAL; 2463 err = -EINVAL;
2464 else 2464 else
2465 tp->thin_lto = val; 2465 tp->thin_lto = val;
2466 break; 2466 break;
2467 2467
2468 case TCP_THIN_DUPACK: 2468 case TCP_THIN_DUPACK:
2469 if (val < 0 || val > 1) 2469 if (val < 0 || val > 1)
2470 err = -EINVAL; 2470 err = -EINVAL;
2471 else { 2471 else {
2472 tp->thin_dupack = val; 2472 tp->thin_dupack = val;
2473 if (tp->thin_dupack) 2473 if (tp->thin_dupack)
2474 tcp_disable_early_retrans(tp); 2474 tcp_disable_early_retrans(tp);
2475 } 2475 }
2476 break; 2476 break;
2477 2477
2478 case TCP_REPAIR: 2478 case TCP_REPAIR:
2479 if (!tcp_can_repair_sock(sk)) 2479 if (!tcp_can_repair_sock(sk))
2480 err = -EPERM; 2480 err = -EPERM;
2481 else if (val == 1) { 2481 else if (val == 1) {
2482 tp->repair = 1; 2482 tp->repair = 1;
2483 sk->sk_reuse = SK_FORCE_REUSE; 2483 sk->sk_reuse = SK_FORCE_REUSE;
2484 tp->repair_queue = TCP_NO_QUEUE; 2484 tp->repair_queue = TCP_NO_QUEUE;
2485 } else if (val == 0) { 2485 } else if (val == 0) {
2486 tp->repair = 0; 2486 tp->repair = 0;
2487 sk->sk_reuse = SK_NO_REUSE; 2487 sk->sk_reuse = SK_NO_REUSE;
2488 tcp_send_window_probe(sk); 2488 tcp_send_window_probe(sk);
2489 } else 2489 } else
2490 err = -EINVAL; 2490 err = -EINVAL;
2491 2491
2492 break; 2492 break;
2493 2493
2494 case TCP_REPAIR_QUEUE: 2494 case TCP_REPAIR_QUEUE:
2495 if (!tp->repair) 2495 if (!tp->repair)
2496 err = -EPERM; 2496 err = -EPERM;
2497 else if (val < TCP_QUEUES_NR) 2497 else if (val < TCP_QUEUES_NR)
2498 tp->repair_queue = val; 2498 tp->repair_queue = val;
2499 else 2499 else
2500 err = -EINVAL; 2500 err = -EINVAL;
2501 break; 2501 break;
2502 2502
2503 case TCP_QUEUE_SEQ: 2503 case TCP_QUEUE_SEQ:
2504 if (sk->sk_state != TCP_CLOSE) 2504 if (sk->sk_state != TCP_CLOSE)
2505 err = -EPERM; 2505 err = -EPERM;
2506 else if (tp->repair_queue == TCP_SEND_QUEUE) 2506 else if (tp->repair_queue == TCP_SEND_QUEUE)
2507 tp->write_seq = val; 2507 tp->write_seq = val;
2508 else if (tp->repair_queue == TCP_RECV_QUEUE) 2508 else if (tp->repair_queue == TCP_RECV_QUEUE)
2509 tp->rcv_nxt = val; 2509 tp->rcv_nxt = val;
2510 else 2510 else
2511 err = -EINVAL; 2511 err = -EINVAL;
2512 break; 2512 break;
2513 2513
2514 case TCP_REPAIR_OPTIONS: 2514 case TCP_REPAIR_OPTIONS:
2515 if (!tp->repair) 2515 if (!tp->repair)
2516 err = -EINVAL; 2516 err = -EINVAL;
2517 else if (sk->sk_state == TCP_ESTABLISHED) 2517 else if (sk->sk_state == TCP_ESTABLISHED)
2518 err = tcp_repair_options_est(tp, 2518 err = tcp_repair_options_est(tp,
2519 (struct tcp_repair_opt __user *)optval, 2519 (struct tcp_repair_opt __user *)optval,
2520 optlen); 2520 optlen);
2521 else 2521 else
2522 err = -EPERM; 2522 err = -EPERM;
2523 break; 2523 break;
2524 2524
2525 case TCP_CORK: 2525 case TCP_CORK:
2526 /* When set indicates to always queue non-full frames. 2526 /* When set indicates to always queue non-full frames.
2527 * Later the user clears this option and we transmit 2527 * Later the user clears this option and we transmit
2528 * any pending partial frames in the queue. This is 2528 * any pending partial frames in the queue. This is
2529 * meant to be used alongside sendfile() to get properly 2529 * meant to be used alongside sendfile() to get properly
2530 * filled frames when the user (for example) must write 2530 * filled frames when the user (for example) must write
2531 * out headers with a write() call first and then use 2531 * out headers with a write() call first and then use
2532 * sendfile to send out the data parts. 2532 * sendfile to send out the data parts.
2533 * 2533 *
2534 * TCP_CORK can be set together with TCP_NODELAY and it is 2534 * TCP_CORK can be set together with TCP_NODELAY and it is
2535 * stronger than TCP_NODELAY. 2535 * stronger than TCP_NODELAY.
2536 */ 2536 */
2537 if (val) { 2537 if (val) {
2538 tp->nonagle |= TCP_NAGLE_CORK; 2538 tp->nonagle |= TCP_NAGLE_CORK;
2539 } else { 2539 } else {
2540 tp->nonagle &= ~TCP_NAGLE_CORK; 2540 tp->nonagle &= ~TCP_NAGLE_CORK;
2541 if (tp->nonagle&TCP_NAGLE_OFF) 2541 if (tp->nonagle&TCP_NAGLE_OFF)
2542 tp->nonagle |= TCP_NAGLE_PUSH; 2542 tp->nonagle |= TCP_NAGLE_PUSH;
2543 tcp_push_pending_frames(sk); 2543 tcp_push_pending_frames(sk);
2544 } 2544 }
2545 break; 2545 break;
2546 2546
2547 case TCP_KEEPIDLE: 2547 case TCP_KEEPIDLE:
2548 if (val < 1 || val > MAX_TCP_KEEPIDLE) 2548 if (val < 1 || val > MAX_TCP_KEEPIDLE)
2549 err = -EINVAL; 2549 err = -EINVAL;
2550 else { 2550 else {
2551 tp->keepalive_time = val * HZ; 2551 tp->keepalive_time = val * HZ;
2552 if (sock_flag(sk, SOCK_KEEPOPEN) && 2552 if (sock_flag(sk, SOCK_KEEPOPEN) &&
2553 !((1 << sk->sk_state) & 2553 !((1 << sk->sk_state) &
2554 (TCPF_CLOSE | TCPF_LISTEN))) { 2554 (TCPF_CLOSE | TCPF_LISTEN))) {
2555 u32 elapsed = keepalive_time_elapsed(tp); 2555 u32 elapsed = keepalive_time_elapsed(tp);
2556 if (tp->keepalive_time > elapsed) 2556 if (tp->keepalive_time > elapsed)
2557 elapsed = tp->keepalive_time - elapsed; 2557 elapsed = tp->keepalive_time - elapsed;
2558 else 2558 else
2559 elapsed = 0; 2559 elapsed = 0;
2560 inet_csk_reset_keepalive_timer(sk, elapsed); 2560 inet_csk_reset_keepalive_timer(sk, elapsed);
2561 } 2561 }
2562 } 2562 }
2563 break; 2563 break;
2564 case TCP_KEEPINTVL: 2564 case TCP_KEEPINTVL:
2565 if (val < 1 || val > MAX_TCP_KEEPINTVL) 2565 if (val < 1 || val > MAX_TCP_KEEPINTVL)
2566 err = -EINVAL; 2566 err = -EINVAL;
2567 else 2567 else
2568 tp->keepalive_intvl = val * HZ; 2568 tp->keepalive_intvl = val * HZ;
2569 break; 2569 break;
2570 case TCP_KEEPCNT: 2570 case TCP_KEEPCNT:
2571 if (val < 1 || val > MAX_TCP_KEEPCNT) 2571 if (val < 1 || val > MAX_TCP_KEEPCNT)
2572 err = -EINVAL; 2572 err = -EINVAL;
2573 else 2573 else
2574 tp->keepalive_probes = val; 2574 tp->keepalive_probes = val;
2575 break; 2575 break;
2576 case TCP_SYNCNT: 2576 case TCP_SYNCNT:
2577 if (val < 1 || val > MAX_TCP_SYNCNT) 2577 if (val < 1 || val > MAX_TCP_SYNCNT)
2578 err = -EINVAL; 2578 err = -EINVAL;
2579 else 2579 else
2580 icsk->icsk_syn_retries = val; 2580 icsk->icsk_syn_retries = val;
2581 break; 2581 break;
2582 2582
2583 case TCP_LINGER2: 2583 case TCP_LINGER2:
2584 if (val < 0) 2584 if (val < 0)
2585 tp->linger2 = -1; 2585 tp->linger2 = -1;
2586 else if (val > sysctl_tcp_fin_timeout / HZ) 2586 else if (val > sysctl_tcp_fin_timeout / HZ)
2587 tp->linger2 = 0; 2587 tp->linger2 = 0;
2588 else 2588 else
2589 tp->linger2 = val * HZ; 2589 tp->linger2 = val * HZ;
2590 break; 2590 break;
2591 2591
2592 case TCP_DEFER_ACCEPT: 2592 case TCP_DEFER_ACCEPT:
2593 /* Translate value in seconds to number of retransmits */ 2593 /* Translate value in seconds to number of retransmits */
2594 icsk->icsk_accept_queue.rskq_defer_accept = 2594 icsk->icsk_accept_queue.rskq_defer_accept =
2595 secs_to_retrans(val, TCP_TIMEOUT_INIT / HZ, 2595 secs_to_retrans(val, TCP_TIMEOUT_INIT / HZ,
2596 TCP_RTO_MAX / HZ); 2596 TCP_RTO_MAX / HZ);
2597 break; 2597 break;
2598 2598
2599 case TCP_WINDOW_CLAMP: 2599 case TCP_WINDOW_CLAMP:
2600 if (!val) { 2600 if (!val) {
2601 if (sk->sk_state != TCP_CLOSE) { 2601 if (sk->sk_state != TCP_CLOSE) {
2602 err = -EINVAL; 2602 err = -EINVAL;
2603 break; 2603 break;
2604 } 2604 }
2605 tp->window_clamp = 0; 2605 tp->window_clamp = 0;
2606 } else 2606 } else
2607 tp->window_clamp = val < SOCK_MIN_RCVBUF / 2 ? 2607 tp->window_clamp = val < SOCK_MIN_RCVBUF / 2 ?
2608 SOCK_MIN_RCVBUF / 2 : val; 2608 SOCK_MIN_RCVBUF / 2 : val;
2609 break; 2609 break;
2610 2610
2611 case TCP_QUICKACK: 2611 case TCP_QUICKACK:
2612 if (!val) { 2612 if (!val) {
2613 icsk->icsk_ack.pingpong = 1; 2613 icsk->icsk_ack.pingpong = 1;
2614 } else { 2614 } else {
2615 icsk->icsk_ack.pingpong = 0; 2615 icsk->icsk_ack.pingpong = 0;
2616 if ((1 << sk->sk_state) & 2616 if ((1 << sk->sk_state) &
2617 (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) && 2617 (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) &&
2618 inet_csk_ack_scheduled(sk)) { 2618 inet_csk_ack_scheduled(sk)) {
2619 icsk->icsk_ack.pending |= ICSK_ACK_PUSHED; 2619 icsk->icsk_ack.pending |= ICSK_ACK_PUSHED;
2620 tcp_cleanup_rbuf(sk, 1); 2620 tcp_cleanup_rbuf(sk, 1);
2621 if (!(val & 1)) 2621 if (!(val & 1))
2622 icsk->icsk_ack.pingpong = 1; 2622 icsk->icsk_ack.pingpong = 1;
2623 } 2623 }
2624 } 2624 }
2625 break; 2625 break;
2626 2626
2627 #ifdef CONFIG_TCP_MD5SIG 2627 #ifdef CONFIG_TCP_MD5SIG
2628 case TCP_MD5SIG: 2628 case TCP_MD5SIG:
2629 /* Read the IP->Key mappings from userspace */ 2629 /* Read the IP->Key mappings from userspace */
2630 err = tp->af_specific->md5_parse(sk, optval, optlen); 2630 err = tp->af_specific->md5_parse(sk, optval, optlen);
2631 break; 2631 break;
2632 #endif 2632 #endif
2633 case TCP_USER_TIMEOUT: 2633 case TCP_USER_TIMEOUT:
2634 /* Cap the max timeout in ms TCP will retry/retrans 2634 /* Cap the max timeout in ms TCP will retry/retrans
2635 * before giving up and aborting (ETIMEDOUT) a connection. 2635 * before giving up and aborting (ETIMEDOUT) a connection.
2636 */ 2636 */
2637 if (val < 0) 2637 if (val < 0)
2638 err = -EINVAL; 2638 err = -EINVAL;
2639 else 2639 else
2640 icsk->icsk_user_timeout = msecs_to_jiffies(val); 2640 icsk->icsk_user_timeout = msecs_to_jiffies(val);
2641 break; 2641 break;
2642 2642
2643 case TCP_FASTOPEN: 2643 case TCP_FASTOPEN:
2644 if (val >= 0 && ((1 << sk->sk_state) & (TCPF_CLOSE | 2644 if (val >= 0 && ((1 << sk->sk_state) & (TCPF_CLOSE |
2645 TCPF_LISTEN))) 2645 TCPF_LISTEN)))
2646 err = fastopen_init_queue(sk, val); 2646 err = fastopen_init_queue(sk, val);
2647 else 2647 else
2648 err = -EINVAL; 2648 err = -EINVAL;
2649 break; 2649 break;
2650 case TCP_TIMESTAMP: 2650 case TCP_TIMESTAMP:
2651 if (!tp->repair) 2651 if (!tp->repair)
2652 err = -EPERM; 2652 err = -EPERM;
2653 else 2653 else
2654 tp->tsoffset = val - tcp_time_stamp; 2654 tp->tsoffset = val - tcp_time_stamp;
2655 break; 2655 break;
2656 case TCP_NOTSENT_LOWAT: 2656 case TCP_NOTSENT_LOWAT:
2657 tp->notsent_lowat = val; 2657 tp->notsent_lowat = val;
2658 sk->sk_write_space(sk); 2658 sk->sk_write_space(sk);
2659 break; 2659 break;
2660 default: 2660 default:
2661 err = -ENOPROTOOPT; 2661 err = -ENOPROTOOPT;
2662 break; 2662 break;
2663 } 2663 }
2664 2664
2665 release_sock(sk); 2665 release_sock(sk);
2666 return err; 2666 return err;
2667 } 2667 }
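A userspace sketch of the corking pattern described in the TCP_CORK case above: write the headers, push the payload with sendfile(), then clear the cork so any partial final segment goes out. "fd", "filefd", "hdr" and "payload_len" are assumptions for the example.

#include <stdio.h>
#include <string.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/sendfile.h>
#include <sys/socket.h>
#include <unistd.h>

static void send_response(int fd, int filefd, const char *hdr, off_t payload_len)
{
	int on = 1, off = 0;

	setsockopt(fd, IPPROTO_TCP, TCP_CORK, &on, sizeof(on));

	write(fd, hdr, strlen(hdr));			/* headers... */
	sendfile(fd, filefd, NULL, payload_len);	/* ...then the file data */

	/* Uncorking transmits whatever partial frame is still queued. */
	setsockopt(fd, IPPROTO_TCP, TCP_CORK, &off, sizeof(off));
}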
2668 2668
2669 int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval, 2669 int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
2670 unsigned int optlen) 2670 unsigned int optlen)
2671 { 2671 {
2672 const struct inet_connection_sock *icsk = inet_csk(sk); 2672 const struct inet_connection_sock *icsk = inet_csk(sk);
2673 2673
2674 if (level != SOL_TCP) 2674 if (level != SOL_TCP)
2675 return icsk->icsk_af_ops->setsockopt(sk, level, optname, 2675 return icsk->icsk_af_ops->setsockopt(sk, level, optname,
2676 optval, optlen); 2676 optval, optlen);
2677 return do_tcp_setsockopt(sk, level, optname, optval, optlen); 2677 return do_tcp_setsockopt(sk, level, optname, optval, optlen);
2678 } 2678 }
2679 EXPORT_SYMBOL(tcp_setsockopt); 2679 EXPORT_SYMBOL(tcp_setsockopt);
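A userspace sketch combining several of the liveness knobs handled in do_tcp_setsockopt() above: enable keepalives, probe after 60 s of idle, every 10 s, give up after 5 failed probes, and additionally cap retransmissions with TCP_USER_TIMEOUT (milliseconds). The numbers are illustrative, not recommendations, and TCP_USER_TIMEOUT may need reasonably recent libc headers.

#include <stdio.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

static void tune_liveness(int fd)
{
	int on = 1, idle = 60, intvl = 10, cnt = 5;
	unsigned int user_timeout_ms = 30000;

	/* SO_KEEPALIVE arms the timer; the TCP_KEEP* options tune it. */
	setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on));
	setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &idle, sizeof(idle));
	setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL, &intvl, sizeof(intvl));
	setsockopt(fd, IPPROTO_TCP, TCP_KEEPCNT, &cnt, sizeof(cnt));
	setsockopt(fd, IPPROTO_TCP, TCP_USER_TIMEOUT,
		   &user_timeout_ms, sizeof(user_timeout_ms));
}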
2680 2680
2681 #ifdef CONFIG_COMPAT 2681 #ifdef CONFIG_COMPAT
2682 int compat_tcp_setsockopt(struct sock *sk, int level, int optname, 2682 int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
2683 char __user *optval, unsigned int optlen) 2683 char __user *optval, unsigned int optlen)
2684 { 2684 {
2685 if (level != SOL_TCP) 2685 if (level != SOL_TCP)
2686 return inet_csk_compat_setsockopt(sk, level, optname, 2686 return inet_csk_compat_setsockopt(sk, level, optname,
2687 optval, optlen); 2687 optval, optlen);
2688 return do_tcp_setsockopt(sk, level, optname, optval, optlen); 2688 return do_tcp_setsockopt(sk, level, optname, optval, optlen);
2689 } 2689 }
2690 EXPORT_SYMBOL(compat_tcp_setsockopt); 2690 EXPORT_SYMBOL(compat_tcp_setsockopt);
2691 #endif 2691 #endif
2692 2692
2693 /* Return information about state of tcp endpoint in API format. */ 2693 /* Return information about state of tcp endpoint in API format. */
2694 void tcp_get_info(const struct sock *sk, struct tcp_info *info) 2694 void tcp_get_info(const struct sock *sk, struct tcp_info *info)
2695 { 2695 {
2696 const struct tcp_sock *tp = tcp_sk(sk); 2696 const struct tcp_sock *tp = tcp_sk(sk);
2697 const struct inet_connection_sock *icsk = inet_csk(sk); 2697 const struct inet_connection_sock *icsk = inet_csk(sk);
2698 u32 now = tcp_time_stamp; 2698 u32 now = tcp_time_stamp;
2699 2699
2700 memset(info, 0, sizeof(*info)); 2700 memset(info, 0, sizeof(*info));
2701 2701
2702 info->tcpi_state = sk->sk_state; 2702 info->tcpi_state = sk->sk_state;
2703 info->tcpi_ca_state = icsk->icsk_ca_state; 2703 info->tcpi_ca_state = icsk->icsk_ca_state;
2704 info->tcpi_retransmits = icsk->icsk_retransmits; 2704 info->tcpi_retransmits = icsk->icsk_retransmits;
2705 info->tcpi_probes = icsk->icsk_probes_out; 2705 info->tcpi_probes = icsk->icsk_probes_out;
2706 info->tcpi_backoff = icsk->icsk_backoff; 2706 info->tcpi_backoff = icsk->icsk_backoff;
2707 2707
2708 if (tp->rx_opt.tstamp_ok) 2708 if (tp->rx_opt.tstamp_ok)
2709 info->tcpi_options |= TCPI_OPT_TIMESTAMPS; 2709 info->tcpi_options |= TCPI_OPT_TIMESTAMPS;
2710 if (tcp_is_sack(tp)) 2710 if (tcp_is_sack(tp))
2711 info->tcpi_options |= TCPI_OPT_SACK; 2711 info->tcpi_options |= TCPI_OPT_SACK;
2712 if (tp->rx_opt.wscale_ok) { 2712 if (tp->rx_opt.wscale_ok) {
2713 info->tcpi_options |= TCPI_OPT_WSCALE; 2713 info->tcpi_options |= TCPI_OPT_WSCALE;
2714 info->tcpi_snd_wscale = tp->rx_opt.snd_wscale; 2714 info->tcpi_snd_wscale = tp->rx_opt.snd_wscale;
2715 info->tcpi_rcv_wscale = tp->rx_opt.rcv_wscale; 2715 info->tcpi_rcv_wscale = tp->rx_opt.rcv_wscale;
2716 } 2716 }
2717 2717
2718 if (tp->ecn_flags & TCP_ECN_OK) 2718 if (tp->ecn_flags & TCP_ECN_OK)
2719 info->tcpi_options |= TCPI_OPT_ECN; 2719 info->tcpi_options |= TCPI_OPT_ECN;
2720 if (tp->ecn_flags & TCP_ECN_SEEN) 2720 if (tp->ecn_flags & TCP_ECN_SEEN)
2721 info->tcpi_options |= TCPI_OPT_ECN_SEEN; 2721 info->tcpi_options |= TCPI_OPT_ECN_SEEN;
2722 if (tp->syn_data_acked) 2722 if (tp->syn_data_acked)
2723 info->tcpi_options |= TCPI_OPT_SYN_DATA; 2723 info->tcpi_options |= TCPI_OPT_SYN_DATA;
2724 2724
2725 info->tcpi_rto = jiffies_to_usecs(icsk->icsk_rto); 2725 info->tcpi_rto = jiffies_to_usecs(icsk->icsk_rto);
2726 info->tcpi_ato = jiffies_to_usecs(icsk->icsk_ack.ato); 2726 info->tcpi_ato = jiffies_to_usecs(icsk->icsk_ack.ato);
2727 info->tcpi_snd_mss = tp->mss_cache; 2727 info->tcpi_snd_mss = tp->mss_cache;
2728 info->tcpi_rcv_mss = icsk->icsk_ack.rcv_mss; 2728 info->tcpi_rcv_mss = icsk->icsk_ack.rcv_mss;
2729 2729
2730 if (sk->sk_state == TCP_LISTEN) { 2730 if (sk->sk_state == TCP_LISTEN) {
2731 info->tcpi_unacked = sk->sk_ack_backlog; 2731 info->tcpi_unacked = sk->sk_ack_backlog;
2732 info->tcpi_sacked = sk->sk_max_ack_backlog; 2732 info->tcpi_sacked = sk->sk_max_ack_backlog;
2733 } else { 2733 } else {
2734 info->tcpi_unacked = tp->packets_out; 2734 info->tcpi_unacked = tp->packets_out;
2735 info->tcpi_sacked = tp->sacked_out; 2735 info->tcpi_sacked = tp->sacked_out;
2736 } 2736 }
2737 info->tcpi_lost = tp->lost_out; 2737 info->tcpi_lost = tp->lost_out;
2738 info->tcpi_retrans = tp->retrans_out; 2738 info->tcpi_retrans = tp->retrans_out;
2739 info->tcpi_fackets = tp->fackets_out; 2739 info->tcpi_fackets = tp->fackets_out;
2740 2740
2741 info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime); 2741 info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime);
2742 info->tcpi_last_data_recv = jiffies_to_msecs(now - icsk->icsk_ack.lrcvtime); 2742 info->tcpi_last_data_recv = jiffies_to_msecs(now - icsk->icsk_ack.lrcvtime);
2743 info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp); 2743 info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp);
2744 2744
2745 info->tcpi_pmtu = icsk->icsk_pmtu_cookie; 2745 info->tcpi_pmtu = icsk->icsk_pmtu_cookie;
2746 info->tcpi_rcv_ssthresh = tp->rcv_ssthresh; 2746 info->tcpi_rcv_ssthresh = tp->rcv_ssthresh;
2747 info->tcpi_rtt = jiffies_to_usecs(tp->srtt)>>3; 2747 info->tcpi_rtt = jiffies_to_usecs(tp->srtt)>>3;
2748 info->tcpi_rttvar = jiffies_to_usecs(tp->mdev)>>2; 2748 info->tcpi_rttvar = jiffies_to_usecs(tp->mdev)>>2;
2749 info->tcpi_snd_ssthresh = tp->snd_ssthresh; 2749 info->tcpi_snd_ssthresh = tp->snd_ssthresh;
2750 info->tcpi_snd_cwnd = tp->snd_cwnd; 2750 info->tcpi_snd_cwnd = tp->snd_cwnd;
2751 info->tcpi_advmss = tp->advmss; 2751 info->tcpi_advmss = tp->advmss;
2752 info->tcpi_reordering = tp->reordering; 2752 info->tcpi_reordering = tp->reordering;
2753 2753
2754 info->tcpi_rcv_rtt = jiffies_to_usecs(tp->rcv_rtt_est.rtt)>>3; 2754 info->tcpi_rcv_rtt = jiffies_to_usecs(tp->rcv_rtt_est.rtt)>>3;
2755 info->tcpi_rcv_space = tp->rcvq_space.space; 2755 info->tcpi_rcv_space = tp->rcvq_space.space;
2756 2756
2757 info->tcpi_total_retrans = tp->total_retrans; 2757 info->tcpi_total_retrans = tp->total_retrans;
2758 } 2758 }
2759 EXPORT_SYMBOL_GPL(tcp_get_info); 2759 EXPORT_SYMBOL_GPL(tcp_get_info);
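A userspace sketch reading the structure filled in by tcp_get_info() above. The rtt/rttvar fields are reported in microseconds and snd_cwnd in segments; the kernel may hand back fewer bytes than the struct the headers declare, which is why optlen is passed in and out.

#include <stdio.h>
#include <string.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

static void dump_tcp_info(int fd)
{
	struct tcp_info ti;
	socklen_t len = sizeof(ti);

	memset(&ti, 0, sizeof(ti));
	if (getsockopt(fd, IPPROTO_TCP, TCP_INFO, &ti, &len) < 0) {
		perror("getsockopt(TCP_INFO)");
		return;
	}

	printf("state=%u rtt=%uus rttvar=%uus cwnd=%u segs retrans=%u\n",
	       (unsigned)ti.tcpi_state, ti.tcpi_rtt, ti.tcpi_rttvar,
	       ti.tcpi_snd_cwnd, ti.tcpi_total_retrans);
}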
2760 2760
2761 static int do_tcp_getsockopt(struct sock *sk, int level, 2761 static int do_tcp_getsockopt(struct sock *sk, int level,
2762 int optname, char __user *optval, int __user *optlen) 2762 int optname, char __user *optval, int __user *optlen)
2763 { 2763 {
2764 struct inet_connection_sock *icsk = inet_csk(sk); 2764 struct inet_connection_sock *icsk = inet_csk(sk);
2765 struct tcp_sock *tp = tcp_sk(sk); 2765 struct tcp_sock *tp = tcp_sk(sk);
2766 int val, len; 2766 int val, len;
2767 2767
2768 if (get_user(len, optlen)) 2768 if (get_user(len, optlen))
2769 return -EFAULT; 2769 return -EFAULT;
2770 2770
2771 len = min_t(unsigned int, len, sizeof(int)); 2771 len = min_t(unsigned int, len, sizeof(int));
2772 2772
2773 if (len < 0) 2773 if (len < 0)
2774 return -EINVAL; 2774 return -EINVAL;
2775 2775
2776 switch (optname) { 2776 switch (optname) {
2777 case TCP_MAXSEG: 2777 case TCP_MAXSEG:
2778 val = tp->mss_cache; 2778 val = tp->mss_cache;
2779 if (!val && ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) 2779 if (!val && ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
2780 val = tp->rx_opt.user_mss; 2780 val = tp->rx_opt.user_mss;
2781 if (tp->repair) 2781 if (tp->repair)
2782 val = tp->rx_opt.mss_clamp; 2782 val = tp->rx_opt.mss_clamp;
2783 break; 2783 break;
2784 case TCP_NODELAY: 2784 case TCP_NODELAY:
2785 val = !!(tp->nonagle&TCP_NAGLE_OFF); 2785 val = !!(tp->nonagle&TCP_NAGLE_OFF);
2786 break; 2786 break;
2787 case TCP_CORK: 2787 case TCP_CORK:
2788 val = !!(tp->nonagle&TCP_NAGLE_CORK); 2788 val = !!(tp->nonagle&TCP_NAGLE_CORK);
2789 break; 2789 break;
2790 case TCP_KEEPIDLE: 2790 case TCP_KEEPIDLE:
2791 val = keepalive_time_when(tp) / HZ; 2791 val = keepalive_time_when(tp) / HZ;
2792 break; 2792 break;
2793 case TCP_KEEPINTVL: 2793 case TCP_KEEPINTVL:
2794 val = keepalive_intvl_when(tp) / HZ; 2794 val = keepalive_intvl_when(tp) / HZ;
2795 break; 2795 break;
2796 case TCP_KEEPCNT: 2796 case TCP_KEEPCNT:
2797 val = keepalive_probes(tp); 2797 val = keepalive_probes(tp);
2798 break; 2798 break;
2799 case TCP_SYNCNT: 2799 case TCP_SYNCNT:
2800 val = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries; 2800 val = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries;
2801 break; 2801 break;
2802 case TCP_LINGER2: 2802 case TCP_LINGER2:
2803 val = tp->linger2; 2803 val = tp->linger2;
2804 if (val >= 0) 2804 if (val >= 0)
2805 val = (val ? : sysctl_tcp_fin_timeout) / HZ; 2805 val = (val ? : sysctl_tcp_fin_timeout) / HZ;
2806 break; 2806 break;
2807 case TCP_DEFER_ACCEPT: 2807 case TCP_DEFER_ACCEPT:
2808 val = retrans_to_secs(icsk->icsk_accept_queue.rskq_defer_accept, 2808 val = retrans_to_secs(icsk->icsk_accept_queue.rskq_defer_accept,
2809 TCP_TIMEOUT_INIT / HZ, TCP_RTO_MAX / HZ); 2809 TCP_TIMEOUT_INIT / HZ, TCP_RTO_MAX / HZ);
2810 break; 2810 break;
2811 case TCP_WINDOW_CLAMP: 2811 case TCP_WINDOW_CLAMP:
2812 val = tp->window_clamp; 2812 val = tp->window_clamp;
2813 break; 2813 break;
2814 case TCP_INFO: { 2814 case TCP_INFO: {
2815 struct tcp_info info; 2815 struct tcp_info info;
2816 2816
2817 if (get_user(len, optlen)) 2817 if (get_user(len, optlen))
2818 return -EFAULT; 2818 return -EFAULT;
2819 2819
2820 tcp_get_info(sk, &info); 2820 tcp_get_info(sk, &info);
2821 2821
2822 len = min_t(unsigned int, len, sizeof(info)); 2822 len = min_t(unsigned int, len, sizeof(info));
2823 if (put_user(len, optlen)) 2823 if (put_user(len, optlen))
2824 return -EFAULT; 2824 return -EFAULT;
2825 if (copy_to_user(optval, &info, len)) 2825 if (copy_to_user(optval, &info, len))
2826 return -EFAULT; 2826 return -EFAULT;
2827 return 0; 2827 return 0;
2828 } 2828 }
2829 case TCP_QUICKACK: 2829 case TCP_QUICKACK:
2830 val = !icsk->icsk_ack.pingpong; 2830 val = !icsk->icsk_ack.pingpong;
2831 break; 2831 break;
2832 2832
2833 case TCP_CONGESTION: 2833 case TCP_CONGESTION:
2834 if (get_user(len, optlen)) 2834 if (get_user(len, optlen))
2835 return -EFAULT; 2835 return -EFAULT;
2836 len = min_t(unsigned int, len, TCP_CA_NAME_MAX); 2836 len = min_t(unsigned int, len, TCP_CA_NAME_MAX);
2837 if (put_user(len, optlen)) 2837 if (put_user(len, optlen))
2838 return -EFAULT; 2838 return -EFAULT;
2839 if (copy_to_user(optval, icsk->icsk_ca_ops->name, len)) 2839 if (copy_to_user(optval, icsk->icsk_ca_ops->name, len))
2840 return -EFAULT; 2840 return -EFAULT;
2841 return 0; 2841 return 0;
2842 2842
2843 case TCP_THIN_LINEAR_TIMEOUTS: 2843 case TCP_THIN_LINEAR_TIMEOUTS:
2844 val = tp->thin_lto; 2844 val = tp->thin_lto;
2845 break; 2845 break;
2846 case TCP_THIN_DUPACK: 2846 case TCP_THIN_DUPACK:
2847 val = tp->thin_dupack; 2847 val = tp->thin_dupack;
2848 break; 2848 break;
2849 2849
2850 case TCP_REPAIR: 2850 case TCP_REPAIR:
2851 val = tp->repair; 2851 val = tp->repair;
2852 break; 2852 break;
2853 2853
2854 case TCP_REPAIR_QUEUE: 2854 case TCP_REPAIR_QUEUE:
2855 if (tp->repair) 2855 if (tp->repair)
2856 val = tp->repair_queue; 2856 val = tp->repair_queue;
2857 else 2857 else
2858 return -EINVAL; 2858 return -EINVAL;
2859 break; 2859 break;
2860 2860
2861 case TCP_QUEUE_SEQ: 2861 case TCP_QUEUE_SEQ:
2862 if (tp->repair_queue == TCP_SEND_QUEUE) 2862 if (tp->repair_queue == TCP_SEND_QUEUE)
2863 val = tp->write_seq; 2863 val = tp->write_seq;
2864 else if (tp->repair_queue == TCP_RECV_QUEUE) 2864 else if (tp->repair_queue == TCP_RECV_QUEUE)
2865 val = tp->rcv_nxt; 2865 val = tp->rcv_nxt;
2866 else 2866 else
2867 return -EINVAL; 2867 return -EINVAL;
2868 break; 2868 break;
2869 2869
2870 case TCP_USER_TIMEOUT: 2870 case TCP_USER_TIMEOUT:
2871 val = jiffies_to_msecs(icsk->icsk_user_timeout); 2871 val = jiffies_to_msecs(icsk->icsk_user_timeout);
2872 break; 2872 break;
2873 case TCP_TIMESTAMP: 2873 case TCP_TIMESTAMP:
2874 val = tcp_time_stamp + tp->tsoffset; 2874 val = tcp_time_stamp + tp->tsoffset;
2875 break; 2875 break;
2876 case TCP_NOTSENT_LOWAT: 2876 case TCP_NOTSENT_LOWAT:
2877 val = tp->notsent_lowat; 2877 val = tp->notsent_lowat;
2878 break; 2878 break;
2879 default: 2879 default:
2880 return -ENOPROTOOPT; 2880 return -ENOPROTOOPT;
2881 } 2881 }
2882 2882
2883 if (put_user(len, optlen)) 2883 if (put_user(len, optlen))
2884 return -EFAULT; 2884 return -EFAULT;
2885 if (copy_to_user(optval, &val, len)) 2885 if (copy_to_user(optval, &val, len))
2886 return -EFAULT; 2886 return -EFAULT;
2887 return 0; 2887 return 0;
2888 } 2888 }
2889 2889
2890 int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval, 2890 int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
2891 int __user *optlen) 2891 int __user *optlen)
2892 { 2892 {
2893 struct inet_connection_sock *icsk = inet_csk(sk); 2893 struct inet_connection_sock *icsk = inet_csk(sk);
2894 2894
2895 if (level != SOL_TCP) 2895 if (level != SOL_TCP)
2896 return icsk->icsk_af_ops->getsockopt(sk, level, optname, 2896 return icsk->icsk_af_ops->getsockopt(sk, level, optname,
2897 optval, optlen); 2897 optval, optlen);
2898 return do_tcp_getsockopt(sk, level, optname, optval, optlen); 2898 return do_tcp_getsockopt(sk, level, optname, optval, optlen);
2899 } 2899 }
2900 EXPORT_SYMBOL(tcp_getsockopt); 2900 EXPORT_SYMBOL(tcp_getsockopt);
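/*
 * A minimal userspace sketch of the variable-length TCP_INFO path handled
 * above (the helper name is arbitrary and the snippet is illustrative, not
 * part of this file): the kernel clamps the copy to
 * min(*optlen, sizeof(struct tcp_info)) and writes the clamped length back
 * through optlen.
 */
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

static int query_tcp_info(int fd, struct tcp_info *ti, socklen_t *len)
{
	*len = sizeof(*ti);
	/* IPPROTO_TCP (== SOL_TCP) is routed through tcp_getsockopt() above */
	return getsockopt(fd, IPPROTO_TCP, TCP_INFO, ti, len);
}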
2901 2901
2902 #ifdef CONFIG_COMPAT 2902 #ifdef CONFIG_COMPAT
2903 int compat_tcp_getsockopt(struct sock *sk, int level, int optname, 2903 int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
2904 char __user *optval, int __user *optlen) 2904 char __user *optval, int __user *optlen)
2905 { 2905 {
2906 if (level != SOL_TCP) 2906 if (level != SOL_TCP)
2907 return inet_csk_compat_getsockopt(sk, level, optname, 2907 return inet_csk_compat_getsockopt(sk, level, optname,
2908 optval, optlen); 2908 optval, optlen);
2909 return do_tcp_getsockopt(sk, level, optname, optval, optlen); 2909 return do_tcp_getsockopt(sk, level, optname, optval, optlen);
2910 } 2910 }
2911 EXPORT_SYMBOL(compat_tcp_getsockopt); 2911 EXPORT_SYMBOL(compat_tcp_getsockopt);
2912 #endif 2912 #endif
2913 2913
2914 #ifdef CONFIG_TCP_MD5SIG 2914 #ifdef CONFIG_TCP_MD5SIG
2915 static struct tcp_md5sig_pool __percpu *tcp_md5sig_pool __read_mostly; 2915 static struct tcp_md5sig_pool __percpu *tcp_md5sig_pool __read_mostly;
2916 static DEFINE_MUTEX(tcp_md5sig_mutex); 2916 static DEFINE_MUTEX(tcp_md5sig_mutex);
2917 2917
2918 static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool __percpu *pool) 2918 static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool __percpu *pool)
2919 { 2919 {
2920 int cpu; 2920 int cpu;
2921 2921
2922 for_each_possible_cpu(cpu) { 2922 for_each_possible_cpu(cpu) {
2923 struct tcp_md5sig_pool *p = per_cpu_ptr(pool, cpu); 2923 struct tcp_md5sig_pool *p = per_cpu_ptr(pool, cpu);
2924 2924
2925 if (p->md5_desc.tfm) 2925 if (p->md5_desc.tfm)
2926 crypto_free_hash(p->md5_desc.tfm); 2926 crypto_free_hash(p->md5_desc.tfm);
2927 } 2927 }
2928 free_percpu(pool); 2928 free_percpu(pool);
2929 } 2929 }
2930 2930
2931 static void __tcp_alloc_md5sig_pool(void) 2931 static void __tcp_alloc_md5sig_pool(void)
2932 { 2932 {
2933 int cpu; 2933 int cpu;
2934 struct tcp_md5sig_pool __percpu *pool; 2934 struct tcp_md5sig_pool __percpu *pool;
2935 2935
2936 pool = alloc_percpu(struct tcp_md5sig_pool); 2936 pool = alloc_percpu(struct tcp_md5sig_pool);
2937 if (!pool) 2937 if (!pool)
2938 return; 2938 return;
2939 2939
2940 for_each_possible_cpu(cpu) { 2940 for_each_possible_cpu(cpu) {
2941 struct crypto_hash *hash; 2941 struct crypto_hash *hash;
2942 2942
2943 hash = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC); 2943 hash = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
2944 if (IS_ERR_OR_NULL(hash)) 2944 if (IS_ERR_OR_NULL(hash))
2945 goto out_free; 2945 goto out_free;
2946 2946
2947 per_cpu_ptr(pool, cpu)->md5_desc.tfm = hash; 2947 per_cpu_ptr(pool, cpu)->md5_desc.tfm = hash;
2948 } 2948 }
2949 /* before setting tcp_md5sig_pool, we must commit all writes 2949 /* before setting tcp_md5sig_pool, we must commit all writes
2950 * to memory. See ACCESS_ONCE() in tcp_get_md5sig_pool() 2950 * to memory. See ACCESS_ONCE() in tcp_get_md5sig_pool()
2951 */ 2951 */
2952 smp_wmb(); 2952 smp_wmb();
2953 tcp_md5sig_pool = pool; 2953 tcp_md5sig_pool = pool;
2954 return; 2954 return;
2955 out_free: 2955 out_free:
2956 __tcp_free_md5sig_pool(pool); 2956 __tcp_free_md5sig_pool(pool);
2957 } 2957 }
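/*
 * A minimal sketch of the publish ordering assumed above: the writer fully
 * initialises every per-cpu entry, issues smp_wmb(), and only then stores
 * the pool pointer, so a reader that observes a non-NULL tcp_md5sig_pool
 * through ACCESS_ONCE() is expected to observe initialised contents too.
 *
 *	writer (__tcp_alloc_md5sig_pool)	reader (tcp_get_md5sig_pool)
 *	--------------------------------	----------------------------
 *	set per-cpu md5_desc.tfm		p = ACCESS_ONCE(tcp_md5sig_pool);
 *	smp_wmb();				if (p)
 *	tcp_md5sig_pool = pool;				use __this_cpu_ptr(p);
 */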
2958 2958
2959 bool tcp_alloc_md5sig_pool(void) 2959 bool tcp_alloc_md5sig_pool(void)
2960 { 2960 {
2961 if (unlikely(!tcp_md5sig_pool)) { 2961 if (unlikely(!tcp_md5sig_pool)) {
2962 mutex_lock(&tcp_md5sig_mutex); 2962 mutex_lock(&tcp_md5sig_mutex);
2963 2963
2964 if (!tcp_md5sig_pool) 2964 if (!tcp_md5sig_pool)
2965 __tcp_alloc_md5sig_pool(); 2965 __tcp_alloc_md5sig_pool();
2966 2966
2967 mutex_unlock(&tcp_md5sig_mutex); 2967 mutex_unlock(&tcp_md5sig_mutex);
2968 } 2968 }
2969 return tcp_md5sig_pool != NULL; 2969 return tcp_md5sig_pool != NULL;
2970 } 2970 }
2971 EXPORT_SYMBOL(tcp_alloc_md5sig_pool); 2971 EXPORT_SYMBOL(tcp_alloc_md5sig_pool);
2972 2972
2973 2973
2974 /** 2974 /**
2975 * tcp_get_md5sig_pool - get md5sig_pool for this user 2975 * tcp_get_md5sig_pool - get md5sig_pool for this user
2976 * 2976 *
2977 * We use a percpu structure, so if we succeed, we exit with preemption 2977 * We use a percpu structure, so if we succeed, we exit with preemption
2978 * and BH disabled, to make sure that another thread or a softirq handler 2978 * and BH disabled, to make sure that another thread or a softirq handler
2979 * won't try to use the same context. 2979 * won't try to use the same context.
2980 */ 2980 */
2981 struct tcp_md5sig_pool *tcp_get_md5sig_pool(void) 2981 struct tcp_md5sig_pool *tcp_get_md5sig_pool(void)
2982 { 2982 {
2983 struct tcp_md5sig_pool __percpu *p; 2983 struct tcp_md5sig_pool __percpu *p;
2984 2984
2985 local_bh_disable(); 2985 local_bh_disable();
2986 p = ACCESS_ONCE(tcp_md5sig_pool); 2986 p = ACCESS_ONCE(tcp_md5sig_pool);
2987 if (p) 2987 if (p)
2988 return __this_cpu_ptr(p); 2988 return __this_cpu_ptr(p);
2989 2989
2990 local_bh_enable(); 2990 local_bh_enable();
2991 return NULL; 2991 return NULL;
2992 } 2992 }
2993 EXPORT_SYMBOL(tcp_get_md5sig_pool); 2993 EXPORT_SYMBOL(tcp_get_md5sig_pool);
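/*
 * A minimal sketch of the intended calling pattern (the helper name is
 * invented here; compare tcp_v4_md5_hash_hdr() in tcp_ipv4.c, and note that
 * the address pseudo-header step is omitted for brevity): take the per-cpu
 * pool with BH disabled, run the transform through the helpers below, then
 * drop it again with tcp_put_md5sig_pool().
 */
static int tcp_md5_hash_hdr_sketch(char *md5_hash,
				   const struct tcp_md5sig_key *key,
				   const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}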
2994 2994
2995 int tcp_md5_hash_header(struct tcp_md5sig_pool *hp, 2995 int tcp_md5_hash_header(struct tcp_md5sig_pool *hp,
2996 const struct tcphdr *th) 2996 const struct tcphdr *th)
2997 { 2997 {
2998 struct scatterlist sg; 2998 struct scatterlist sg;
2999 struct tcphdr hdr; 2999 struct tcphdr hdr;
3000 int err; 3000 int err;
3001 3001
3002 /* We are not allowed to change the tcphdr; make a local copy */ 3002 /* We are not allowed to change the tcphdr; make a local copy */
3003 memcpy(&hdr, th, sizeof(hdr)); 3003 memcpy(&hdr, th, sizeof(hdr));
3004 hdr.check = 0; 3004 hdr.check = 0;
3005 3005
3006 /* options aren't included in the hash */ 3006 /* options aren't included in the hash */
3007 sg_init_one(&sg, &hdr, sizeof(hdr)); 3007 sg_init_one(&sg, &hdr, sizeof(hdr));
3008 err = crypto_hash_update(&hp->md5_desc, &sg, sizeof(hdr)); 3008 err = crypto_hash_update(&hp->md5_desc, &sg, sizeof(hdr));
3009 return err; 3009 return err;
3010 } 3010 }
3011 EXPORT_SYMBOL(tcp_md5_hash_header); 3011 EXPORT_SYMBOL(tcp_md5_hash_header);
3012 3012
3013 int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *hp, 3013 int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *hp,
3014 const struct sk_buff *skb, unsigned int header_len) 3014 const struct sk_buff *skb, unsigned int header_len)
3015 { 3015 {
3016 struct scatterlist sg; 3016 struct scatterlist sg;
3017 const struct tcphdr *tp = tcp_hdr(skb); 3017 const struct tcphdr *tp = tcp_hdr(skb);
3018 struct hash_desc *desc = &hp->md5_desc; 3018 struct hash_desc *desc = &hp->md5_desc;
3019 unsigned int i; 3019 unsigned int i;
3020 const unsigned int head_data_len = skb_headlen(skb) > header_len ? 3020 const unsigned int head_data_len = skb_headlen(skb) > header_len ?
3021 skb_headlen(skb) - header_len : 0; 3021 skb_headlen(skb) - header_len : 0;
3022 const struct skb_shared_info *shi = skb_shinfo(skb); 3022 const struct skb_shared_info *shi = skb_shinfo(skb);
3023 struct sk_buff *frag_iter; 3023 struct sk_buff *frag_iter;
3024 3024
3025 sg_init_table(&sg, 1); 3025 sg_init_table(&sg, 1);
3026 3026
3027 sg_set_buf(&sg, ((u8 *) tp) + header_len, head_data_len); 3027 sg_set_buf(&sg, ((u8 *) tp) + header_len, head_data_len);
3028 if (crypto_hash_update(desc, &sg, head_data_len)) 3028 if (crypto_hash_update(desc, &sg, head_data_len))
3029 return 1; 3029 return 1;
3030 3030
3031 for (i = 0; i < shi->nr_frags; ++i) { 3031 for (i = 0; i < shi->nr_frags; ++i) {
3032 const struct skb_frag_struct *f = &shi->frags[i]; 3032 const struct skb_frag_struct *f = &shi->frags[i];
3033 unsigned int offset = f->page_offset; 3033 unsigned int offset = f->page_offset;
3034 struct page *page = skb_frag_page(f) + (offset >> PAGE_SHIFT); 3034 struct page *page = skb_frag_page(f) + (offset >> PAGE_SHIFT);
3035 3035
3036 sg_set_page(&sg, page, skb_frag_size(f), 3036 sg_set_page(&sg, page, skb_frag_size(f),
3037 offset_in_page(offset)); 3037 offset_in_page(offset));
3038 if (crypto_hash_update(desc, &sg, skb_frag_size(f))) 3038 if (crypto_hash_update(desc, &sg, skb_frag_size(f)))
3039 return 1; 3039 return 1;
3040 } 3040 }
3041 3041
3042 skb_walk_frags(skb, frag_iter) 3042 skb_walk_frags(skb, frag_iter)
3043 if (tcp_md5_hash_skb_data(hp, frag_iter, 0)) 3043 if (tcp_md5_hash_skb_data(hp, frag_iter, 0))
3044 return 1; 3044 return 1;
3045 3045
3046 return 0; 3046 return 0;
3047 } 3047 }
3048 EXPORT_SYMBOL(tcp_md5_hash_skb_data); 3048 EXPORT_SYMBOL(tcp_md5_hash_skb_data);
3049 3049
3050 int tcp_md5_hash_key(struct tcp_md5sig_pool *hp, const struct tcp_md5sig_key *key) 3050 int tcp_md5_hash_key(struct tcp_md5sig_pool *hp, const struct tcp_md5sig_key *key)
3051 { 3051 {
3052 struct scatterlist sg; 3052 struct scatterlist sg;
3053 3053
3054 sg_init_one(&sg, key->key, key->keylen); 3054 sg_init_one(&sg, key->key, key->keylen);
3055 return crypto_hash_update(&hp->md5_desc, &sg, key->keylen); 3055 return crypto_hash_update(&hp->md5_desc, &sg, key->keylen);
3056 } 3056 }
3057 EXPORT_SYMBOL(tcp_md5_hash_key); 3057 EXPORT_SYMBOL(tcp_md5_hash_key);
3058 3058
3059 #endif 3059 #endif
3060 3060
3061 void tcp_done(struct sock *sk) 3061 void tcp_done(struct sock *sk)
3062 { 3062 {
3063 struct request_sock *req = tcp_sk(sk)->fastopen_rsk; 3063 struct request_sock *req = tcp_sk(sk)->fastopen_rsk;
3064 3064
3065 if (sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV) 3065 if (sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV)
3066 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_ATTEMPTFAILS); 3066 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
3067 3067
3068 tcp_set_state(sk, TCP_CLOSE); 3068 tcp_set_state(sk, TCP_CLOSE);
3069 tcp_clear_xmit_timers(sk); 3069 tcp_clear_xmit_timers(sk);
3070 if (req != NULL) 3070 if (req != NULL)
3071 reqsk_fastopen_remove(sk, req, false); 3071 reqsk_fastopen_remove(sk, req, false);
3072 3072
3073 sk->sk_shutdown = SHUTDOWN_MASK; 3073 sk->sk_shutdown = SHUTDOWN_MASK;
3074 3074
3075 if (!sock_flag(sk, SOCK_DEAD)) 3075 if (!sock_flag(sk, SOCK_DEAD))
3076 sk->sk_state_change(sk); 3076 sk->sk_state_change(sk);
3077 else 3077 else
3078 inet_csk_destroy_sock(sk); 3078 inet_csk_destroy_sock(sk);
3079 } 3079 }
3080 EXPORT_SYMBOL_GPL(tcp_done); 3080 EXPORT_SYMBOL_GPL(tcp_done);
3081 3081
3082 extern struct tcp_congestion_ops tcp_reno; 3082 extern struct tcp_congestion_ops tcp_reno;
3083 3083
3084 static __initdata unsigned long thash_entries; 3084 static __initdata unsigned long thash_entries;
3085 static int __init set_thash_entries(char *str) 3085 static int __init set_thash_entries(char *str)
3086 { 3086 {
3087 ssize_t ret; 3087 ssize_t ret;
3088 3088
3089 if (!str) 3089 if (!str)
3090 return 0; 3090 return 0;
3091 3091
3092 ret = kstrtoul(str, 0, &thash_entries); 3092 ret = kstrtoul(str, 0, &thash_entries);
3093 if (ret) 3093 if (ret)
3094 return 0; 3094 return 0;
3095 3095
3096 return 1; 3096 return 1;
3097 } 3097 }
3098 __setup("thash_entries=", set_thash_entries); 3098 __setup("thash_entries=", set_thash_entries);
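/*
 * Usage sketch: booting with "thash_entries=16384" on the kernel command
 * line requests a fixed established-hash size instead of letting
 * alloc_large_system_hash() in tcp_init() below scale it from available
 * memory (one slot per 128 KB).
 */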
3099 3099
3100 void tcp_init_mem(struct net *net) 3100 void tcp_init_mem(struct net *net)
3101 { 3101 {
3102 unsigned long limit = nr_free_buffer_pages() / 8; 3102 unsigned long limit = nr_free_buffer_pages() / 8;
3103 limit = max(limit, 128UL); 3103 limit = max(limit, 128UL);
3104 net->ipv4.sysctl_tcp_mem[0] = limit / 4 * 3; 3104 net->ipv4.sysctl_tcp_mem[0] = limit / 4 * 3;
3105 net->ipv4.sysctl_tcp_mem[1] = limit; 3105 net->ipv4.sysctl_tcp_mem[1] = limit;
3106 net->ipv4.sysctl_tcp_mem[2] = net->ipv4.sysctl_tcp_mem[0] * 2; 3106 net->ipv4.sysctl_tcp_mem[2] = net->ipv4.sysctl_tcp_mem[0] * 2;
3107 } 3107 }
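/*
 * A worked example with illustrative numbers (4 KiB pages, about 4 GiB of
 * free buffer pages, i.e. nr_free_buffer_pages() ~= 1M): limit becomes
 * 1M / 8 = 131072 pages, so, in pages,
 *
 *	tcp_mem[0] (low)      = limit / 4 * 3  =  98304  (~384 MiB)
 *	tcp_mem[1] (pressure) = limit          = 131072  (~512 MiB)
 *	tcp_mem[2] (high)     = tcp_mem[0] * 2 = 196608  (~768 MiB)
 */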
3108 3108
3109 void __init tcp_init(void) 3109 void __init tcp_init(void)
3110 { 3110 {
3111 struct sk_buff *skb = NULL; 3111 struct sk_buff *skb = NULL;
3112 unsigned long limit; 3112 unsigned long limit;
3113 int max_rshare, max_wshare, cnt; 3113 int max_rshare, max_wshare, cnt;
3114 unsigned int i; 3114 unsigned int i;
3115 3115
3116 BUILD_BUG_ON(sizeof(struct tcp_skb_cb) > sizeof(skb->cb)); 3116 BUILD_BUG_ON(sizeof(struct tcp_skb_cb) > sizeof(skb->cb));
3117 3117
3118 percpu_counter_init(&tcp_sockets_allocated, 0); 3118 percpu_counter_init(&tcp_sockets_allocated, 0);
3119 percpu_counter_init(&tcp_orphan_count, 0); 3119 percpu_counter_init(&tcp_orphan_count, 0);
3120 tcp_hashinfo.bind_bucket_cachep = 3120 tcp_hashinfo.bind_bucket_cachep =
3121 kmem_cache_create("tcp_bind_bucket", 3121 kmem_cache_create("tcp_bind_bucket",
3122 sizeof(struct inet_bind_bucket), 0, 3122 sizeof(struct inet_bind_bucket), 0,
3123 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); 3123 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
3124 3124
3125 /* Size and allocate the main established and bind bucket 3125 /* Size and allocate the main established and bind bucket
3126 * hash tables. 3126 * hash tables.
3127 * 3127 *
3128 * The methodology is similar to that of the buffer cache. 3128 * The methodology is similar to that of the buffer cache.
3129 */ 3129 */
3130 tcp_hashinfo.ehash = 3130 tcp_hashinfo.ehash =
3131 alloc_large_system_hash("TCP established", 3131 alloc_large_system_hash("TCP established",
3132 sizeof(struct inet_ehash_bucket), 3132 sizeof(struct inet_ehash_bucket),
3133 thash_entries, 3133 thash_entries,
3134 17, /* one slot per 128 KB of memory */ 3134 17, /* one slot per 128 KB of memory */
3135 0, 3135 0,
3136 NULL, 3136 NULL,
3137 &tcp_hashinfo.ehash_mask, 3137 &tcp_hashinfo.ehash_mask,
3138 0, 3138 0,
3139 thash_entries ? 0 : 512 * 1024); 3139 thash_entries ? 0 : 512 * 1024);
3140 for (i = 0; i <= tcp_hashinfo.ehash_mask; i++) { 3140 for (i = 0; i <= tcp_hashinfo.ehash_mask; i++) {
3141 INIT_HLIST_NULLS_HEAD(&tcp_hashinfo.ehash[i].chain, i); 3141 INIT_HLIST_NULLS_HEAD(&tcp_hashinfo.ehash[i].chain, i);
3142 INIT_HLIST_NULLS_HEAD(&tcp_hashinfo.ehash[i].twchain, i); 3142 INIT_HLIST_NULLS_HEAD(&tcp_hashinfo.ehash[i].twchain, i);
3143 } 3143 }
3144 if (inet_ehash_locks_alloc(&tcp_hashinfo)) 3144 if (inet_ehash_locks_alloc(&tcp_hashinfo))
3145 panic("TCP: failed to alloc ehash_locks"); 3145 panic("TCP: failed to alloc ehash_locks");
3146 tcp_hashinfo.bhash = 3146 tcp_hashinfo.bhash =
3147 alloc_large_system_hash("TCP bind", 3147 alloc_large_system_hash("TCP bind",
3148 sizeof(struct inet_bind_hashbucket), 3148 sizeof(struct inet_bind_hashbucket),
3149 tcp_hashinfo.ehash_mask + 1, 3149 tcp_hashinfo.ehash_mask + 1,
3150 17, /* one slot per 128 KB of memory */ 3150 17, /* one slot per 128 KB of memory */
3151 0, 3151 0,
3152 &tcp_hashinfo.bhash_size, 3152 &tcp_hashinfo.bhash_size,
3153 NULL, 3153 NULL,
3154 0, 3154 0,
3155 64 * 1024); 3155 64 * 1024);
3156 tcp_hashinfo.bhash_size = 1U << tcp_hashinfo.bhash_size; 3156 tcp_hashinfo.bhash_size = 1U << tcp_hashinfo.bhash_size;
3157 for (i = 0; i < tcp_hashinfo.bhash_size; i++) { 3157 for (i = 0; i < tcp_hashinfo.bhash_size; i++) {
3158 spin_lock_init(&tcp_hashinfo.bhash[i].lock); 3158 spin_lock_init(&tcp_hashinfo.bhash[i].lock);
3159 INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain); 3159 INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain);
3160 } 3160 }
3161 3161
3162 3162
3163 cnt = tcp_hashinfo.ehash_mask + 1; 3163 cnt = tcp_hashinfo.ehash_mask + 1;
3164 3164
3165 tcp_death_row.sysctl_max_tw_buckets = cnt / 2; 3165 tcp_death_row.sysctl_max_tw_buckets = cnt / 2;
3166 sysctl_tcp_max_orphans = cnt / 2; 3166 sysctl_tcp_max_orphans = cnt / 2;
3167 sysctl_max_syn_backlog = max(128, cnt / 256); 3167 sysctl_max_syn_backlog = max(128, cnt / 256);
3168 3168
3169 tcp_init_mem(&init_net); 3169 tcp_init_mem(&init_net);
3170 /* Set per-socket limits to no more than 1/128 the pressure threshold */ 3170 /* Set per-socket limits to no more than 1/128 the pressure threshold */
3171 limit = nr_free_buffer_pages() << (PAGE_SHIFT - 7); 3171 limit = nr_free_buffer_pages() << (PAGE_SHIFT - 7);
3172 max_wshare = min(4UL*1024*1024, limit); 3172 max_wshare = min(4UL*1024*1024, limit);
3173 max_rshare = min(6UL*1024*1024, limit); 3173 max_rshare = min(6UL*1024*1024, limit);
3174 3174
3175 sysctl_tcp_wmem[0] = SK_MEM_QUANTUM; 3175 sysctl_tcp_wmem[0] = SK_MEM_QUANTUM;
3176 sysctl_tcp_wmem[1] = 16*1024; 3176 sysctl_tcp_wmem[1] = 16*1024;
3177 sysctl_tcp_wmem[2] = max(64*1024, max_wshare); 3177 sysctl_tcp_wmem[2] = max(64*1024, max_wshare);
3178 3178
3179 sysctl_tcp_rmem[0] = SK_MEM_QUANTUM; 3179 sysctl_tcp_rmem[0] = SK_MEM_QUANTUM;
3180 sysctl_tcp_rmem[1] = 87380; 3180 sysctl_tcp_rmem[1] = 87380;
3181 sysctl_tcp_rmem[2] = max(87380, max_rshare); 3181 sysctl_tcp_rmem[2] = max(87380, max_rshare);
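/*
 * On the same illustrative 4 GiB machine: limit = 1M pages << (12 - 7)
 * = 32 MiB (i.e. total buffer memory / 128), so max_wshare = min(4 MiB,
 * 32 MiB) = 4 MiB and max_rshare = min(6 MiB, 32 MiB) = 6 MiB, which makes
 * tcp_wmem[2] = 4 MiB and tcp_rmem[2] = 6 MiB on all but very small boxes.
 */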
3182 3182
3183 pr_info("Hash tables configured (established %u bind %u)\n", 3183 pr_info("Hash tables configured (established %u bind %u)\n",
3184 tcp_hashinfo.ehash_mask + 1, tcp_hashinfo.bhash_size); 3184 tcp_hashinfo.ehash_mask + 1, tcp_hashinfo.bhash_size);
3185 3185
3186 tcp_metrics_init(); 3186 tcp_metrics_init();
3187 3187
3188 tcp_register_congestion_control(&tcp_reno); 3188 tcp_register_congestion_control(&tcp_reno);
3189 3189
3190 tcp_tasklet_init(); 3190 tcp_tasklet_init();
3191 } 3191 }
3192 3192