Commit 729b5d1b8ec72c28e99840b3f300ba67726e3ab9
1 parent 06164f3194
Exists in master and in 7 other branches
dmaengine: allow dma support for async_tx to be toggled
Provide a config option for blocking the allocation of dma channels to the async_tx api.

Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Showing 3 changed files with 32 additions and 3 deletions
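Before the per-file hunks, a note on the runtime effect: with CONFIG_ASYNC_TX_DMA=n the new async_dma_find_channel() stub always returns NULL, so async_tx falls back to its synchronous code paths, while with the option enabled the call maps straight onto dma_find_channel(). A minimal sketch of that behaviour, using only helpers this patch adds or that already exist in include/linux/dmaengine.h (sketch_copy is a hypothetical name, not from the patch, and cookie/error handling is omitted):

    #include <linux/dmaengine.h>
    #include <linux/string.h>

    /* sketch_copy: hypothetical helper, not part of this commit */
    static void sketch_copy(void *dest, void *src, size_t len)
    {
    	struct dma_chan *chan = async_dma_find_channel(DMA_MEMCPY);

    	if (chan) {
    		/* offload path: submit the copy and flush it to hardware */
    		dma_async_memcpy_buf_to_buf(chan, dest, src, len);
    		dma_async_issue_pending(chan);
    	} else {
    		/* CONFIG_ASYNC_TX_DMA=n, or no capable engine: CPU copy */
    		memcpy(dest, src, len);
    	}
    }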
crypto/async_tx/async_tx.c
1 | /* | 1 | /* |
2 | * core routines for the asynchronous memory transfer/transform api | 2 | * core routines for the asynchronous memory transfer/transform api |
3 | * | 3 | * |
4 | * Copyright © 2006, Intel Corporation. | 4 | * Copyright © 2006, Intel Corporation. |
5 | * | 5 | * |
6 | * Dan Williams <dan.j.williams@intel.com> | 6 | * Dan Williams <dan.j.williams@intel.com> |
7 | * | 7 | * |
8 | * with architecture considerations by: | 8 | * with architecture considerations by: |
9 | * Neil Brown <neilb@suse.de> | 9 | * Neil Brown <neilb@suse.de> |
10 | * Jeff Garzik <jeff@garzik.org> | 10 | * Jeff Garzik <jeff@garzik.org> |
11 | * | 11 | * |
12 | * This program is free software; you can redistribute it and/or modify it | 12 | * This program is free software; you can redistribute it and/or modify it |
13 | * under the terms and conditions of the GNU General Public License, | 13 | * under the terms and conditions of the GNU General Public License, |
14 | * version 2, as published by the Free Software Foundation. | 14 | * version 2, as published by the Free Software Foundation. |
15 | * | 15 | * |
16 | * This program is distributed in the hope it will be useful, but WITHOUT | 16 | * This program is distributed in the hope it will be useful, but WITHOUT |
17 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | 17 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
18 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | 18 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for |
19 | * more details. | 19 | * more details. |
20 | * | 20 | * |
21 | * You should have received a copy of the GNU General Public License along with | 21 | * You should have received a copy of the GNU General Public License along with |
22 | * this program; if not, write to the Free Software Foundation, Inc., | 22 | * this program; if not, write to the Free Software Foundation, Inc., |
23 | * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | 23 | * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. |
24 | * | 24 | * |
25 | */ | 25 | */ |
26 | #include <linux/rculist.h> | 26 | #include <linux/rculist.h> |
27 | #include <linux/kernel.h> | 27 | #include <linux/kernel.h> |
28 | #include <linux/async_tx.h> | 28 | #include <linux/async_tx.h> |
29 | 29 | ||
30 | #ifdef CONFIG_DMA_ENGINE | 30 | #ifdef CONFIG_DMA_ENGINE |
31 | static int __init async_tx_init(void) | 31 | static int __init async_tx_init(void) |
32 | { | 32 | { |
33 | dmaengine_get(); | 33 | async_dmaengine_get(); |
34 | 34 | ||
35 | printk(KERN_INFO "async_tx: api initialized (async)\n"); | 35 | printk(KERN_INFO "async_tx: api initialized (async)\n"); |
36 | 36 | ||
37 | return 0; | 37 | return 0; |
38 | } | 38 | } |
39 | 39 | ||
40 | static void __exit async_tx_exit(void) | 40 | static void __exit async_tx_exit(void) |
41 | { | 41 | { |
42 | dmaengine_put(); | 42 | async_dmaengine_put(); |
43 | } | 43 | } |
44 | 44 | ||
45 | /** | 45 | /** |
46 | * __async_tx_find_channel - find a channel to carry out the operation or let | 46 | * __async_tx_find_channel - find a channel to carry out the operation or let |
47 | * the transaction execute synchronously | 47 | * the transaction execute synchronously |
48 | * @depend_tx: transaction dependency | 48 | * @depend_tx: transaction dependency |
49 | * @tx_type: transaction type | 49 | * @tx_type: transaction type |
50 | */ | 50 | */ |
51 | struct dma_chan * | 51 | struct dma_chan * |
52 | __async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx, | 52 | __async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx, |
53 | enum dma_transaction_type tx_type) | 53 | enum dma_transaction_type tx_type) |
54 | { | 54 | { |
55 | /* see if we can keep the chain on one channel */ | 55 | /* see if we can keep the chain on one channel */ |
56 | if (depend_tx && | 56 | if (depend_tx && |
57 | dma_has_cap(tx_type, depend_tx->chan->device->cap_mask)) | 57 | dma_has_cap(tx_type, depend_tx->chan->device->cap_mask)) |
58 | return depend_tx->chan; | 58 | return depend_tx->chan; |
59 | return dma_find_channel(tx_type); | 59 | return async_dma_find_channel(tx_type); |
60 | } | 60 | } |
61 | EXPORT_SYMBOL_GPL(__async_tx_find_channel); | 61 | EXPORT_SYMBOL_GPL(__async_tx_find_channel); |
62 | #else | 62 | #else |
63 | static int __init async_tx_init(void) | 63 | static int __init async_tx_init(void) |
64 | { | 64 | { |
65 | printk(KERN_INFO "async_tx: api initialized (sync-only)\n"); | 65 | printk(KERN_INFO "async_tx: api initialized (sync-only)\n"); |
66 | return 0; | 66 | return 0; |
67 | } | 67 | } |
68 | 68 | ||
69 | static void __exit async_tx_exit(void) | 69 | static void __exit async_tx_exit(void) |
70 | { | 70 | { |
71 | do { } while (0); | 71 | do { } while (0); |
72 | } | 72 | } |
73 | #endif | 73 | #endif |
74 | 74 | ||
75 | 75 | ||
76 | /** | 76 | /** |
77 | * async_tx_channel_switch - queue an interrupt descriptor with a dependency | 77 | * async_tx_channel_switch - queue an interrupt descriptor with a dependency |
78 | * pre-attached. | 78 | * pre-attached. |
79 | * @depend_tx: the operation that must finish before the new operation runs | 79 | * @depend_tx: the operation that must finish before the new operation runs |
80 | * @tx: the new operation | 80 | * @tx: the new operation |
81 | */ | 81 | */ |
82 | static void | 82 | static void |
83 | async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx, | 83 | async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx, |
84 | struct dma_async_tx_descriptor *tx) | 84 | struct dma_async_tx_descriptor *tx) |
85 | { | 85 | { |
86 | struct dma_chan *chan; | 86 | struct dma_chan *chan; |
87 | struct dma_device *device; | 87 | struct dma_device *device; |
88 | struct dma_async_tx_descriptor *intr_tx = (void *) ~0; | 88 | struct dma_async_tx_descriptor *intr_tx = (void *) ~0; |
89 | 89 | ||
90 | /* first check to see if we can still append to depend_tx */ | 90 | /* first check to see if we can still append to depend_tx */ |
91 | spin_lock_bh(&depend_tx->lock); | 91 | spin_lock_bh(&depend_tx->lock); |
92 | if (depend_tx->parent && depend_tx->chan == tx->chan) { | 92 | if (depend_tx->parent && depend_tx->chan == tx->chan) { |
93 | tx->parent = depend_tx; | 93 | tx->parent = depend_tx; |
94 | depend_tx->next = tx; | 94 | depend_tx->next = tx; |
95 | intr_tx = NULL; | 95 | intr_tx = NULL; |
96 | } | 96 | } |
97 | spin_unlock_bh(&depend_tx->lock); | 97 | spin_unlock_bh(&depend_tx->lock); |
98 | 98 | ||
99 | if (!intr_tx) | 99 | if (!intr_tx) |
100 | return; | 100 | return; |
101 | 101 | ||
102 | chan = depend_tx->chan; | 102 | chan = depend_tx->chan; |
103 | device = chan->device; | 103 | device = chan->device; |
104 | 104 | ||
105 | /* see if we can schedule an interrupt | 105 | /* see if we can schedule an interrupt |
106 | * otherwise poll for completion | 106 | * otherwise poll for completion |
107 | */ | 107 | */ |
108 | if (dma_has_cap(DMA_INTERRUPT, device->cap_mask)) | 108 | if (dma_has_cap(DMA_INTERRUPT, device->cap_mask)) |
109 | intr_tx = device->device_prep_dma_interrupt(chan, 0); | 109 | intr_tx = device->device_prep_dma_interrupt(chan, 0); |
110 | else | 110 | else |
111 | intr_tx = NULL; | 111 | intr_tx = NULL; |
112 | 112 | ||
113 | if (intr_tx) { | 113 | if (intr_tx) { |
114 | intr_tx->callback = NULL; | 114 | intr_tx->callback = NULL; |
115 | intr_tx->callback_param = NULL; | 115 | intr_tx->callback_param = NULL; |
116 | tx->parent = intr_tx; | 116 | tx->parent = intr_tx; |
117 | /* safe to set ->next outside the lock since we know we are | 117 | /* safe to set ->next outside the lock since we know we are |
118 | * not submitted yet | 118 | * not submitted yet |
119 | */ | 119 | */ |
120 | intr_tx->next = tx; | 120 | intr_tx->next = tx; |
121 | 121 | ||
122 | /* check if we need to append */ | 122 | /* check if we need to append */ |
123 | spin_lock_bh(&depend_tx->lock); | 123 | spin_lock_bh(&depend_tx->lock); |
124 | if (depend_tx->parent) { | 124 | if (depend_tx->parent) { |
125 | intr_tx->parent = depend_tx; | 125 | intr_tx->parent = depend_tx; |
126 | depend_tx->next = intr_tx; | 126 | depend_tx->next = intr_tx; |
127 | async_tx_ack(intr_tx); | 127 | async_tx_ack(intr_tx); |
128 | intr_tx = NULL; | 128 | intr_tx = NULL; |
129 | } | 129 | } |
130 | spin_unlock_bh(&depend_tx->lock); | 130 | spin_unlock_bh(&depend_tx->lock); |
131 | 131 | ||
132 | if (intr_tx) { | 132 | if (intr_tx) { |
133 | intr_tx->parent = NULL; | 133 | intr_tx->parent = NULL; |
134 | intr_tx->tx_submit(intr_tx); | 134 | intr_tx->tx_submit(intr_tx); |
135 | async_tx_ack(intr_tx); | 135 | async_tx_ack(intr_tx); |
136 | } | 136 | } |
137 | } else { | 137 | } else { |
138 | if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR) | 138 | if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR) |
139 | panic("%s: DMA_ERROR waiting for depend_tx\n", | 139 | panic("%s: DMA_ERROR waiting for depend_tx\n", |
140 | __func__); | 140 | __func__); |
141 | tx->tx_submit(tx); | 141 | tx->tx_submit(tx); |
142 | } | 142 | } |
143 | } | 143 | } |
144 | 144 | ||
145 | 145 | ||
146 | /** | 146 | /** |
147 | * submit_disposition - while holding depend_tx->lock we must avoid submitting | 147 | * submit_disposition - while holding depend_tx->lock we must avoid submitting |
148 | * new operations to prevent a circular locking dependency with | 148 | * new operations to prevent a circular locking dependency with |
149 | * drivers that already hold a channel lock when calling | 149 | * drivers that already hold a channel lock when calling |
150 | * async_tx_run_dependencies. | 150 | * async_tx_run_dependencies. |
151 | * @ASYNC_TX_SUBMITTED: we were able to append the new operation under the lock | 151 | * @ASYNC_TX_SUBMITTED: we were able to append the new operation under the lock |
152 | * @ASYNC_TX_CHANNEL_SWITCH: when the lock is dropped schedule a channel switch | 152 | * @ASYNC_TX_CHANNEL_SWITCH: when the lock is dropped schedule a channel switch |
153 | * @ASYNC_TX_DIRECT_SUBMIT: when the lock is dropped submit directly | 153 | * @ASYNC_TX_DIRECT_SUBMIT: when the lock is dropped submit directly |
154 | */ | 154 | */ |
155 | enum submit_disposition { | 155 | enum submit_disposition { |
156 | ASYNC_TX_SUBMITTED, | 156 | ASYNC_TX_SUBMITTED, |
157 | ASYNC_TX_CHANNEL_SWITCH, | 157 | ASYNC_TX_CHANNEL_SWITCH, |
158 | ASYNC_TX_DIRECT_SUBMIT, | 158 | ASYNC_TX_DIRECT_SUBMIT, |
159 | }; | 159 | }; |
160 | 160 | ||
161 | void | 161 | void |
162 | async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx, | 162 | async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx, |
163 | enum async_tx_flags flags, struct dma_async_tx_descriptor *depend_tx, | 163 | enum async_tx_flags flags, struct dma_async_tx_descriptor *depend_tx, |
164 | dma_async_tx_callback cb_fn, void *cb_param) | 164 | dma_async_tx_callback cb_fn, void *cb_param) |
165 | { | 165 | { |
166 | tx->callback = cb_fn; | 166 | tx->callback = cb_fn; |
167 | tx->callback_param = cb_param; | 167 | tx->callback_param = cb_param; |
168 | 168 | ||
169 | if (depend_tx) { | 169 | if (depend_tx) { |
170 | enum submit_disposition s; | 170 | enum submit_disposition s; |
171 | 171 | ||
172 | /* sanity check the dependency chain: | 172 | /* sanity check the dependency chain: |
173 | * 1/ if ack is already set then we cannot be sure | 173 | * 1/ if ack is already set then we cannot be sure |
174 | * we are referring to the correct operation | 174 | * we are referring to the correct operation |
175 | * 2/ dependencies are 1:1 i.e. two transactions can | 175 | * 2/ dependencies are 1:1 i.e. two transactions can |
176 | * not depend on the same parent | 176 | * not depend on the same parent |
177 | */ | 177 | */ |
178 | BUG_ON(async_tx_test_ack(depend_tx) || depend_tx->next || | 178 | BUG_ON(async_tx_test_ack(depend_tx) || depend_tx->next || |
179 | tx->parent); | 179 | tx->parent); |
180 | 180 | ||
181 | /* the lock prevents async_tx_run_dependencies from missing | 181 | /* the lock prevents async_tx_run_dependencies from missing |
182 | * the setting of ->next when ->parent != NULL | 182 | * the setting of ->next when ->parent != NULL |
183 | */ | 183 | */ |
184 | spin_lock_bh(&depend_tx->lock); | 184 | spin_lock_bh(&depend_tx->lock); |
185 | if (depend_tx->parent) { | 185 | if (depend_tx->parent) { |
186 | /* we have a parent so we can not submit directly | 186 | /* we have a parent so we can not submit directly |
187 | * if we are staying on the same channel: append | 187 | * if we are staying on the same channel: append |
188 | * else: channel switch | 188 | * else: channel switch |
189 | */ | 189 | */ |
190 | if (depend_tx->chan == chan) { | 190 | if (depend_tx->chan == chan) { |
191 | tx->parent = depend_tx; | 191 | tx->parent = depend_tx; |
192 | depend_tx->next = tx; | 192 | depend_tx->next = tx; |
193 | s = ASYNC_TX_SUBMITTED; | 193 | s = ASYNC_TX_SUBMITTED; |
194 | } else | 194 | } else |
195 | s = ASYNC_TX_CHANNEL_SWITCH; | 195 | s = ASYNC_TX_CHANNEL_SWITCH; |
196 | } else { | 196 | } else { |
197 | /* we do not have a parent so we may be able to submit | 197 | /* we do not have a parent so we may be able to submit |
198 | * directly if we are staying on the same channel | 198 | * directly if we are staying on the same channel |
199 | */ | 199 | */ |
200 | if (depend_tx->chan == chan) | 200 | if (depend_tx->chan == chan) |
201 | s = ASYNC_TX_DIRECT_SUBMIT; | 201 | s = ASYNC_TX_DIRECT_SUBMIT; |
202 | else | 202 | else |
203 | s = ASYNC_TX_CHANNEL_SWITCH; | 203 | s = ASYNC_TX_CHANNEL_SWITCH; |
204 | } | 204 | } |
205 | spin_unlock_bh(&depend_tx->lock); | 205 | spin_unlock_bh(&depend_tx->lock); |
206 | 206 | ||
207 | switch (s) { | 207 | switch (s) { |
208 | case ASYNC_TX_SUBMITTED: | 208 | case ASYNC_TX_SUBMITTED: |
209 | break; | 209 | break; |
210 | case ASYNC_TX_CHANNEL_SWITCH: | 210 | case ASYNC_TX_CHANNEL_SWITCH: |
211 | async_tx_channel_switch(depend_tx, tx); | 211 | async_tx_channel_switch(depend_tx, tx); |
212 | break; | 212 | break; |
213 | case ASYNC_TX_DIRECT_SUBMIT: | 213 | case ASYNC_TX_DIRECT_SUBMIT: |
214 | tx->parent = NULL; | 214 | tx->parent = NULL; |
215 | tx->tx_submit(tx); | 215 | tx->tx_submit(tx); |
216 | break; | 216 | break; |
217 | } | 217 | } |
218 | } else { | 218 | } else { |
219 | tx->parent = NULL; | 219 | tx->parent = NULL; |
220 | tx->tx_submit(tx); | 220 | tx->tx_submit(tx); |
221 | } | 221 | } |
222 | 222 | ||
223 | if (flags & ASYNC_TX_ACK) | 223 | if (flags & ASYNC_TX_ACK) |
224 | async_tx_ack(tx); | 224 | async_tx_ack(tx); |
225 | 225 | ||
226 | if (depend_tx && (flags & ASYNC_TX_DEP_ACK)) | 226 | if (depend_tx && (flags & ASYNC_TX_DEP_ACK)) |
227 | async_tx_ack(depend_tx); | 227 | async_tx_ack(depend_tx); |
228 | } | 228 | } |
229 | EXPORT_SYMBOL_GPL(async_tx_submit); | 229 | EXPORT_SYMBOL_GPL(async_tx_submit); |
230 | 230 | ||
231 | /** | 231 | /** |
232 | * async_trigger_callback - schedules the callback function to be run after | 232 | * async_trigger_callback - schedules the callback function to be run after |
233 | * any dependent operations have been completed. | 233 | * any dependent operations have been completed. |
234 | * @flags: ASYNC_TX_ACK, ASYNC_TX_DEP_ACK | 234 | * @flags: ASYNC_TX_ACK, ASYNC_TX_DEP_ACK |
235 | * @depend_tx: 'callback' requires the completion of this transaction | 235 | * @depend_tx: 'callback' requires the completion of this transaction |
236 | * @cb_fn: function to call after depend_tx completes | 236 | * @cb_fn: function to call after depend_tx completes |
237 | * @cb_param: parameter to pass to the callback routine | 237 | * @cb_param: parameter to pass to the callback routine |
238 | */ | 238 | */ |
239 | struct dma_async_tx_descriptor * | 239 | struct dma_async_tx_descriptor * |
240 | async_trigger_callback(enum async_tx_flags flags, | 240 | async_trigger_callback(enum async_tx_flags flags, |
241 | struct dma_async_tx_descriptor *depend_tx, | 241 | struct dma_async_tx_descriptor *depend_tx, |
242 | dma_async_tx_callback cb_fn, void *cb_param) | 242 | dma_async_tx_callback cb_fn, void *cb_param) |
243 | { | 243 | { |
244 | struct dma_chan *chan; | 244 | struct dma_chan *chan; |
245 | struct dma_device *device; | 245 | struct dma_device *device; |
246 | struct dma_async_tx_descriptor *tx; | 246 | struct dma_async_tx_descriptor *tx; |
247 | 247 | ||
248 | if (depend_tx) { | 248 | if (depend_tx) { |
249 | chan = depend_tx->chan; | 249 | chan = depend_tx->chan; |
250 | device = chan->device; | 250 | device = chan->device; |
251 | 251 | ||
252 | /* see if we can schedule an interrupt | 252 | /* see if we can schedule an interrupt |
253 | * otherwise poll for completion | 253 | * otherwise poll for completion |
254 | */ | 254 | */ |
255 | if (device && !dma_has_cap(DMA_INTERRUPT, device->cap_mask)) | 255 | if (device && !dma_has_cap(DMA_INTERRUPT, device->cap_mask)) |
256 | device = NULL; | 256 | device = NULL; |
257 | 257 | ||
258 | tx = device ? device->device_prep_dma_interrupt(chan, 0) : NULL; | 258 | tx = device ? device->device_prep_dma_interrupt(chan, 0) : NULL; |
259 | } else | 259 | } else |
260 | tx = NULL; | 260 | tx = NULL; |
261 | 261 | ||
262 | if (tx) { | 262 | if (tx) { |
263 | pr_debug("%s: (async)\n", __func__); | 263 | pr_debug("%s: (async)\n", __func__); |
264 | 264 | ||
265 | async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param); | 265 | async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param); |
266 | } else { | 266 | } else { |
267 | pr_debug("%s: (sync)\n", __func__); | 267 | pr_debug("%s: (sync)\n", __func__); |
268 | 268 | ||
269 | /* wait for any prerequisite operations */ | 269 | /* wait for any prerequisite operations */ |
270 | async_tx_quiesce(&depend_tx); | 270 | async_tx_quiesce(&depend_tx); |
271 | 271 | ||
272 | async_tx_sync_epilog(cb_fn, cb_param); | 272 | async_tx_sync_epilog(cb_fn, cb_param); |
273 | } | 273 | } |
274 | 274 | ||
275 | return tx; | 275 | return tx; |
276 | } | 276 | } |
277 | EXPORT_SYMBOL_GPL(async_trigger_callback); | 277 | EXPORT_SYMBOL_GPL(async_trigger_callback); |
278 | 278 | ||
279 | /** | 279 | /** |
280 | * async_tx_quiesce - ensure tx is complete and freeable upon return | 280 | * async_tx_quiesce - ensure tx is complete and freeable upon return |
281 | * @tx - transaction to quiesce | 281 | * @tx - transaction to quiesce |
282 | */ | 282 | */ |
283 | void async_tx_quiesce(struct dma_async_tx_descriptor **tx) | 283 | void async_tx_quiesce(struct dma_async_tx_descriptor **tx) |
284 | { | 284 | { |
285 | if (*tx) { | 285 | if (*tx) { |
286 | /* if ack is already set then we cannot be sure | 286 | /* if ack is already set then we cannot be sure |
287 | * we are referring to the correct operation | 287 | * we are referring to the correct operation |
288 | */ | 288 | */ |
289 | BUG_ON(async_tx_test_ack(*tx)); | 289 | BUG_ON(async_tx_test_ack(*tx)); |
290 | if (dma_wait_for_async_tx(*tx) == DMA_ERROR) | 290 | if (dma_wait_for_async_tx(*tx) == DMA_ERROR) |
291 | panic("DMA_ERROR waiting for transaction\n"); | 291 | panic("DMA_ERROR waiting for transaction\n"); |
292 | async_tx_ack(*tx); | 292 | async_tx_ack(*tx); |
293 | *tx = NULL; | 293 | *tx = NULL; |
294 | } | 294 | } |
295 | } | 295 | } |
296 | EXPORT_SYMBOL_GPL(async_tx_quiesce); | 296 | EXPORT_SYMBOL_GPL(async_tx_quiesce); |
297 | 297 | ||
298 | module_init(async_tx_init); | 298 | module_init(async_tx_init); |
299 | module_exit(async_tx_exit); | 299 | module_exit(async_tx_exit); |
300 | 300 | ||
301 | MODULE_AUTHOR("Intel Corporation"); | 301 | MODULE_AUTHOR("Intel Corporation"); |
302 | MODULE_DESCRIPTION("Asynchronous Bulk Memory Transactions API"); | 302 | MODULE_DESCRIPTION("Asynchronous Bulk Memory Transactions API"); |
303 | MODULE_LICENSE("GPL"); | 303 | MODULE_LICENSE("GPL"); |
304 | 304 |
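For context, a hypothetical caller of the API this file exports might look like the sketch below. It relies only on async_trigger_callback() and the ASYNC_TX_* flags documented above; the sketch_* names and the completion-based wait are illustrative, not taken from the kernel source:

    #include <linux/async_tx.h>
    #include <linux/completion.h>

    /* sketch_done/sketch_wait_for: hypothetical names, not from the kernel */
    static void sketch_done(void *param)
    {
    	complete(param);
    }

    /* @done must have been initialized with init_completion() by the caller */
    static void sketch_wait_for(struct dma_async_tx_descriptor *depend_tx,
    			    struct completion *done)
    {
    	/* returns NULL when the callback already ran synchronously */
    	async_trigger_callback(ASYNC_TX_ACK | ASYNC_TX_DEP_ACK,
    			       depend_tx, sketch_done, done);

    	/* signalled either by the sync epilog above or by the engine's
    	 * interrupt descriptor once depend_tx completes
    	 */
    	wait_for_completion(done);
    }

In a sync-only build (or when no DMA_INTERRUPT-capable channel is found) the callback has already run by the time async_trigger_callback() returns, so the wait completes immediately.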
drivers/dma/Kconfig
1 | # | 1 | # |
2 | # DMA engine configuration | 2 | # DMA engine configuration |
3 | # | 3 | # |
4 | 4 | ||
5 | menuconfig DMADEVICES | 5 | menuconfig DMADEVICES |
6 | bool "DMA Engine support" | 6 | bool "DMA Engine support" |
7 | depends on !HIGHMEM64G && HAS_DMA | 7 | depends on !HIGHMEM64G && HAS_DMA |
8 | help | 8 | help |
9 | DMA engines can do asynchronous data transfers without | 9 | DMA engines can do asynchronous data transfers without |
10 | involving the host CPU. Currently, this framework can be | 10 | involving the host CPU. Currently, this framework can be |
11 | used to offload memory copies in the network stack and | 11 | used to offload memory copies in the network stack and |
12 | RAID operations in the MD driver. This menu only presents | 12 | RAID operations in the MD driver. This menu only presents |
13 | DMA Device drivers supported by the configured arch, it may | 13 | DMA Device drivers supported by the configured arch, it may |
14 | be empty in some cases. | 14 | be empty in some cases. |
15 | 15 | ||
16 | if DMADEVICES | 16 | if DMADEVICES |
17 | 17 | ||
18 | comment "DMA Devices" | 18 | comment "DMA Devices" |
19 | 19 | ||
20 | config INTEL_IOATDMA | 20 | config INTEL_IOATDMA |
21 | tristate "Intel I/OAT DMA support" | 21 | tristate "Intel I/OAT DMA support" |
22 | depends on PCI && X86 | 22 | depends on PCI && X86 |
23 | select DMA_ENGINE | 23 | select DMA_ENGINE |
24 | select DCA | 24 | select DCA |
25 | help | 25 | help |
26 | Enable support for the Intel(R) I/OAT DMA engine present | 26 | Enable support for the Intel(R) I/OAT DMA engine present |
27 | in recent Intel Xeon chipsets. | 27 | in recent Intel Xeon chipsets. |
28 | 28 | ||
29 | Say Y here if you have such a chipset. | 29 | Say Y here if you have such a chipset. |
30 | 30 | ||
31 | If unsure, say N. | 31 | If unsure, say N. |
32 | 32 | ||
33 | config INTEL_IOP_ADMA | 33 | config INTEL_IOP_ADMA |
34 | tristate "Intel IOP ADMA support" | 34 | tristate "Intel IOP ADMA support" |
35 | depends on ARCH_IOP32X || ARCH_IOP33X || ARCH_IOP13XX | 35 | depends on ARCH_IOP32X || ARCH_IOP33X || ARCH_IOP13XX |
36 | select DMA_ENGINE | 36 | select DMA_ENGINE |
37 | help | 37 | help |
38 | Enable support for the Intel(R) IOP Series RAID engines. | 38 | Enable support for the Intel(R) IOP Series RAID engines. |
39 | 39 | ||
40 | config DW_DMAC | 40 | config DW_DMAC |
41 | tristate "Synopsys DesignWare AHB DMA support" | 41 | tristate "Synopsys DesignWare AHB DMA support" |
42 | depends on AVR32 | 42 | depends on AVR32 |
43 | select DMA_ENGINE | 43 | select DMA_ENGINE |
44 | default y if CPU_AT32AP7000 | 44 | default y if CPU_AT32AP7000 |
45 | help | 45 | help |
46 | Support the Synopsys DesignWare AHB DMA controller. This | 46 | Support the Synopsys DesignWare AHB DMA controller. This |
47 | can be integrated in chips such as the Atmel AT32ap7000. | 47 | can be integrated in chips such as the Atmel AT32ap7000. |
48 | 48 | ||
49 | config FSL_DMA | 49 | config FSL_DMA |
50 | tristate "Freescale Elo and Elo Plus DMA support" | 50 | tristate "Freescale Elo and Elo Plus DMA support" |
51 | depends on FSL_SOC | 51 | depends on FSL_SOC |
52 | select DMA_ENGINE | 52 | select DMA_ENGINE |
53 | ---help--- | 53 | ---help--- |
54 | Enable support for the Freescale Elo and Elo Plus DMA controllers. | 54 | Enable support for the Freescale Elo and Elo Plus DMA controllers. |
55 | The Elo is the DMA controller on some 82xx and 83xx parts, and the | 55 | The Elo is the DMA controller on some 82xx and 83xx parts, and the |
56 | Elo Plus is the DMA controller on 85xx and 86xx parts. | 56 | Elo Plus is the DMA controller on 85xx and 86xx parts. |
57 | 57 | ||
58 | config MV_XOR | 58 | config MV_XOR |
59 | bool "Marvell XOR engine support" | 59 | bool "Marvell XOR engine support" |
60 | depends on PLAT_ORION | 60 | depends on PLAT_ORION |
61 | select DMA_ENGINE | 61 | select DMA_ENGINE |
62 | ---help--- | 62 | ---help--- |
63 | Enable support for the Marvell XOR engine. | 63 | Enable support for the Marvell XOR engine. |
64 | 64 | ||
65 | config MX3_IPU | 65 | config MX3_IPU |
66 | bool "MX3x Image Processing Unit support" | 66 | bool "MX3x Image Processing Unit support" |
67 | depends on ARCH_MX3 | 67 | depends on ARCH_MX3 |
68 | select DMA_ENGINE | 68 | select DMA_ENGINE |
69 | default y | 69 | default y |
70 | help | 70 | help |
71 | If you plan to use the Image Processing unit in the i.MX3x, say | 71 | If you plan to use the Image Processing unit in the i.MX3x, say |
72 | Y here. If unsure, select Y. | 72 | Y here. If unsure, select Y. |
73 | 73 | ||
74 | config MX3_IPU_IRQS | 74 | config MX3_IPU_IRQS |
75 | int "Number of dynamically mapped interrupts for IPU" | 75 | int "Number of dynamically mapped interrupts for IPU" |
76 | depends on MX3_IPU | 76 | depends on MX3_IPU |
77 | range 2 137 | 77 | range 2 137 |
78 | default 4 | 78 | default 4 |
79 | help | 79 | help |
80 | Out of 137 interrupt sources on i.MX31 IPU only very few are used. | 80 | Out of 137 interrupt sources on i.MX31 IPU only very few are used. |
81 | To avoid bloating the irq_desc[] array we allocate a sufficient | 81 | To avoid bloating the irq_desc[] array we allocate a sufficient |
82 | number of IRQ slots and map them dynamically to specific sources. | 82 | number of IRQ slots and map them dynamically to specific sources. |
83 | 83 | ||
84 | config DMA_ENGINE | 84 | config DMA_ENGINE |
85 | bool | 85 | bool |
86 | 86 | ||
87 | comment "DMA Clients" | 87 | comment "DMA Clients" |
88 | depends on DMA_ENGINE | 88 | depends on DMA_ENGINE |
89 | 89 | ||
90 | config NET_DMA | 90 | config NET_DMA |
91 | bool "Network: TCP receive copy offload" | 91 | bool "Network: TCP receive copy offload" |
92 | depends on DMA_ENGINE && NET | 92 | depends on DMA_ENGINE && NET |
93 | default (INTEL_IOATDMA || FSL_DMA) | 93 | default (INTEL_IOATDMA || FSL_DMA) |
94 | help | 94 | help |
95 | This enables the use of DMA engines in the network stack to | 95 | This enables the use of DMA engines in the network stack to |
96 | offload receive copy-to-user operations, freeing CPU cycles. | 96 | offload receive copy-to-user operations, freeing CPU cycles. |
97 | 97 | ||
98 | Say Y here if you enabled INTEL_IOATDMA or FSL_DMA, otherwise | 98 | Say Y here if you enabled INTEL_IOATDMA or FSL_DMA, otherwise |
99 | say N. | 99 | say N. |
100 | 100 | ||
101 | config ASYNC_TX_DMA | ||
102 | bool "Async_tx: Offload support for the async_tx api" | ||
103 | depends on DMA_ENGINE | ||
104 | help | ||
105 | This allows the async_tx api to take advantage of offload engines for | ||
106 | memcpy, memset, xor, and raid6 p+q operations. If your platform has | ||
107 | a dma engine that can perform raid operations and you have enabled | ||
108 | MD_RAID456 say Y. | ||
109 | |||
110 | If unsure, say N. | ||
111 | |||
101 | config DMATEST | 112 | config DMATEST |
102 | tristate "DMA Test client" | 113 | tristate "DMA Test client" |
103 | depends on DMA_ENGINE | 114 | depends on DMA_ENGINE |
104 | help | 115 | help |
105 | Simple DMA test client. Say N unless you're debugging a | 116 | Simple DMA test client. Say N unless you're debugging a |
106 | DMA Device driver. | 117 | DMA Device driver. |
107 | 118 | ||
108 | endif | 119 | endif |
109 | 120 |
include/linux/dmaengine.h
1 | /* | 1 | /* |
2 | * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved. | 2 | * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved. |
3 | * | 3 | * |
4 | * This program is free software; you can redistribute it and/or modify it | 4 | * This program is free software; you can redistribute it and/or modify it |
5 | * under the terms of the GNU General Public License as published by the Free | 5 | * under the terms of the GNU General Public License as published by the Free |
6 | * Software Foundation; either version 2 of the License, or (at your option) | 6 | * Software Foundation; either version 2 of the License, or (at your option) |
7 | * any later version. | 7 | * any later version. |
8 | * | 8 | * |
9 | * This program is distributed in the hope that it will be useful, but WITHOUT | 9 | * This program is distributed in the hope that it will be useful, but WITHOUT |
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for |
12 | * more details. | 12 | * more details. |
13 | * | 13 | * |
14 | * You should have received a copy of the GNU General Public License along with | 14 | * You should have received a copy of the GNU General Public License along with |
15 | * this program; if not, write to the Free Software Foundation, Inc., 59 | 15 | * this program; if not, write to the Free Software Foundation, Inc., 59 |
16 | * Temple Place - Suite 330, Boston, MA 02111-1307, USA. | 16 | * Temple Place - Suite 330, Boston, MA 02111-1307, USA. |
17 | * | 17 | * |
18 | * The full GNU General Public License is included in this distribution in the | 18 | * The full GNU General Public License is included in this distribution in the |
19 | * file called COPYING. | 19 | * file called COPYING. |
20 | */ | 20 | */ |
21 | #ifndef DMAENGINE_H | 21 | #ifndef DMAENGINE_H |
22 | #define DMAENGINE_H | 22 | #define DMAENGINE_H |
23 | 23 | ||
24 | #include <linux/device.h> | 24 | #include <linux/device.h> |
25 | #include <linux/uio.h> | 25 | #include <linux/uio.h> |
26 | #include <linux/dma-mapping.h> | 26 | #include <linux/dma-mapping.h> |
27 | 27 | ||
28 | /** | 28 | /** |
29 | * typedef dma_cookie_t - an opaque DMA cookie | 29 | * typedef dma_cookie_t - an opaque DMA cookie |
30 | * | 30 | * |
31 | * if dma_cookie_t is >0 it's a DMA request cookie, <0 it's an error code | 31 | * if dma_cookie_t is >0 it's a DMA request cookie, <0 it's an error code |
32 | */ | 32 | */ |
33 | typedef s32 dma_cookie_t; | 33 | typedef s32 dma_cookie_t; |
34 | 34 | ||
35 | #define dma_submit_error(cookie) ((cookie) < 0 ? 1 : 0) | 35 | #define dma_submit_error(cookie) ((cookie) < 0 ? 1 : 0) |
36 | 36 | ||
37 | /** | 37 | /** |
38 | * enum dma_status - DMA transaction status | 38 | * enum dma_status - DMA transaction status |
39 | * @DMA_SUCCESS: transaction completed successfully | 39 | * @DMA_SUCCESS: transaction completed successfully |
40 | * @DMA_IN_PROGRESS: transaction not yet processed | 40 | * @DMA_IN_PROGRESS: transaction not yet processed |
41 | * @DMA_ERROR: transaction failed | 41 | * @DMA_ERROR: transaction failed |
42 | */ | 42 | */ |
43 | enum dma_status { | 43 | enum dma_status { |
44 | DMA_SUCCESS, | 44 | DMA_SUCCESS, |
45 | DMA_IN_PROGRESS, | 45 | DMA_IN_PROGRESS, |
46 | DMA_ERROR, | 46 | DMA_ERROR, |
47 | }; | 47 | }; |
48 | 48 | ||
49 | /** | 49 | /** |
50 | * enum dma_transaction_type - DMA transaction types/indexes | 50 | * enum dma_transaction_type - DMA transaction types/indexes |
51 | */ | 51 | */ |
52 | enum dma_transaction_type { | 52 | enum dma_transaction_type { |
53 | DMA_MEMCPY, | 53 | DMA_MEMCPY, |
54 | DMA_XOR, | 54 | DMA_XOR, |
55 | DMA_PQ_XOR, | 55 | DMA_PQ_XOR, |
56 | DMA_DUAL_XOR, | 56 | DMA_DUAL_XOR, |
57 | DMA_PQ_UPDATE, | 57 | DMA_PQ_UPDATE, |
58 | DMA_ZERO_SUM, | 58 | DMA_ZERO_SUM, |
59 | DMA_PQ_ZERO_SUM, | 59 | DMA_PQ_ZERO_SUM, |
60 | DMA_MEMSET, | 60 | DMA_MEMSET, |
61 | DMA_MEMCPY_CRC32C, | 61 | DMA_MEMCPY_CRC32C, |
62 | DMA_INTERRUPT, | 62 | DMA_INTERRUPT, |
63 | DMA_PRIVATE, | 63 | DMA_PRIVATE, |
64 | DMA_SLAVE, | 64 | DMA_SLAVE, |
65 | }; | 65 | }; |
66 | 66 | ||
67 | /* last transaction type for creation of the capabilities mask */ | 67 | /* last transaction type for creation of the capabilities mask */ |
68 | #define DMA_TX_TYPE_END (DMA_SLAVE + 1) | 68 | #define DMA_TX_TYPE_END (DMA_SLAVE + 1) |
69 | 69 | ||
70 | 70 | ||
71 | /** | 71 | /** |
72 | * enum dma_ctrl_flags - DMA flags to augment operation preparation, | 72 | * enum dma_ctrl_flags - DMA flags to augment operation preparation, |
73 | * control completion, and communicate status. | 73 | * control completion, and communicate status. |
74 | * @DMA_PREP_INTERRUPT - trigger an interrupt (callback) upon completion of | 74 | * @DMA_PREP_INTERRUPT - trigger an interrupt (callback) upon completion of |
75 | * this transaction | 75 | * this transaction |
76 | * @DMA_CTRL_ACK - the descriptor cannot be reused until the client | 76 | * @DMA_CTRL_ACK - the descriptor cannot be reused until the client |
77 | * acknowledges receipt, i.e. has has a chance to establish any | 77 | * acknowledges receipt, i.e. has has a chance to establish any |
78 | * dependency chains | 78 | * dependency chains |
79 | * @DMA_COMPL_SKIP_SRC_UNMAP - set to disable dma-unmapping the source buffer(s) | 79 | * @DMA_COMPL_SKIP_SRC_UNMAP - set to disable dma-unmapping the source buffer(s) |
80 | * @DMA_COMPL_SKIP_DEST_UNMAP - set to disable dma-unmapping the destination(s) | 80 | * @DMA_COMPL_SKIP_DEST_UNMAP - set to disable dma-unmapping the destination(s) |
81 | */ | 81 | */ |
82 | enum dma_ctrl_flags { | 82 | enum dma_ctrl_flags { |
83 | DMA_PREP_INTERRUPT = (1 << 0), | 83 | DMA_PREP_INTERRUPT = (1 << 0), |
84 | DMA_CTRL_ACK = (1 << 1), | 84 | DMA_CTRL_ACK = (1 << 1), |
85 | DMA_COMPL_SKIP_SRC_UNMAP = (1 << 2), | 85 | DMA_COMPL_SKIP_SRC_UNMAP = (1 << 2), |
86 | DMA_COMPL_SKIP_DEST_UNMAP = (1 << 3), | 86 | DMA_COMPL_SKIP_DEST_UNMAP = (1 << 3), |
87 | }; | 87 | }; |
88 | 88 | ||
89 | /** | 89 | /** |
90 | * dma_cap_mask_t - capabilities bitmap modeled after cpumask_t. | 90 | * dma_cap_mask_t - capabilities bitmap modeled after cpumask_t. |
91 | * See linux/cpumask.h | 91 | * See linux/cpumask.h |
92 | */ | 92 | */ |
93 | typedef struct { DECLARE_BITMAP(bits, DMA_TX_TYPE_END); } dma_cap_mask_t; | 93 | typedef struct { DECLARE_BITMAP(bits, DMA_TX_TYPE_END); } dma_cap_mask_t; |
94 | 94 | ||
95 | /** | 95 | /** |
96 | * struct dma_chan_percpu - the per-CPU part of struct dma_chan | 96 | * struct dma_chan_percpu - the per-CPU part of struct dma_chan |
97 | * @memcpy_count: transaction counter | 97 | * @memcpy_count: transaction counter |
98 | * @bytes_transferred: byte counter | 98 | * @bytes_transferred: byte counter |
99 | */ | 99 | */ |
100 | 100 | ||
101 | struct dma_chan_percpu { | 101 | struct dma_chan_percpu { |
102 | /* stats */ | 102 | /* stats */ |
103 | unsigned long memcpy_count; | 103 | unsigned long memcpy_count; |
104 | unsigned long bytes_transferred; | 104 | unsigned long bytes_transferred; |
105 | }; | 105 | }; |
106 | 106 | ||
107 | /** | 107 | /** |
108 | * struct dma_chan - devices supply DMA channels, clients use them | 108 | * struct dma_chan - devices supply DMA channels, clients use them |
109 | * @device: ptr to the dma device who supplies this channel, always !%NULL | 109 | * @device: ptr to the dma device who supplies this channel, always !%NULL |
110 | * @cookie: last cookie value returned to client | 110 | * @cookie: last cookie value returned to client |
111 | * @chan_id: channel ID for sysfs | 111 | * @chan_id: channel ID for sysfs |
112 | * @dev: class device for sysfs | 112 | * @dev: class device for sysfs |
113 | * @device_node: used to add this to the device chan list | 113 | * @device_node: used to add this to the device chan list |
114 | * @local: per-cpu pointer to a struct dma_chan_percpu | 114 | * @local: per-cpu pointer to a struct dma_chan_percpu |
115 | * @client-count: how many clients are using this channel | 115 | * @client-count: how many clients are using this channel |
116 | * @table_count: number of appearances in the mem-to-mem allocation table | 116 | * @table_count: number of appearances in the mem-to-mem allocation table |
117 | * @private: private data for certain client-channel associations | 117 | * @private: private data for certain client-channel associations |
118 | */ | 118 | */ |
119 | struct dma_chan { | 119 | struct dma_chan { |
120 | struct dma_device *device; | 120 | struct dma_device *device; |
121 | dma_cookie_t cookie; | 121 | dma_cookie_t cookie; |
122 | 122 | ||
123 | /* sysfs */ | 123 | /* sysfs */ |
124 | int chan_id; | 124 | int chan_id; |
125 | struct dma_chan_dev *dev; | 125 | struct dma_chan_dev *dev; |
126 | 126 | ||
127 | struct list_head device_node; | 127 | struct list_head device_node; |
128 | struct dma_chan_percpu *local; | 128 | struct dma_chan_percpu *local; |
129 | int client_count; | 129 | int client_count; |
130 | int table_count; | 130 | int table_count; |
131 | void *private; | 131 | void *private; |
132 | }; | 132 | }; |
133 | 133 | ||
134 | /** | 134 | /** |
135 | * struct dma_chan_dev - relate sysfs device node to backing channel device | 135 | * struct dma_chan_dev - relate sysfs device node to backing channel device |
136 | * @chan - driver channel device | 136 | * @chan - driver channel device |
137 | * @device - sysfs device | 137 | * @device - sysfs device |
138 | * @dev_id - parent dma_device dev_id | 138 | * @dev_id - parent dma_device dev_id |
139 | * @idr_ref - reference count to gate release of dma_device dev_id | 139 | * @idr_ref - reference count to gate release of dma_device dev_id |
140 | */ | 140 | */ |
141 | struct dma_chan_dev { | 141 | struct dma_chan_dev { |
142 | struct dma_chan *chan; | 142 | struct dma_chan *chan; |
143 | struct device device; | 143 | struct device device; |
144 | int dev_id; | 144 | int dev_id; |
145 | atomic_t *idr_ref; | 145 | atomic_t *idr_ref; |
146 | }; | 146 | }; |
147 | 147 | ||
148 | static inline const char *dma_chan_name(struct dma_chan *chan) | 148 | static inline const char *dma_chan_name(struct dma_chan *chan) |
149 | { | 149 | { |
150 | return dev_name(&chan->dev->device); | 150 | return dev_name(&chan->dev->device); |
151 | } | 151 | } |
152 | 152 | ||
153 | void dma_chan_cleanup(struct kref *kref); | 153 | void dma_chan_cleanup(struct kref *kref); |
154 | 154 | ||
155 | /** | 155 | /** |
156 | * typedef dma_filter_fn - callback filter for dma_request_channel | 156 | * typedef dma_filter_fn - callback filter for dma_request_channel |
157 | * @chan: channel to be reviewed | 157 | * @chan: channel to be reviewed |
158 | * @filter_param: opaque parameter passed through dma_request_channel | 158 | * @filter_param: opaque parameter passed through dma_request_channel |
159 | * | 159 | * |
160 | * When this optional parameter is specified in a call to dma_request_channel a | 160 | * When this optional parameter is specified in a call to dma_request_channel a |
161 | * suitable channel is passed to this routine for further dispositioning before | 161 | * suitable channel is passed to this routine for further dispositioning before |
162 | * being returned. Where 'suitable' indicates a non-busy channel that | 162 | * being returned. Where 'suitable' indicates a non-busy channel that |
163 | * satisfies the given capability mask. It returns 'true' to indicate that the | 163 | * satisfies the given capability mask. It returns 'true' to indicate that the |
164 | * channel is suitable. | 164 | * channel is suitable. |
165 | */ | 165 | */ |
166 | typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param); | 166 | typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param); |
167 | 167 | ||
168 | typedef void (*dma_async_tx_callback)(void *dma_async_param); | 168 | typedef void (*dma_async_tx_callback)(void *dma_async_param); |
169 | /** | 169 | /** |
170 | * struct dma_async_tx_descriptor - async transaction descriptor | 170 | * struct dma_async_tx_descriptor - async transaction descriptor |
171 | * ---dma generic offload fields--- | 171 | * ---dma generic offload fields--- |
172 | * @cookie: tracking cookie for this transaction, set to -EBUSY if | 172 | * @cookie: tracking cookie for this transaction, set to -EBUSY if |
173 | * this tx is sitting on a dependency list | 173 | * this tx is sitting on a dependency list |
174 | * @flags: flags to augment operation preparation, control completion, and | 174 | * @flags: flags to augment operation preparation, control completion, and |
175 | * communicate status | 175 | * communicate status |
176 | * @phys: physical address of the descriptor | 176 | * @phys: physical address of the descriptor |
177 | * @tx_list: driver common field for operations that require multiple | 177 | * @tx_list: driver common field for operations that require multiple |
178 | * descriptors | 178 | * descriptors |
179 | * @chan: target channel for this operation | 179 | * @chan: target channel for this operation |
180 | * @tx_submit: set the prepared descriptor(s) to be executed by the engine | 180 | * @tx_submit: set the prepared descriptor(s) to be executed by the engine |
181 | * @callback: routine to call after this operation is complete | 181 | * @callback: routine to call after this operation is complete |
182 | * @callback_param: general parameter to pass to the callback routine | 182 | * @callback_param: general parameter to pass to the callback routine |
183 | * ---async_tx api specific fields--- | 183 | * ---async_tx api specific fields--- |
184 | * @next: at completion submit this descriptor | 184 | * @next: at completion submit this descriptor |
185 | * @parent: pointer to the next level up in the dependency chain | 185 | * @parent: pointer to the next level up in the dependency chain |
186 | * @lock: protect the parent and next pointers | 186 | * @lock: protect the parent and next pointers |
187 | */ | 187 | */ |
188 | struct dma_async_tx_descriptor { | 188 | struct dma_async_tx_descriptor { |
189 | dma_cookie_t cookie; | 189 | dma_cookie_t cookie; |
190 | enum dma_ctrl_flags flags; /* not a 'long' to pack with cookie */ | 190 | enum dma_ctrl_flags flags; /* not a 'long' to pack with cookie */ |
191 | dma_addr_t phys; | 191 | dma_addr_t phys; |
192 | struct list_head tx_list; | 192 | struct list_head tx_list; |
193 | struct dma_chan *chan; | 193 | struct dma_chan *chan; |
194 | dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx); | 194 | dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx); |
195 | dma_async_tx_callback callback; | 195 | dma_async_tx_callback callback; |
196 | void *callback_param; | 196 | void *callback_param; |
197 | struct dma_async_tx_descriptor *next; | 197 | struct dma_async_tx_descriptor *next; |
198 | struct dma_async_tx_descriptor *parent; | 198 | struct dma_async_tx_descriptor *parent; |
199 | spinlock_t lock; | 199 | spinlock_t lock; |
200 | }; | 200 | }; |
201 | 201 | ||
202 | /** | 202 | /** |
203 | * struct dma_device - info on the entity supplying DMA services | 203 | * struct dma_device - info on the entity supplying DMA services |
204 | * @chancnt: how many DMA channels are supported | 204 | * @chancnt: how many DMA channels are supported |
205 | * @channels: the list of struct dma_chan | 205 | * @channels: the list of struct dma_chan |
206 | * @global_node: list_head for global dma_device_list | 206 | * @global_node: list_head for global dma_device_list |
207 | * @cap_mask: one or more dma_capability flags | 207 | * @cap_mask: one or more dma_capability flags |
208 | * @max_xor: maximum number of xor sources, 0 if no capability | 208 | * @max_xor: maximum number of xor sources, 0 if no capability |
209 | * @dev_id: unique device ID | 209 | * @dev_id: unique device ID |
210 | * @dev: struct device reference for dma mapping api | 210 | * @dev: struct device reference for dma mapping api |
211 | * @device_alloc_chan_resources: allocate resources and return the | 211 | * @device_alloc_chan_resources: allocate resources and return the |
212 | * number of allocated descriptors | 212 | * number of allocated descriptors |
213 | * @device_free_chan_resources: release DMA channel's resources | 213 | * @device_free_chan_resources: release DMA channel's resources |
214 | * @device_prep_dma_memcpy: prepares a memcpy operation | 214 | * @device_prep_dma_memcpy: prepares a memcpy operation |
215 | * @device_prep_dma_xor: prepares a xor operation | 215 | * @device_prep_dma_xor: prepares a xor operation |
216 | * @device_prep_dma_zero_sum: prepares a zero_sum operation | 216 | * @device_prep_dma_zero_sum: prepares a zero_sum operation |
217 | * @device_prep_dma_memset: prepares a memset operation | 217 | * @device_prep_dma_memset: prepares a memset operation |
218 | * @device_prep_dma_interrupt: prepares an end of chain interrupt operation | 218 | * @device_prep_dma_interrupt: prepares an end of chain interrupt operation |
219 | * @device_prep_slave_sg: prepares a slave dma operation | 219 | * @device_prep_slave_sg: prepares a slave dma operation |
220 | * @device_terminate_all: terminate all pending operations | 220 | * @device_terminate_all: terminate all pending operations |
221 | * @device_is_tx_complete: poll for transaction completion | 221 | * @device_is_tx_complete: poll for transaction completion |
222 | * @device_issue_pending: push pending transactions to hardware | 222 | * @device_issue_pending: push pending transactions to hardware |
223 | */ | 223 | */ |
224 | struct dma_device { | 224 | struct dma_device { |
225 | 225 | ||
226 | unsigned int chancnt; | 226 | unsigned int chancnt; |
227 | struct list_head channels; | 227 | struct list_head channels; |
228 | struct list_head global_node; | 228 | struct list_head global_node; |
229 | dma_cap_mask_t cap_mask; | 229 | dma_cap_mask_t cap_mask; |
230 | int max_xor; | 230 | int max_xor; |
231 | 231 | ||
232 | int dev_id; | 232 | int dev_id; |
233 | struct device *dev; | 233 | struct device *dev; |
234 | 234 | ||
235 | int (*device_alloc_chan_resources)(struct dma_chan *chan); | 235 | int (*device_alloc_chan_resources)(struct dma_chan *chan); |
236 | void (*device_free_chan_resources)(struct dma_chan *chan); | 236 | void (*device_free_chan_resources)(struct dma_chan *chan); |
237 | 237 | ||
238 | struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)( | 238 | struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)( |
239 | struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, | 239 | struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, |
240 | size_t len, unsigned long flags); | 240 | size_t len, unsigned long flags); |
241 | struct dma_async_tx_descriptor *(*device_prep_dma_xor)( | 241 | struct dma_async_tx_descriptor *(*device_prep_dma_xor)( |
242 | struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src, | 242 | struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src, |
243 | unsigned int src_cnt, size_t len, unsigned long flags); | 243 | unsigned int src_cnt, size_t len, unsigned long flags); |
244 | struct dma_async_tx_descriptor *(*device_prep_dma_zero_sum)( | 244 | struct dma_async_tx_descriptor *(*device_prep_dma_zero_sum)( |
245 | struct dma_chan *chan, dma_addr_t *src, unsigned int src_cnt, | 245 | struct dma_chan *chan, dma_addr_t *src, unsigned int src_cnt, |
246 | size_t len, u32 *result, unsigned long flags); | 246 | size_t len, u32 *result, unsigned long flags); |
247 | struct dma_async_tx_descriptor *(*device_prep_dma_memset)( | 247 | struct dma_async_tx_descriptor *(*device_prep_dma_memset)( |
248 | struct dma_chan *chan, dma_addr_t dest, int value, size_t len, | 248 | struct dma_chan *chan, dma_addr_t dest, int value, size_t len, |
249 | unsigned long flags); | 249 | unsigned long flags); |
250 | struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)( | 250 | struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)( |
251 | struct dma_chan *chan, unsigned long flags); | 251 | struct dma_chan *chan, unsigned long flags); |
252 | 252 | ||
253 | struct dma_async_tx_descriptor *(*device_prep_slave_sg)( | 253 | struct dma_async_tx_descriptor *(*device_prep_slave_sg)( |
254 | struct dma_chan *chan, struct scatterlist *sgl, | 254 | struct dma_chan *chan, struct scatterlist *sgl, |
255 | unsigned int sg_len, enum dma_data_direction direction, | 255 | unsigned int sg_len, enum dma_data_direction direction, |
256 | unsigned long flags); | 256 | unsigned long flags); |
257 | void (*device_terminate_all)(struct dma_chan *chan); | 257 | void (*device_terminate_all)(struct dma_chan *chan); |
258 | 258 | ||
259 | enum dma_status (*device_is_tx_complete)(struct dma_chan *chan, | 259 | enum dma_status (*device_is_tx_complete)(struct dma_chan *chan, |
260 | dma_cookie_t cookie, dma_cookie_t *last, | 260 | dma_cookie_t cookie, dma_cookie_t *last, |
261 | dma_cookie_t *used); | 261 | dma_cookie_t *used); |
262 | void (*device_issue_pending)(struct dma_chan *chan); | 262 | void (*device_issue_pending)(struct dma_chan *chan); |
263 | }; | 263 | }; |
264 | 264 | ||
265 | /* --- public DMA engine API --- */ | 265 | /* --- public DMA engine API --- */ |
266 | 266 | ||
267 | #ifdef CONFIG_DMA_ENGINE | 267 | #ifdef CONFIG_DMA_ENGINE |
268 | void dmaengine_get(void); | 268 | void dmaengine_get(void); |
269 | void dmaengine_put(void); | 269 | void dmaengine_put(void); |
270 | #else | 270 | #else |
271 | static inline void dmaengine_get(void) | 271 | static inline void dmaengine_get(void) |
272 | { | 272 | { |
273 | } | 273 | } |
274 | static inline void dmaengine_put(void) | 274 | static inline void dmaengine_put(void) |
275 | { | 275 | { |
276 | } | 276 | } |
277 | #endif | 277 | #endif |
278 | 278 | ||
279 | #ifdef CONFIG_NET_DMA | 279 | #ifdef CONFIG_NET_DMA |
280 | #define net_dmaengine_get() dmaengine_get() | 280 | #define net_dmaengine_get() dmaengine_get() |
281 | #define net_dmaengine_put() dmaengine_put() | 281 | #define net_dmaengine_put() dmaengine_put() |
282 | #else | 282 | #else |
283 | static inline void net_dmaengine_get(void) | 283 | static inline void net_dmaengine_get(void) |
284 | { | 284 | { |
285 | } | 285 | } |
286 | static inline void net_dmaengine_put(void) | 286 | static inline void net_dmaengine_put(void) |
287 | { | 287 | { |
288 | } | 288 | } |
289 | #endif | 289 | #endif |
290 | 290 | ||
291 | #ifdef CONFIG_ASYNC_TX_DMA | ||
292 | #define async_dmaengine_get() dmaengine_get() | ||
293 | #define async_dmaengine_put() dmaengine_put() | ||
294 | #define async_dma_find_channel(type) dma_find_channel(type) | ||
295 | #else | ||
296 | static inline void async_dmaengine_get(void) | ||
297 | { | ||
298 | } | ||
299 | static inline void async_dmaengine_put(void) | ||
300 | { | ||
301 | } | ||
302 | static inline struct dma_chan * | ||
303 | async_dma_find_channel(enum dma_transaction_type type) | ||
304 | { | ||
305 | return NULL; | ||
306 | } | ||
307 | #endif | ||
308 | |||
291 | dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan, | 309 | dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan, |
292 | void *dest, void *src, size_t len); | 310 | void *dest, void *src, size_t len); |
293 | dma_cookie_t dma_async_memcpy_buf_to_pg(struct dma_chan *chan, | 311 | dma_cookie_t dma_async_memcpy_buf_to_pg(struct dma_chan *chan, |
294 | struct page *page, unsigned int offset, void *kdata, size_t len); | 312 | struct page *page, unsigned int offset, void *kdata, size_t len); |
295 | dma_cookie_t dma_async_memcpy_pg_to_pg(struct dma_chan *chan, | 313 | dma_cookie_t dma_async_memcpy_pg_to_pg(struct dma_chan *chan, |
296 | struct page *dest_pg, unsigned int dest_off, struct page *src_pg, | 314 | struct page *dest_pg, unsigned int dest_off, struct page *src_pg, |
297 | unsigned int src_off, size_t len); | 315 | unsigned int src_off, size_t len); |
298 | void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx, | 316 | void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx, |
299 | struct dma_chan *chan); | 317 | struct dma_chan *chan); |
300 | 318 | ||
301 | static inline void async_tx_ack(struct dma_async_tx_descriptor *tx) | 319 | static inline void async_tx_ack(struct dma_async_tx_descriptor *tx) |
302 | { | 320 | { |
303 | tx->flags |= DMA_CTRL_ACK; | 321 | tx->flags |= DMA_CTRL_ACK; |
304 | } | 322 | } |
305 | 323 | ||
306 | static inline void async_tx_clear_ack(struct dma_async_tx_descriptor *tx) | 324 | static inline void async_tx_clear_ack(struct dma_async_tx_descriptor *tx) |
307 | { | 325 | { |
308 | tx->flags &= ~DMA_CTRL_ACK; | 326 | tx->flags &= ~DMA_CTRL_ACK; |
309 | } | 327 | } |
310 | 328 | ||
311 | static inline bool async_tx_test_ack(struct dma_async_tx_descriptor *tx) | 329 | static inline bool async_tx_test_ack(struct dma_async_tx_descriptor *tx) |
312 | { | 330 | { |
313 | return (tx->flags & DMA_CTRL_ACK) == DMA_CTRL_ACK; | 331 | return (tx->flags & DMA_CTRL_ACK) == DMA_CTRL_ACK; |
314 | } | 332 | } |
315 | 333 | ||
316 | #define first_dma_cap(mask) __first_dma_cap(&(mask)) | 334 | #define first_dma_cap(mask) __first_dma_cap(&(mask)) |
317 | static inline int __first_dma_cap(const dma_cap_mask_t *srcp) | 335 | static inline int __first_dma_cap(const dma_cap_mask_t *srcp) |
318 | { | 336 | { |
319 | return min_t(int, DMA_TX_TYPE_END, | 337 | return min_t(int, DMA_TX_TYPE_END, |
320 | find_first_bit(srcp->bits, DMA_TX_TYPE_END)); | 338 | find_first_bit(srcp->bits, DMA_TX_TYPE_END)); |
321 | } | 339 | } |
322 | 340 | ||
323 | #define next_dma_cap(n, mask) __next_dma_cap((n), &(mask)) | 341 | #define next_dma_cap(n, mask) __next_dma_cap((n), &(mask)) |
324 | static inline int __next_dma_cap(int n, const dma_cap_mask_t *srcp) | 342 | static inline int __next_dma_cap(int n, const dma_cap_mask_t *srcp) |
325 | { | 343 | { |
326 | return min_t(int, DMA_TX_TYPE_END, | 344 | return min_t(int, DMA_TX_TYPE_END, |
327 | find_next_bit(srcp->bits, DMA_TX_TYPE_END, n+1)); | 345 | find_next_bit(srcp->bits, DMA_TX_TYPE_END, n+1)); |
328 | } | 346 | } |
329 | 347 | ||
330 | #define dma_cap_set(tx, mask) __dma_cap_set((tx), &(mask)) | 348 | #define dma_cap_set(tx, mask) __dma_cap_set((tx), &(mask)) |
331 | static inline void | 349 | static inline void |
332 | __dma_cap_set(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp) | 350 | __dma_cap_set(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp) |
333 | { | 351 | { |
334 | set_bit(tx_type, dstp->bits); | 352 | set_bit(tx_type, dstp->bits); |
335 | } | 353 | } |
336 | 354 | ||
337 | #define dma_cap_zero(mask) __dma_cap_zero(&(mask)) | 355 | #define dma_cap_zero(mask) __dma_cap_zero(&(mask)) |
338 | static inline void __dma_cap_zero(dma_cap_mask_t *dstp) | 356 | static inline void __dma_cap_zero(dma_cap_mask_t *dstp) |
339 | { | 357 | { |
340 | bitmap_zero(dstp->bits, DMA_TX_TYPE_END); | 358 | bitmap_zero(dstp->bits, DMA_TX_TYPE_END); |
341 | } | 359 | } |
342 | 360 | ||
343 | #define dma_has_cap(tx, mask) __dma_has_cap((tx), &(mask)) | 361 | #define dma_has_cap(tx, mask) __dma_has_cap((tx), &(mask)) |
344 | static inline int | 362 | static inline int |
345 | __dma_has_cap(enum dma_transaction_type tx_type, dma_cap_mask_t *srcp) | 363 | __dma_has_cap(enum dma_transaction_type tx_type, dma_cap_mask_t *srcp) |
346 | { | 364 | { |
347 | return test_bit(tx_type, srcp->bits); | 365 | return test_bit(tx_type, srcp->bits); |
348 | } | 366 | } |
349 | 367 | ||
350 | #define for_each_dma_cap_mask(cap, mask) \ | 368 | #define for_each_dma_cap_mask(cap, mask) \ |
351 | for ((cap) = first_dma_cap(mask); \ | 369 | for ((cap) = first_dma_cap(mask); \ |
352 | (cap) < DMA_TX_TYPE_END; \ | 370 | (cap) < DMA_TX_TYPE_END; \ |
353 | (cap) = next_dma_cap((cap), (mask))) | 371 | (cap) = next_dma_cap((cap), (mask))) |
354 | 372 | ||
355 | /** | 373 | /** |
356 | * dma_async_issue_pending - flush pending transactions to HW | 374 | * dma_async_issue_pending - flush pending transactions to HW |
357 | * @chan: target DMA channel | 375 | * @chan: target DMA channel |
358 | * | 376 | * |
359 | * This allows drivers to push copies to HW in batches, | 377 | * This allows drivers to push copies to HW in batches, |
360 | * reducing MMIO writes where possible. | 378 | * reducing MMIO writes where possible. |
361 | */ | 379 | */ |
362 | static inline void dma_async_issue_pending(struct dma_chan *chan) | 380 | static inline void dma_async_issue_pending(struct dma_chan *chan) |
363 | { | 381 | { |
364 | chan->device->device_issue_pending(chan); | 382 | chan->device->device_issue_pending(chan); |
365 | } | 383 | } |
366 | 384 | ||
367 | #define dma_async_memcpy_issue_pending(chan) dma_async_issue_pending(chan) | 385 | #define dma_async_memcpy_issue_pending(chan) dma_async_issue_pending(chan) |
368 | 386 | ||
369 | /** | 387 | /** |
370 | * dma_async_is_tx_complete - poll for transaction completion | 388 | * dma_async_is_tx_complete - poll for transaction completion |
371 | * @chan: DMA channel | 389 | * @chan: DMA channel |
372 | * @cookie: transaction identifier to check status of | 390 | * @cookie: transaction identifier to check status of |
373 | * @last: returns last completed cookie, can be NULL | 391 | * @last: returns last completed cookie, can be NULL |
374 | * @used: returns last issued cookie, can be NULL | 392 | * @used: returns last issued cookie, can be NULL |
375 | * | 393 | * |
376 | * If @last and @used are passed in, upon return they reflect the driver | 394 | * If @last and @used are passed in, upon return they reflect the driver |
377 | * internal state and can be used with dma_async_is_complete() to check | 395 | * internal state and can be used with dma_async_is_complete() to check |
378 | * the status of multiple cookies without re-checking hardware state. | 396 | * the status of multiple cookies without re-checking hardware state. |
379 | */ | 397 | */ |
380 | static inline enum dma_status dma_async_is_tx_complete(struct dma_chan *chan, | 398 | static inline enum dma_status dma_async_is_tx_complete(struct dma_chan *chan, |
381 | dma_cookie_t cookie, dma_cookie_t *last, dma_cookie_t *used) | 399 | dma_cookie_t cookie, dma_cookie_t *last, dma_cookie_t *used) |
382 | { | 400 | { |
383 | return chan->device->device_is_tx_complete(chan, cookie, last, used); | 401 | return chan->device->device_is_tx_complete(chan, cookie, last, used); |
384 | } | 402 | } |
385 | 403 | ||
386 | #define dma_async_memcpy_complete(chan, cookie, last, used)\ | 404 | #define dma_async_memcpy_complete(chan, cookie, last, used)\ |
387 | dma_async_is_tx_complete(chan, cookie, last, used) | 405 | dma_async_is_tx_complete(chan, cookie, last, used) |
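Since @last and @used may be NULL, a caller that only cares about one cookie can poll the driver directly. A rough sketch (assumed, not from this patch):

        static enum dma_status example_poll_one(struct dma_chan *chan,
                                                dma_cookie_t cookie)
        {
                enum dma_status status;

                /* keep asking the driver until the cookie leaves DMA_IN_PROGRESS */
                do {
                        status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
                        cpu_relax();
                } while (status == DMA_IN_PROGRESS);

                return status;  /* DMA_SUCCESS, or DMA_ERROR on failure */
        }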
388 | 406 | ||
389 | /** | 407 | /** |
390 | * dma_async_is_complete - test a cookie against chan state | 408 | * dma_async_is_complete - test a cookie against chan state |
391 | * @cookie: transaction identifier to test status of | 409 | * @cookie: transaction identifier to test status of |
392 | * @last_complete: last known completed transaction | 410 | * @last_complete: last known completed transaction
393 | * @last_used: last cookie value handed out | 411 | * @last_used: last cookie value handed out |
394 | * | 412 | * |
395 | * dma_async_is_complete() is used in dma_async_memcpy_complete(); | 413 | * dma_async_is_complete() is used in dma_async_memcpy_complete();
396 | * the test logic is separated for lightweight testing of multiple cookies. | 414 | * the test logic is separated for lightweight testing of multiple cookies.
397 | */ | 415 | */ |
398 | static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie, | 416 | static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie, |
399 | dma_cookie_t last_complete, dma_cookie_t last_used) | 417 | dma_cookie_t last_complete, dma_cookie_t last_used) |
400 | { | 418 | { |
401 | if (last_complete <= last_used) { | 419 | if (last_complete <= last_used) { |
402 | if ((cookie <= last_complete) || (cookie > last_used)) | 420 | if ((cookie <= last_complete) || (cookie > last_used)) |
403 | return DMA_SUCCESS; | 421 | return DMA_SUCCESS; |
404 | } else { | 422 | } else { |
405 | if ((cookie <= last_complete) && (cookie > last_used)) | 423 | if ((cookie <= last_complete) && (cookie > last_used)) |
406 | return DMA_SUCCESS; | 424 | return DMA_SUCCESS; |
407 | } | 425 | } |
408 | return DMA_IN_PROGRESS; | 426 | return DMA_IN_PROGRESS; |
409 | } | 427 | } |
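To illustrate the "multiple cookies without re-checking hardware state" point, a hypothetical helper might query the driver once and then test a second cookie purely against the cached last/used snapshot:

        static bool example_both_complete(struct dma_chan *chan,
                                          dma_cookie_t c1, dma_cookie_t c2)
        {
                dma_cookie_t last = 0, used = 0;

                /* one trip into the driver fills in last/used for the channel */
                if (dma_async_is_tx_complete(chan, c1, &last, &used) != DMA_SUCCESS)
                        return false;

                /* the second cookie is tested against the snapshot, no MMIO reads */
                return dma_async_is_complete(c2, last, used) == DMA_SUCCESS;
        }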
410 | 428 | ||
411 | enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie); | 429 | enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie); |
412 | #ifdef CONFIG_DMA_ENGINE | 430 | #ifdef CONFIG_DMA_ENGINE |
413 | enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx); | 431 | enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx); |
414 | void dma_issue_pending_all(void); | 432 | void dma_issue_pending_all(void); |
415 | #else | 433 | #else |
416 | static inline enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx) | 434 | static inline enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx) |
417 | { | 435 | { |
418 | return DMA_SUCCESS; | 436 | return DMA_SUCCESS; |
419 | } | 437 | } |
420 | static inline void dma_issue_pending_all(void) | 438 | static inline void dma_issue_pending_all(void) |
421 | { | 439 | { |
422 | do { } while (0); | 440 | do { } while (0); |
423 | } | 441 | } |
424 | #endif | 442 | #endif |
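As a sketch of how the declarations above combine in async_tx-style code (an assumption, not part of the patch): flush every registered channel, then wait until a dependency descriptor completes. Without CONFIG_DMA_ENGINE both calls collapse to the stubs above, so the caller needs no #ifdef of its own:

        static void example_quiesce(struct dma_async_tx_descriptor *tx)
        {
                /* no-op stub when CONFIG_DMA_ENGINE is not set */
                dma_issue_pending_all();

                /* stub returns DMA_SUCCESS immediately without a DMA engine */
                if (dma_wait_for_async_tx(tx) == DMA_ERROR)
                        printk(KERN_ERR "DMA error waiting for descriptor\n");
        }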
425 | 443 | ||
426 | /* --- DMA device --- */ | 444 | /* --- DMA device --- */ |
427 | 445 | ||
428 | int dma_async_device_register(struct dma_device *device); | 446 | int dma_async_device_register(struct dma_device *device); |
429 | void dma_async_device_unregister(struct dma_device *device); | 447 | void dma_async_device_unregister(struct dma_device *device); |
430 | void dma_run_dependencies(struct dma_async_tx_descriptor *tx); | 448 | void dma_run_dependencies(struct dma_async_tx_descriptor *tx); |
431 | struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type); | 449 | struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type); |
432 | #define dma_request_channel(mask, x, y) __dma_request_channel(&(mask), x, y) | 450 | #define dma_request_channel(mask, x, y) __dma_request_channel(&(mask), x, y) |
433 | struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, void *fn_param); | 451 | struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, void *fn_param); |
434 | void dma_release_channel(struct dma_chan *chan); | 452 | void dma_release_channel(struct dma_chan *chan); |
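Private channel allocation, sketched under the same assumptions: dma_request_channel() takes a capability mask plus an optional dma_filter_fn, and the channel is handed back with dma_release_channel() when the client is done. Passing a NULL filter simply accepts the first free channel that matches the mask:

        static struct dma_chan *example_grab_memcpy_chan(void)
        {
                dma_cap_mask_t mask;
                struct dma_chan *chan;

                dma_cap_zero(mask);
                dma_cap_set(DMA_MEMCPY, mask);

                /* NULL filter and parameter: no driver-specific selection */
                chan = dma_request_channel(mask, NULL, NULL);
                if (!chan)
                        printk(KERN_ERR "no memcpy-capable channel available\n");

                return chan;    /* caller releases it with dma_release_channel() */
        }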
435 | 453 | ||
436 | /* --- Helper iov-locking functions --- */ | 454 | /* --- Helper iov-locking functions --- */ |
437 | 455 | ||
438 | struct dma_page_list { | 456 | struct dma_page_list { |
439 | char __user *base_address; | 457 | char __user *base_address; |
440 | int nr_pages; | 458 | int nr_pages; |
441 | struct page **pages; | 459 | struct page **pages; |
442 | }; | 460 | }; |
443 | 461 | ||
444 | struct dma_pinned_list { | 462 | struct dma_pinned_list { |
445 | int nr_iovecs; | 463 | int nr_iovecs; |
446 | struct dma_page_list page_list[0]; | 464 | struct dma_page_list page_list[0]; |
447 | }; | 465 | }; |
448 | 466 | ||
449 | struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len); | 467 | struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len); |
450 | void dma_unpin_iovec_pages(struct dma_pinned_list* pinned_list); | 468 | void dma_unpin_iovec_pages(struct dma_pinned_list* pinned_list); |
451 | 469 | ||
452 | dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov, | 470 | dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov, |
453 | struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len); | 471 | struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len); |
454 | dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov, | 472 | dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov, |
455 | struct dma_pinned_list *pinned_list, struct page *page, | 473 | struct dma_pinned_list *pinned_list, struct page *page, |
456 | unsigned int offset, size_t len); | 474 | unsigned int offset, size_t len); |
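Finally, a rough sketch of the iov-locking helpers (error handling trimmed, names hypothetical): pin the user iovec once, issue the copy against the pinned pages, and only unpin after the transaction is known to be complete:

        static int example_copy_to_user_iovec(struct dma_chan *chan,
                                              struct iovec *iov,
                                              unsigned char *kdata, size_t len)
        {
                struct dma_pinned_list *pinned;
                dma_cookie_t cookie;

                pinned = dma_pin_iovec_pages(iov, len);
                if (!pinned)
                        return -ENOMEM; /* callers typically fall back to copy_to_user() */

                cookie = dma_memcpy_to_iovec(chan, iov, pinned, kdata, len);

                dma_async_memcpy_issue_pending(chan);
                dma_sync_wait(chan, cookie);    /* pages must stay pinned until done */

                dma_unpin_iovec_pages(pinned);
                return 0;
        }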
457 | 475 | ||
458 | #endif /* DMAENGINE_H */ | 476 | #endif /* DMAENGINE_H */ |
459 | 477 |