  /*
   * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
   *
   * This program is free software; you can redistribute it and/or modify it
   * under the terms of the GNU General Public License as published by the Free
   * Software Foundation; either version 2 of the License, or (at your option)
   * any later version.
   *
   * This program is distributed in the hope that it will be useful, but WITHOUT
   * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   * more details.
   *
   * You should have received a copy of the GNU General Public License along with
   * this program; if not, write to the Free Software Foundation, Inc., 59
   * Temple Place - Suite 330, Boston, MA  02111-1307, USA.
   *
   * The full GNU General Public License is included in this distribution in the
   * file called COPYING.
   */
  #ifndef DMAENGINE_H
  #define DMAENGINE_H

  #include <linux/device.h>
  #include <linux/uio.h>
  #include <linux/dma-mapping.h>
  
  /**
   * typedef dma_cookie_t - an opaque DMA cookie
   *
   * if dma_cookie_t is >0 it's a DMA request cookie, <0 it's an error code
   */
  typedef s32 dma_cookie_t;
  #define DMA_MIN_COOKIE	1
  #define DMA_MAX_COOKIE	INT_MAX
  
  #define dma_submit_error(cookie) ((cookie) < 0 ? 1 : 0)
  
  /**
   * enum dma_status - DMA transaction status
   * @DMA_SUCCESS: transaction completed successfully
   * @DMA_IN_PROGRESS: transaction not yet processed
   * @DMA_PAUSED: transaction is paused
   * @DMA_ERROR: transaction failed
   */
  enum dma_status {
  	DMA_SUCCESS,
  	DMA_IN_PROGRESS,
  	DMA_PAUSED,
  	DMA_ERROR,
  };
  
  /**
   * enum dma_transaction_type - DMA transaction types/indexes
   *
   * Note: The DMA_ASYNC_TX capability is not to be set by drivers.  It is
   * automatically set as dma devices are registered.
   */
  enum dma_transaction_type {
  	DMA_MEMCPY,
  	DMA_XOR,
  	DMA_PQ,
  	DMA_XOR_VAL,
  	DMA_PQ_VAL,
  	DMA_MEMSET,
  	DMA_INTERRUPT,
  	DMA_PRIVATE,
  	DMA_ASYNC_TX,
  	DMA_SLAVE,
  };
  
  /* last transaction type for creation of the capabilities mask */
  #define DMA_TX_TYPE_END (DMA_SLAVE + 1)
  
  /**
   * enum dma_ctrl_flags - DMA flags to augment operation preparation,
   *  control completion, and communicate status.
   * @DMA_PREP_INTERRUPT - trigger an interrupt (callback) upon completion of
   *  this transaction
   * @DMA_CTRL_ACK - if clear, the descriptor cannot be reused until the client
   *  acknowledges receipt, i.e. has had a chance to establish any dependency
   *  chains
   * @DMA_COMPL_SKIP_SRC_UNMAP - set to disable dma-unmapping the source buffer(s)
   * @DMA_COMPL_SKIP_DEST_UNMAP - set to disable dma-unmapping the destination(s)
   * @DMA_COMPL_SRC_UNMAP_SINGLE - set to do the source dma-unmapping as single
   * 	(if not set, do the source dma-unmapping as page)
   * @DMA_COMPL_DEST_UNMAP_SINGLE - set to do the destination dma-unmapping as single
   * 	(if not set, do the destination dma-unmapping as page)
   * @DMA_PREP_PQ_DISABLE_P - prevent generation of P while generating Q
   * @DMA_PREP_PQ_DISABLE_Q - prevent generation of Q while generating P
   * @DMA_PREP_CONTINUE - indicate to a driver that it is reusing buffers as
   *  sources that were the result of a previous operation, in the case of a PQ
   *  operation it continues the calculation with new sources
   * @DMA_PREP_FENCE - tell the driver that subsequent operations depend
   *  on the result of this operation
   */
  enum dma_ctrl_flags {
  	DMA_PREP_INTERRUPT = (1 << 0),
  	DMA_CTRL_ACK = (1 << 1),
  	DMA_COMPL_SKIP_SRC_UNMAP = (1 << 2),
  	DMA_COMPL_SKIP_DEST_UNMAP = (1 << 3),
  	DMA_COMPL_SRC_UNMAP_SINGLE = (1 << 4),
  	DMA_COMPL_DEST_UNMAP_SINGLE = (1 << 5),
  	DMA_PREP_PQ_DISABLE_P = (1 << 6),
  	DMA_PREP_PQ_DISABLE_Q = (1 << 7),
  	DMA_PREP_CONTINUE = (1 << 8),
  	DMA_PREP_FENCE = (1 << 9),
  };
  
  /**
   * enum dma_ctrl_cmd - DMA operations that can optionally be exercised
   * on a running channel.
   * @DMA_TERMINATE_ALL: terminate all ongoing transfers
   * @DMA_PAUSE: pause ongoing transfers
   * @DMA_RESUME: resume paused transfer
   * @DMA_SLAVE_CONFIG: this command is only implemented by DMA controllers
   * that need to reconfigure the slave channels at runtime (as opposed to
   * passing configuration data in statically from the platform). An additional
   * argument of struct dma_slave_config must be passed in with this
   * command.
   */
  enum dma_ctrl_cmd {
  	DMA_TERMINATE_ALL,
  	DMA_PAUSE,
  	DMA_RESUME,
  	DMA_SLAVE_CONFIG,
  };
  
  /**
   * enum sum_check_bits - bit position of sum_check_flags
   */
  enum sum_check_bits {
  	SUM_CHECK_P = 0,
  	SUM_CHECK_Q = 1,
  };
  
  /**
   * enum sum_check_flags - result of async_{xor,pq}_zero_sum operations
   * @SUM_CHECK_P_RESULT - 1 if xor zero sum error, 0 otherwise
   * @SUM_CHECK_Q_RESULT - 1 if reed-solomon zero sum error, 0 otherwise
   */
  enum sum_check_flags {
  	SUM_CHECK_P_RESULT = (1 << SUM_CHECK_P),
  	SUM_CHECK_Q_RESULT = (1 << SUM_CHECK_Q),
  };
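
  /*
   * Illustrative sketch (not part of the original header): interpreting the
   * result written back by a device_prep_dma_xor_val() descriptor once the
   * transaction has completed.  The helper name is hypothetical.
   */
  static inline bool example_xor_val_failed(enum sum_check_flags result)
  {
  	/* the engine sets bit SUM_CHECK_P on a P (xor) parity mismatch */
  	return (result & SUM_CHECK_P_RESULT) != 0;
  }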
  
  
  /**
   * dma_cap_mask_t - capabilities bitmap modeled after cpumask_t.
   * See linux/cpumask.h
   */
  typedef struct { DECLARE_BITMAP(bits, DMA_TX_TYPE_END); } dma_cap_mask_t;
  
  /**
   * struct dma_chan_percpu - the per-CPU part of struct dma_chan
   * @memcpy_count: transaction counter
   * @bytes_transferred: byte counter
   */
  
  struct dma_chan_percpu {
  	/* stats */
  	unsigned long memcpy_count;
  	unsigned long bytes_transferred;
  };
  
  /**
   * struct dma_chan - devices supply DMA channels, clients use them
   * @device: ptr to the dma device who supplies this channel, always !%NULL
   * @cookie: last cookie value returned to client
   * @chan_id: channel ID for sysfs
   * @dev: class device for sysfs
   * @device_node: used to add this to the device chan list
   * @local: per-cpu pointer to a struct dma_chan_percpu
   * @client_count: how many clients are using this channel
   * @table_count: number of appearances in the mem-to-mem allocation table
   * @private: private data for certain client-channel associations
   */
  struct dma_chan {
  	struct dma_device *device;
  	dma_cookie_t cookie;
  
  	/* sysfs */
  	int chan_id;
  	struct dma_chan_dev *dev;

  	struct list_head device_node;
  	struct dma_chan_percpu __percpu *local;
  	int client_count;
  	int table_count;
  	void *private;
  };
  /**
   * struct dma_chan_dev - relate sysfs device node to backing channel device
   * @chan: driver channel device
   * @device: sysfs device
   * @dev_id: parent dma_device dev_id
   * @idr_ref: reference count to gate release of dma_device dev_id
   */
  struct dma_chan_dev {
  	struct dma_chan *chan;
  	struct device device;
  	int dev_id;
  	atomic_t *idr_ref;
  };
  /**
   * enum dma_slave_buswidth - defines the bus width of the DMA slave
   * device, source or target buses
   */
  enum dma_slave_buswidth {
  	DMA_SLAVE_BUSWIDTH_UNDEFINED = 0,
  	DMA_SLAVE_BUSWIDTH_1_BYTE = 1,
  	DMA_SLAVE_BUSWIDTH_2_BYTES = 2,
  	DMA_SLAVE_BUSWIDTH_4_BYTES = 4,
  	DMA_SLAVE_BUSWIDTH_8_BYTES = 8,
  };
  
  /**
   * struct dma_slave_config - dma slave channel runtime config
   * @direction: whether the data shall go in or out on this slave
   * channel, right now. DMA_TO_DEVICE and DMA_FROM_DEVICE are
   * legal values; DMA_BIDIRECTIONAL is not acceptable since we
   * need to differentiate source and target addresses.
   * @src_addr: this is the physical address where DMA slave data
   * should be read (RX), if the source is memory this argument is
   * ignored.
   * @dst_addr: this is the physical address where DMA slave data
   * should be written (TX), if the source is memory this argument
   * is ignored.
   * @src_addr_width: this is the width in bytes of the source (RX)
   * register where DMA data shall be read. If the source
   * is memory this may be ignored depending on architecture.
   * Legal values: 1, 2, 4, 8.
   * @dst_addr_width: same as src_addr_width but for destination
   * target (TX) mutatis mutandis.
   * @src_maxburst: the maximum number of words (note: words, as in
   * units of the src_addr_width member, not bytes) that can be sent
   * in one burst to the device. Typically something like half the
   * FIFO depth on I/O peripherals so you don't overflow it. This
   * may or may not be applicable on memory sources.
   * @dst_maxburst: same as src_maxburst but for destination target
   * mutatis mutandis.
   *
   * This struct is passed in as configuration data to a DMA engine
   * in order to set up a certain channel for DMA transport at runtime.
   * The DMA device/engine has to provide support for an additional
   * command in the channel config interface, DMA_SLAVE_CONFIG
   * and this struct will then be passed in as an argument to the
   * DMA engine device_control() function.
   *
   * The rationale for adding configuration information to this struct
   * is as follows: if it is likely that most DMA slave controllers in
   * the world will support the configuration option, then make it
   * generic. If not, and the setting is fixed, so that it can be passed in
   * statically from the platform data, then prefer to do that. Else, if it
   * is neither fixed at runtime nor generic enough (such as bus mastership
   * on some CPU family and whatnot), then create a custom slave config
   * struct and pass that, then make this config a member of that
   * struct, if applicable.
   */
  struct dma_slave_config {
  	enum dma_data_direction direction;
  	dma_addr_t src_addr;
  	dma_addr_t dst_addr;
  	enum dma_slave_buswidth src_addr_width;
  	enum dma_slave_buswidth dst_addr_width;
  	u32 src_maxburst;
  	u32 dst_maxburst;
  };
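
  /*
   * Illustrative sketch (not part of the original header): filling in a
   * dma_slave_config for memory-to-device (TX) transfers.  The FIFO address,
   * burst size and helper name are hypothetical; the filled struct would then
   * be handed to the driver via
   * device_control(chan, DMA_SLAVE_CONFIG, (unsigned long)&cfg).
   */
  static inline void example_fill_slave_config(struct dma_slave_config *cfg,
  					     dma_addr_t dev_tx_fifo)
  {
  	cfg->direction = DMA_TO_DEVICE;			/* memory -> device */
  	cfg->dst_addr = dev_tx_fifo;			/* hypothetical FIFO */
  	cfg->dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
  	cfg->dst_maxburst = 8;				/* 8 words of 4 bytes */
  }
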
  static inline const char *dma_chan_name(struct dma_chan *chan)
  {
  	return dev_name(&chan->dev->device);
  }

  void dma_chan_cleanup(struct kref *kref);
  /**
   * typedef dma_filter_fn - callback filter for dma_request_channel
   * @chan: channel to be reviewed
   * @filter_param: opaque parameter passed through dma_request_channel
   *
   * When this optional parameter is specified in a call to dma_request_channel a
   * suitable channel is passed to this routine for further dispositioning before
   * being returned; here 'suitable' indicates a non-busy channel that
   * satisfies the given capability mask.  It returns 'true' to indicate that the
   * channel is suitable.
   */
  typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param);
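
  /*
   * Illustrative sketch (not part of the original header): a minimal filter
   * as a client might pass to dma_request_channel().  Here filter_param is
   * assumed to be a pointer to the desired channel id; both names are
   * hypothetical.
   */
  static inline bool example_filter(struct dma_chan *chan, void *filter_param)
  {
  	int *wanted_chan_id = filter_param;
  
  	return chan->chan_id == *wanted_chan_id;
  }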

  typedef void (*dma_async_tx_callback)(void *dma_async_param);
  /**
   * struct dma_async_tx_descriptor - async transaction descriptor
   * ---dma generic offload fields---
   * @cookie: tracking cookie for this transaction, set to -EBUSY if
   *	this tx is sitting on a dependency list
   * @flags: flags to augment operation preparation, control completion, and
   * 	communicate status
   * @phys: physical address of the descriptor
   * @chan: target channel for this operation
   * @tx_submit: set the prepared descriptor(s) to be executed by the engine
   * @callback: routine to call after this operation is complete
   * @callback_param: general parameter to pass to the callback routine
   * ---async_tx api specific fields---
   * @next: at completion submit this descriptor
   * @parent: pointer to the next level up in the dependency chain
   * @lock: protect the parent and next pointers
   */
  struct dma_async_tx_descriptor {
  	dma_cookie_t cookie;
  	enum dma_ctrl_flags flags; /* not a 'long' to pack with cookie */
  	dma_addr_t phys;
  	struct dma_chan *chan;
  	dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx);
  	dma_async_tx_callback callback;
  	void *callback_param;
  #ifndef CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH
  	struct dma_async_tx_descriptor *next;
  	struct dma_async_tx_descriptor *parent;
  	spinlock_t lock;
  #endif
  };
  #ifdef CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH
  static inline void txd_lock(struct dma_async_tx_descriptor *txd)
  {
  }
  static inline void txd_unlock(struct dma_async_tx_descriptor *txd)
  {
  }
  static inline void txd_chain(struct dma_async_tx_descriptor *txd, struct dma_async_tx_descriptor *next)
  {
  	BUG();
  }
  static inline void txd_clear_parent(struct dma_async_tx_descriptor *txd)
  {
  }
  static inline void txd_clear_next(struct dma_async_tx_descriptor *txd)
  {
  }
  static inline struct dma_async_tx_descriptor *txd_next(struct dma_async_tx_descriptor *txd)
  {
  	return NULL;
  }
  static inline struct dma_async_tx_descriptor *txd_parent(struct dma_async_tx_descriptor *txd)
  {
  	return NULL;
  }
  
  #else
  static inline void txd_lock(struct dma_async_tx_descriptor *txd)
  {
  	spin_lock_bh(&txd->lock);
  }
  static inline void txd_unlock(struct dma_async_tx_descriptor *txd)
  {
  	spin_unlock_bh(&txd->lock);
  }
  static inline void txd_chain(struct dma_async_tx_descriptor *txd, struct dma_async_tx_descriptor *next)
  {
  	txd->next = next;
  	next->parent = txd;
  }
  static inline void txd_clear_parent(struct dma_async_tx_descriptor *txd)
  {
  	txd->parent = NULL;
  }
  static inline void txd_clear_next(struct dma_async_tx_descriptor *txd)
  {
  	txd->next = NULL;
  }
  static inline struct dma_async_tx_descriptor *txd_parent(struct dma_async_tx_descriptor *txd)
  {
  	return txd->parent;
  }
  static inline struct dma_async_tx_descriptor *txd_next(struct dma_async_tx_descriptor *txd)
  {
  	return txd->next;
  }
  #endif
  /**
   * struct dma_tx_state - filled in to report the status of
   * a transfer.
   * @last: last completed DMA cookie
   * @used: last issued DMA cookie (i.e. the one in progress)
   * @residue: the remaining number of bytes left to transmit
   *	on the selected transfer for states DMA_IN_PROGRESS and
   *	DMA_PAUSED if this is implemented in the driver, else 0
   */
  struct dma_tx_state {
  	dma_cookie_t last;
  	dma_cookie_t used;
  	u32 residue;
  };
  
  /**
   * struct dma_device - info on the entity supplying DMA services
   * @chancnt: how many DMA channels are supported
   * @privatecnt: how many DMA channels are requested by dma_request_channel
   * @channels: the list of struct dma_chan
   * @global_node: list_head for global dma_device_list
   * @cap_mask: one or more dma_capability flags
   * @max_xor: maximum number of xor sources, 0 if no capability
   * @max_pq: maximum number of PQ sources and PQ-continue capability
   * @copy_align: alignment shift for memcpy operations
   * @xor_align: alignment shift for xor operations
   * @pq_align: alignment shift for pq operations
   * @fill_align: alignment shift for memset operations
   * @dev_id: unique device ID
   * @dev: struct device reference for dma mapping api
   * @device_alloc_chan_resources: allocate resources and return the
   *	number of allocated descriptors
   * @device_free_chan_resources: release DMA channel's resources
   * @device_prep_dma_memcpy: prepares a memcpy operation
   * @device_prep_dma_xor: prepares a xor operation
   * @device_prep_dma_xor_val: prepares a xor validation operation
   * @device_prep_dma_pq: prepares a pq operation
   * @device_prep_dma_pq_val: prepares a pqzero_sum operation
   * @device_prep_dma_memset: prepares a memset operation
   * @device_prep_dma_interrupt: prepares an end of chain interrupt operation
   * @device_prep_slave_sg: prepares a slave dma operation
   * @device_control: manipulate all pending operations on a channel, returns
   *	zero or error code
   * @device_tx_status: poll for transaction completion, the optional
   *	txstate parameter can be supplied with a pointer to get a
   *	struct with auxiliary transfer status information, otherwise the call
   *	will just return a simple status code
   * @device_issue_pending: push pending transactions to hardware
   */
  struct dma_device {
  
  	unsigned int chancnt;
  	unsigned int privatecnt;
  	struct list_head channels;
  	struct list_head global_node;
  	dma_cap_mask_t  cap_mask;
  	unsigned short max_xor;
  	unsigned short max_pq;
  	u8 copy_align;
  	u8 xor_align;
  	u8 pq_align;
  	u8 fill_align;
  	#define DMA_HAS_PQ_CONTINUE (1 << 15)

  	int dev_id;
  	struct device *dev;

  	int (*device_alloc_chan_resources)(struct dma_chan *chan);
  	void (*device_free_chan_resources)(struct dma_chan *chan);
  
  	struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)(
  		struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
  		size_t len, unsigned long flags);
  	struct dma_async_tx_descriptor *(*device_prep_dma_xor)(
  		struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
  		unsigned int src_cnt, size_t len, unsigned long flags);
  	struct dma_async_tx_descriptor *(*device_prep_dma_xor_val)(
  		struct dma_chan *chan, dma_addr_t *src,	unsigned int src_cnt,
  		size_t len, enum sum_check_flags *result, unsigned long flags);
  	struct dma_async_tx_descriptor *(*device_prep_dma_pq)(
  		struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
  		unsigned int src_cnt, const unsigned char *scf,
  		size_t len, unsigned long flags);
  	struct dma_async_tx_descriptor *(*device_prep_dma_pq_val)(
  		struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
  		unsigned int src_cnt, const unsigned char *scf, size_t len,
  		enum sum_check_flags *pqres, unsigned long flags);
  	struct dma_async_tx_descriptor *(*device_prep_dma_memset)(
  		struct dma_chan *chan, dma_addr_t dest, int value, size_t len,
  		unsigned long flags);
  	struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)(
  		struct dma_chan *chan, unsigned long flags);

  	struct dma_async_tx_descriptor *(*device_prep_slave_sg)(
  		struct dma_chan *chan, struct scatterlist *sgl,
  		unsigned int sg_len, enum dma_data_direction direction,
  		unsigned long flags);
  	int (*device_control)(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
  		unsigned long arg);

  	enum dma_status (*device_tx_status)(struct dma_chan *chan,
  					    dma_cookie_t cookie,
  					    struct dma_tx_state *txstate);
  	void (*device_issue_pending)(struct dma_chan *chan);
  };
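
  /*
   * Illustrative sketch (not part of the original header): the canonical
   * prepare/submit/issue sequence for a memcpy offload, built from the driver
   * entry points above.  The helper name is hypothetical and error handling
   * is deliberately minimal.
   */
  static inline dma_cookie_t example_memcpy_offload(struct dma_chan *chan,
  		dma_addr_t dest, dma_addr_t src, size_t len)
  {
  	struct dma_async_tx_descriptor *tx;
  	dma_cookie_t cookie;
  
  	tx = chan->device->device_prep_dma_memcpy(chan, dest, src, len,
  						  DMA_CTRL_ACK);
  	if (!tx)
  		return -ENOMEM;	/* assumption: report a failed prep as -ENOMEM */
  
  	cookie = tx->tx_submit(tx);		/* queue on the channel */
  	if (dma_submit_error(cookie))
  		return cookie;
  
  	chan->device->device_issue_pending(chan);	/* kick the hardware */
  	return cookie;
  }
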
  static inline bool dmaengine_check_align(u8 align, size_t off1, size_t off2, size_t len)
  {
  	size_t mask;
  
  	if (!align)
  		return true;
  	mask = (1 << align) - 1;
  	if (mask & (off1 | off2 | len))
  		return false;
  	return true;
  }
  
  static inline bool is_dma_copy_aligned(struct dma_device *dev, size_t off1,
  				       size_t off2, size_t len)
  {
  	return dmaengine_check_align(dev->copy_align, off1, off2, len);
  }
  
  static inline bool is_dma_xor_aligned(struct dma_device *dev, size_t off1,
  				      size_t off2, size_t len)
  {
  	return dmaengine_check_align(dev->xor_align, off1, off2, len);
  }
  
  static inline bool is_dma_pq_aligned(struct dma_device *dev, size_t off1,
  				     size_t off2, size_t len)
  {
  	return dmaengine_check_align(dev->pq_align, off1, off2, len);
  }
  
  static inline bool is_dma_fill_aligned(struct dma_device *dev, size_t off1,
  				       size_t off2, size_t len)
  {
  	return dmaengine_check_align(dev->fill_align, off1, off2, len);
  }
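
  /*
   * Worked example (not part of the original header): the *_align fields are
   * shift counts, so copy_align == 2 means 4-byte alignment.  With mask =
   * (1 << 2) - 1 = 3, offsets 0x10 and 0x20 and a length of 256 all pass,
   * while a length of 255 has low bits set and fails.  A memcpy client
   * would check the corresponding helper before offloading:
   */
  static inline bool example_can_offload_copy(struct dma_device *dev,
  					    size_t off1, size_t off2, size_t len)
  {
  	return is_dma_copy_aligned(dev, off1, off2, len);
  }
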
  static inline void
  dma_set_maxpq(struct dma_device *dma, int maxpq, int has_pq_continue)
  {
  	dma->max_pq = maxpq;
  	if (has_pq_continue)
  		dma->max_pq |= DMA_HAS_PQ_CONTINUE;
  }
  
  static inline bool dmaf_continue(enum dma_ctrl_flags flags)
  {
  	return (flags & DMA_PREP_CONTINUE) == DMA_PREP_CONTINUE;
  }
  
  static inline bool dmaf_p_disabled_continue(enum dma_ctrl_flags flags)
  {
  	enum dma_ctrl_flags mask = DMA_PREP_CONTINUE | DMA_PREP_PQ_DISABLE_P;
  
  	return (flags & mask) == mask;
  }
  
  static inline bool dma_dev_has_pq_continue(struct dma_device *dma)
  {
  	return (dma->max_pq & DMA_HAS_PQ_CONTINUE) == DMA_HAS_PQ_CONTINUE;
  }
  static inline unsigned short dma_dev_to_maxpq(struct dma_device *dma)
  {
  	return dma->max_pq & ~DMA_HAS_PQ_CONTINUE;
  }
  
  /* dma_maxpq - reduce maxpq in the face of continued operations
   * @dma - dma device with PQ capability
   * @flags - to check if DMA_PREP_CONTINUE and DMA_PREP_PQ_DISABLE_P are set
   *
   * When an engine does not support native continuation we need 3 extra
   * source slots to reuse P and Q with the following coefficients:
   * 1/ {00} * P : remove P from Q', but use it as a source for P'
   * 2/ {01} * Q : use Q to continue Q' calculation
   * 3/ {00} * Q : subtract Q from P' to cancel (2)
   *
   * In the case where P is disabled we only need 1 extra source:
   * 1/ {01} * Q : use Q to continue Q' calculation
   */
  static inline int dma_maxpq(struct dma_device *dma, enum dma_ctrl_flags flags)
  {
  	if (dma_dev_has_pq_continue(dma) || !dmaf_continue(flags))
  		return dma_dev_to_maxpq(dma);
  	else if (dmaf_p_disabled_continue(flags))
  		return dma_dev_to_maxpq(dma) - 1;
  	else if (dmaf_continue(flags))
  		return dma_dev_to_maxpq(dma) - 3;
  	BUG();
  }
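
  /*
   * Worked example (not part of the original header): for a device registered
   * with dma_set_maxpq(dma, 8, 0), i.e. max_pq == 8 and no native
   * continuation support:
   *
   *	dma_maxpq(dma, 0)			== 8
   *	dma_maxpq(dma, DMA_PREP_CONTINUE)	== 8 - 3 == 5
   *	dma_maxpq(dma, DMA_PREP_CONTINUE |
   *		       DMA_PREP_PQ_DISABLE_P)	== 8 - 1 == 7
   */
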
  /* --- public DMA engine API --- */
  #ifdef CONFIG_DMA_ENGINE
  void dmaengine_get(void);
  void dmaengine_put(void);
  #else
  static inline void dmaengine_get(void)
  {
  }
  static inline void dmaengine_put(void)
  {
  }
  #endif
  #ifdef CONFIG_NET_DMA
  #define net_dmaengine_get()	dmaengine_get()
  #define net_dmaengine_put()	dmaengine_put()
  #else
  static inline void net_dmaengine_get(void)
  {
  }
  static inline void net_dmaengine_put(void)
  {
  }
  #endif
  #ifdef CONFIG_ASYNC_TX_DMA
  #define async_dmaengine_get()	dmaengine_get()
  #define async_dmaengine_put()	dmaengine_put()
  #ifdef CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH
  #define async_dma_find_channel(type) dma_find_channel(DMA_ASYNC_TX)
  #else
  #define async_dma_find_channel(type) dma_find_channel(type)
  #endif /* CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH */
  #else
  static inline void async_dmaengine_get(void)
  {
  }
  static inline void async_dmaengine_put(void)
  {
  }
  static inline struct dma_chan *
  async_dma_find_channel(enum dma_transaction_type type)
  {
  	return NULL;
  }
  #endif /* CONFIG_ASYNC_TX_DMA */

  dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan,
  	void *dest, void *src, size_t len);
  dma_cookie_t dma_async_memcpy_buf_to_pg(struct dma_chan *chan,
  	struct page *page, unsigned int offset, void *kdata, size_t len);
  dma_cookie_t dma_async_memcpy_pg_to_pg(struct dma_chan *chan,
  	struct page *dest_pg, unsigned int dest_off, struct page *src_pg,
  	unsigned int src_off, size_t len);
  void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
  	struct dma_chan *chan);

  static inline void async_tx_ack(struct dma_async_tx_descriptor *tx)
  {
  	tx->flags |= DMA_CTRL_ACK;
  }
  static inline void async_tx_clear_ack(struct dma_async_tx_descriptor *tx)
  {
  	tx->flags &= ~DMA_CTRL_ACK;
  }
  static inline bool async_tx_test_ack(struct dma_async_tx_descriptor *tx)
  {
  	return (tx->flags & DMA_CTRL_ACK) == DMA_CTRL_ACK;
  }
  #define first_dma_cap(mask) __first_dma_cap(&(mask))
  static inline int __first_dma_cap(const dma_cap_mask_t *srcp)
  {
  	return min_t(int, DMA_TX_TYPE_END,
  		find_first_bit(srcp->bits, DMA_TX_TYPE_END));
  }

  #define next_dma_cap(n, mask) __next_dma_cap((n), &(mask))
  static inline int __next_dma_cap(int n, const dma_cap_mask_t *srcp)
  {
  	return min_t(int, DMA_TX_TYPE_END,
  		find_next_bit(srcp->bits, DMA_TX_TYPE_END, n+1));
  }
  #define dma_cap_set(tx, mask) __dma_cap_set((tx), &(mask))
  static inline void
  __dma_cap_set(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
  {
  	set_bit(tx_type, dstp->bits);
  }

  #define dma_cap_clear(tx, mask) __dma_cap_clear((tx), &(mask))
  static inline void
  __dma_cap_clear(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
  {
  	clear_bit(tx_type, dstp->bits);
  }
  #define dma_cap_zero(mask) __dma_cap_zero(&(mask))
  static inline void __dma_cap_zero(dma_cap_mask_t *dstp)
  {
  	bitmap_zero(dstp->bits, DMA_TX_TYPE_END);
  }
  #define dma_has_cap(tx, mask) __dma_has_cap((tx), &(mask))
  static inline int
  __dma_has_cap(enum dma_transaction_type tx_type, dma_cap_mask_t *srcp)
  {
  	return test_bit(tx_type, srcp->bits);
  }
  #define for_each_dma_cap_mask(cap, mask) \
  	for ((cap) = first_dma_cap(mask);	\
  		(cap) < DMA_TX_TYPE_END;	\
  		(cap) = next_dma_cap((cap), (mask)))
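
  /*
   * Illustrative sketch (not part of the original header): walking every
   * capability bit set in a device's cap_mask with the iterator above.
   * The helper name is hypothetical.
   */
  static inline int example_count_caps(struct dma_device *device)
  {
  	enum dma_transaction_type cap;
  	int n = 0;
  
  	for_each_dma_cap_mask(cap, device->cap_mask)
  		n++;	/* e.g. DMA_MEMCPY, DMA_XOR, ... */
  
  	return n;
  }
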
  /**
   * dma_async_issue_pending - flush pending transactions to HW
   * @chan: target DMA channel
   *
   * This allows drivers to push copies to HW in batches,
   * reducing MMIO writes where possible.
   */
  static inline void dma_async_issue_pending(struct dma_chan *chan)
  {
  	chan->device->device_issue_pending(chan);
  }
  #define dma_async_memcpy_issue_pending(chan) dma_async_issue_pending(chan)
  /**
   * dma_async_is_tx_complete - poll for transaction completion
   * @chan: DMA channel
   * @cookie: transaction identifier to check status of
   * @last: returns last completed cookie, can be NULL
   * @used: returns last issued cookie, can be NULL
   *
   * If @last and @used are passed in, upon return they reflect the driver
   * internal state and can be used with dma_async_is_complete() to check
   * the status of multiple cookies without re-checking hardware state.
   */
  static inline enum dma_status dma_async_is_tx_complete(struct dma_chan *chan,
  	dma_cookie_t cookie, dma_cookie_t *last, dma_cookie_t *used)
  {
  	struct dma_tx_state state;
  	enum dma_status status;
  
  	status = chan->device->device_tx_status(chan, cookie, &state);
  	if (last)
  		*last = state.last;
  	if (used)
  		*used = state.used;
  	return status;
  }
  #define dma_async_memcpy_complete(chan, cookie, last, used)\
  	dma_async_is_tx_complete(chan, cookie, last, used)
  /**
   * dma_async_is_complete - test a cookie against chan state
   * @cookie: transaction identifier to test status of
   * @last_complete: last known completed transaction
   * @last_used: last cookie value handed out
   *
   * dma_async_is_complete() is used in dma_async_memcpy_complete();
   * the test logic is separated for lightweight testing of multiple cookies
   */
  static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie,
  			dma_cookie_t last_complete, dma_cookie_t last_used)
  {
  	if (last_complete <= last_used) {
  		if ((cookie <= last_complete) || (cookie > last_used))
  			return DMA_SUCCESS;
  	} else {
  		if ((cookie <= last_complete) && (cookie > last_used))
  			return DMA_SUCCESS;
  	}
  	return DMA_IN_PROGRESS;
  }
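
  /*
   * Illustrative sketch (not part of the original header): busy-polling one
   * cookie to completion with the helpers above.  A real client would sleep
   * or do other work instead of spinning; see also dma_sync_wait() below.
   */
  static inline enum dma_status example_poll_cookie(struct dma_chan *chan,
  						  dma_cookie_t cookie)
  {
  	enum dma_status status;
  	dma_cookie_t last, used;
  
  	do {
  		status = dma_async_is_tx_complete(chan, cookie, &last, &used);
  	} while (status == DMA_IN_PROGRESS);
  
  	return status;	/* DMA_SUCCESS or DMA_ERROR (or DMA_PAUSED) */
  }
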
  static inline void
  dma_set_tx_state(struct dma_tx_state *st, dma_cookie_t last, dma_cookie_t used, u32 residue)
  {
  	if (st) {
  		st->last = last;
  		st->used = used;
  		st->residue = residue;
  	}
  }
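
  /*
   * Illustrative sketch (not part of the original header): the typical shape
   * of a driver's device_tx_status() hook, combining dma_async_is_complete()
   * with dma_set_tx_state().  last_complete/last_used are assumed to come
   * from driver-private bookkeeping; a residue of 0 is reported here.
   */
  static inline enum dma_status example_tx_status(struct dma_tx_state *txstate,
  		dma_cookie_t cookie, dma_cookie_t last_complete,
  		dma_cookie_t last_used)
  {
  	dma_set_tx_state(txstate, last_complete, last_used, 0);
  	return dma_async_is_complete(cookie, last_complete, last_used);
  }
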
  enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie);
  #ifdef CONFIG_DMA_ENGINE
  enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx);
  void dma_issue_pending_all(void);
  #else
  static inline enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
  {
  	return DMA_SUCCESS;
  }
  static inline void dma_issue_pending_all(void)
  {
  	do { } while (0);
  }
  #endif
  
  /* --- DMA device --- */
  
  int dma_async_device_register(struct dma_device *device);
  void dma_async_device_unregister(struct dma_device *device);
  void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
  struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type);
  #define dma_request_channel(mask, x, y) __dma_request_channel(&(mask), x, y)
  struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, void *fn_param);
  void dma_release_channel(struct dma_chan *chan);
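
  /*
   * Illustrative sketch (not part of the original header): how a client
   * typically grabs a private memcpy-capable channel.  Pair with
   * dma_release_channel() when done; the helper name is hypothetical.
   */
  static inline struct dma_chan *example_grab_memcpy_chan(void)
  {
  	dma_cap_mask_t mask;
  
  	dma_cap_zero(mask);
  	dma_cap_set(DMA_MEMCPY, mask);
  
  	/* NULL filter: accept any free channel with the capability */
  	return dma_request_channel(mask, NULL, NULL);
  }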

  /* --- Helper iov-locking functions --- */
  
  struct dma_page_list {
  	char __user *base_address;
  	int nr_pages;
  	struct page **pages;
  };
  
  struct dma_pinned_list {
  	int nr_iovecs;
  	struct dma_page_list page_list[0];
  };
  
  struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len);
  void dma_unpin_iovec_pages(struct dma_pinned_list* pinned_list);
  
  dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
  	struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len);
  dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
  	struct dma_pinned_list *pinned_list, struct page *page,
  	unsigned int offset, size_t len);
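
  /*
   * Illustrative sketch (not part of the original header): pinning a user
   * iovec and copying kernel data into it.  The caller is assumed to keep
   * *pinned until the copy has completed and then call
   * dma_unpin_iovec_pages(); names here are hypothetical.
   */
  static inline dma_cookie_t example_copy_to_user_iovec(struct dma_chan *chan,
  		struct iovec *iov, struct dma_pinned_list **pinned,
  		unsigned char *kdata, size_t len)
  {
  	*pinned = dma_pin_iovec_pages(iov, len);
  	if (!*pinned)
  		return -ENOMEM;	/* assumption: report a pin failure as -ENOMEM */
  
  	return dma_memcpy_to_iovec(chan, iov, *pinned, kdata, len);
  }
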
  #endif /* DMAENGINE_H */