  /*
   * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
   *
   * This program is free software; you can redistribute it and/or modify it
   * under the terms of the GNU General Public License as published by the Free
   * Software Foundation; either version 2 of the License, or (at your option)
   * any later version.
   *
   * This program is distributed in the hope that it will be useful, but WITHOUT
   * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   * more details.
   *
   * You should have received a copy of the GNU General Public License along with
   * this program; if not, write to the Free Software Foundation, Inc., 59
   * Temple Place - Suite 330, Boston, MA  02111-1307, USA.
   *
   * The full GNU General Public License is included in this distribution in the
   * file called COPYING.
   */
  #ifndef DMAENGINE_H
  #define DMAENGINE_H

  #include <linux/device.h>
  #include <linux/uio.h>
  #include <linux/dma-direction.h>
  #include <linux/scatterlist.h>
  #include <linux/bitmap.h>
  #include <asm/page.h>

  /**
   * typedef dma_cookie_t - an opaque DMA cookie
   *
   * if dma_cookie_t is >0 it's a DMA request cookie, <0 it's an error code
   */
  typedef s32 dma_cookie_t;
  #define DMA_MIN_COOKIE	1
  #define DMA_MAX_COOKIE	INT_MAX
  
  #define dma_submit_error(cookie) ((cookie) < 0 ? 1 : 0)
  
  /**
   * enum dma_status - DMA transaction status
   * @DMA_SUCCESS: transaction completed successfully
   * @DMA_IN_PROGRESS: transaction not yet processed
   * @DMA_PAUSED: transaction is paused
   * @DMA_ERROR: transaction failed
   */
  enum dma_status {
  	DMA_SUCCESS,
  	DMA_IN_PROGRESS,
  	DMA_PAUSED,
  	DMA_ERROR,
  };
  
  /**
   * enum dma_transaction_type - DMA transaction types/indexes
   *
   * Note: The DMA_ASYNC_TX capability is not to be set by drivers.  It is
   * automatically set as dma devices are registered.
   */
  enum dma_transaction_type {
  	DMA_MEMCPY,
  	DMA_XOR,
  	DMA_PQ,
  	DMA_XOR_VAL,
  	DMA_PQ_VAL,
  	DMA_MEMSET,
  	DMA_INTERRUPT,
  	DMA_SG,
  	DMA_PRIVATE,
  	DMA_ASYNC_TX,
  	DMA_SLAVE,
  	DMA_CYCLIC,
  };
  
  /* last transaction type for creation of the capabilities mask */
  #define DMA_TX_TYPE_END (DMA_CYCLIC + 1)

  
  /**
   * enum dma_ctrl_flags - DMA flags to augment operation preparation,
   *  control completion, and communicate status.
   * @DMA_PREP_INTERRUPT - trigger an interrupt (callback) upon completion of
   *  this transaction
   * @DMA_CTRL_ACK - if clear, the descriptor cannot be reused until the client
   *  acknowledges receipt, i.e. has a chance to establish any dependency
   *  chains
   * @DMA_COMPL_SKIP_SRC_UNMAP - set to disable dma-unmapping the source buffer(s)
   * @DMA_COMPL_SKIP_DEST_UNMAP - set to disable dma-unmapping the destination(s)
   * @DMA_COMPL_SRC_UNMAP_SINGLE - set to do the source dma-unmapping as single
   * 	(if not set, do the source dma-unmapping as page)
   * @DMA_COMPL_DEST_UNMAP_SINGLE - set to do the destination dma-unmapping as single
   * 	(if not set, do the destination dma-unmapping as page)
   * @DMA_PREP_PQ_DISABLE_P - prevent generation of P while generating Q
   * @DMA_PREP_PQ_DISABLE_Q - prevent generation of Q while generating P
   * @DMA_PREP_CONTINUE - indicate to a driver that it is reusing buffers as
   *  sources that were the result of a previous operation, in the case of a PQ
   *  operation it continues the calculation with new sources
   * @DMA_PREP_FENCE - tell the driver that subsequent operations depend
   *  on the result of this operation
   */
  enum dma_ctrl_flags {
  	DMA_PREP_INTERRUPT = (1 << 0),
  	DMA_CTRL_ACK = (1 << 1),
  	DMA_COMPL_SKIP_SRC_UNMAP = (1 << 2),
  	DMA_COMPL_SKIP_DEST_UNMAP = (1 << 3),
  	DMA_COMPL_SRC_UNMAP_SINGLE = (1 << 4),
  	DMA_COMPL_DEST_UNMAP_SINGLE = (1 << 5),
  	DMA_PREP_PQ_DISABLE_P = (1 << 6),
  	DMA_PREP_PQ_DISABLE_Q = (1 << 7),
  	DMA_PREP_CONTINUE = (1 << 8),
  	DMA_PREP_FENCE = (1 << 9),
  };
  
  /**
   * enum dma_ctrl_cmd - DMA operations that can optionally be exercised
   * on a running channel.
   * @DMA_TERMINATE_ALL: terminate all ongoing transfers
   * @DMA_PAUSE: pause ongoing transfers
   * @DMA_RESUME: resume paused transfer
   * @DMA_SLAVE_CONFIG: this command is only implemented by DMA controllers
   * that need to runtime reconfigure the slave channels (as opposed to passing
   * configuration data in statically from the platform). An additional
   * argument of struct dma_slave_config must be passed in with this
   * command.
   * @FSLDMA_EXTERNAL_START: this command will put the Freescale DMA controller
   * into external start mode.
   */
  enum dma_ctrl_cmd {
  	DMA_TERMINATE_ALL,
  	DMA_PAUSE,
  	DMA_RESUME,
  	DMA_SLAVE_CONFIG,
  	FSLDMA_EXTERNAL_START,
  };
  
  /**
   * enum sum_check_bits - bit position of pq_check_flags
   */
  enum sum_check_bits {
  	SUM_CHECK_P = 0,
  	SUM_CHECK_Q = 1,
  };
  
  /**
   * enum pq_check_flags - result of async_{xor,pq}_zero_sum operations
   * @SUM_CHECK_P_RESULT - 1 if xor zero sum error, 0 otherwise
   * @SUM_CHECK_Q_RESULT - 1 if reed-solomon zero sum error, 0 otherwise
   */
  enum sum_check_flags {
  	SUM_CHECK_P_RESULT = (1 << SUM_CHECK_P),
  	SUM_CHECK_Q_RESULT = (1 << SUM_CHECK_Q),
  };
  
  
  /**
   * dma_cap_mask_t - capabilities bitmap modeled after cpumask_t.
   * See linux/cpumask.h
   */
  typedef struct { DECLARE_BITMAP(bits, DMA_TX_TYPE_END); } dma_cap_mask_t;
  
  /**
   * struct dma_chan_percpu - the per-CPU part of struct dma_chan
   * @memcpy_count: transaction counter
   * @bytes_transferred: byte counter
   */
  
  struct dma_chan_percpu {
  	/* stats */
  	unsigned long memcpy_count;
  	unsigned long bytes_transferred;
  };
  
  /**
   * struct dma_chan - devices supply DMA channels, clients use them
   * @device: ptr to the dma device who supplies this channel, always !%NULL
   * @cookie: last cookie value returned to client
   * @chan_id: channel ID for sysfs
   * @dev: class device for sysfs
   * @device_node: used to add this to the device chan list
   * @local: per-cpu pointer to a struct dma_chan_percpu
   * @client_count: how many clients are using this channel
   * @table_count: number of appearances in the mem-to-mem allocation table
   * @private: private data for certain client-channel associations
   */
  struct dma_chan {
  	struct dma_device *device;
  	dma_cookie_t cookie;
  
  	/* sysfs */
  	int chan_id;
  	struct dma_chan_dev *dev;

  	struct list_head device_node;
  	struct dma_chan_percpu __percpu *local;
  	int client_count;
  	int table_count;
  	void *private;
  };
  /**
   * struct dma_chan_dev - relate sysfs device node to backing channel device
   * @chan - driver channel device
   * @device - sysfs device
   * @dev_id - parent dma_device dev_id
   * @idr_ref - reference count to gate release of dma_device dev_id
   */
  struct dma_chan_dev {
  	struct dma_chan *chan;
  	struct device device;
  	int dev_id;
  	atomic_t *idr_ref;
  };
  /**
   * enum dma_slave_buswidth - defines bus width of the DMA slave
   * device, source or target buses
   */
  enum dma_slave_buswidth {
  	DMA_SLAVE_BUSWIDTH_UNDEFINED = 0,
  	DMA_SLAVE_BUSWIDTH_1_BYTE = 1,
  	DMA_SLAVE_BUSWIDTH_2_BYTES = 2,
  	DMA_SLAVE_BUSWIDTH_4_BYTES = 4,
  	DMA_SLAVE_BUSWIDTH_8_BYTES = 8,
  };
  
  /**
   * struct dma_slave_config - dma slave channel runtime config
   * @direction: whether the data shall go in or out on this slave
   * channel, right now. DMA_TO_DEVICE and DMA_FROM_DEVICE are
   * legal values, DMA_BIDIRECTIONAL is not acceptable since we
   * need to differentiate source and target addresses.
   * @src_addr: this is the physical address where DMA slave data
   * should be read (RX), if the source is memory this argument is
   * ignored.
   * @dst_addr: this is the physical address where DMA slave data
   * should be written (TX), if the source is memory this argument
   * is ignored.
   * @src_addr_width: this is the width in bytes of the source (RX)
   * register where DMA data shall be read. If the source
   * is memory this may be ignored depending on architecture.
   * Legal values: 1, 2, 4, 8.
   * @dst_addr_width: same as src_addr_width but for destination
   * target (TX) mutatis mutandis.
   * @src_maxburst: the maximum number of words (note: words, as in
   * units of the src_addr_width member, not bytes) that can be sent
   * in one burst to the device. Typically something like half the
   * FIFO depth on I/O peripherals so you don't overflow it. This
   * may or may not be applicable on memory sources.
   * @dst_maxburst: same as src_maxburst but for destination target
   * mutatis mutandis.
   *
   * This struct is passed in as configuration data to a DMA engine
   * in order to set up a certain channel for DMA transport at runtime.
   * The DMA device/engine has to provide support for an additional
   * command in the channel config interface, DMA_SLAVE_CONFIG
   * and this struct will then be passed in as an argument to the
   * DMA engine device_control() function.
   *
   * The rationale for adding configuration information to this struct
   * is as follows: if it is likely that most DMA slave controllers in
   * the world will support the configuration option, then make it
   * generic. If not: if it is fixed so that it can be set statically from
   * the platform data, then prefer to do that. Else, if it is neither
   * fixed at runtime, nor generic enough (such as bus mastership on
   * some CPU family and whatnot) then create a custom slave config
   * struct and pass that, then make this config a member of that
   * struct, if applicable.
   */
  struct dma_slave_config {
  	enum dma_data_direction direction;
  	dma_addr_t src_addr;
  	dma_addr_t dst_addr;
  	enum dma_slave_buswidth src_addr_width;
  	enum dma_slave_buswidth dst_addr_width;
  	u32 src_maxburst;
  	u32 dst_maxburst;
  };
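
  /*
   * Example (editor's sketch, not part of the original header): a client
   * configuring a channel for memory-to-device transfers before preparing
   * slave descriptors.  "fifo_phys" and the burst size are hypothetical.
   *
   *	struct dma_slave_config cfg = {
   *		.direction      = DMA_TO_DEVICE,
   *		.dst_addr       = fifo_phys,
   *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
   *		.dst_maxburst   = 8,
   *	};
   *
   *	if (dmaengine_slave_config(chan, &cfg))
   *		return -EINVAL;
   */
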
  static inline const char *dma_chan_name(struct dma_chan *chan)
  {
  	return dev_name(&chan->dev->device);
  }

  void dma_chan_cleanup(struct kref *kref);
  /**
   * typedef dma_filter_fn - callback filter for dma_request_channel
   * @chan: channel to be reviewed
   * @filter_param: opaque parameter passed through dma_request_channel
   *
   * When this optional parameter is specified in a call to dma_request_channel a
   * suitable channel is passed to this routine for further dispositioning before
   * being returned.  Where 'suitable' indicates a non-busy channel that
   * satisfies the given capability mask.  It returns 'true' to indicate that the
   * channel is suitable.
   */
  typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param);
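
  /*
   * Example (editor's sketch): a filter that accepts only channels served
   * by one specific controller; "my_dmac_dev" is a hypothetical struct
   * device pointer passed through the filter_param of dma_request_channel().
   *
   *	static bool my_filter(struct dma_chan *chan, void *filter_param)
   *	{
   *		struct device *my_dmac_dev = filter_param;
   *
   *		return chan->device->dev == my_dmac_dev;
   *	}
   */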

  typedef void (*dma_async_tx_callback)(void *dma_async_param);
  /**
   * struct dma_async_tx_descriptor - async transaction descriptor
   * ---dma generic offload fields---
   * @cookie: tracking cookie for this transaction, set to -EBUSY if
   *	this tx is sitting on a dependency list
   * @flags: flags to augment operation preparation, control completion, and
   * 	communicate status
   * @phys: physical address of the descriptor
   * @chan: target channel for this operation
   * @tx_submit: set the prepared descriptor(s) to be executed by the engine
   * @callback: routine to call after this operation is complete
   * @callback_param: general parameter to pass to the callback routine
   * ---async_tx api specific fields---
   * @next: at completion submit this descriptor
   * @parent: pointer to the next level up in the dependency chain
   * @lock: protect the parent and next pointers
   */
  struct dma_async_tx_descriptor {
  	dma_cookie_t cookie;
  	enum dma_ctrl_flags flags; /* not a 'long' to pack with cookie */
  	dma_addr_t phys;
  	struct dma_chan *chan;
  	dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx);
  	dma_async_tx_callback callback;
  	void *callback_param;
  #ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
  	struct dma_async_tx_descriptor *next;
  	struct dma_async_tx_descriptor *parent;
  	spinlock_t lock;
  #endif
  };
  #ifndef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
  static inline void txd_lock(struct dma_async_tx_descriptor *txd)
  {
  }
  static inline void txd_unlock(struct dma_async_tx_descriptor *txd)
  {
  }
  static inline void txd_chain(struct dma_async_tx_descriptor *txd, struct dma_async_tx_descriptor *next)
  {
  	BUG();
  }
  static inline void txd_clear_parent(struct dma_async_tx_descriptor *txd)
  {
  }
  static inline void txd_clear_next(struct dma_async_tx_descriptor *txd)
  {
  }
  static inline struct dma_async_tx_descriptor *txd_next(struct dma_async_tx_descriptor *txd)
  {
  	return NULL;
  }
  static inline struct dma_async_tx_descriptor *txd_parent(struct dma_async_tx_descriptor *txd)
  {
  	return NULL;
  }
  
  #else
  static inline void txd_lock(struct dma_async_tx_descriptor *txd)
  {
  	spin_lock_bh(&txd->lock);
  }
  static inline void txd_unlock(struct dma_async_tx_descriptor *txd)
  {
  	spin_unlock_bh(&txd->lock);
  }
  static inline void txd_chain(struct dma_async_tx_descriptor *txd, struct dma_async_tx_descriptor *next)
  {
  	txd->next = next;
  	next->parent = txd;
  }
  static inline void txd_clear_parent(struct dma_async_tx_descriptor *txd)
  {
  	txd->parent = NULL;
  }
  static inline void txd_clear_next(struct dma_async_tx_descriptor *txd)
  {
  	txd->next = NULL;
  }
  static inline struct dma_async_tx_descriptor *txd_parent(struct dma_async_tx_descriptor *txd)
  {
  	return txd->parent;
  }
  static inline struct dma_async_tx_descriptor *txd_next(struct dma_async_tx_descriptor *txd)
  {
  	return txd->next;
  }
  #endif
  /**
   * struct dma_tx_state - filled in to report the status of
   * a transfer.
   * @last: last completed DMA cookie
   * @used: last issued DMA cookie (i.e. the one in progress)
   * @residue: the remaining number of bytes left to transmit
   *	on the selected transfer for states DMA_IN_PROGRESS and
   *	DMA_PAUSED if this is implemented in the driver, else 0
   */
  struct dma_tx_state {
  	dma_cookie_t last;
  	dma_cookie_t used;
  	u32 residue;
  };
  
  /**
   * struct dma_device - info on the entity supplying DMA services
   * @chancnt: how many DMA channels are supported
   * @privatecnt: how many DMA channels are requested by dma_request_channel
   * @channels: the list of struct dma_chan
   * @global_node: list_head for global dma_device_list
   * @cap_mask: one or more dma_capability flags
   * @max_xor: maximum number of xor sources, 0 if no capability
   * @max_pq: maximum number of PQ sources and PQ-continue capability
   * @copy_align: alignment shift for memcpy operations
   * @xor_align: alignment shift for xor operations
   * @pq_align: alignment shift for pq operations
   * @fill_align: alignment shift for memset operations
   * @dev_id: unique device ID
   * @dev: struct device reference for dma mapping api
   * @device_alloc_chan_resources: allocate resources and return the
   *	number of allocated descriptors
   * @device_free_chan_resources: release DMA channel's resources
   * @device_prep_dma_memcpy: prepares a memcpy operation
   * @device_prep_dma_xor: prepares a xor operation
   * @device_prep_dma_xor_val: prepares a xor validation operation
   * @device_prep_dma_pq: prepares a pq operation
   * @device_prep_dma_pq_val: prepares a pqzero_sum operation
   * @device_prep_dma_memset: prepares a memset operation
   * @device_prep_dma_interrupt: prepares an end of chain interrupt operation
   * @device_prep_slave_sg: prepares a slave dma operation
   * @device_prep_dma_cyclic: prepare a cyclic dma operation suitable for audio.
   *	The function takes a buffer of size buf_len. The callback function will
   *	be called after period_len bytes have been transferred.
   * @device_control: manipulate all pending operations on a channel, returns
   *	zero or error code
   * @device_tx_status: poll for transaction completion, the optional
   *	txstate parameter can be supplied with a pointer to get a
   *	struct with auxiliary transfer status information, otherwise the call
   *	will just return a simple status code
   * @device_issue_pending: push pending transactions to hardware
   */
  struct dma_device {
  
  	unsigned int chancnt;
  	unsigned int privatecnt;
  	struct list_head channels;
  	struct list_head global_node;
  	dma_cap_mask_t  cap_mask;
  	unsigned short max_xor;
  	unsigned short max_pq;
  	u8 copy_align;
  	u8 xor_align;
  	u8 pq_align;
  	u8 fill_align;
  	#define DMA_HAS_PQ_CONTINUE (1 << 15)

  	int dev_id;
  	struct device *dev;

  	int (*device_alloc_chan_resources)(struct dma_chan *chan);
  	void (*device_free_chan_resources)(struct dma_chan *chan);
  
  	struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)(
  		struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
  		size_t len, unsigned long flags);
  	struct dma_async_tx_descriptor *(*device_prep_dma_xor)(
  		struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
  		unsigned int src_cnt, size_t len, unsigned long flags);
  	struct dma_async_tx_descriptor *(*device_prep_dma_xor_val)(
  		struct dma_chan *chan, dma_addr_t *src,	unsigned int src_cnt,
  		size_t len, enum sum_check_flags *result, unsigned long flags);
  	struct dma_async_tx_descriptor *(*device_prep_dma_pq)(
  		struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
  		unsigned int src_cnt, const unsigned char *scf,
  		size_t len, unsigned long flags);
  	struct dma_async_tx_descriptor *(*device_prep_dma_pq_val)(
  		struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
  		unsigned int src_cnt, const unsigned char *scf, size_t len,
  		enum sum_check_flags *pqres, unsigned long flags);
  	struct dma_async_tx_descriptor *(*device_prep_dma_memset)(
  		struct dma_chan *chan, dma_addr_t dest, int value, size_t len,
  		unsigned long flags);
  	struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)(
  		struct dma_chan *chan, unsigned long flags);
  	struct dma_async_tx_descriptor *(*device_prep_dma_sg)(
  		struct dma_chan *chan,
  		struct scatterlist *dst_sg, unsigned int dst_nents,
  		struct scatterlist *src_sg, unsigned int src_nents,
  		unsigned long flags);

  	struct dma_async_tx_descriptor *(*device_prep_slave_sg)(
  		struct dma_chan *chan, struct scatterlist *sgl,
  		unsigned int sg_len, enum dma_data_direction direction,
  		unsigned long flags);
  	struct dma_async_tx_descriptor *(*device_prep_dma_cyclic)(
  		struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
  		size_t period_len, enum dma_data_direction direction);
  	int (*device_control)(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
  		unsigned long arg);

  	enum dma_status (*device_tx_status)(struct dma_chan *chan,
  					    dma_cookie_t cookie,
  					    struct dma_tx_state *txstate);
  	void (*device_issue_pending)(struct dma_chan *chan);
  };
  static inline int dmaengine_device_control(struct dma_chan *chan,
  					   enum dma_ctrl_cmd cmd,
  					   unsigned long arg)
  {
  	return chan->device->device_control(chan, cmd, arg);
  }
  
  static inline int dmaengine_slave_config(struct dma_chan *chan,
  					  struct dma_slave_config *config)
  {
  	return dmaengine_device_control(chan, DMA_SLAVE_CONFIG,
  			(unsigned long)config);
  }
  static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_single(
  	struct dma_chan *chan, void *buf, size_t len,
  	enum dma_data_direction dir, unsigned long flags)
  {
  	struct scatterlist sg;
  	sg_init_one(&sg, buf, len);
  
  	return chan->device->device_prep_slave_sg(chan, &sg, 1, dir, flags);
  }
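
  /*
   * Example (editor's sketch): prepare, submit and kick a single-buffer
   * slave transfer.  "buf", "len", "my_done" and "my_ctx" are hypothetical.
   *
   *	struct dma_async_tx_descriptor *desc;
   *
   *	desc = dmaengine_prep_slave_single(chan, buf, len, DMA_TO_DEVICE,
   *	                                   DMA_PREP_INTERRUPT);
   *	if (!desc)
   *		return -ENOMEM;
   *	desc->callback = my_done;
   *	desc->callback_param = my_ctx;
   *	dmaengine_submit(desc);
   *	dma_async_issue_pending(chan);
   */
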
  static inline int dmaengine_terminate_all(struct dma_chan *chan)
  {
  	return dmaengine_device_control(chan, DMA_TERMINATE_ALL, 0);
  }
  
  static inline int dmaengine_pause(struct dma_chan *chan)
  {
  	return dmaengine_device_control(chan, DMA_PAUSE, 0);
  }
  
  static inline int dmaengine_resume(struct dma_chan *chan)
  {
  	return dmaengine_device_control(chan, DMA_RESUME, 0);
  }
  static inline dma_cookie_t dmaengine_submit(struct dma_async_tx_descriptor *desc)
  {
  	return desc->tx_submit(desc);
  }
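
  /*
   * Example (editor's sketch): screen the returned cookie with
   * dma_submit_error() before relying on the transfer.
   *
   *	dma_cookie_t cookie = dmaengine_submit(desc);
   *
   *	if (dma_submit_error(cookie))
   *		return -EIO;
   *	dma_async_issue_pending(chan);
   */
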
  static inline bool dmaengine_check_align(u8 align, size_t off1, size_t off2, size_t len)
  {
  	size_t mask;
  
  	if (!align)
  		return true;
  	mask = (1 << align) - 1;
  	if (mask & (off1 | off2 | len))
  		return false;
  	return true;
  }
  
  static inline bool is_dma_copy_aligned(struct dma_device *dev, size_t off1,
  				       size_t off2, size_t len)
  {
  	return dmaengine_check_align(dev->copy_align, off1, off2, len);
  }
  
  static inline bool is_dma_xor_aligned(struct dma_device *dev, size_t off1,
  				      size_t off2, size_t len)
  {
  	return dmaengine_check_align(dev->xor_align, off1, off2, len);
  }
  
  static inline bool is_dma_pq_aligned(struct dma_device *dev, size_t off1,
  				     size_t off2, size_t len)
  {
  	return dmaengine_check_align(dev->pq_align, off1, off2, len);
  }
  
  static inline bool is_dma_fill_aligned(struct dma_device *dev, size_t off1,
  				       size_t off2, size_t len)
  {
  	return dmaengine_check_align(dev->fill_align, off1, off2, len);
  }
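
  /*
   * Editor's note: the *_align fields are shifts, not byte counts.  For a
   * device with copy_align = 2 (i.e. 4-byte alignment):
   *
   *	is_dma_copy_aligned(dev, 0, 4, 8) is true  (0 | 4 | 8 = 12, 12 & 3 == 0)
   *	is_dma_copy_aligned(dev, 2, 4, 8) is false (2 | 4 | 8 = 14, 14 & 3 != 0)
   */
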
  static inline void
  dma_set_maxpq(struct dma_device *dma, int maxpq, int has_pq_continue)
  {
  	dma->max_pq = maxpq;
  	if (has_pq_continue)
  		dma->max_pq |= DMA_HAS_PQ_CONTINUE;
  }
  
  static inline bool dmaf_continue(enum dma_ctrl_flags flags)
  {
  	return (flags & DMA_PREP_CONTINUE) == DMA_PREP_CONTINUE;
  }
  
  static inline bool dmaf_p_disabled_continue(enum dma_ctrl_flags flags)
  {
  	enum dma_ctrl_flags mask = DMA_PREP_CONTINUE | DMA_PREP_PQ_DISABLE_P;
  
  	return (flags & mask) == mask;
  }
  
  static inline bool dma_dev_has_pq_continue(struct dma_device *dma)
  {
  	return (dma->max_pq & DMA_HAS_PQ_CONTINUE) == DMA_HAS_PQ_CONTINUE;
  }
  static inline unsigned short dma_dev_to_maxpq(struct dma_device *dma)
  {
  	return dma->max_pq & ~DMA_HAS_PQ_CONTINUE;
  }
  
  /* dma_maxpq - reduce maxpq in the face of continued operations
   * @dma - dma device with PQ capability
   * @flags - to check if DMA_PREP_CONTINUE and DMA_PREP_PQ_DISABLE_P are set
   *
   * When an engine does not support native continuation we need 3 extra
   * source slots to reuse P and Q with the following coefficients:
   * 1/ {00} * P : remove P from Q', but use it as a source for P'
   * 2/ {01} * Q : use Q to continue Q' calculation
   * 3/ {00} * Q : subtract Q from P' to cancel (2)
   *
   * In the case where P is disabled we only need 1 extra source:
   * 1/ {01} * Q : use Q to continue Q' calculation
   */
  static inline int dma_maxpq(struct dma_device *dma, enum dma_ctrl_flags flags)
  {
  	if (dma_dev_has_pq_continue(dma) || !dmaf_continue(flags))
  		return dma_dev_to_maxpq(dma);
  	else if (dmaf_p_disabled_continue(flags))
  		return dma_dev_to_maxpq(dma) - 1;
  	else if (dmaf_continue(flags))
  		return dma_dev_to_maxpq(dma) - 3;
  	BUG();
  }
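
  /*
   * Worked example (editor's note): for a device with max_pq = 8 and no
   * DMA_HAS_PQ_CONTINUE, dma_maxpq() returns 8 for a fresh operation,
   * 8 - 1 = 7 for a continuation with P disabled (one reserved slot), and
   * 8 - 3 = 5 for a full continuation (three reserved slots).
   */
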
  /* --- public DMA engine API --- */
  #ifdef CONFIG_DMA_ENGINE
  void dmaengine_get(void);
  void dmaengine_put(void);
  #else
  static inline void dmaengine_get(void)
  {
  }
  static inline void dmaengine_put(void)
  {
  }
  #endif
  #ifdef CONFIG_NET_DMA
  #define net_dmaengine_get()	dmaengine_get()
  #define net_dmaengine_put()	dmaengine_put()
  #else
  static inline void net_dmaengine_get(void)
  {
  }
  static inline void net_dmaengine_put(void)
  {
  }
  #endif
  #ifdef CONFIG_ASYNC_TX_DMA
  #define async_dmaengine_get()	dmaengine_get()
  #define async_dmaengine_put()	dmaengine_put()
  #ifndef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
  #define async_dma_find_channel(type) dma_find_channel(DMA_ASYNC_TX)
  #else
  #define async_dma_find_channel(type) dma_find_channel(type)
  #endif /* CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH */
  #else
  static inline void async_dmaengine_get(void)
  {
  }
  static inline void async_dmaengine_put(void)
  {
  }
  static inline struct dma_chan *
  async_dma_find_channel(enum dma_transaction_type type)
  {
  	return NULL;
  }
  #endif /* CONFIG_ASYNC_TX_DMA */

  dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan,
  	void *dest, void *src, size_t len);
  dma_cookie_t dma_async_memcpy_buf_to_pg(struct dma_chan *chan,
  	struct page *page, unsigned int offset, void *kdata, size_t len);
  dma_cookie_t dma_async_memcpy_pg_to_pg(struct dma_chan *chan,
  	struct page *dest_pg, unsigned int dest_off, struct page *src_pg,
  	unsigned int src_off, size_t len);
  void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
  	struct dma_chan *chan);
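
  /*
   * Example (editor's sketch): opportunistic memcpy offload with a
   * synchronous wait, falling back to memcpy() when no DMA_MEMCPY channel
   * is available.  Assumes the caller holds a dmaengine_get() reference.
   *
   *	struct dma_chan *chan = dma_find_channel(DMA_MEMCPY);
   *
   *	if (chan) {
   *		dma_cookie_t cookie;
   *
   *		cookie = dma_async_memcpy_buf_to_buf(chan, dst, src, len);
   *		dma_async_memcpy_issue_pending(chan);
   *		dma_sync_wait(chan, cookie);
   *	} else {
   *		memcpy(dst, src, len);
   *	}
   */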

  static inline void async_tx_ack(struct dma_async_tx_descriptor *tx)
  {
  	tx->flags |= DMA_CTRL_ACK;
  }
  static inline void async_tx_clear_ack(struct dma_async_tx_descriptor *tx)
  {
  	tx->flags &= ~DMA_CTRL_ACK;
  }
  static inline bool async_tx_test_ack(struct dma_async_tx_descriptor *tx)
  {
  	return (tx->flags & DMA_CTRL_ACK) == DMA_CTRL_ACK;
  }
  #define first_dma_cap(mask) __first_dma_cap(&(mask))
  static inline int __first_dma_cap(const dma_cap_mask_t *srcp)
  {
  	return min_t(int, DMA_TX_TYPE_END,
  		find_first_bit(srcp->bits, DMA_TX_TYPE_END));
  }

  #define next_dma_cap(n, mask) __next_dma_cap((n), &(mask))
  static inline int __next_dma_cap(int n, const dma_cap_mask_t *srcp)
  {
  	return min_t(int, DMA_TX_TYPE_END,
  		find_next_bit(srcp->bits, DMA_TX_TYPE_END, n+1));
  }
  #define dma_cap_set(tx, mask) __dma_cap_set((tx), &(mask))
  static inline void
  __dma_cap_set(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
  {
  	set_bit(tx_type, dstp->bits);
  }

  #define dma_cap_clear(tx, mask) __dma_cap_clear((tx), &(mask))
  static inline void
  __dma_cap_clear(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
  {
  	clear_bit(tx_type, dstp->bits);
  }
  #define dma_cap_zero(mask) __dma_cap_zero(&(mask))
  static inline void __dma_cap_zero(dma_cap_mask_t *dstp)
  {
  	bitmap_zero(dstp->bits, DMA_TX_TYPE_END);
  }
  #define dma_has_cap(tx, mask) __dma_has_cap((tx), &(mask))
  static inline int
  __dma_has_cap(enum dma_transaction_type tx_type, dma_cap_mask_t *srcp)
  {
  	return test_bit(tx_type, srcp->bits);
  }
  #define for_each_dma_cap_mask(cap, mask) \
  	for ((cap) = first_dma_cap(mask);	\
  		(cap) < DMA_TX_TYPE_END;	\
  		(cap) = next_dma_cap((cap), (mask)))
  /**
   * dma_async_issue_pending - flush pending transactions to HW
   * @chan: target DMA channel
   *
   * This allows drivers to push copies to HW in batches,
   * reducing MMIO writes where possible.
   */
  static inline void dma_async_issue_pending(struct dma_chan *chan)
  {
  	chan->device->device_issue_pending(chan);
  }
  #define dma_async_memcpy_issue_pending(chan) dma_async_issue_pending(chan)
  /**
   * dma_async_is_tx_complete - poll for transaction completion
   * @chan: DMA channel
   * @cookie: transaction identifier to check status of
   * @last: returns last completed cookie, can be NULL
   * @used: returns last issued cookie, can be NULL
   *
   * If @last and @used are passed in, upon return they reflect the driver
   * internal state and can be used with dma_async_is_complete() to check
   * the status of multiple cookies without re-checking hardware state.
   */
  static inline enum dma_status dma_async_is_tx_complete(struct dma_chan *chan,
  	dma_cookie_t cookie, dma_cookie_t *last, dma_cookie_t *used)
  {
  	struct dma_tx_state state;
  	enum dma_status status;
  
  	status = chan->device->device_tx_status(chan, cookie, &state);
  	if (last)
  		*last = state.last;
  	if (used)
  		*used = state.used;
  	return status;
  }
  #define dma_async_memcpy_complete(chan, cookie, last, used)\
  	dma_async_is_tx_complete(chan, cookie, last, used)
  /**
   * dma_async_is_complete - test a cookie against chan state
   * @cookie: transaction identifier to test status of
   * @last_complete: last known completed transaction
   * @last_used: last cookie value handed out
   *
   * dma_async_is_complete() is used in dma_async_memcpy_complete()
   * the test logic is separated for lightweight testing of multiple cookies
   */
  static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie,
  			dma_cookie_t last_complete, dma_cookie_t last_used)
  {
  	if (last_complete <= last_used) {
  		if ((cookie <= last_complete) || (cookie > last_used))
  			return DMA_SUCCESS;
  	} else {
  		if ((cookie <= last_complete) && (cookie > last_used))
  			return DMA_SUCCESS;
  	}
  	return DMA_IN_PROGRESS;
  }
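
  /*
   * Example (editor's sketch): test two cookies against one snapshot of
   * the channel state, so the hardware is queried only once.
   *
   *	dma_cookie_t last, used;
   *	bool both_done;
   *
   *	dma_async_memcpy_complete(chan, cookie2, &last, &used);
   *	both_done =
   *		dma_async_is_complete(cookie1, last, used) == DMA_SUCCESS &&
   *		dma_async_is_complete(cookie2, last, used) == DMA_SUCCESS;
   */
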
  static inline void
  dma_set_tx_state(struct dma_tx_state *st, dma_cookie_t last, dma_cookie_t used, u32 residue)
  {
  	if (st) {
  		st->last = last;
  		st->used = used;
  		st->residue = residue;
  	}
  }
  enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie);
  #ifdef CONFIG_DMA_ENGINE
  enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx);
  void dma_issue_pending_all(void);
  struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, void *fn_param);
  void dma_release_channel(struct dma_chan *chan);
  #else
  static inline enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
  {
  	return DMA_SUCCESS;
  }
  static inline void dma_issue_pending_all(void)
  {
  }
  static inline struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask,
  					      dma_filter_fn fn, void *fn_param)
  {
  	return NULL;
  }
  static inline void dma_release_channel(struct dma_chan *chan)
  {
  }
  #endif
  
  /* --- DMA device --- */
  
  int dma_async_device_register(struct dma_device *device);
  void dma_async_device_unregister(struct dma_device *device);
  void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
  struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type);
  #define dma_request_channel(mask, x, y) __dma_request_channel(&(mask), x, y)
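
  /*
   * Example (editor's sketch): request a private slave channel using the
   * hypothetical my_filter()/my_dmac_dev pair from the dma_filter_fn
   * example above.
   *
   *	dma_cap_mask_t mask;
   *	struct dma_chan *chan;
   *
   *	dma_cap_zero(mask);
   *	dma_cap_set(DMA_SLAVE, mask);
   *	chan = dma_request_channel(mask, my_filter, my_dmac_dev);
   *	if (!chan)
   *		return -ENODEV;
   *
   * and, when the client is done with the channel:
   *
   *	dma_release_channel(chan);
   */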

  /* --- Helper iov-locking functions --- */
  
  struct dma_page_list {
  	char __user *base_address;
  	int nr_pages;
  	struct page **pages;
  };
  
  struct dma_pinned_list {
  	int nr_iovecs;
  	struct dma_page_list page_list[0];
  };
  
  struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len);
  void dma_unpin_iovec_pages(struct dma_pinned_list* pinned_list);
  
  dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
  	struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len);
  dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
  	struct dma_pinned_list *pinned_list, struct page *page,
  	unsigned int offset, size_t len);
  #endif /* DMAENGINE_H */