include/linux/dmaengine.h

  /*
   * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
   *
   * This program is free software; you can redistribute it and/or modify it
   * under the terms of the GNU General Public License as published by the Free
   * Software Foundation; either version 2 of the License, or (at your option)
   * any later version.
   *
   * This program is distributed in the hope that it will be useful, but WITHOUT
   * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   * more details.
   *
   * You should have received a copy of the GNU General Public License along with
   * this program; if not, write to the Free Software Foundation, Inc., 59
   * Temple Place - Suite 330, Boston, MA  02111-1307, USA.
   *
   * The full GNU General Public License is included in this distribution in the
   * file called COPYING.
   */
  #ifndef LINUX_DMAENGINE_H
  #define LINUX_DMAENGINE_H

  #include <linux/device.h>
  #include <linux/err.h>
  #include <linux/uio.h>
  #include <linux/bug.h>
  #include <linux/scatterlist.h>
  #include <linux/bitmap.h>
  #include <linux/types.h>
  #include <asm/page.h>

  /**
   * typedef dma_cookie_t - an opaque DMA cookie
   *
   * if dma_cookie_t is >0 it's a DMA request cookie, <0 it's an error code
   */
  typedef s32 dma_cookie_t;
  #define DMA_MIN_COOKIE	1
  #define DMA_MAX_COOKIE	INT_MAX

  static inline int dma_submit_error(dma_cookie_t cookie)
  {
  	return cookie < 0 ? cookie : 0;
  }
  
  /**
   * enum dma_status - DMA transaction status
   * @DMA_COMPLETE: transaction completed
   * @DMA_IN_PROGRESS: transaction not yet processed
   * @DMA_PAUSED: transaction is paused
   * @DMA_ERROR: transaction failed
   */
  enum dma_status {
  	DMA_COMPLETE,
  	DMA_IN_PROGRESS,
  	DMA_PAUSED,
  	DMA_ERROR,
  };
  
  /**
   * enum dma_transaction_type - DMA transaction types/indexes
   *
   * Note: The DMA_ASYNC_TX capability is not to be set by drivers.  It is
   * automatically set as dma devices are registered.
   */
  enum dma_transaction_type {
  	DMA_MEMCPY,
  	DMA_XOR,
  	DMA_PQ,
  	DMA_XOR_VAL,
  	DMA_PQ_VAL,
  	DMA_INTERRUPT,
  	DMA_SG,
  	DMA_PRIVATE,
  	DMA_ASYNC_TX,
  	DMA_SLAVE,
  	DMA_CYCLIC,
  	DMA_INTERLEAVE,
  /* last transaction type for creation of the capabilities mask */
  	DMA_TX_TYPE_END,
  };

  /**
   * enum dma_transfer_direction - dma transfer mode and direction indicator
   * @DMA_MEM_TO_MEM: Async/Memcpy mode
   * @DMA_MEM_TO_DEV: Slave mode & From Memory to Device
   * @DMA_DEV_TO_MEM: Slave mode & From Device to Memory
   * @DMA_DEV_TO_DEV: Slave mode & From Device to Device
   */
  enum dma_transfer_direction {
  	DMA_MEM_TO_MEM,
  	DMA_MEM_TO_DEV,
  	DMA_DEV_TO_MEM,
  	DMA_DEV_TO_DEV,
  	DMA_TRANS_NONE,
  };
  
  /**
   * Interleaved Transfer Request
   * ----------------------------
   * A chunk is a collection of contiguous bytes to be transferred.
   * The gap (in bytes) between two chunks is called the inter-chunk-gap (ICG).
   * ICGs may or may not change between chunks.
   * A FRAME is the smallest series of contiguous {chunk,icg} pairs,
   *  that when repeated an integral number of times, specifies the transfer.
   * A transfer template is specification of a Frame, the number of times
   *  it is to be repeated and other per-transfer attributes.
   *
   * Practically, a client driver would have ready a template for each
   *  type of transfer it is going to need during its lifetime and
   *  set only 'src_start' and 'dst_start' before submitting the requests.
   *
   *
   *  |      Frame-1        |       Frame-2       | ~ |       Frame-'numf'  |
   *  |====....==.===...=...|====....==.===...=...| ~ |====....==.===...=...|
   *
   *    ==  Chunk size
   *    ... ICG
   */
  
  /**
   * struct data_chunk - Element of scatter-gather list that makes a frame.
   * @size: Number of bytes to read from source.
   *	  size_dst := fn(op, size_src), so doesn't mean much for destination.
   * @icg: Number of bytes to jump after last src/dst address of this
   *	 chunk and before first src/dst address for next chunk.
   *	 Ignored for dst(assumed 0), if dst_inc is true and dst_sgl is false.
   *	 Ignored for src(assumed 0), if src_inc is true and src_sgl is false.
   */
  struct data_chunk {
  	size_t size;
  	size_t icg;
  };
  
  /**
   * struct dma_interleaved_template - Template to convey DMAC the transfer pattern
   *	 and attributes.
   * @src_start: Bus address of source for the first chunk.
   * @dst_start: Bus address of destination for the first chunk.
   * @dir: Specifies the type of Source and Destination.
   * @src_inc: If the source address increments after reading from it.
   * @dst_inc: If the destination address increments after writing to it.
   * @src_sgl: If the 'icg' of sgl[] applies to Source (scattered read).
   *		Otherwise, source is read contiguously (icg ignored).
   *		Ignored if src_inc is false.
   * @dst_sgl: If the 'icg' of sgl[] applies to Destination (scattered write).
   *		Otherwise, destination is filled contiguously (icg ignored).
   *		Ignored if dst_inc is false.
   * @numf: Number of frames in this template.
   * @frame_size: Number of chunks in a frame, i.e. the size of sgl[].
   * @sgl: Array of {chunk,icg} pairs that make up a frame.
   */
  struct dma_interleaved_template {
  	dma_addr_t src_start;
  	dma_addr_t dst_start;
  	enum dma_transfer_direction dir;
  	bool src_inc;
  	bool dst_inc;
  	bool src_sgl;
  	bool dst_sgl;
  	size_t numf;
  	size_t frame_size;
  	struct data_chunk sgl[0];
  };
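
  /*
   * Illustrative sketch (not part of the original header): build a
   * one-frame template that copies 'numf' rows of 'row_len' bytes from a
   * contiguous source into a destination that skips 'dst_icg' bytes
   * between rows, then hand it to dmaengine_prep_interleaved_dma()
   * (defined further down in this file).  Assumes <linux/slab.h> for
   * kzalloc()/kfree(); most drivers copy the template during prep, so
   * freeing it right away is a simplification.
   */
  static struct dma_async_tx_descriptor *
  example_prep_2d_copy(struct dma_chan *chan, dma_addr_t src, dma_addr_t dst,
  		     size_t row_len, size_t dst_icg, size_t numf)
  {
  	struct dma_interleaved_template *xt;
  	struct dma_async_tx_descriptor *desc;

  	/* one chunk per frame, so frame_size == 1 */
  	xt = kzalloc(sizeof(*xt) + sizeof(struct data_chunk), GFP_KERNEL);
  	if (!xt)
  		return NULL;

  	xt->src_start = src;
  	xt->dst_start = dst;
  	xt->dir = DMA_MEM_TO_MEM;
  	xt->src_inc = true;
  	xt->dst_inc = true;
  	xt->src_sgl = false;		/* source is read contiguously */
  	xt->dst_sgl = true;		/* destination honours the ICG */
  	xt->numf = numf;
  	xt->frame_size = 1;
  	xt->sgl[0].size = row_len;
  	xt->sgl[0].icg = dst_icg;

  	desc = dmaengine_prep_interleaved_dma(chan, xt, DMA_PREP_INTERRUPT);
  	kfree(xt);
  	return desc;
  }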
  
  /**
   * enum dma_ctrl_flags - DMA flags to augment operation preparation,
   *  control completion, and communicate status.
   * @DMA_PREP_INTERRUPT - trigger an interrupt (callback) upon completion of
   *  this transaction
   * @DMA_CTRL_ACK - if clear, the descriptor cannot be reused until the client
   *  acknowledges receipt, i.e. has a chance to establish any dependency
   *  chains
   * @DMA_PREP_PQ_DISABLE_P - prevent generation of P while generating Q
   * @DMA_PREP_PQ_DISABLE_Q - prevent generation of Q while generating P
   * @DMA_PREP_CONTINUE - indicate to a driver that it is reusing buffers as
   *  sources that were the result of a previous operation, in the case of a PQ
   *  operation it continues the calculation with new sources
   * @DMA_PREP_FENCE - tell the driver that subsequent operations depend
   *  on the result of this operation
   */
  enum dma_ctrl_flags {
  	DMA_PREP_INTERRUPT = (1 << 0),
  	DMA_CTRL_ACK = (1 << 1),
  	DMA_PREP_PQ_DISABLE_P = (1 << 2),
  	DMA_PREP_PQ_DISABLE_Q = (1 << 3),
  	DMA_PREP_CONTINUE = (1 << 4),
  	DMA_PREP_FENCE = (1 << 5),
  };
  
  /**
   * enum dma_ctrl_cmd - DMA operations that can optionally be exercised
   * on a running channel.
   * @DMA_TERMINATE_ALL: terminate all ongoing transfers
   * @DMA_PAUSE: pause ongoing transfers
   * @DMA_RESUME: resume paused transfer
   * @DMA_SLAVE_CONFIG: this command is only implemented by DMA controllers
   * that need to runtime reconfigure the slave channels (as opposed to passing
   * configuration data in statically from the platform). An additional
   * argument of struct dma_slave_config must be passed in with this
   * command.
   * @FSLDMA_EXTERNAL_START: this command will put the Freescale DMA controller
   * into external start mode.
   */
  enum dma_ctrl_cmd {
  	DMA_TERMINATE_ALL,
  	DMA_PAUSE,
  	DMA_RESUME,
  	DMA_SLAVE_CONFIG,
  	FSLDMA_EXTERNAL_START,
  };
  
  /**
   * enum sum_check_bits - bit positions used in enum sum_check_flags
   */
  enum sum_check_bits {
  	SUM_CHECK_P = 0,
  	SUM_CHECK_Q = 1,
  };
  
  /**
   * enum sum_check_flags - result of async_{xor,pq}_zero_sum operations
   * @SUM_CHECK_P_RESULT - 1 if xor zero sum error, 0 otherwise
   * @SUM_CHECK_Q_RESULT - 1 if reed-solomon zero sum error, 0 otherwise
   */
  enum sum_check_flags {
  	SUM_CHECK_P_RESULT = (1 << SUM_CHECK_P),
  	SUM_CHECK_Q_RESULT = (1 << SUM_CHECK_Q),
  };
  
  
  /**
   * dma_cap_mask_t - capabilities bitmap modeled after cpumask_t.
   * See linux/cpumask.h
   */
  typedef struct { DECLARE_BITMAP(bits, DMA_TX_TYPE_END); } dma_cap_mask_t;
  
  /**
   * struct dma_chan_percpu - the per-CPU part of struct dma_chan
   * @memcpy_count: transaction counter
   * @bytes_transferred: byte counter
   */
  
  struct dma_chan_percpu {
  	/* stats */
  	unsigned long memcpy_count;
  	unsigned long bytes_transferred;
  };
  
  /**
   * struct dma_chan - devices supply DMA channels, clients use them
   * @device: ptr to the dma device who supplies this channel, always !%NULL
   * @cookie: last cookie value returned to client
   * @completed_cookie: last completed cookie for this channel
   * @chan_id: channel ID for sysfs
   * @dev: class device for sysfs
   * @device_node: used to add this to the device chan list
   * @local: per-cpu pointer to a struct dma_chan_percpu
   * @client_count: how many clients are using this channel
   * @table_count: number of appearances in the mem-to-mem allocation table
   * @private: private data for certain client-channel associations
   */
  struct dma_chan {
  	struct dma_device *device;
  	dma_cookie_t cookie;
  	dma_cookie_t completed_cookie;
  
  	/* sysfs */
  	int chan_id;
  	struct dma_chan_dev *dev;

  	struct list_head device_node;
  	struct dma_chan_percpu __percpu *local;
  	int client_count;
  	int table_count;
  	void *private;
  };
  /**
   * struct dma_chan_dev - relate sysfs device node to backing channel device
   * @chan: driver channel device
   * @device: sysfs device
   * @dev_id: parent dma_device dev_id
   * @idr_ref: reference count to gate release of dma_device dev_id
   */
  struct dma_chan_dev {
  	struct dma_chan *chan;
  	struct device device;
  	int dev_id;
  	atomic_t *idr_ref;
  };
  /**
   * enum dma_slave_buswidth - defines bus width of the DMA slave
   * device, source or target buses
   */
  enum dma_slave_buswidth {
  	DMA_SLAVE_BUSWIDTH_UNDEFINED = 0,
  	DMA_SLAVE_BUSWIDTH_1_BYTE = 1,
  	DMA_SLAVE_BUSWIDTH_2_BYTES = 2,
  	DMA_SLAVE_BUSWIDTH_4_BYTES = 4,
  	DMA_SLAVE_BUSWIDTH_8_BYTES = 8,
  };
  
  /**
   * struct dma_slave_config - dma slave channel runtime config
   * @direction: whether the data shall go in or out on this slave
   * channel, right now. DMA_MEM_TO_DEV and DMA_DEV_TO_MEM are
   * legal values.
   * @src_addr: this is the physical address where DMA slave data
   * should be read (RX), if the source is memory this argument is
   * ignored.
   * @dst_addr: this is the physical address where DMA slave data
   * should be written (TX), if the destination is memory this argument
   * is ignored.
   * @src_addr_width: this is the width in bytes of the source (RX)
   * register where DMA data shall be read. If the source
   * is memory this may be ignored depending on architecture.
   * Legal values: 1, 2, 4, 8.
   * @dst_addr_width: same as src_addr_width but for destination
   * target (TX) mutatis mutandis.
   * @src_maxburst: the maximum number of words (note: words, as in
   * units of the src_addr_width member, not bytes) that can be sent
   * in one burst to the device. Typically something like half the
   * FIFO depth on I/O peripherals so you don't overflow it. This
   * may or may not be applicable on memory sources.
   * @dst_maxburst: same as src_maxburst but for destination target
   * mutatis mutandis.
   * @device_fc: Flow Controller Settings. Only valid for slave channels. Fill
   * with 'true' if peripheral should be flow controller. Direction will be
   * selected at Runtime.
   * @slave_id: Slave requester id. Only valid for slave channels. The DMA
   * slave peripheral has a unique id as DMA requester, which needs to be
   * passed in via the slave config.
   *
   * This struct is passed in as configuration data to a DMA engine
   * in order to set up a certain channel for DMA transport at runtime.
   * The DMA device/engine has to provide support for an additional
   * command in the channel config interface, DMA_SLAVE_CONFIG
   * and this struct will then be passed in as an argument to the
   * DMA engine device_control() function.
   *
   * The rationale for adding configuration information to this struct is as
   * follows: if it is likely that more than one DMA slave controller in
   * the world will support the configuration option, then make it generic.
   * If not: if it is fixed so that it can be passed in statically from the
   * platform data, then prefer to do that.
   */
  struct dma_slave_config {
  	enum dma_transfer_direction direction;
  	dma_addr_t src_addr;
  	dma_addr_t dst_addr;
  	enum dma_slave_buswidth src_addr_width;
  	enum dma_slave_buswidth dst_addr_width;
  	u32 src_maxburst;
  	u32 dst_maxburst;
  	bool device_fc;
  	unsigned int slave_id;
  };
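
  /*
   * Illustrative sketch (not part of the original header): runtime
   * configuration of a slave channel for memory -> device transfers into
   * a peripheral TX FIFO.  'fifo_addr' and the burst size are made-up
   * parameters here; a real driver takes them from its resources.  Uses
   * dmaengine_slave_config(), defined further down in this file.
   */
  static int example_config_tx_channel(struct dma_chan *chan,
  				     dma_addr_t fifo_addr)
  {
  	struct dma_slave_config cfg = {
  		.direction = DMA_MEM_TO_DEV,
  		.dst_addr = fifo_addr,
  		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
  		.dst_maxburst = 8,	/* e.g. half of a 16-word FIFO */
  	};

  	return dmaengine_slave_config(chan, &cfg);
  }
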
  /**
   * enum dma_residue_granularity - Granularity of the reported transfer residue
   * @DMA_RESIDUE_GRANULARITY_DESCRIPTOR: Residue reporting is not supported. The
   *  DMA channel is only able to tell whether a descriptor has been completed or
   *  not, which means residue reporting is not supported by this channel. The
   *  residue field of struct dma_tx_state will always be 0.
   * @DMA_RESIDUE_GRANULARITY_SEGMENT: Residue is updated after each successfully
   *  completed segment of the transfer (For cyclic transfers this is after each
   *  period). This is typically implemented by having the hardware generate an
   *  interrupt after each transferred segment and then the driver updates the
   *  outstanding residue by the size of the segment. Another possibility is if
   *  the hardware supports scatter-gather and the segment descriptor has a field
   *  which gets set after the segment has been completed. The driver then counts
   *  the number of segments without the flag set to compute the residue.
   * @DMA_RESIDUE_GRANULARITY_BURST: Residue is updated after each transferred
   *  burst. This is typically only supported if the hardware has a progress
   *  register of some sort (E.g. a register with the current read/write address
   *  or a register with the amount of bursts/beats/bytes that have been
   *  transferred or still need to be transferred).
   */
  enum dma_residue_granularity {
  	DMA_RESIDUE_GRANULARITY_DESCRIPTOR = 0,
  	DMA_RESIDUE_GRANULARITY_SEGMENT = 1,
  	DMA_RESIDUE_GRANULARITY_BURST = 2,
  };
  /* struct dma_slave_caps - expose capabilities of a slave channel only
   *
   * @src_addr_widths: bit mask of src addr widths the channel supports
   * @dstn_addr_widths: bit mask of dstn addr widths the channel supports
   * @directions: bit mask of slave direction the channel supported
   * 	since enum dma_transfer_direction is not defined as bits for each
   * 	type of direction, the dma controller should fill (1 << <TYPE>) and the
   * 	same encoding should be used when checking the mask
   * @cmd_pause: true, if pause and thereby resume is supported
   * @cmd_terminate: true, if terminate cmd is supported
   * @residue_granularity: granularity of the reported transfer residue
   */
  struct dma_slave_caps {
  	u32 src_addr_widths;
  	u32 dstn_addr_widths;
  	u32 directions;
  	bool cmd_pause;
  	bool cmd_terminate;
  	enum dma_residue_granularity residue_granularity;
  };
  static inline const char *dma_chan_name(struct dma_chan *chan)
  {
  	return dev_name(&chan->dev->device);
  }

  void dma_chan_cleanup(struct kref *kref);
  /**
   * typedef dma_filter_fn - callback filter for dma_request_channel
   * @chan: channel to be reviewed
   * @filter_param: opaque parameter passed through dma_request_channel
   *
   * When this optional parameter is specified in a call to dma_request_channel a
   * suitable channel is passed to this routine for further dispositioning before
   * being returned.  Where 'suitable' indicates a non-busy channel that
   * satisfies the given capability mask.  It returns 'true' to indicate that the
   * channel is suitable.
   */
  typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param);
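
  /*
   * Illustrative sketch (not part of the original header): a filter that
   * accepts only channels provided by a specific DMA controller device,
   * passed through the opaque filter_param of dma_request_channel().
   */
  static bool example_filter(struct dma_chan *chan, void *filter_param)
  {
  	struct device *wanted = filter_param;

  	return chan->device->dev == wanted;
  }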

  typedef void (*dma_async_tx_callback)(void *dma_async_param);
  
  struct dmaengine_unmap_data {
  	u8 to_cnt;
  	u8 from_cnt;
  	u8 bidi_cnt;
  	struct device *dev;
  	struct kref kref;
  	size_t len;
  	dma_addr_t addr[0];
  };
  /**
   * struct dma_async_tx_descriptor - async transaction descriptor
   * ---dma generic offload fields---
   * @cookie: tracking cookie for this transaction, set to -EBUSY if
   *	this tx is sitting on a dependency list
   * @flags: flags to augment operation preparation, control completion, and
   * 	communicate status
   * @phys: physical address of the descriptor
   * @chan: target channel for this operation
   * @tx_submit: set the prepared descriptor(s) to be executed by the engine
   * @callback: routine to call after this operation is complete
   * @callback_param: general parameter to pass to the callback routine
   * ---async_tx api specific fields---
   * @next: at completion submit this descriptor
   * @parent: pointer to the next level up in the dependency chain
   * @lock: protect the parent and next pointers
   */
  struct dma_async_tx_descriptor {
  	dma_cookie_t cookie;
  	enum dma_ctrl_flags flags; /* not a 'long' to pack with cookie */
  	dma_addr_t phys;
  	struct dma_chan *chan;
  	dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx);
  	dma_async_tx_callback callback;
  	void *callback_param;
  	struct dmaengine_unmap_data *unmap;
  #ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
  	struct dma_async_tx_descriptor *next;
  	struct dma_async_tx_descriptor *parent;
  	spinlock_t lock;
  #endif
  };
  #ifdef CONFIG_DMA_ENGINE
  static inline void dma_set_unmap(struct dma_async_tx_descriptor *tx,
  				 struct dmaengine_unmap_data *unmap)
  {
  	kref_get(&unmap->kref);
  	tx->unmap = unmap;
  }
  struct dmaengine_unmap_data *
  dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags);
  void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap);
  #else
  static inline void dma_set_unmap(struct dma_async_tx_descriptor *tx,
  				 struct dmaengine_unmap_data *unmap)
  {
  }
  static inline struct dmaengine_unmap_data *
  dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
  {
  	return NULL;
  }
  static inline void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap)
  {
  }
  #endif

  static inline void dma_descriptor_unmap(struct dma_async_tx_descriptor *tx)
  {
  	if (tx->unmap) {
  		dmaengine_unmap_put(tx->unmap);
  		tx->unmap = NULL;
  	}
  }
  #ifndef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
  static inline void txd_lock(struct dma_async_tx_descriptor *txd)
  {
  }
  static inline void txd_unlock(struct dma_async_tx_descriptor *txd)
  {
  }
  static inline void txd_chain(struct dma_async_tx_descriptor *txd, struct dma_async_tx_descriptor *next)
  {
  	BUG();
  }
  static inline void txd_clear_parent(struct dma_async_tx_descriptor *txd)
  {
  }
  static inline void txd_clear_next(struct dma_async_tx_descriptor *txd)
  {
  }
  static inline struct dma_async_tx_descriptor *txd_next(struct dma_async_tx_descriptor *txd)
  {
  	return NULL;
  }
  static inline struct dma_async_tx_descriptor *txd_parent(struct dma_async_tx_descriptor *txd)
  {
  	return NULL;
  }
  
  #else
  static inline void txd_lock(struct dma_async_tx_descriptor *txd)
  {
  	spin_lock_bh(&txd->lock);
  }
  static inline void txd_unlock(struct dma_async_tx_descriptor *txd)
  {
  	spin_unlock_bh(&txd->lock);
  }
  static inline void txd_chain(struct dma_async_tx_descriptor *txd, struct dma_async_tx_descriptor *next)
  {
  	txd->next = next;
  	next->parent = txd;
  }
  static inline void txd_clear_parent(struct dma_async_tx_descriptor *txd)
  {
  	txd->parent = NULL;
  }
  static inline void txd_clear_next(struct dma_async_tx_descriptor *txd)
  {
  	txd->next = NULL;
  }
  static inline struct dma_async_tx_descriptor *txd_parent(struct dma_async_tx_descriptor *txd)
  {
  	return txd->parent;
  }
  static inline struct dma_async_tx_descriptor *txd_next(struct dma_async_tx_descriptor *txd)
  {
  	return txd->next;
  }
  #endif
  /**
   * struct dma_tx_state - filled in to report the status of
   * a transfer.
   * @last: last completed DMA cookie
   * @used: last issued DMA cookie (i.e. the one in progress)
   * @residue: the remaining number of bytes left to transmit
   *	on the selected transfer for states DMA_IN_PROGRESS and
   *	DMA_PAUSED if this is implemented in the driver, else 0
   */
  struct dma_tx_state {
  	dma_cookie_t last;
  	dma_cookie_t used;
  	u32 residue;
  };
  
  /**
   * struct dma_device - info on the entity supplying DMA services
   * @chancnt: how many DMA channels are supported
   * @privatecnt: how many DMA channels are requested by dma_request_channel
   * @channels: the list of struct dma_chan
   * @global_node: list_head for global dma_device_list
   * @cap_mask: one or more dma_capability flags
   * @max_xor: maximum number of xor sources, 0 if no capability
   * @max_pq: maximum number of PQ sources and PQ-continue capability
   * @copy_align: alignment shift for memcpy operations
   * @xor_align: alignment shift for xor operations
   * @pq_align: alignment shift for pq operations
   * @fill_align: alignment shift for memset operations
   * @dev_id: unique device ID
   * @dev: struct device reference for dma mapping api
   * @device_alloc_chan_resources: allocate resources and return the
   *	number of allocated descriptors
   * @device_free_chan_resources: release DMA channel's resources
   * @device_prep_dma_memcpy: prepares a memcpy operation
   * @device_prep_dma_xor: prepares a xor operation
   * @device_prep_dma_xor_val: prepares a xor validation operation
   * @device_prep_dma_pq: prepares a pq operation
   * @device_prep_dma_pq_val: prepares a pqzero_sum operation
   * @device_prep_dma_interrupt: prepares an end of chain interrupt operation
   * @device_prep_slave_sg: prepares a slave dma operation
   * @device_prep_dma_cyclic: prepare a cyclic dma operation suitable for audio.
   *	The function takes a buffer of size buf_len. The callback function will
   *	be called after period_len bytes have been transferred.
   * @device_prep_interleaved_dma: Transfer expression in a generic way.
   * @device_control: manipulate all pending operations on a channel, returns
   *	zero or error code
   * @device_tx_status: poll for transaction completion, the optional
   *	txstate parameter can be supplied with a pointer to get a
   *	struct with auxiliary transfer status information, otherwise the call
   *	will just return a simple status code
   * @device_issue_pending: push pending transactions to hardware
   * @device_slave_caps: return the slave channel capabilities
   */
  struct dma_device {
  
  	unsigned int chancnt;
  	unsigned int privatecnt;
  	struct list_head channels;
  	struct list_head global_node;
  	dma_cap_mask_t  cap_mask;
  	unsigned short max_xor;
  	unsigned short max_pq;
  	u8 copy_align;
  	u8 xor_align;
  	u8 pq_align;
  	u8 fill_align;
  	#define DMA_HAS_PQ_CONTINUE (1 << 15)

  	int dev_id;
  	struct device *dev;

  	int (*device_alloc_chan_resources)(struct dma_chan *chan);
  	void (*device_free_chan_resources)(struct dma_chan *chan);
  
  	struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)(
  		struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
  		size_t len, unsigned long flags);
  	struct dma_async_tx_descriptor *(*device_prep_dma_xor)(
  		struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
  		unsigned int src_cnt, size_t len, unsigned long flags);
  	struct dma_async_tx_descriptor *(*device_prep_dma_xor_val)(
  		struct dma_chan *chan, dma_addr_t *src,	unsigned int src_cnt,
  		size_t len, enum sum_check_flags *result, unsigned long flags);
  	struct dma_async_tx_descriptor *(*device_prep_dma_pq)(
  		struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
  		unsigned int src_cnt, const unsigned char *scf,
  		size_t len, unsigned long flags);
  	struct dma_async_tx_descriptor *(*device_prep_dma_pq_val)(
  		struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
  		unsigned int src_cnt, const unsigned char *scf, size_t len,
  		enum sum_check_flags *pqres, unsigned long flags);
  	struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)(
  		struct dma_chan *chan, unsigned long flags);
  	struct dma_async_tx_descriptor *(*device_prep_dma_sg)(
  		struct dma_chan *chan,
  		struct scatterlist *dst_sg, unsigned int dst_nents,
  		struct scatterlist *src_sg, unsigned int src_nents,
  		unsigned long flags);

  	struct dma_async_tx_descriptor *(*device_prep_slave_sg)(
  		struct dma_chan *chan, struct scatterlist *sgl,
  		unsigned int sg_len, enum dma_transfer_direction direction,
  		unsigned long flags, void *context);
  	struct dma_async_tx_descriptor *(*device_prep_dma_cyclic)(
  		struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
  		size_t period_len, enum dma_transfer_direction direction,
  		unsigned long flags, void *context);
  	struct dma_async_tx_descriptor *(*device_prep_interleaved_dma)(
  		struct dma_chan *chan, struct dma_interleaved_template *xt,
  		unsigned long flags);
  	int (*device_control)(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
  		unsigned long arg);

  	enum dma_status (*device_tx_status)(struct dma_chan *chan,
  					    dma_cookie_t cookie,
  					    struct dma_tx_state *txstate);
  	void (*device_issue_pending)(struct dma_chan *chan);
  	int (*device_slave_caps)(struct dma_chan *chan, struct dma_slave_caps *caps);
  };
  static inline int dmaengine_device_control(struct dma_chan *chan,
  					   enum dma_ctrl_cmd cmd,
  					   unsigned long arg)
  {
  	if (chan->device->device_control)
  		return chan->device->device_control(chan, cmd, arg);
  
  	return -ENOSYS;
  }
  
  static inline int dmaengine_slave_config(struct dma_chan *chan,
  					  struct dma_slave_config *config)
  {
  	return dmaengine_device_control(chan, DMA_SLAVE_CONFIG,
  			(unsigned long)config);
  }
  static inline bool is_slave_direction(enum dma_transfer_direction direction)
  {
  	return (direction == DMA_MEM_TO_DEV) || (direction == DMA_DEV_TO_MEM);
  }
  static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_single(
  	struct dma_chan *chan, dma_addr_t buf, size_t len,
  	enum dma_transfer_direction dir, unsigned long flags)
  {
  	struct scatterlist sg;
  	sg_init_table(&sg, 1);
  	sg_dma_address(&sg) = buf;
  	sg_dma_len(&sg) = len;

  	return chan->device->device_prep_slave_sg(chan, &sg, 1,
  						  dir, flags, NULL);
  }
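
  /*
   * Illustrative sketch (not part of the original header): the usual
   * slave transfer sequence - prepare a descriptor for one DMA-mapped
   * buffer, attach a completion callback, submit it, then kick the
   * engine.  'done_cb' and 'cb_data' are caller-supplied placeholders.
   */
  static int example_start_tx(struct dma_chan *chan, dma_addr_t buf,
  			    size_t len, dma_async_tx_callback done_cb,
  			    void *cb_data)
  {
  	struct dma_async_tx_descriptor *desc;
  	dma_cookie_t cookie;

  	desc = dmaengine_prep_slave_single(chan, buf, len, DMA_MEM_TO_DEV,
  					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
  	if (!desc)
  		return -ENOMEM;

  	desc->callback = done_cb;
  	desc->callback_param = cb_data;

  	cookie = dmaengine_submit(desc);
  	if (dma_submit_error(cookie))
  		return dma_submit_error(cookie);

  	dma_async_issue_pending(chan);
  	return 0;
  }
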
  static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_sg(
  	struct dma_chan *chan, struct scatterlist *sgl,	unsigned int sg_len,
  	enum dma_transfer_direction dir, unsigned long flags)
  {
  	return chan->device->device_prep_slave_sg(chan, sgl, sg_len,
  						  dir, flags, NULL);
  }
  #ifdef CONFIG_RAPIDIO_DMA_ENGINE
  struct rio_dma_ext;
  static inline struct dma_async_tx_descriptor *dmaengine_prep_rio_sg(
  	struct dma_chan *chan, struct scatterlist *sgl,	unsigned int sg_len,
  	enum dma_transfer_direction dir, unsigned long flags,
  	struct rio_dma_ext *rio_ext)
  {
  	return chan->device->device_prep_slave_sg(chan, sgl, sg_len,
  						  dir, flags, rio_ext);
  }
  #endif
  static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_cyclic(
  		struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
  		size_t period_len, enum dma_transfer_direction dir,
  		unsigned long flags)
  {
  	return chan->device->device_prep_dma_cyclic(chan, buf_addr, buf_len,
  						period_len, dir, flags, NULL);
  }
  
  static inline struct dma_async_tx_descriptor *dmaengine_prep_interleaved_dma(
  		struct dma_chan *chan, struct dma_interleaved_template *xt,
  		unsigned long flags)
  {
  	return chan->device->device_prep_interleaved_dma(chan, xt, flags);
  }
  static inline int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
  {
  	if (!chan || !caps)
  		return -EINVAL;
  
  	/* check if the channel supports slave transactions */
  	if (!test_bit(DMA_SLAVE, chan->device->cap_mask.bits))
  		return -ENXIO;
  
  	if (chan->device->device_slave_caps)
  		return chan->device->device_slave_caps(chan, caps);
  
  	return -ENXIO;
  }
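
  /*
   * Illustrative sketch (not part of the original header): use
   * dma_get_slave_caps() to decide whether transfers on a channel can be
   * paused and resumed before relying on dmaengine_pause() below.
   */
  static bool example_channel_can_pause(struct dma_chan *chan)
  {
  	struct dma_slave_caps caps;

  	if (dma_get_slave_caps(chan, &caps))
  		return false;

  	return caps.cmd_pause;
  }
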
  static inline int dmaengine_terminate_all(struct dma_chan *chan)
  {
  	return dmaengine_device_control(chan, DMA_TERMINATE_ALL, 0);
  }
  
  static inline int dmaengine_pause(struct dma_chan *chan)
  {
  	return dmaengine_device_control(chan, DMA_PAUSE, 0);
  }
  
  static inline int dmaengine_resume(struct dma_chan *chan)
  {
  	return dmaengine_device_control(chan, DMA_RESUME, 0);
  }
  static inline enum dma_status dmaengine_tx_status(struct dma_chan *chan,
  	dma_cookie_t cookie, struct dma_tx_state *state)
  {
  	return chan->device->device_tx_status(chan, cookie, state);
  }
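
  /*
   * Illustrative sketch (not part of the original header): poll how many
   * bytes of a submitted transfer are still outstanding.  Only meaningful
   * when the driver fills in dma_tx_state; see enum
   * dma_residue_granularity above for how accurate the value can be.
   */
  static u32 example_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
  {
  	struct dma_tx_state state = { };

  	dmaengine_tx_status(chan, cookie, &state);
  	return state.residue;
  }
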
  static inline dma_cookie_t dmaengine_submit(struct dma_async_tx_descriptor *desc)
  {
  	return desc->tx_submit(desc);
  }
  static inline bool dmaengine_check_align(u8 align, size_t off1, size_t off2, size_t len)
  {
  	size_t mask;
  
  	if (!align)
  		return true;
  	mask = (1 << align) - 1;
  	if (mask & (off1 | off2 | len))
  		return false;
  	return true;
  }
  
  static inline bool is_dma_copy_aligned(struct dma_device *dev, size_t off1,
  				       size_t off2, size_t len)
  {
  	return dmaengine_check_align(dev->copy_align, off1, off2, len);
  }
  
  static inline bool is_dma_xor_aligned(struct dma_device *dev, size_t off1,
  				      size_t off2, size_t len)
  {
  	return dmaengine_check_align(dev->xor_align, off1, off2, len);
  }
  
  static inline bool is_dma_pq_aligned(struct dma_device *dev, size_t off1,
  				     size_t off2, size_t len)
  {
  	return dmaengine_check_align(dev->pq_align, off1, off2, len);
  }
  
  static inline bool is_dma_fill_aligned(struct dma_device *dev, size_t off1,
  				       size_t off2, size_t len)
  {
  	return dmaengine_check_align(dev->fill_align, off1, off2, len);
  }
  static inline void
  dma_set_maxpq(struct dma_device *dma, int maxpq, int has_pq_continue)
  {
  	dma->max_pq = maxpq;
  	if (has_pq_continue)
  		dma->max_pq |= DMA_HAS_PQ_CONTINUE;
  }
  
  static inline bool dmaf_continue(enum dma_ctrl_flags flags)
  {
  	return (flags & DMA_PREP_CONTINUE) == DMA_PREP_CONTINUE;
  }
  
  static inline bool dmaf_p_disabled_continue(enum dma_ctrl_flags flags)
  {
  	enum dma_ctrl_flags mask = DMA_PREP_CONTINUE | DMA_PREP_PQ_DISABLE_P;
  
  	return (flags & mask) == mask;
  }
  
  static inline bool dma_dev_has_pq_continue(struct dma_device *dma)
  {
  	return (dma->max_pq & DMA_HAS_PQ_CONTINUE) == DMA_HAS_PQ_CONTINUE;
  }
  static inline unsigned short dma_dev_to_maxpq(struct dma_device *dma)
  {
  	return dma->max_pq & ~DMA_HAS_PQ_CONTINUE;
  }
  
  /* dma_maxpq - reduce maxpq in the face of continued operations
   * @dma - dma device with PQ capability
   * @flags - to check if DMA_PREP_CONTINUE and DMA_PREP_PQ_DISABLE_P are set
   *
   * When an engine does not support native continuation we need 3 extra
   * source slots to reuse P and Q with the following coefficients:
   * 1/ {00} * P : remove P from Q', but use it as a source for P'
   * 2/ {01} * Q : use Q to continue Q' calculation
   * 3/ {00} * Q : subtract Q from P' to cancel (2)
   *
   * In the case where P is disabled we only need 1 extra source:
   * 1/ {01} * Q : use Q to continue Q' calculation
   */
  static inline int dma_maxpq(struct dma_device *dma, enum dma_ctrl_flags flags)
  {
  	if (dma_dev_has_pq_continue(dma) || !dmaf_continue(flags))
  		return dma_dev_to_maxpq(dma);
  	else if (dmaf_p_disabled_continue(flags))
  		return dma_dev_to_maxpq(dma) - 1;
  	else if (dmaf_continue(flags))
  		return dma_dev_to_maxpq(dma) - 3;
  	BUG();
  }
  /* --- public DMA engine API --- */
  #ifdef CONFIG_DMA_ENGINE
  void dmaengine_get(void);
  void dmaengine_put(void);
  #else
  static inline void dmaengine_get(void)
  {
  }
  static inline void dmaengine_put(void)
  {
  }
  #endif
  #ifdef CONFIG_NET_DMA
  #define net_dmaengine_get()	dmaengine_get()
  #define net_dmaengine_put()	dmaengine_put()
  #else
  static inline void net_dmaengine_get(void)
  {
  }
  static inline void net_dmaengine_put(void)
  {
  }
  #endif
  #ifdef CONFIG_ASYNC_TX_DMA
  #define async_dmaengine_get()	dmaengine_get()
  #define async_dmaengine_put()	dmaengine_put()
  #ifndef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
  #define async_dma_find_channel(type) dma_find_channel(DMA_ASYNC_TX)
  #else
  #define async_dma_find_channel(type) dma_find_channel(type)
  #endif /* CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH */
  #else
  static inline void async_dmaengine_get(void)
  {
  }
  static inline void async_dmaengine_put(void)
  {
  }
  static inline struct dma_chan *
  async_dma_find_channel(enum dma_transaction_type type)
  {
  	return NULL;
  }
  #endif /* CONFIG_ASYNC_TX_DMA */

  dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan,
  	void *dest, void *src, size_t len);
  dma_cookie_t dma_async_memcpy_buf_to_pg(struct dma_chan *chan,
  	struct page *page, unsigned int offset, void *kdata, size_t len);
  dma_cookie_t dma_async_memcpy_pg_to_pg(struct dma_chan *chan,
  	struct page *dest_pg, unsigned int dest_off, struct page *src_pg,
  	unsigned int src_off, size_t len);
  void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
  	struct dma_chan *chan);

  static inline void async_tx_ack(struct dma_async_tx_descriptor *tx)
  {
  	tx->flags |= DMA_CTRL_ACK;
  }
  static inline void async_tx_clear_ack(struct dma_async_tx_descriptor *tx)
  {
  	tx->flags &= ~DMA_CTRL_ACK;
  }
  static inline bool async_tx_test_ack(struct dma_async_tx_descriptor *tx)
  {
  	return (tx->flags & DMA_CTRL_ACK) == DMA_CTRL_ACK;
  }
  #define dma_cap_set(tx, mask) __dma_cap_set((tx), &(mask))
  static inline void
  __dma_cap_set(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
  {
  	set_bit(tx_type, dstp->bits);
  }

  #define dma_cap_clear(tx, mask) __dma_cap_clear((tx), &(mask))
  static inline void
  __dma_cap_clear(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
  {
  	clear_bit(tx_type, dstp->bits);
  }
  #define dma_cap_zero(mask) __dma_cap_zero(&(mask))
  static inline void __dma_cap_zero(dma_cap_mask_t *dstp)
  {
  	bitmap_zero(dstp->bits, DMA_TX_TYPE_END);
  }
  #define dma_has_cap(tx, mask) __dma_has_cap((tx), &(mask))
  static inline int
  __dma_has_cap(enum dma_transaction_type tx_type, dma_cap_mask_t *srcp)
  {
  	return test_bit(tx_type, srcp->bits);
  }
  #define for_each_dma_cap_mask(cap, mask) \
  	for_each_set_bit(cap, mask.bits, DMA_TX_TYPE_END)

  /**
   * dma_async_issue_pending - flush pending transactions to HW
   * @chan: target DMA channel
   *
   * This allows drivers to push copies to HW in batches,
   * reducing MMIO writes where possible.
   */
  static inline void dma_async_issue_pending(struct dma_chan *chan)
  {
  	chan->device->device_issue_pending(chan);
  }
  
  /**
   * dma_async_is_tx_complete - poll for transaction completion
   * @chan: DMA channel
   * @cookie: transaction identifier to check status of
   * @last: returns last completed cookie, can be NULL
   * @used: returns last issued cookie, can be NULL
   *
   * If @last and @used are passed in, upon return they reflect the driver
   * internal state and can be used with dma_async_is_complete() to check
   * the status of multiple cookies without re-checking hardware state.
   */
  static inline enum dma_status dma_async_is_tx_complete(struct dma_chan *chan,
  	dma_cookie_t cookie, dma_cookie_t *last, dma_cookie_t *used)
  {
  	struct dma_tx_state state;
  	enum dma_status status;
  
  	status = chan->device->device_tx_status(chan, cookie, &state);
  	if (last)
  		*last = state.last;
  	if (used)
  		*used = state.used;
  	return status;
  }
  
  /**
   * dma_async_is_complete - test a cookie against chan state
   * @cookie: transaction identifier to test status of
   * @last_complete: last known completed transaction
   * @last_used: last cookie value handed out
   *
   * dma_async_is_complete() is used in dma_async_is_tx_complete()
   * the test logic is separated for lightweight testing of multiple cookies
   */
  static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie,
  			dma_cookie_t last_complete, dma_cookie_t last_used)
  {
  	if (last_complete <= last_used) {
  		if ((cookie <= last_complete) || (cookie > last_used))
  			return DMA_COMPLETE;
  	} else {
  		if ((cookie <= last_complete) && (cookie > last_used))
  			return DMA_COMPLETE;
  	}
  	return DMA_IN_PROGRESS;
  }
  static inline void
  dma_set_tx_state(struct dma_tx_state *st, dma_cookie_t last, dma_cookie_t used, u32 residue)
  {
  	if (st) {
  		st->last = last;
  		st->used = used;
  		st->residue = residue;
  	}
  }
  #ifdef CONFIG_DMA_ENGINE
  struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type);
  enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie);
  enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx);
  void dma_issue_pending_all(void);
  struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
  					dma_filter_fn fn, void *fn_param);
  struct dma_chan *dma_request_slave_channel_reason(struct device *dev,
  						  const char *name);
  struct dma_chan *dma_request_slave_channel(struct device *dev, const char *name);
  void dma_release_channel(struct dma_chan *chan);
  #else
  static inline struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
  {
  	return NULL;
  }
  static inline enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
  {
  	return DMA_COMPLETE;
  }
  static inline enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
  {
  	return DMA_COMPLETE;
  }
  static inline void dma_issue_pending_all(void)
  {
  }
  static inline struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
  					      dma_filter_fn fn, void *fn_param)
  {
  	return NULL;
  }
  static inline struct dma_chan *dma_request_slave_channel_reason(
  					struct device *dev, const char *name)
  {
  	return ERR_PTR(-ENODEV);
  }
  static inline struct dma_chan *dma_request_slave_channel(struct device *dev,
  							 const char *name)
  {
  	return NULL;
  }
  static inline void dma_release_channel(struct dma_chan *chan)
  {
  }
  #endif
  
  /* --- DMA device --- */
  
  int dma_async_device_register(struct dma_device *device);
  void dma_async_device_unregister(struct dma_device *device);
  void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
  struct dma_chan *dma_get_slave_channel(struct dma_chan *chan);
  struct dma_chan *dma_get_any_slave_channel(struct dma_device *device);
  struct dma_chan *net_dma_find_channel(void);
  #define dma_request_channel(mask, x, y) __dma_request_channel(&(mask), x, y)
  #define dma_request_slave_channel_compat(mask, x, y, dev, name) \
  	__dma_request_slave_channel_compat(&(mask), x, y, dev, name)
  
  static inline struct dma_chan
  *__dma_request_slave_channel_compat(const dma_cap_mask_t *mask,
  				  dma_filter_fn fn, void *fn_param,
  				  struct device *dev, char *name)
  {
  	struct dma_chan *chan;
  
  	chan = dma_request_slave_channel(dev, name);
  	if (chan)
  		return chan;
  
  	return __dma_request_channel(mask, fn, fn_param);
  }
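
  /*
   * Illustrative sketch (not part of the original header): request any
   * free channel with plain memcpy capability using the mask helpers
   * above; no filter function is needed for a pure capability match.
   */
  static struct dma_chan *example_get_memcpy_channel(void)
  {
  	dma_cap_mask_t mask;

  	dma_cap_zero(mask);
  	dma_cap_set(DMA_MEMCPY, mask);

  	/* returns NULL if no suitable channel is available */
  	return dma_request_channel(mask, NULL, NULL);
  }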

  /* --- Helper iov-locking functions --- */
  
  struct dma_page_list {
  	char __user *base_address;
  	int nr_pages;
  	struct page **pages;
  };
  
  struct dma_pinned_list {
  	int nr_iovecs;
  	struct dma_page_list page_list[0];
  };
  
  struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len);
  void dma_unpin_iovec_pages(struct dma_pinned_list* pinned_list);
  
  dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
  	struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len);
  dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
  	struct dma_pinned_list *pinned_list, struct page *page,
  	unsigned int offset, size_t len);
  #endif /* DMAENGINE_H */