drivers/dma/mmp_pdma.c
  // SPDX-License-Identifier: GPL-2.0-only
  /*
   * Copyright 2012 Marvell International Ltd.
   */

  #include <linux/err.h>
  #include <linux/module.h>
  #include <linux/init.h>
  #include <linux/types.h>
  #include <linux/interrupt.h>
  #include <linux/dma-mapping.h>
  #include <linux/slab.h>
  #include <linux/dmaengine.h>
  #include <linux/platform_device.h>
  #include <linux/device.h>
  #include <linux/platform_data/mmp_dma.h>
  #include <linux/dmapool.h>
  #include <linux/of_device.h>
  #include <linux/of_dma.h>
  #include <linux/of.h>
  #include <linux/dma/mmp-pdma.h>
  
  #include "dmaengine.h"
  
  #define DCSR		0x0000
  #define DALGN		0x00a0
  #define DINT		0x00f0
  #define DDADR		0x0200
  #define DSADR(n)	(0x0204 + ((n) << 4))
  #define DTADR(n)	(0x0208 + ((n) << 4))
  #define DCMD		0x020c
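  /*
   * The channel control/status register DCSR is replicated per channel
   * with a 4-byte stride ((idx << 2) in enable_chan()/disable_chan()
   * below), while the per-channel descriptor registers DDADR/DSADR/DTADR/
   * DCMD repeat with a 16-byte stride ((idx << 4), see DSADR()/DTADR()
   * above and set_desc()).
   */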
  #define DCSR_RUN	BIT(31)	/* Run Bit (read / write) */
  #define DCSR_NODESC	BIT(30)	/* No-Descriptor Fetch (read / write) */
  #define DCSR_STOPIRQEN	BIT(29)	/* Stop Interrupt Enable (read / write) */
  #define DCSR_REQPEND	BIT(8)	/* Request Pending (read-only) */
  #define DCSR_STOPSTATE	BIT(3)	/* Stop State (read-only) */
  #define DCSR_ENDINTR	BIT(2)	/* End Interrupt (read / write) */
  #define DCSR_STARTINTR	BIT(1)	/* Start Interrupt (read / write) */
  #define DCSR_BUSERR	BIT(0)	/* Bus Error Interrupt (read / write) */
  
  #define DCSR_EORIRQEN	BIT(28)	/* End of Receive Interrupt Enable (R/W) */
  #define DCSR_EORJMPEN	BIT(27)	/* Jump to next descriptor on EOR */
  #define DCSR_EORSTOPEN	BIT(26)	/* STOP on an EOR */
  #define DCSR_SETCMPST	BIT(25)	/* Set Descriptor Compare Status */
  #define DCSR_CLRCMPST	BIT(24)	/* Clear Descriptor Compare Status */
  #define DCSR_CMPST	BIT(10)	/* The Descriptor Compare Status */
  #define DCSR_EORINTR	BIT(9)	/* The end of Receive */
  
  #define DRCMR(n)	((((n) < 64) ? 0x0100 : 0x1100) + (((n) & 0x3f) << 2))
  #define DRCMR_MAPVLD	BIT(7)	/* Map Valid (read / write) */
  #define DRCMR_CHLNUM	0x1f	/* mask for Channel Number (read / write) */
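  /*
   * Each peripheral request line has its own DRCMR mapping register:
   * requests 0-63 live at 0x0100 + n * 4 and requests 64 and above at
   * 0x1100 + (n & 0x3f) * 4.  Writing DRCMR_MAPVLD together with a
   * physical channel index routes that request to the channel, see
   * enable_chan().
   */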
  
  #define DDADR_DESCADDR	0xfffffff0	/* Address of next descriptor (mask) */
  #define DDADR_STOP	BIT(0)	/* Stop (read / write) */
  
  #define DCMD_INCSRCADDR	BIT(31)	/* Source Address Increment Setting. */
  #define DCMD_INCTRGADDR	BIT(30)	/* Target Address Increment Setting. */
  #define DCMD_FLOWSRC	BIT(29)	/* Flow Control by the source. */
  #define DCMD_FLOWTRG	BIT(28)	/* Flow Control by the target. */
  #define DCMD_STARTIRQEN	BIT(22)	/* Start Interrupt Enable */
  #define DCMD_ENDIRQEN	BIT(21)	/* End Interrupt Enable */
  #define DCMD_ENDIAN	BIT(18)	/* Device Endian-ness. */
  #define DCMD_BURST8	(1 << 16)	/* 8 byte burst */
  #define DCMD_BURST16	(2 << 16)	/* 16 byte burst */
  #define DCMD_BURST32	(3 << 16)	/* 32 byte burst */
  #define DCMD_WIDTH1	(1 << 14)	/* 1 byte width */
  #define DCMD_WIDTH2	(2 << 14)	/* 2 byte width (HalfWord) */
  #define DCMD_WIDTH4	(3 << 14)	/* 4 byte width (Word) */
  #define DCMD_LENGTH	0x01fff		/* length mask (max = 8K - 1) */
  #define PDMA_MAX_DESC_BYTES	DCMD_LENGTH
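  /*
   * A single hardware descriptor can move at most DCMD_LENGTH
   * (8 KiB - 1) bytes, so the prep_* callbacks below split larger
   * requests into a chain of descriptors.
   */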
  
  struct mmp_pdma_desc_hw {
  	u32 ddadr;	/* Points to the next descriptor + flags */
  	u32 dsadr;	/* DSADR value for the current transfer */
  	u32 dtadr;	/* DTADR value for the current transfer */
  	u32 dcmd;	/* DCMD value for the current transfer */
  } __aligned(32);
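  /*
   * mmp_pdma_desc_hw above mirrors the descriptor layout the controller
   * fetches from memory: descriptors are chained through ddadr (the DMA
   * address of the next descriptor), a chain is terminated by setting
   * DDADR_STOP, and cyclic transfers link the last descriptor back to
   * the first one.
   */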
  
  struct mmp_pdma_desc_sw {
  	struct mmp_pdma_desc_hw desc;
  	struct list_head node;
  	struct list_head tx_list;
  	struct dma_async_tx_descriptor async_tx;
  };
  
  struct mmp_pdma_phy;
  
  struct mmp_pdma_chan {
  	struct device *dev;
  	struct dma_chan chan;
  	struct dma_async_tx_descriptor desc;
  	struct mmp_pdma_phy *phy;
  	enum dma_transfer_direction dir;
  	struct dma_slave_config slave_config;

  	struct mmp_pdma_desc_sw *cyclic_first;	/* first desc_sw if channel
  						 * is in cyclic mode */
  	/* channel's basic info */
  	struct tasklet_struct tasklet;
  	u32 dcmd;
  	u32 drcmr;
  	u32 dev_addr;
  
  	/* list for desc */
  	spinlock_t desc_lock;		/* Descriptor list lock */
  	struct list_head chain_pending;	/* Link descriptors queue for pending */
  	struct list_head chain_running;	/* Link descriptors queue for running */
  	bool idle;			/* channel state machine */
  	bool byte_align;
  
  	struct dma_pool *desc_pool;	/* Descriptors pool */
  };
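  /*
   * A mmp_pdma_chan is a virtual channel: it only owns a physical
   * channel (struct mmp_pdma_phy below) while it has work to run.
   * lookup_phy() grabs a free phy in priority order and
   * mmp_pdma_free_phy() releases it again; phy->vchan points back at
   * the owning virtual channel.
   */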
  
  struct mmp_pdma_phy {
  	int idx;
  	void __iomem *base;
  	struct mmp_pdma_chan *vchan;
  };
  
  struct mmp_pdma_device {
  	int				dma_channels;
  	void __iomem			*base;
  	struct device			*dev;
  	struct dma_device		device;
  	struct mmp_pdma_phy		*phy;
  	spinlock_t phy_lock; /* protect alloc/free phy channels */
  };
  #define tx_to_mmp_pdma_desc(tx)					\
  	container_of(tx, struct mmp_pdma_desc_sw, async_tx)
  #define to_mmp_pdma_desc(lh)					\
  	container_of(lh, struct mmp_pdma_desc_sw, node)
  #define to_mmp_pdma_chan(dchan)					\
  	container_of(dchan, struct mmp_pdma_chan, chan)
  #define to_mmp_pdma_dev(dmadev)					\
  	container_of(dmadev, struct mmp_pdma_device, device)

  static int mmp_pdma_config_write(struct dma_chan *dchan,
  			   struct dma_slave_config *cfg,
  			   enum dma_transfer_direction direction);
  static void set_desc(struct mmp_pdma_phy *phy, dma_addr_t addr)
  {
  	u32 reg = (phy->idx << 4) + DDADR;
  
  	writel(addr, phy->base + reg);
  }
  
  static void enable_chan(struct mmp_pdma_phy *phy)
  {
  	u32 reg, dalgn;
  
  	if (!phy->vchan)
  		return;
  	reg = DRCMR(phy->vchan->drcmr);
  	writel(DRCMR_MAPVLD | phy->idx, phy->base + reg);
  	dalgn = readl(phy->base + DALGN);
  	if (phy->vchan->byte_align)
  		dalgn |= 1 << phy->idx;
  	else
  		dalgn &= ~(1 << phy->idx);
  	writel(dalgn, phy->base + DALGN);
  	reg = (phy->idx << 2) + DCSR;
  	writel(readl(phy->base + reg) | DCSR_RUN, phy->base + reg);
  }
  
  static void disable_chan(struct mmp_pdma_phy *phy)
  {
  	u32 reg;
  	if (!phy)
  		return;
  
  	reg = (phy->idx << 2) + DCSR;
  	writel(readl(phy->base + reg) & ~DCSR_RUN, phy->base + reg);
  }
  
  static int clear_chan_irq(struct mmp_pdma_phy *phy)
  {
  	u32 dcsr;
  	u32 dint = readl(phy->base + DINT);
  	u32 reg = (phy->idx << 2) + DCSR;
  	if (!(dint & BIT(phy->idx)))
  		return -EAGAIN;
  
  	/* clear irq */
  	dcsr = readl(phy->base + reg);
  	writel(dcsr, phy->base + reg);
  	if ((dcsr & DCSR_BUSERR) && (phy->vchan))
  		dev_warn(phy->vchan->dev, "DCSR_BUSERR\n");
  
  	return 0;
  }
  
  static irqreturn_t mmp_pdma_chan_handler(int irq, void *dev_id)
  {
  	struct mmp_pdma_phy *phy = dev_id;
  	if (clear_chan_irq(phy) != 0)
  		return IRQ_NONE;
  
  	tasklet_schedule(&phy->vchan->tasklet);
  	return IRQ_HANDLED;
  }
  
  static irqreturn_t mmp_pdma_int_handler(int irq, void *dev_id)
  {
  	struct mmp_pdma_device *pdev = dev_id;
  	struct mmp_pdma_phy *phy;
  	u32 dint = readl(pdev->base + DINT);
  	int i, ret;
  	int irq_num = 0;
  
  	while (dint) {
  		i = __ffs(dint);
  		/* only handle interrupts belonging to pdma driver */
  		if (i >= pdev->dma_channels)
  			break;
  		dint &= (dint - 1);
  		phy = &pdev->phy[i];
  		ret = mmp_pdma_chan_handler(irq, phy);
  		if (ret == IRQ_HANDLED)
  			irq_num++;
  	}
  
  	if (irq_num)
  		return IRQ_HANDLED;
  
  	return IRQ_NONE;
  }
  
  /* lookup free phy channel as descending priority */
  static struct mmp_pdma_phy *lookup_phy(struct mmp_pdma_chan *pchan)
  {
  	int prio, i;
  	struct mmp_pdma_device *pdev = to_mmp_pdma_dev(pchan->chan.device);
  	struct mmp_pdma_phy *phy, *found = NULL;
  	unsigned long flags;
  
  	/*
  	 * dma channel priorities
  	 * ch 0 - 3,  16 - 19  <--> (0)
  	 * ch 4 - 7,  20 - 23  <--> (1)
  	 * ch 8 - 11, 24 - 27  <--> (2)
  	 * ch 12 - 15, 28 - 31  <--> (3)
  	 *
  	 * i.e. channel i belongs to priority group ((i & 0xf) >> 2), which
  	 * is what the loops below compare against.
  	 */
  
  	spin_lock_irqsave(&pdev->phy_lock, flags);
  	for (prio = 0; prio <= ((pdev->dma_channels - 1) & 0xf) >> 2; prio++) {
  		for (i = 0; i < pdev->dma_channels; i++) {
  			if (prio != (i & 0xf) >> 2)
  				continue;
  			phy = &pdev->phy[i];
  			if (!phy->vchan) {
  				phy->vchan = pchan;
  				found = phy;
  				goto out_unlock;
  			}
  		}
  	}
  out_unlock:
  	spin_unlock_irqrestore(&pdev->phy_lock, flags);
  	return found;
  }
  static void mmp_pdma_free_phy(struct mmp_pdma_chan *pchan)
  {
  	struct mmp_pdma_device *pdev = to_mmp_pdma_dev(pchan->chan.device);
  	unsigned long flags;
  	u32 reg;
  
  	if (!pchan->phy)
  		return;
  	/* clear the channel mapping in DRCMR */
  	reg = DRCMR(pchan->drcmr);
  	writel(0, pchan->phy->base + reg);
  	spin_lock_irqsave(&pdev->phy_lock, flags);
  	pchan->phy->vchan = NULL;
  	pchan->phy = NULL;
  	spin_unlock_irqrestore(&pdev->phy_lock, flags);
  }
  /*
   * start_pending_queue - transfer any pending transactions
   * pending list ==> running list
   */
  static void start_pending_queue(struct mmp_pdma_chan *chan)
  {
  	struct mmp_pdma_desc_sw *desc;
  
  	/* still in running, irq will start the pending list */
  	if (!chan->idle) {
  		dev_dbg(chan->dev, "DMA controller still busy\n");
  		return;
  	}
  
  	if (list_empty(&chan->chain_pending)) {
  		/* chance to re-fetch phy channel with higher prio */
  		mmp_pdma_free_phy(chan);
  		dev_dbg(chan->dev, "no pending list\n");
  		return;
  	}
  
  	if (!chan->phy) {
  		chan->phy = lookup_phy(chan);
  		if (!chan->phy) {
  			dev_dbg(chan->dev, "no free dma channel\n");
  			return;
  		}
  	}
  
  	/*
  	 * pending -> running
  	 * reinitialize pending list
  	 */
  	desc = list_first_entry(&chan->chain_pending,
  				struct mmp_pdma_desc_sw, node);
  	list_splice_tail_init(&chan->chain_pending, &chan->chain_running);
  
  	/*
  	 * Program the descriptor's address into the DMA controller,
  	 * then start the DMA transaction
  	 */
  	set_desc(chan->phy, desc->async_tx.phys);
  	enable_chan(chan->phy);
  	chan->idle = false;
  }
  
  
  /* desc->tx_list ==> pending list */
  static dma_cookie_t mmp_pdma_tx_submit(struct dma_async_tx_descriptor *tx)
  {
  	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(tx->chan);
  	struct mmp_pdma_desc_sw *desc = tx_to_mmp_pdma_desc(tx);
  	struct mmp_pdma_desc_sw *child;
  	unsigned long flags;
  	dma_cookie_t cookie = -EBUSY;
  
  	spin_lock_irqsave(&chan->desc_lock, flags);
  
  	list_for_each_entry(child, &desc->tx_list, node) {
  		cookie = dma_cookie_assign(&child->async_tx);
  	}
  	/* softly link to pending list - desc->tx_list ==> pending list */
  	list_splice_tail_init(&desc->tx_list, &chan->chain_pending);
  
  	spin_unlock_irqrestore(&chan->desc_lock, flags);
  
  	return cookie;
  }
  static struct mmp_pdma_desc_sw *
  mmp_pdma_alloc_descriptor(struct mmp_pdma_chan *chan)
  {
  	struct mmp_pdma_desc_sw *desc;
  	dma_addr_t pdesc;
  	desc = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &pdesc);
  	if (!desc) {
  		dev_err(chan->dev, "out of memory for link descriptor\n");
  		return NULL;
  	}
  	INIT_LIST_HEAD(&desc->tx_list);
  	dma_async_tx_descriptor_init(&desc->async_tx, &chan->chan);
  	/* each desc has submit */
  	desc->async_tx.tx_submit = mmp_pdma_tx_submit;
  	desc->async_tx.phys = pdesc;
  
  	return desc;
  }
  /*
   * mmp_pdma_alloc_chan_resources - Allocate resources for DMA channel.
   *
   * This function will create a dma pool for descriptor allocation.
   * Request irq only when channel is requested
   * Return - The number of allocated descriptors.
   */
  
  static int mmp_pdma_alloc_chan_resources(struct dma_chan *dchan)
  {
  	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
  
  	if (chan->desc_pool)
  		return 1;
  	chan->desc_pool = dma_pool_create(dev_name(&dchan->dev->device),
  					  chan->dev,
  					  sizeof(struct mmp_pdma_desc_sw),
  					  __alignof__(struct mmp_pdma_desc_sw),
  					  0);
  	if (!chan->desc_pool) {
  		dev_err(chan->dev, "unable to allocate descriptor pool\n");
  		return -ENOMEM;
  	}

  	mmp_pdma_free_phy(chan);
  	chan->idle = true;
  	chan->dev_addr = 0;
  	return 1;
  }
  
  static void mmp_pdma_free_desc_list(struct mmp_pdma_chan *chan,
  				    struct list_head *list)
  {
  	struct mmp_pdma_desc_sw *desc, *_desc;
  
  	list_for_each_entry_safe(desc, _desc, list, node) {
  		list_del(&desc->node);
  		dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
  	}
  }
  
  static void mmp_pdma_free_chan_resources(struct dma_chan *dchan)
  {
  	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
  	unsigned long flags;
  
  	spin_lock_irqsave(&chan->desc_lock, flags);
  	mmp_pdma_free_desc_list(chan, &chan->chain_pending);
  	mmp_pdma_free_desc_list(chan, &chan->chain_running);
  	spin_unlock_irqrestore(&chan->desc_lock, flags);
  
  	dma_pool_destroy(chan->desc_pool);
  	chan->desc_pool = NULL;
  	chan->idle = true;
  	chan->dev_addr = 0;
  	mmp_pdma_free_phy(chan);
  	return;
  }
  
  static struct dma_async_tx_descriptor *
  mmp_pdma_prep_memcpy(struct dma_chan *dchan,
  		     dma_addr_t dma_dst, dma_addr_t dma_src,
  		     size_t len, unsigned long flags)
  {
  	struct mmp_pdma_chan *chan;
  	struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new;
  	size_t copy = 0;
  
  	if (!dchan)
  		return NULL;
  
  	if (!len)
  		return NULL;
  
  	chan = to_mmp_pdma_chan(dchan);
  	chan->byte_align = false;
  
  	if (!chan->dir) {
  		chan->dir = DMA_MEM_TO_MEM;
  		chan->dcmd = DCMD_INCTRGADDR | DCMD_INCSRCADDR;
  		chan->dcmd |= DCMD_BURST32;
  	}
  
  	do {
  		/* Allocate the link descriptor from DMA pool */
  		new = mmp_pdma_alloc_descriptor(chan);
  		if (!new) {
  			dev_err(chan->dev, "no memory for desc\n");
  			goto fail;
  		}
  
  		copy = min_t(size_t, len, PDMA_MAX_DESC_BYTES);
  		if (dma_src & 0x7 || dma_dst & 0x7)
  			chan->byte_align = true;
  
  		new->desc.dcmd = chan->dcmd | (DCMD_LENGTH & copy);
  		new->desc.dsadr = dma_src;
  		new->desc.dtadr = dma_dst;
  
  		if (!first)
  			first = new;
  		else
  			prev->desc.ddadr = new->async_tx.phys;
  
  		new->async_tx.cookie = 0;
  		async_tx_ack(&new->async_tx);
  
  		prev = new;
  		len -= copy;
  
  		if (chan->dir == DMA_MEM_TO_DEV) {
  			dma_src += copy;
  		} else if (chan->dir == DMA_DEV_TO_MEM) {
  			dma_dst += copy;
  		} else if (chan->dir == DMA_MEM_TO_MEM) {
  			dma_src += copy;
  			dma_dst += copy;
  		}
  
  		/* Insert the link descriptor to the LD ring */
  		list_add_tail(&new->node, &first->tx_list);
  	} while (len);
  
  	first->async_tx.flags = flags; /* client is in control of this ack */
  	first->async_tx.cookie = -EBUSY;
  
  	/* last desc and fire IRQ */
  	new->desc.ddadr = DDADR_STOP;
  	new->desc.dcmd |= DCMD_ENDIRQEN;
  	chan->cyclic_first = NULL;
  	return &first->async_tx;
  
  fail:
  	if (first)
  		mmp_pdma_free_desc_list(chan, &first->tx_list);
  	return NULL;
  }
  
  static struct dma_async_tx_descriptor *
  mmp_pdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
  		       unsigned int sg_len, enum dma_transfer_direction dir,
  		       unsigned long flags, void *context)
  {
  	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
  	struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new = NULL;
  	size_t len, avail;
  	struct scatterlist *sg;
  	dma_addr_t addr;
  	int i;
  
  	if ((sgl == NULL) || (sg_len == 0))
  		return NULL;
  	chan->byte_align = false;
  	mmp_pdma_config_write(dchan, &chan->slave_config, dir);
  	for_each_sg(sgl, sg, sg_len, i) {
  		addr = sg_dma_address(sg);
  		avail = sg_dma_len(sgl);
  
  		do {
  			len = min_t(size_t, avail, PDMA_MAX_DESC_BYTES);
  			if (addr & 0x7)
  				chan->byte_align = true;
  
  			/* allocate and populate the descriptor */
  			new = mmp_pdma_alloc_descriptor(chan);
  			if (!new) {
  				dev_err(chan->dev, "no memory for desc\n");
  				goto fail;
  			}
  
  			new->desc.dcmd = chan->dcmd | (DCMD_LENGTH & len);
  			if (dir == DMA_MEM_TO_DEV) {
  				new->desc.dsadr = addr;
  				new->desc.dtadr = chan->dev_addr;
  			} else {
  				new->desc.dsadr = chan->dev_addr;
  				new->desc.dtadr = addr;
  			}
  
  			if (!first)
  				first = new;
  			else
  				prev->desc.ddadr = new->async_tx.phys;
  
  			new->async_tx.cookie = 0;
  			async_tx_ack(&new->async_tx);
  			prev = new;
  
  			/* Insert the link descriptor to the LD ring */
  			list_add_tail(&new->node, &first->tx_list);
  
  			/* update metadata */
  			addr += len;
  			avail -= len;
  		} while (avail);
  	}
  
  	first->async_tx.cookie = -EBUSY;
  	first->async_tx.flags = flags;
  
  	/* last desc and fire IRQ */
  	new->desc.ddadr = DDADR_STOP;
  	new->desc.dcmd |= DCMD_ENDIRQEN;
  	chan->dir = dir;
  	chan->cyclic_first = NULL;
  
  	return &first->async_tx;
  
  fail:
  	if (first)
  		mmp_pdma_free_desc_list(chan, &first->tx_list);
  	return NULL;
  }
  static struct dma_async_tx_descriptor *
  mmp_pdma_prep_dma_cyclic(struct dma_chan *dchan,
  			 dma_addr_t buf_addr, size_t len, size_t period_len,
  			 enum dma_transfer_direction direction,
  			 unsigned long flags)
  {
  	struct mmp_pdma_chan *chan;
  	struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new;
  	dma_addr_t dma_src, dma_dst;
  
  	if (!dchan || !len || !period_len)
  		return NULL;
  
  	/* the buffer length must be a multiple of period_len */
  	if (len % period_len != 0)
  		return NULL;
  
  	if (period_len > PDMA_MAX_DESC_BYTES)
  		return NULL;
  
  	chan = to_mmp_pdma_chan(dchan);
  	mmp_pdma_config_write(dchan, &chan->slave_config, direction);
  
  	switch (direction) {
  	case DMA_MEM_TO_DEV:
  		dma_src = buf_addr;
  		dma_dst = chan->dev_addr;
  		break;
  	case DMA_DEV_TO_MEM:
  		dma_dst = buf_addr;
  		dma_src = chan->dev_addr;
  		break;
  	default:
  		dev_err(chan->dev, "Unsupported direction for cyclic DMA\n");
  		return NULL;
  	}
  
  	chan->dir = direction;
  
  	do {
  		/* Allocate the link descriptor from DMA pool */
  		new = mmp_pdma_alloc_descriptor(chan);
  		if (!new) {
  			dev_err(chan->dev, "no memory for desc\n");
  			goto fail;
  		}
  		new->desc.dcmd = (chan->dcmd | DCMD_ENDIRQEN |
  				  (DCMD_LENGTH & period_len));
  		new->desc.dsadr = dma_src;
  		new->desc.dtadr = dma_dst;
  
  		if (!first)
  			first = new;
  		else
  			prev->desc.ddadr = new->async_tx.phys;
  
  		new->async_tx.cookie = 0;
  		async_tx_ack(&new->async_tx);
  
  		prev = new;
  		len -= period_len;
  
  		if (chan->dir == DMA_MEM_TO_DEV)
  			dma_src += period_len;
  		else
  			dma_dst += period_len;
  
  		/* Insert the link descriptor to the LD ring */
  		list_add_tail(&new->node, &first->tx_list);
  	} while (len);
  
  	first->async_tx.flags = flags; /* client is in control of this ack */
  	first->async_tx.cookie = -EBUSY;
  
  	/* make the cyclic link */
  	new->desc.ddadr = first->async_tx.phys;
  	chan->cyclic_first = first;
  	return &first->async_tx;
  
  fail:
  	if (first)
  		mmp_pdma_free_desc_list(chan, &first->tx_list);
  	return NULL;
  }
  static int mmp_pdma_config_write(struct dma_chan *dchan,
  			   struct dma_slave_config *cfg,
  			   enum dma_transfer_direction direction)
  {
  	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
  	u32 maxburst = 0, addr = 0;
  	enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
  
  	if (!dchan)
  		return -EINVAL;
  	if (direction == DMA_DEV_TO_MEM) {
  		chan->dcmd = DCMD_INCTRGADDR | DCMD_FLOWSRC;
  		maxburst = cfg->src_maxburst;
  		width = cfg->src_addr_width;
  		addr = cfg->src_addr;
  	} else if (direction == DMA_MEM_TO_DEV) {
  		chan->dcmd = DCMD_INCSRCADDR | DCMD_FLOWTRG;
  		maxburst = cfg->dst_maxburst;
  		width = cfg->dst_addr_width;
  		addr = cfg->dst_addr;
  	}
  	if (width == DMA_SLAVE_BUSWIDTH_1_BYTE)
  		chan->dcmd |= DCMD_WIDTH1;
  	else if (width == DMA_SLAVE_BUSWIDTH_2_BYTES)
  		chan->dcmd |= DCMD_WIDTH2;
  	else if (width == DMA_SLAVE_BUSWIDTH_4_BYTES)
  		chan->dcmd |= DCMD_WIDTH4;
  
  	if (maxburst == 8)
  		chan->dcmd |= DCMD_BURST8;
  	else if (maxburst == 16)
  		chan->dcmd |= DCMD_BURST16;
  	else if (maxburst == 32)
  		chan->dcmd |= DCMD_BURST32;
  	chan->dir = direction;
  	chan->dev_addr = addr;
  	/* FIXME: drivers should be ported over to use the filter
  	 * function. Once that's done, the following two lines can
  	 * be removed.
  	 */
  	if (cfg->slave_id)
  		chan->drcmr = cfg->slave_id;
  
  	return 0;
  }
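  /*
   * Illustrative example (not taken from a specific board): a
   * DMA_DEV_TO_MEM slave config with src_addr_width set to
   * DMA_SLAVE_BUSWIDTH_4_BYTES and src_maxburst = 32 makes
   * mmp_pdma_config_write() above build
   * chan->dcmd = DCMD_INCTRGADDR | DCMD_FLOWSRC | DCMD_WIDTH4 | DCMD_BURST32.
   */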
  static int mmp_pdma_config(struct dma_chan *dchan,
  			   struct dma_slave_config *cfg)
  {
  	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
  
  	memcpy(&chan->slave_config, cfg, sizeof(*cfg));
  	return 0;
  }
  static int mmp_pdma_terminate_all(struct dma_chan *dchan)
  {
  	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
  	unsigned long flags;
  
  	if (!dchan)
  		return -EINVAL;
  
  	disable_chan(chan->phy);
  	mmp_pdma_free_phy(chan);
  	spin_lock_irqsave(&chan->desc_lock, flags);
  	mmp_pdma_free_desc_list(chan, &chan->chain_pending);
  	mmp_pdma_free_desc_list(chan, &chan->chain_running);
  	spin_unlock_irqrestore(&chan->desc_lock, flags);
  	chan->idle = true;
  	return 0;
  }
  static unsigned int mmp_pdma_residue(struct mmp_pdma_chan *chan,
  				     dma_cookie_t cookie)
  {
  	struct mmp_pdma_desc_sw *sw;
  	u32 curr, residue = 0;
  	bool passed = false;
  	bool cyclic = chan->cyclic_first != NULL;
  
  	/*
  	 * If the channel does not have a phy pointer anymore, it has already
  	 * been completed. Therefore, its residue is 0.
  	 */
  	if (!chan->phy)
  		return 0;
  
  	if (chan->dir == DMA_DEV_TO_MEM)
  		curr = readl(chan->phy->base + DTADR(chan->phy->idx));
  	else
  		curr = readl(chan->phy->base + DSADR(chan->phy->idx));
  
  	list_for_each_entry(sw, &chan->chain_running, node) {
  		u32 start, end, len;
  
  		if (chan->dir == DMA_DEV_TO_MEM)
  			start = sw->desc.dtadr;
  		else
  			start = sw->desc.dsadr;
  
  		len = sw->desc.dcmd & DCMD_LENGTH;
  		end = start + len;
  
  		/*
  		 * 'passed' will be latched once we found the descriptor which
  		 * lies inside the boundaries of the curr pointer. All
  		 * descriptors that occur in the list _after_ we found that
  		 * partially handled descriptor are still to be processed and
  		 * are hence added to the residual bytes counter.
  		 */
  
  		if (passed) {
  			residue += len;
  		} else if (curr >= start && curr <= end) {
  			residue += end - curr;
  			passed = true;
  		}
  
  		/*
  		 * Descriptors that have the ENDIRQEN bit set mark the end of a
  		 * transaction chain, and the cookie assigned with it has been
  		 * returned previously from mmp_pdma_tx_submit().
  		 *
  		 * In case we have multiple transactions in the running chain,
  		 * and the cookie does not match the one the user asked us
  		 * about, reset the state variables and start over.
  		 *
  		 * This logic does not apply to cyclic transactions, where all
  		 * descriptors have the ENDIRQEN bit set, and for which we
  		 * can't have multiple transactions on one channel anyway.
  		 */
  		if (cyclic || !(sw->desc.dcmd & DCMD_ENDIRQEN))
  			continue;
  
  		if (sw->async_tx.cookie == cookie) {
  			return residue;
  		} else {
  			residue = 0;
  			passed = false;
  		}
  	}
  
  	/* We should only get here in case of cyclic transactions */
  	return residue;
  }
  static enum dma_status mmp_pdma_tx_status(struct dma_chan *dchan,
  					  dma_cookie_t cookie,
  					  struct dma_tx_state *txstate)
  {
  	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
  	enum dma_status ret;
  
  	ret = dma_cookie_status(dchan, cookie, txstate);
  	if (likely(ret != DMA_ERROR))
  		dma_set_residue(txstate, mmp_pdma_residue(chan, cookie));
  
  	return ret;
  }
  /*
   * mmp_pdma_issue_pending - Issue the DMA start command
   * pending list ==> running list
   */
  static void mmp_pdma_issue_pending(struct dma_chan *dchan)
  {
  	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
  	unsigned long flags;
  
  	spin_lock_irqsave(&chan->desc_lock, flags);
  	start_pending_queue(chan);
  	spin_unlock_irqrestore(&chan->desc_lock, flags);
  }
  
  /*
   * dma_do_tasklet
   * Do call back
   * Start pending list
   */
  static void dma_do_tasklet(struct tasklet_struct *t)
  {
  	struct mmp_pdma_chan *chan = from_tasklet(chan, t, tasklet);
  	struct mmp_pdma_desc_sw *desc, *_desc;
  	LIST_HEAD(chain_cleanup);
  	unsigned long flags;
  	struct dmaengine_desc_callback cb;

  	if (chan->cyclic_first) {
  		spin_lock_irqsave(&chan->desc_lock, flags);
  		desc = chan->cyclic_first;
  		dmaengine_desc_get_callback(&desc->async_tx, &cb);
  		spin_unlock_irqrestore(&chan->desc_lock, flags);
  		dmaengine_desc_callback_invoke(&cb, NULL);
  
  		return;
  	}
  
  	/* submit pending list; callback for each desc; free desc */
  	spin_lock_irqsave(&chan->desc_lock, flags);
  	list_for_each_entry_safe(desc, _desc, &chan->chain_running, node) {
  		/*
  		 * move the descriptors to a temporary list so we can drop
  		 * the lock during the entire cleanup operation
  		 */
  		list_move(&desc->node, &chain_cleanup);

  		/*
  		 * Look for the first list entry which has the ENDIRQEN flag
  		 * set. That is the descriptor we got an interrupt for, so
  		 * complete that transaction and its cookie.
  		 */
  		if (desc->desc.dcmd & DCMD_ENDIRQEN) {
  			dma_cookie_t cookie = desc->async_tx.cookie;
  			dma_cookie_complete(&desc->async_tx);
  			dev_dbg(chan->dev, "completed_cookie=%d\n", cookie);
  			break;
  		}
  	}
  
  	/*
  	 * The hardware is idle and ready for more when the
  	 * chain_running list is empty.
  	 */
  	chan->idle = list_empty(&chan->chain_running);
  
  	/* Start any pending transactions automatically */
  	start_pending_queue(chan);
  	spin_unlock_irqrestore(&chan->desc_lock, flags);
  
  	/* Run the callback for each descriptor, in order */
  	list_for_each_entry_safe(desc, _desc, &chain_cleanup, node) {
  		struct dma_async_tx_descriptor *txd = &desc->async_tx;
  
  		/* Remove from the list of transactions */
  		list_del(&desc->node);
  		/* Run the link descriptor callback function */
  		dmaengine_desc_get_callback(txd, &cb);
  		dmaengine_desc_callback_invoke(&cb, NULL);
  
  		dma_pool_free(chan->desc_pool, desc, txd->phys);
  	}
  }
  static int mmp_pdma_remove(struct platform_device *op)
  {
  	struct mmp_pdma_device *pdev = platform_get_drvdata(op);
  	struct mmp_pdma_phy *phy;
  	int i, irq = 0, irq_num = 0;
  	if (op->dev.of_node)
  		of_dma_controller_free(op->dev.of_node);
  
  	for (i = 0; i < pdev->dma_channels; i++) {
  		if (platform_get_irq(op, i) > 0)
  			irq_num++;
  	}
  
  	if (irq_num != pdev->dma_channels) {
  		irq = platform_get_irq(op, 0);
  		devm_free_irq(&op->dev, irq, pdev);
  	} else {
  		for (i = 0; i < pdev->dma_channels; i++) {
  			phy = &pdev->phy[i];
  			irq = platform_get_irq(op, i);
  			devm_free_irq(&op->dev, irq, phy);
  		}
  	}
  
  	dma_async_device_unregister(&pdev->device);
  	return 0;
  }
  static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev, int idx, int irq)
  {
  	struct mmp_pdma_phy *phy  = &pdev->phy[idx];
  	struct mmp_pdma_chan *chan;
  	int ret;
  	chan = devm_kzalloc(pdev->dev, sizeof(*chan), GFP_KERNEL);
  	if (chan == NULL)
  		return -ENOMEM;
  
  	phy->idx = idx;
  	phy->base = pdev->base;
  
  	if (irq) {
  		ret = devm_request_irq(pdev->dev, irq, mmp_pdma_chan_handler,
  				       IRQF_SHARED, "pdma", phy);
  		if (ret) {
  			dev_err(pdev->dev, "channel request irq fail!\n");
  			return ret;
  		}
  	}
  
  	spin_lock_init(&chan->desc_lock);
  	chan->dev = pdev->dev;
  	chan->chan.device = &pdev->device;
  	tasklet_setup(&chan->tasklet, dma_do_tasklet);
  	INIT_LIST_HEAD(&chan->chain_pending);
  	INIT_LIST_HEAD(&chan->chain_running);
  
  	/* register virt channel to dma engine */
  	list_add_tail(&chan->chan.device_node, &pdev->device.channels);
  
  	return 0;
  }
  static const struct of_device_id mmp_pdma_dt_ids[] = {
  	{ .compatible = "marvell,pdma-1.0", },
  	{}
  };
  MODULE_DEVICE_TABLE(of, mmp_pdma_dt_ids);
  static struct dma_chan *mmp_pdma_dma_xlate(struct of_phandle_args *dma_spec,
  					   struct of_dma *ofdma)
  {
  	struct mmp_pdma_device *d = ofdma->of_dma_data;
  	struct dma_chan *chan;

  	chan = dma_get_any_slave_channel(&d->device);
  	if (!chan)
  		return NULL;
  	to_mmp_pdma_chan(chan)->drcmr = dma_spec->args[0];
  
  	return chan;
  }
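  /*
   * Device tree consumers reference this controller with a single cell
   * that mmp_pdma_dma_xlate() above stores as the DRCMR request number.
   * Sketch only (the request line number is peripheral specific):
   *
   *	dmas = <&pdma0 21>;
   *	dma-names = "rx";
   */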
  static int mmp_pdma_probe(struct platform_device *op)
  {
  	struct mmp_pdma_device *pdev;
  	const struct of_device_id *of_id;
  	struct mmp_dma_platdata *pdata = dev_get_platdata(&op->dev);
  	struct resource *iores;
  	int i, ret, irq = 0;
  	int dma_channels = 0, irq_num = 0;
  	const enum dma_slave_buswidth widths =
  		DMA_SLAVE_BUSWIDTH_1_BYTE   | DMA_SLAVE_BUSWIDTH_2_BYTES |
  		DMA_SLAVE_BUSWIDTH_4_BYTES;
  
  	pdev = devm_kzalloc(&op->dev, sizeof(*pdev), GFP_KERNEL);
  	if (!pdev)
  		return -ENOMEM;

  	pdev->dev = &op->dev;
  	spin_lock_init(&pdev->phy_lock);
  	iores = platform_get_resource(op, IORESOURCE_MEM, 0);
  	pdev->base = devm_ioremap_resource(pdev->dev, iores);
  	if (IS_ERR(pdev->base))
  		return PTR_ERR(pdev->base);
  
  	of_id = of_match_device(mmp_pdma_dt_ids, pdev->dev);
  	if (of_id)
  		of_property_read_u32(pdev->dev->of_node, "#dma-channels",
  				     &dma_channels);
  	else if (pdata && pdata->dma_channels)
  		dma_channels = pdata->dma_channels;
  	else
  		dma_channels = 32;	/* default 32 channel */
  	pdev->dma_channels = dma_channels;
  
  	for (i = 0; i < dma_channels; i++) {
  		if (platform_get_irq_optional(op, i) > 0)
  			irq_num++;
  	}
  	pdev->phy = devm_kcalloc(pdev->dev, dma_channels, sizeof(*pdev->phy),
  				 GFP_KERNEL);
  	if (pdev->phy == NULL)
  		return -ENOMEM;
  
  	INIT_LIST_HEAD(&pdev->device.channels);
  
  	if (irq_num != dma_channels) {
  		/* all chan share one irq, demux inside */
  		irq = platform_get_irq(op, 0);
  		ret = devm_request_irq(pdev->dev, irq, mmp_pdma_int_handler,
  				       IRQF_SHARED, "pdma", pdev);
  		if (ret)
  			return ret;
  	}
  
  	for (i = 0; i < dma_channels; i++) {
  		irq = (irq_num != dma_channels) ? 0 : platform_get_irq(op, i);
  		ret = mmp_pdma_chan_init(pdev, i, irq);
  		if (ret)
  			return ret;
  	}
  
  	dma_cap_set(DMA_SLAVE, pdev->device.cap_mask);
  	dma_cap_set(DMA_MEMCPY, pdev->device.cap_mask);
  	dma_cap_set(DMA_CYCLIC, pdev->device.cap_mask);
  	dma_cap_set(DMA_PRIVATE, pdev->device.cap_mask);
  	pdev->device.dev = &op->dev;
  	pdev->device.device_alloc_chan_resources = mmp_pdma_alloc_chan_resources;
  	pdev->device.device_free_chan_resources = mmp_pdma_free_chan_resources;
  	pdev->device.device_tx_status = mmp_pdma_tx_status;
  	pdev->device.device_prep_dma_memcpy = mmp_pdma_prep_memcpy;
  	pdev->device.device_prep_slave_sg = mmp_pdma_prep_slave_sg;
  	pdev->device.device_prep_dma_cyclic = mmp_pdma_prep_dma_cyclic;
  	pdev->device.device_issue_pending = mmp_pdma_issue_pending;
  	pdev->device.device_config = mmp_pdma_config;
  	pdev->device.device_terminate_all = mmp_pdma_terminate_all;
  	pdev->device.copy_align = DMAENGINE_ALIGN_8_BYTES;
  	pdev->device.src_addr_widths = widths;
  	pdev->device.dst_addr_widths = widths;
  	pdev->device.directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
  	pdev->device.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
  
  	if (pdev->dev->coherent_dma_mask)
  		dma_set_mask(pdev->dev, pdev->dev->coherent_dma_mask);
  	else
  		dma_set_mask(pdev->dev, DMA_BIT_MASK(64));
  
  	ret = dma_async_device_register(&pdev->device);
  	if (ret) {
  		dev_err(pdev->device.dev, "unable to register\n");
  		return ret;
  	}
  	if (op->dev.of_node) {
  		/* Device-tree DMA controller registration */
  		ret = of_dma_controller_register(op->dev.of_node,
  						 mmp_pdma_dma_xlate, pdev);
  		if (ret < 0) {
  			dev_err(&op->dev, "of_dma_controller_register failed\n");
  			return ret;
  		}
  	}
  	platform_set_drvdata(op, pdev);
  	dev_info(pdev->device.dev, "initialized %d channels\n", dma_channels);
  	return 0;
  }
  
  static const struct platform_device_id mmp_pdma_id_table[] = {
  	{ "mmp-pdma", },
  	{ },
  };
  
  static struct platform_driver mmp_pdma_driver = {
  	.driver		= {
  		.name	= "mmp-pdma",
  		.of_match_table = mmp_pdma_dt_ids,
  	},
  	.id_table	= mmp_pdma_id_table,
  	.probe		= mmp_pdma_probe,
  	.remove		= mmp_pdma_remove,
  };
  bool mmp_pdma_filter_fn(struct dma_chan *chan, void *param)
  {
  	struct mmp_pdma_chan *c = to_mmp_pdma_chan(chan);
  
  	if (chan->device->dev->driver != &mmp_pdma_driver.driver)
  		return false;
  	c->drcmr = *(unsigned int *)param;
  
  	return true;
  }
  EXPORT_SYMBOL_GPL(mmp_pdma_filter_fn);
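  /*
   * Usage sketch for non-DT clients (illustrative only, names are made
   * up): pass a pointer to the DRCMR request number as the filter
   * parameter, e.g.
   *
   *	dma_cap_mask_t mask;
   *	unsigned int drcmr = 21;	(hypothetical request line)
   *	struct dma_chan *chan;
   *
   *	dma_cap_zero(mask);
   *	dma_cap_set(DMA_SLAVE, mask);
   *	chan = dma_request_channel(mask, mmp_pdma_filter_fn, &drcmr);
   *
   * Device tree users should describe the channel with dmas/dma-names
   * and go through mmp_pdma_dma_xlate() instead.
   */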
  module_platform_driver(mmp_pdma_driver);
  MODULE_DESCRIPTION("MARVELL MMP Peripheral DMA Driver");
  MODULE_AUTHOR("Marvell International Ltd.");
  MODULE_LICENSE("GPL v2");