drivers/dma/mmp_pdma.c
  /*
   * Copyright 2012 Marvell International Ltd.
   *
   * This program is free software; you can redistribute it and/or modify
   * it under the terms of the GNU General Public License version 2 as
   * published by the Free Software Foundation.
   */

  #include <linux/err.h>
  #include <linux/module.h>
  #include <linux/init.h>
  #include <linux/types.h>
  #include <linux/interrupt.h>
  #include <linux/dma-mapping.h>
  #include <linux/slab.h>
  #include <linux/dmaengine.h>
  #include <linux/platform_device.h>
  #include <linux/device.h>
  #include <linux/platform_data/mmp_dma.h>
  #include <linux/dmapool.h>
  #include <linux/of_device.h>
  #include <linux/of_dma.h>
  #include <linux/of.h>
  #include <linux/dma/mmp-pdma.h>
  
  #include "dmaengine.h"
  
  #define DCSR		0x0000
  #define DALGN		0x00a0
  #define DINT		0x00f0
  #define DDADR		0x0200
  #define DSADR		0x0204
  #define DTADR		0x0208
  #define DCMD		0x020c
  #define DCSR_RUN	BIT(31)	/* Run Bit (read / write) */
  #define DCSR_NODESC	BIT(30)	/* No-Descriptor Fetch (read / write) */
  #define DCSR_STOPIRQEN	BIT(29)	/* Stop Interrupt Enable (read / write) */
  #define DCSR_REQPEND	BIT(8)	/* Request Pending (read-only) */
  #define DCSR_STOPSTATE	BIT(3)	/* Stop State (read-only) */
  #define DCSR_ENDINTR	BIT(2)	/* End Interrupt (read / write) */
  #define DCSR_STARTINTR	BIT(1)	/* Start Interrupt (read / write) */
  #define DCSR_BUSERR	BIT(0)	/* Bus Error Interrupt (read / write) */
  
  #define DCSR_EORIRQEN	BIT(28)	/* End of Receive Interrupt Enable (R/W) */
  #define DCSR_EORJMPEN	BIT(27)	/* Jump to next descriptor on EOR */
  #define DCSR_EORSTOPEN	BIT(26)	/* STOP on an EOR */
  #define DCSR_SETCMPST	BIT(25)	/* Set Descriptor Compare Status */
  #define DCSR_CLRCMPST	BIT(24)	/* Clear Descriptor Compare Status */
  #define DCSR_CMPST	BIT(10)	/* The Descriptor Compare Status */
  #define DCSR_EORINTR	BIT(9)	/* The end of Receive */
  
  #define DRCMR(n)	((((n) < 64) ? 0x0100 : 0x1100) + (((n) & 0x3f) << 2))
  #define DRCMR_MAPVLD	BIT(7)	/* Map Valid (read / write) */
  #define DRCMR_CHLNUM	0x1f	/* mask for Channel Number (read / write) */
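
/*
 * DRCMR(n) addresses the request-to-channel map register for peripheral
 * request line n; lines 64 and above sit in a second register bank at
 * offset 0x1100.  enable_chan() writes DRCMR_MAPVLD | <channel index>
 * there to route a request, and mmp_pdma_free_phy() clears the mapping.
 */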
  
  #define DDADR_DESCADDR	0xfffffff0	/* Address of next descriptor (mask) */
  #define DDADR_STOP	BIT(0)	/* Stop (read / write) */
  
  #define DCMD_INCSRCADDR	BIT(31)	/* Source Address Increment Setting. */
  #define DCMD_INCTRGADDR	BIT(30)	/* Target Address Increment Setting. */
  #define DCMD_FLOWSRC	BIT(29)	/* Flow Control by the source. */
  #define DCMD_FLOWTRG	BIT(28)	/* Flow Control by the target. */
  #define DCMD_STARTIRQEN	BIT(22)	/* Start Interrupt Enable */
  #define DCMD_ENDIRQEN	BIT(21)	/* End Interrupt Enable */
  #define DCMD_ENDIAN	BIT(18)	/* Device Endian-ness. */
  #define DCMD_BURST8	(1 << 16)	/* 8 byte burst */
  #define DCMD_BURST16	(2 << 16)	/* 16 byte burst */
  #define DCMD_BURST32	(3 << 16)	/* 32 byte burst */
  #define DCMD_WIDTH1	(1 << 14)	/* 1 byte width */
  #define DCMD_WIDTH2	(2 << 14)	/* 2 byte width (HalfWord) */
  #define DCMD_WIDTH4	(3 << 14)	/* 4 byte width (Word) */
  #define DCMD_LENGTH	0x01fff		/* length mask (max = 8K - 1) */
  
  #define PDMA_ALIGNMENT		3
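/*
 * PDMA_ALIGNMENT feeds the dmaengine copy_align field in probe (an
 * alignment shift, so 1 << 3 = 8 bytes); PDMA_MAX_DESC_BYTES below is
 * the per-descriptor transfer limit imposed by the DCMD length field.
 */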
  #define PDMA_MAX_DESC_BYTES	DCMD_LENGTH
  
  struct mmp_pdma_desc_hw {
  	u32 ddadr;	/* Points to the next descriptor + flags */
  	u32 dsadr;	/* DSADR value for the current transfer */
  	u32 dtadr;	/* DTADR value for the current transfer */
  	u32 dcmd;	/* DCMD value for the current transfer */
  } __aligned(32);
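
/*
 * Hardware descriptors are chained in memory through ddadr; the engine
 * follows the chain until it reaches an entry with DDADR_STOP set, or
 * loops forever for cyclic transfers, where the tail links back to the
 * head (see the prep_* callbacks below).
 */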
  
  struct mmp_pdma_desc_sw {
  	struct mmp_pdma_desc_hw desc;
  	struct list_head node;
  	struct list_head tx_list;
  	struct dma_async_tx_descriptor async_tx;
  };
  
  struct mmp_pdma_phy;
  
  struct mmp_pdma_chan {
  	struct device *dev;
  	struct dma_chan chan;
  	struct dma_async_tx_descriptor desc;
  	struct mmp_pdma_phy *phy;
  	enum dma_transfer_direction dir;
  	struct mmp_pdma_desc_sw *cyclic_first;	/* first desc_sw if channel
  						 * is in cyclic mode */
  	/* channel's basic info */
  	struct tasklet_struct tasklet;
  	u32 dcmd;
  	u32 drcmr;
  	u32 dev_addr;
  
  	/* list for desc */
  	spinlock_t desc_lock;		/* Descriptor list lock */
  	struct list_head chain_pending;	/* Link descriptors queue for pending */
  	struct list_head chain_running;	/* Link descriptors queue for running */
	bool idle;			/* channel state machine */
  	bool byte_align;
  
  	struct dma_pool *desc_pool;	/* Descriptors pool */
  };
  
  struct mmp_pdma_phy {
  	int idx;
  	void __iomem *base;
  	struct mmp_pdma_chan *vchan;
  };
  
  struct mmp_pdma_device {
  	int				dma_channels;
  	void __iomem			*base;
  	struct device			*dev;
  	struct dma_device		device;
  	struct mmp_pdma_phy		*phy;
  	spinlock_t phy_lock; /* protect alloc/free phy channels */
  };
  #define tx_to_mmp_pdma_desc(tx)					\
  	container_of(tx, struct mmp_pdma_desc_sw, async_tx)
  #define to_mmp_pdma_desc(lh)					\
  	container_of(lh, struct mmp_pdma_desc_sw, node)
  #define to_mmp_pdma_chan(dchan)					\
  	container_of(dchan, struct mmp_pdma_chan, chan)
  #define to_mmp_pdma_dev(dmadev)					\
  	container_of(dmadev, struct mmp_pdma_device, device)
  
  static void set_desc(struct mmp_pdma_phy *phy, dma_addr_t addr)
  {
  	u32 reg = (phy->idx << 4) + DDADR;
  
  	writel(addr, phy->base + reg);
  }
  
  static void enable_chan(struct mmp_pdma_phy *phy)
  {
  	u32 reg, dalgn;
  
  	if (!phy->vchan)
  		return;
  	reg = DRCMR(phy->vchan->drcmr);
  	writel(DRCMR_MAPVLD | phy->idx, phy->base + reg);
  	dalgn = readl(phy->base + DALGN);
  	if (phy->vchan->byte_align)
  		dalgn |= 1 << phy->idx;
  	else
  		dalgn &= ~(1 << phy->idx);
  	writel(dalgn, phy->base + DALGN);
  	reg = (phy->idx << 2) + DCSR;
  	writel(readl(phy->base + reg) | DCSR_RUN, phy->base + reg);
  }
  
  static void disable_chan(struct mmp_pdma_phy *phy)
  {
  	u32 reg;
  	if (!phy)
  		return;
  
  	reg = (phy->idx << 2) + DCSR;
  	writel(readl(phy->base + reg) & ~DCSR_RUN, phy->base + reg);
  }
  
  static int clear_chan_irq(struct mmp_pdma_phy *phy)
  {
  	u32 dcsr;
  	u32 dint = readl(phy->base + DINT);
  	u32 reg = (phy->idx << 2) + DCSR;
  	if (!(dint & BIT(phy->idx)))
  		return -EAGAIN;
  
  	/* clear irq */
  	dcsr = readl(phy->base + reg);
  	writel(dcsr, phy->base + reg);
  	if ((dcsr & DCSR_BUSERR) && (phy->vchan))
		dev_warn(phy->vchan->dev, "DCSR_BUSERR\n");
  
  	return 0;
  }
  
  static irqreturn_t mmp_pdma_chan_handler(int irq, void *dev_id)
  {
  	struct mmp_pdma_phy *phy = dev_id;
  	if (clear_chan_irq(phy) != 0)
  		return IRQ_NONE;
  
  	tasklet_schedule(&phy->vchan->tasklet);
  	return IRQ_HANDLED;
  }
  
  static irqreturn_t mmp_pdma_int_handler(int irq, void *dev_id)
  {
  	struct mmp_pdma_device *pdev = dev_id;
  	struct mmp_pdma_phy *phy;
  	u32 dint = readl(pdev->base + DINT);
  	int i, ret;
  	int irq_num = 0;
  
  	while (dint) {
  		i = __ffs(dint);
  		dint &= (dint - 1);
  		phy = &pdev->phy[i];
  		ret = mmp_pdma_chan_handler(irq, phy);
  		if (ret == IRQ_HANDLED)
  			irq_num++;
  	}
  
  	if (irq_num)
  		return IRQ_HANDLED;
  
  	return IRQ_NONE;
  }
  
  /* lookup free phy channel as descending priority */
  static struct mmp_pdma_phy *lookup_phy(struct mmp_pdma_chan *pchan)
  {
  	int prio, i;
  	struct mmp_pdma_device *pdev = to_mmp_pdma_dev(pchan->chan.device);
  	struct mmp_pdma_phy *phy, *found = NULL;
  	unsigned long flags;
  
  	/*
  	 * dma channel priorities
  	 * ch 0 - 3,  16 - 19  <--> (0)
  	 * ch 4 - 7,  20 - 23  <--> (1)
  	 * ch 8 - 11, 24 - 27  <--> (2)
  	 * ch 12 - 15, 28 - 31  <--> (3)
  	 */
  
  	spin_lock_irqsave(&pdev->phy_lock, flags);
  	for (prio = 0; prio <= ((pdev->dma_channels - 1) & 0xf) >> 2; prio++) {
  		for (i = 0; i < pdev->dma_channels; i++) {
  			if (prio != (i & 0xf) >> 2)
  				continue;
  			phy = &pdev->phy[i];
  			if (!phy->vchan) {
  				phy->vchan = pchan;
  				found = phy;
  				goto out_unlock;
  			}
  		}
  	}
  out_unlock:
  	spin_unlock_irqrestore(&pdev->phy_lock, flags);
  	return found;
  }
  static void mmp_pdma_free_phy(struct mmp_pdma_chan *pchan)
  {
  	struct mmp_pdma_device *pdev = to_mmp_pdma_dev(pchan->chan.device);
  	unsigned long flags;
  	u32 reg;
  
  	if (!pchan->phy)
  		return;
  	/* clear the channel mapping in DRCMR */
  	reg = DRCMR(pchan->phy->vchan->drcmr);
  	writel(0, pchan->phy->base + reg);
  	spin_lock_irqsave(&pdev->phy_lock, flags);
  	pchan->phy->vchan = NULL;
  	pchan->phy = NULL;
  	spin_unlock_irqrestore(&pdev->phy_lock, flags);
  }
  /**
   * start_pending_queue - transfer any pending transactions
   * pending list ==> running list
   */
  static void start_pending_queue(struct mmp_pdma_chan *chan)
  {
  	struct mmp_pdma_desc_sw *desc;
  
  	/* still in running, irq will start the pending list */
  	if (!chan->idle) {
  		dev_dbg(chan->dev, "DMA controller still busy
  ");
  		return;
  	}
  
  	if (list_empty(&chan->chain_pending)) {
  		/* chance to re-fetch phy channel with higher prio */
  		mmp_pdma_free_phy(chan);
  		dev_dbg(chan->dev, "no pending list
  ");
  		return;
  	}
  
  	if (!chan->phy) {
  		chan->phy = lookup_phy(chan);
  		if (!chan->phy) {
  			dev_dbg(chan->dev, "no free dma channel
  ");
  			return;
  		}
  	}
  
  	/*
  	 * pending -> running
	 * reinitialize pending list
  	 */
  	desc = list_first_entry(&chan->chain_pending,
  				struct mmp_pdma_desc_sw, node);
  	list_splice_tail_init(&chan->chain_pending, &chan->chain_running);
  
  	/*
  	 * Program the descriptor's address into the DMA controller,
  	 * then start the DMA transaction
  	 */
  	set_desc(chan->phy, desc->async_tx.phys);
  	enable_chan(chan->phy);
  	chan->idle = false;
  }
  
  
  /* desc->tx_list ==> pending list */
  static dma_cookie_t mmp_pdma_tx_submit(struct dma_async_tx_descriptor *tx)
  {
  	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(tx->chan);
  	struct mmp_pdma_desc_sw *desc = tx_to_mmp_pdma_desc(tx);
  	struct mmp_pdma_desc_sw *child;
  	unsigned long flags;
  	dma_cookie_t cookie = -EBUSY;
  
  	spin_lock_irqsave(&chan->desc_lock, flags);
  
  	list_for_each_entry(child, &desc->tx_list, node) {
  		cookie = dma_cookie_assign(&child->async_tx);
  	}
  	/* softly link to pending list - desc->tx_list ==> pending list */
  	list_splice_tail_init(&desc->tx_list, &chan->chain_pending);
  
  	spin_unlock_irqrestore(&chan->desc_lock, flags);
  
  	return cookie;
  }
  static struct mmp_pdma_desc_sw *
  mmp_pdma_alloc_descriptor(struct mmp_pdma_chan *chan)
  {
  	struct mmp_pdma_desc_sw *desc;
  	dma_addr_t pdesc;
  
  	desc = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &pdesc);
  	if (!desc) {
  		dev_err(chan->dev, "out of memory for link descriptor
  ");
  		return NULL;
  	}
  
  	memset(desc, 0, sizeof(*desc));
  	INIT_LIST_HEAD(&desc->tx_list);
  	dma_async_tx_descriptor_init(&desc->async_tx, &chan->chan);
  	/* each desc has submit */
  	desc->async_tx.tx_submit = mmp_pdma_tx_submit;
  	desc->async_tx.phys = pdesc;
  
  	return desc;
  }
  
  /**
   * mmp_pdma_alloc_chan_resources - Allocate resources for DMA channel.
   *
   * This function will create a dma pool for descriptor allocation.
   * Request irq only when channel is requested
   * Return - The number of allocated descriptors.
   */
  
  static int mmp_pdma_alloc_chan_resources(struct dma_chan *dchan)
  {
  	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
  
  	if (chan->desc_pool)
  		return 1;
  	chan->desc_pool = dma_pool_create(dev_name(&dchan->dev->device),
  					  chan->dev,
  					  sizeof(struct mmp_pdma_desc_sw),
  					  __alignof__(struct mmp_pdma_desc_sw),
  					  0);
  	if (!chan->desc_pool) {
  		dev_err(chan->dev, "unable to allocate descriptor pool
  ");
  		return -ENOMEM;
  	}

  	mmp_pdma_free_phy(chan);
  	chan->idle = true;
  	chan->dev_addr = 0;
  	return 1;
  }
  
  static void mmp_pdma_free_desc_list(struct mmp_pdma_chan *chan,
  				    struct list_head *list)
  {
  	struct mmp_pdma_desc_sw *desc, *_desc;
  
  	list_for_each_entry_safe(desc, _desc, list, node) {
  		list_del(&desc->node);
  		dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
  	}
  }
  
  static void mmp_pdma_free_chan_resources(struct dma_chan *dchan)
  {
  	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
  	unsigned long flags;
  
  	spin_lock_irqsave(&chan->desc_lock, flags);
  	mmp_pdma_free_desc_list(chan, &chan->chain_pending);
  	mmp_pdma_free_desc_list(chan, &chan->chain_running);
  	spin_unlock_irqrestore(&chan->desc_lock, flags);
  
  	dma_pool_destroy(chan->desc_pool);
  	chan->desc_pool = NULL;
  	chan->idle = true;
  	chan->dev_addr = 0;
  	mmp_pdma_free_phy(chan);
  	return;
  }
  
  static struct dma_async_tx_descriptor *
  mmp_pdma_prep_memcpy(struct dma_chan *dchan,
  		     dma_addr_t dma_dst, dma_addr_t dma_src,
  		     size_t len, unsigned long flags)
  {
  	struct mmp_pdma_chan *chan;
  	struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new;
  	size_t copy = 0;
  
  	if (!dchan)
  		return NULL;
  
  	if (!len)
  		return NULL;
  
  	chan = to_mmp_pdma_chan(dchan);
  	chan->byte_align = false;
  
  	if (!chan->dir) {
  		chan->dir = DMA_MEM_TO_MEM;
  		chan->dcmd = DCMD_INCTRGADDR | DCMD_INCSRCADDR;
  		chan->dcmd |= DCMD_BURST32;
  	}
  
  	do {
  		/* Allocate the link descriptor from DMA pool */
  		new = mmp_pdma_alloc_descriptor(chan);
  		if (!new) {
  			dev_err(chan->dev, "no memory for desc
  ");
  			goto fail;
  		}
  
  		copy = min_t(size_t, len, PDMA_MAX_DESC_BYTES);
  		if (dma_src & 0x7 || dma_dst & 0x7)
  			chan->byte_align = true;
  
  		new->desc.dcmd = chan->dcmd | (DCMD_LENGTH & copy);
  		new->desc.dsadr = dma_src;
  		new->desc.dtadr = dma_dst;
  
  		if (!first)
  			first = new;
  		else
  			prev->desc.ddadr = new->async_tx.phys;
  
  		new->async_tx.cookie = 0;
  		async_tx_ack(&new->async_tx);
  
  		prev = new;
  		len -= copy;
  
  		if (chan->dir == DMA_MEM_TO_DEV) {
  			dma_src += copy;
  		} else if (chan->dir == DMA_DEV_TO_MEM) {
  			dma_dst += copy;
  		} else if (chan->dir == DMA_MEM_TO_MEM) {
  			dma_src += copy;
  			dma_dst += copy;
  		}
  
  		/* Insert the link descriptor to the LD ring */
  		list_add_tail(&new->node, &first->tx_list);
  	} while (len);
  
  	first->async_tx.flags = flags; /* client is in control of this ack */
  	first->async_tx.cookie = -EBUSY;
  
  	/* last desc and fire IRQ */
  	new->desc.ddadr = DDADR_STOP;
  	new->desc.dcmd |= DCMD_ENDIRQEN;
  	chan->cyclic_first = NULL;
  	return &first->async_tx;
  
  fail:
  	if (first)
  		mmp_pdma_free_desc_list(chan, &first->tx_list);
  	return NULL;
  }
  
  static struct dma_async_tx_descriptor *
  mmp_pdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
  		       unsigned int sg_len, enum dma_transfer_direction dir,
  		       unsigned long flags, void *context)
  {
  	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
  	struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new = NULL;
  	size_t len, avail;
  	struct scatterlist *sg;
  	dma_addr_t addr;
  	int i;
  
  	if ((sgl == NULL) || (sg_len == 0))
  		return NULL;
  	chan->byte_align = false;
  	for_each_sg(sgl, sg, sg_len, i) {
  		addr = sg_dma_address(sg);
		avail = sg_dma_len(sg);
  
  		do {
  			len = min_t(size_t, avail, PDMA_MAX_DESC_BYTES);
  			if (addr & 0x7)
  				chan->byte_align = true;
  
  			/* allocate and populate the descriptor */
  			new = mmp_pdma_alloc_descriptor(chan);
  			if (!new) {
  				dev_err(chan->dev, "no memory for desc
  ");
  				goto fail;
  			}
  
  			new->desc.dcmd = chan->dcmd | (DCMD_LENGTH & len);
  			if (dir == DMA_MEM_TO_DEV) {
  				new->desc.dsadr = addr;
  				new->desc.dtadr = chan->dev_addr;
  			} else {
  				new->desc.dsadr = chan->dev_addr;
  				new->desc.dtadr = addr;
  			}
  
  			if (!first)
  				first = new;
  			else
  				prev->desc.ddadr = new->async_tx.phys;
  
  			new->async_tx.cookie = 0;
  			async_tx_ack(&new->async_tx);
  			prev = new;
  
  			/* Insert the link descriptor to the LD ring */
  			list_add_tail(&new->node, &first->tx_list);
  
  			/* update metadata */
  			addr += len;
  			avail -= len;
  		} while (avail);
  	}
  
  	first->async_tx.cookie = -EBUSY;
  	first->async_tx.flags = flags;
  
  	/* last desc and fire IRQ */
  	new->desc.ddadr = DDADR_STOP;
  	new->desc.dcmd |= DCMD_ENDIRQEN;
  	chan->dir = dir;
  	chan->cyclic_first = NULL;
  
  	return &first->async_tx;
  
  fail:
  	if (first)
  		mmp_pdma_free_desc_list(chan, &first->tx_list);
  	return NULL;
  }
  static struct dma_async_tx_descriptor *
  mmp_pdma_prep_dma_cyclic(struct dma_chan *dchan,
  			 dma_addr_t buf_addr, size_t len, size_t period_len,
  			 enum dma_transfer_direction direction,
  			 unsigned long flags, void *context)
  {
  	struct mmp_pdma_chan *chan;
  	struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new;
  	dma_addr_t dma_src, dma_dst;
  
  	if (!dchan || !len || !period_len)
  		return NULL;
  
  	/* the buffer length must be a multiple of period_len */
  	if (len % period_len != 0)
  		return NULL;
  
  	if (period_len > PDMA_MAX_DESC_BYTES)
  		return NULL;
  
  	chan = to_mmp_pdma_chan(dchan);
  
  	switch (direction) {
  	case DMA_MEM_TO_DEV:
  		dma_src = buf_addr;
  		dma_dst = chan->dev_addr;
  		break;
  	case DMA_DEV_TO_MEM:
  		dma_dst = buf_addr;
  		dma_src = chan->dev_addr;
  		break;
  	default:
  		dev_err(chan->dev, "Unsupported direction for cyclic DMA
  ");
  		return NULL;
  	}
  
  	chan->dir = direction;
  
  	do {
  		/* Allocate the link descriptor from DMA pool */
  		new = mmp_pdma_alloc_descriptor(chan);
  		if (!new) {
  			dev_err(chan->dev, "no memory for desc
  ");
  			goto fail;
  		}
  		new->desc.dcmd = (chan->dcmd | DCMD_ENDIRQEN |
  				  (DCMD_LENGTH & period_len));
  		new->desc.dsadr = dma_src;
  		new->desc.dtadr = dma_dst;
  
  		if (!first)
  			first = new;
  		else
  			prev->desc.ddadr = new->async_tx.phys;
  
  		new->async_tx.cookie = 0;
  		async_tx_ack(&new->async_tx);
  
  		prev = new;
  		len -= period_len;
  
  		if (chan->dir == DMA_MEM_TO_DEV)
  			dma_src += period_len;
  		else
  			dma_dst += period_len;
  
  		/* Insert the link descriptor to the LD ring */
  		list_add_tail(&new->node, &first->tx_list);
  	} while (len);
  
  	first->async_tx.flags = flags; /* client is in control of this ack */
  	first->async_tx.cookie = -EBUSY;
  
  	/* make the cyclic link */
  	new->desc.ddadr = first->async_tx.phys;
  	chan->cyclic_first = first;
  	return &first->async_tx;
  
  fail:
  	if (first)
  		mmp_pdma_free_desc_list(chan, &first->tx_list);
  	return NULL;
  }
  
  static int mmp_pdma_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd,
  			    unsigned long arg)
  {
  	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
  	struct dma_slave_config *cfg = (void *)arg;
  	unsigned long flags;
  	u32 maxburst = 0, addr = 0;
  	enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
  
  	if (!dchan)
  		return -EINVAL;
  
  	switch (cmd) {
  	case DMA_TERMINATE_ALL:
  		disable_chan(chan->phy);
  		mmp_pdma_free_phy(chan);
  		spin_lock_irqsave(&chan->desc_lock, flags);
  		mmp_pdma_free_desc_list(chan, &chan->chain_pending);
  		mmp_pdma_free_desc_list(chan, &chan->chain_running);
  		spin_unlock_irqrestore(&chan->desc_lock, flags);
  		chan->idle = true;
  		break;
  	case DMA_SLAVE_CONFIG:
  		if (cfg->direction == DMA_DEV_TO_MEM) {
  			chan->dcmd = DCMD_INCTRGADDR | DCMD_FLOWSRC;
  			maxburst = cfg->src_maxburst;
  			width = cfg->src_addr_width;
  			addr = cfg->src_addr;
  		} else if (cfg->direction == DMA_MEM_TO_DEV) {
  			chan->dcmd = DCMD_INCSRCADDR | DCMD_FLOWTRG;
  			maxburst = cfg->dst_maxburst;
  			width = cfg->dst_addr_width;
  			addr = cfg->dst_addr;
  		}
  
  		if (width == DMA_SLAVE_BUSWIDTH_1_BYTE)
  			chan->dcmd |= DCMD_WIDTH1;
  		else if (width == DMA_SLAVE_BUSWIDTH_2_BYTES)
  			chan->dcmd |= DCMD_WIDTH2;
  		else if (width == DMA_SLAVE_BUSWIDTH_4_BYTES)
  			chan->dcmd |= DCMD_WIDTH4;
  
  		if (maxburst == 8)
  			chan->dcmd |= DCMD_BURST8;
  		else if (maxburst == 16)
  			chan->dcmd |= DCMD_BURST16;
  		else if (maxburst == 32)
  			chan->dcmd |= DCMD_BURST32;
  		chan->dir = cfg->direction;
  		chan->dev_addr = addr;
  		/* FIXME: drivers should be ported over to use the filter
  		 * function. Once that's done, the following two lines can
  		 * be removed.
  		 */
  		if (cfg->slave_id)
  			chan->drcmr = cfg->slave_id;
  		break;
  	default:
  		return -ENOSYS;
  	}
  	return 0;
  }
  
  static enum dma_status mmp_pdma_tx_status(struct dma_chan *dchan,
  					  dma_cookie_t cookie,
  					  struct dma_tx_state *txstate)
  {
  	return dma_cookie_status(dchan, cookie, txstate);
  }
  
  /**
   * mmp_pdma_issue_pending - Issue the DMA start command
   * pending list ==> running list
   */
  static void mmp_pdma_issue_pending(struct dma_chan *dchan)
  {
  	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
  	unsigned long flags;
  
  	spin_lock_irqsave(&chan->desc_lock, flags);
  	start_pending_queue(chan);
  	spin_unlock_irqrestore(&chan->desc_lock, flags);
  }
  
  /*
   * dma_do_tasklet
   * Do call back
   * Start pending list
   */
  static void dma_do_tasklet(unsigned long data)
  {
  	struct mmp_pdma_chan *chan = (struct mmp_pdma_chan *)data;
  	struct mmp_pdma_desc_sw *desc, *_desc;
  	LIST_HEAD(chain_cleanup);
  	unsigned long flags;
  	if (chan->cyclic_first) {
  		dma_async_tx_callback cb = NULL;
  		void *cb_data = NULL;

  		spin_lock_irqsave(&chan->desc_lock, flags);
  		desc = chan->cyclic_first;
  		cb = desc->async_tx.callback;
  		cb_data = desc->async_tx.callback_param;
  		spin_unlock_irqrestore(&chan->desc_lock, flags);
  
  		if (cb)
  			cb(cb_data);
  
  		return;
  	}
  
  	/* submit pending list; callback for each desc; free desc */
  	spin_lock_irqsave(&chan->desc_lock, flags);
  	list_for_each_entry_safe(desc, _desc, &chan->chain_running, node) {
  		/*
  		 * move the descriptors to a temporary list so we can drop
  		 * the lock during the entire cleanup operation
  		 */
  		list_move(&desc->node, &chain_cleanup);

  		/*
  		 * Look for the first list entry which has the ENDIRQEN flag
  		 * set. That is the descriptor we got an interrupt for, so
  		 * complete that transaction and its cookie.
  		 */
  		if (desc->desc.dcmd & DCMD_ENDIRQEN) {
  			dma_cookie_t cookie = desc->async_tx.cookie;
  			dma_cookie_complete(&desc->async_tx);
  			dev_dbg(chan->dev, "completed_cookie=%d
  ", cookie);
  			break;
  		}
  	}
  
  	/*
  	 * The hardware is idle and ready for more when the
  	 * chain_running list is empty.
  	 */
  	chan->idle = list_empty(&chan->chain_running);
  
  	/* Start any pending transactions automatically */
  	start_pending_queue(chan);
  	spin_unlock_irqrestore(&chan->desc_lock, flags);
  
  	/* Run the callback for each descriptor, in order */
  	list_for_each_entry_safe(desc, _desc, &chain_cleanup, node) {
  		struct dma_async_tx_descriptor *txd = &desc->async_tx;
  
  		/* Remove from the list of transactions */
  		list_del(&desc->node);
  		/* Run the link descriptor callback function */
  		if (txd->callback)
  			txd->callback(txd->callback_param);
  
  		dma_pool_free(chan->desc_pool, desc, txd->phys);
  	}
  }
  static int mmp_pdma_remove(struct platform_device *op)
  {
  	struct mmp_pdma_device *pdev = platform_get_drvdata(op);
  
  	dma_async_device_unregister(&pdev->device);
  	return 0;
  }
  static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev, int idx, int irq)
  {
  	struct mmp_pdma_phy *phy  = &pdev->phy[idx];
  	struct mmp_pdma_chan *chan;
  	int ret;
  	chan = devm_kzalloc(pdev->dev, sizeof(struct mmp_pdma_chan),
  			    GFP_KERNEL);
  	if (chan == NULL)
  		return -ENOMEM;
  
  	phy->idx = idx;
  	phy->base = pdev->base;
  
  	if (irq) {
  		ret = devm_request_irq(pdev->dev, irq, mmp_pdma_chan_handler,
  				       IRQF_SHARED, "pdma", phy);
  		if (ret) {
  			dev_err(pdev->dev, "channel request irq fail!
  ");
  			return ret;
  		}
  	}
  
  	spin_lock_init(&chan->desc_lock);
  	chan->dev = pdev->dev;
  	chan->chan.device = &pdev->device;
  	tasklet_init(&chan->tasklet, dma_do_tasklet, (unsigned long)chan);
  	INIT_LIST_HEAD(&chan->chain_pending);
  	INIT_LIST_HEAD(&chan->chain_running);
  
  	/* register virt channel to dma engine */
  	list_add_tail(&chan->chan.device_node, &pdev->device.channels);
  
  	return 0;
  }
  
  static struct of_device_id mmp_pdma_dt_ids[] = {
  	{ .compatible = "marvell,pdma-1.0", },
  	{}
  };
  MODULE_DEVICE_TABLE(of, mmp_pdma_dt_ids);
  static struct dma_chan *mmp_pdma_dma_xlate(struct of_phandle_args *dma_spec,
  					   struct of_dma *ofdma)
  {
  	struct mmp_pdma_device *d = ofdma->of_dma_data;
  	struct dma_chan *chan;

  	chan = dma_get_any_slave_channel(&d->device);
  	if (!chan)
  		return NULL;
  	to_mmp_pdma_chan(chan)->drcmr = dma_spec->args[0];
  
  	return chan;
  }
  static int mmp_pdma_probe(struct platform_device *op)
  {
  	struct mmp_pdma_device *pdev;
  	const struct of_device_id *of_id;
  	struct mmp_dma_platdata *pdata = dev_get_platdata(&op->dev);
  	struct resource *iores;
  	int i, ret, irq = 0;
  	int dma_channels = 0, irq_num = 0;
  
  	pdev = devm_kzalloc(&op->dev, sizeof(*pdev), GFP_KERNEL);
  	if (!pdev)
  		return -ENOMEM;

  	pdev->dev = &op->dev;
  	spin_lock_init(&pdev->phy_lock);
  	iores = platform_get_resource(op, IORESOURCE_MEM, 0);
  	pdev->base = devm_ioremap_resource(pdev->dev, iores);
  	if (IS_ERR(pdev->base))
  		return PTR_ERR(pdev->base);
  
  	of_id = of_match_device(mmp_pdma_dt_ids, pdev->dev);
  	if (of_id)
  		of_property_read_u32(pdev->dev->of_node, "#dma-channels",
  				     &dma_channels);
  	else if (pdata && pdata->dma_channels)
  		dma_channels = pdata->dma_channels;
  	else
  		dma_channels = 32;	/* default 32 channel */
  	pdev->dma_channels = dma_channels;
  
  	for (i = 0; i < dma_channels; i++) {
  		if (platform_get_irq(op, i) > 0)
  			irq_num++;
  	}
  	pdev->phy = devm_kcalloc(pdev->dev,
  				 dma_channels, sizeof(struct mmp_pdma_chan),
  				 GFP_KERNEL);
  	if (pdev->phy == NULL)
  		return -ENOMEM;
  
  	INIT_LIST_HEAD(&pdev->device.channels);
  
  	if (irq_num != dma_channels) {
  		/* all chan share one irq, demux inside */
  		irq = platform_get_irq(op, 0);
  		ret = devm_request_irq(pdev->dev, irq, mmp_pdma_int_handler,
  				       IRQF_SHARED, "pdma", pdev);
  		if (ret)
  			return ret;
  	}
  
  	for (i = 0; i < dma_channels; i++) {
  		irq = (irq_num != dma_channels) ? 0 : platform_get_irq(op, i);
  		ret = mmp_pdma_chan_init(pdev, i, irq);
  		if (ret)
  			return ret;
  	}
  
  	dma_cap_set(DMA_SLAVE, pdev->device.cap_mask);
  	dma_cap_set(DMA_MEMCPY, pdev->device.cap_mask);
  	dma_cap_set(DMA_CYCLIC, pdev->device.cap_mask);
  	dma_cap_set(DMA_PRIVATE, pdev->device.cap_mask);
  	pdev->device.dev = &op->dev;
  	pdev->device.device_alloc_chan_resources = mmp_pdma_alloc_chan_resources;
  	pdev->device.device_free_chan_resources = mmp_pdma_free_chan_resources;
  	pdev->device.device_tx_status = mmp_pdma_tx_status;
  	pdev->device.device_prep_dma_memcpy = mmp_pdma_prep_memcpy;
  	pdev->device.device_prep_slave_sg = mmp_pdma_prep_slave_sg;
  	pdev->device.device_prep_dma_cyclic = mmp_pdma_prep_dma_cyclic;
  	pdev->device.device_issue_pending = mmp_pdma_issue_pending;
  	pdev->device.device_control = mmp_pdma_control;
  	pdev->device.copy_align = PDMA_ALIGNMENT;
  
  	if (pdev->dev->coherent_dma_mask)
  		dma_set_mask(pdev->dev, pdev->dev->coherent_dma_mask);
  	else
  		dma_set_mask(pdev->dev, DMA_BIT_MASK(64));
  
  	ret = dma_async_device_register(&pdev->device);
  	if (ret) {
		dev_err(pdev->device.dev, "unable to register\n");
  		return ret;
  	}
  	if (op->dev.of_node) {
  		/* Device-tree DMA controller registration */
  		ret = of_dma_controller_register(op->dev.of_node,
  						 mmp_pdma_dma_xlate, pdev);
  		if (ret < 0) {
  			dev_err(&op->dev, "of_dma_controller_register failed
  ");
  			return ret;
  		}
  	}
  	platform_set_drvdata(op, pdev);
	dev_info(pdev->device.dev, "initialized %d channels\n", dma_channels);
  	return 0;
  }
  
  static const struct platform_device_id mmp_pdma_id_table[] = {
  	{ "mmp-pdma", },
  	{ },
  };
  
  static struct platform_driver mmp_pdma_driver = {
  	.driver		= {
  		.name	= "mmp-pdma",
  		.owner  = THIS_MODULE,
  		.of_match_table = mmp_pdma_dt_ids,
  	},
  	.id_table	= mmp_pdma_id_table,
  	.probe		= mmp_pdma_probe,
  	.remove		= mmp_pdma_remove,
  };
  bool mmp_pdma_filter_fn(struct dma_chan *chan, void *param)
  {
  	struct mmp_pdma_chan *c = to_mmp_pdma_chan(chan);
  
  	if (chan->device->dev->driver != &mmp_pdma_driver.driver)
  		return false;
  	c->drcmr = *(unsigned int *)param;
  
  	return true;
  }
  EXPORT_SYMBOL_GPL(mmp_pdma_filter_fn);
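
/*
 * Illustrative use of the filter above by a non-device-tree client (a
 * sketch, not part of this driver; the request line number 45 is made up):
 *
 *	dma_cap_mask_t mask;
 *	unsigned int drcmr_line = 45;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, mmp_pdma_filter_fn, &drcmr_line);
 *
 * Device-tree users get the same effect through mmp_pdma_dma_xlate().
 */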
  module_platform_driver(mmp_pdma_driver);
  MODULE_DESCRIPTION("MARVELL MMP Peripheral DMA Driver");
  MODULE_AUTHOR("Marvell International Ltd.");
  MODULE_LICENSE("GPL v2");