drivers/dma/k3dma.c
  // SPDX-License-Identifier: GPL-2.0-only
  /*
   * Copyright (c) 2013 - 2015 Linaro Ltd.
   * Copyright (c) 2013 Hisilicon Limited.
   */
  #include <linux/sched.h>
  #include <linux/device.h>
  #include <linux/dma-mapping.h>
  #include <linux/dmapool.h>
  #include <linux/dmaengine.h>
  #include <linux/init.h>
  #include <linux/interrupt.h>
  #include <linux/kernel.h>
  #include <linux/module.h>
  #include <linux/platform_device.h>
  #include <linux/slab.h>
  #include <linux/spinlock.h>
  #include <linux/of_device.h>
  #include <linux/of.h>
  #include <linux/clk.h>
  #include <linux/of_dma.h>
  
  #include "virt-dma.h"
  
  #define DRIVER_NAME		"k3-dma"
  #define DMA_MAX_SIZE		0x1ffc
  #define DMA_CYCLIC_MAX_PERIOD	0x1000
  #define LLI_BLOCK_SIZE		(4 * PAGE_SIZE)
  
  #define INT_STAT		0x00
  #define INT_TC1			0x04
  #define INT_TC2			0x08
  #define INT_ERR1		0x0c
  #define INT_ERR2		0x10
  #define INT_TC1_MASK		0x18
  #define INT_TC2_MASK		0x1c
  #define INT_ERR1_MASK		0x20
  #define INT_ERR2_MASK		0x24
  #define INT_TC1_RAW		0x600
  #define INT_TC2_RAW		0x608
  #define INT_ERR1_RAW		0x610
  #define INT_ERR2_RAW		0x618
  #define CH_PRI			0x688
  #define CH_STAT			0x690
  #define CX_CUR_CNT		0x704
  #define CX_LLI			0x800
  #define CX_CNT1			0x80c
  #define CX_CNT0			0x810
  #define CX_SRC			0x814
  #define CX_DST			0x818
  #define CX_CFG			0x81c
  
  #define CX_LLI_CHAIN_EN		0x2
  #define CX_CFG_EN		0x1
  #define CX_CFG_NODEIRQ		BIT(1)
  #define CX_CFG_MEM2PER		(0x1 << 2)
  #define CX_CFG_PER2MEM		(0x2 << 2)
  #define CX_CFG_SRCINCR		(0x1 << 31)
  #define CX_CFG_DSTINCR		(0x1 << 30)
  
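/*
 * Hardware link-list item (LLI): @lli holds the bus address of the next
 * item, with CX_LLI_CHAIN_EN carried in its low bits, hence the 32-byte
 * alignment (matched by the dma_pool created in probe).
 */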
  struct k3_desc_hw {
  	u32 lli;
  	u32 reserved[3];
  	u32 count;
  	u32 saddr;
  	u32 daddr;
  	u32 config;
  } __aligned(32);
  
  struct k3_dma_desc_sw {
  	struct virt_dma_desc	vd;
  	dma_addr_t		desc_hw_lli;
  	size_t			desc_num;
  	size_t			size;
  	struct k3_desc_hw	*desc_hw;
  };
  
  struct k3_dma_phy;
  
  struct k3_dma_chan {
  	u32			ccfg;
  	struct virt_dma_chan	vc;
  	struct k3_dma_phy	*phy;
  	struct list_head	node;
  	dma_addr_t		dev_addr;
  	enum dma_status		status;
  	bool			cyclic;
  	struct dma_slave_config	slave_config;
  };
  
  struct k3_dma_phy {
  	u32			idx;
  	void __iomem		*base;
  	struct k3_dma_chan	*vchan;
  	struct k3_dma_desc_sw	*ds_run;
  	struct k3_dma_desc_sw	*ds_done;
  };
  
  struct k3_dma_dev {
  	struct dma_device	slave;
  	void __iomem		*base;
  	struct tasklet_struct	task;
  	spinlock_t		lock;
  	struct list_head	chan_pending;
  	struct k3_dma_phy	*phy;
  	struct k3_dma_chan	*chans;
  	struct clk		*clk;
  	struct dma_pool		*pool;
  	u32			dma_channels;
  	u32			dma_requests;
  	u32			dma_channel_mask;
  	unsigned int		irq;
  };
  
  #define K3_FLAG_NOCLK	BIT(1)
  
  struct k3dma_soc_data {
  	unsigned long flags;
  };

#define to_k3_dma(dmadev) container_of(dmadev, struct k3_dma_dev, slave)

static int k3_dma_config_write(struct dma_chan *chan,
  			       enum dma_transfer_direction dir,
  			       struct dma_slave_config *cfg);

static struct k3_dma_chan *to_k3_chan(struct dma_chan *chan)
  {
  	return container_of(chan, struct k3_dma_chan, vc.chan);
  }
  
  static void k3_dma_pause_dma(struct k3_dma_phy *phy, bool on)
  {
  	u32 val = 0;
  
  	if (on) {
  		val = readl_relaxed(phy->base + CX_CFG);
  		val |= CX_CFG_EN;
  		writel_relaxed(val, phy->base + CX_CFG);
  	} else {
  		val = readl_relaxed(phy->base + CX_CFG);
  		val &= ~CX_CFG_EN;
  		writel_relaxed(val, phy->base + CX_CFG);
  	}
  }
  
  static void k3_dma_terminate_chan(struct k3_dma_phy *phy, struct k3_dma_dev *d)
  {
  	u32 val = 0;
  
  	k3_dma_pause_dma(phy, false);
  
  	val = 0x1 << phy->idx;
  	writel_relaxed(val, d->base + INT_TC1_RAW);
  	writel_relaxed(val, d->base + INT_TC2_RAW);
  	writel_relaxed(val, d->base + INT_ERR1_RAW);
  	writel_relaxed(val, d->base + INT_ERR2_RAW);
  }
  
  static void k3_dma_set_desc(struct k3_dma_phy *phy, struct k3_desc_hw *hw)
  {
  	writel_relaxed(hw->lli, phy->base + CX_LLI);
  	writel_relaxed(hw->count, phy->base + CX_CNT0);
  	writel_relaxed(hw->saddr, phy->base + CX_SRC);
  	writel_relaxed(hw->daddr, phy->base + CX_DST);
  	writel_relaxed(hw->config, phy->base + CX_CFG);
  }
  
  static u32 k3_dma_get_curr_cnt(struct k3_dma_dev *d, struct k3_dma_phy *phy)
  {
  	u32 cnt = 0;
  
  	cnt = readl_relaxed(d->base + CX_CUR_CNT + phy->idx * 0x10);
  	cnt &= 0xffff;
  	return cnt;
  }
  
  static u32 k3_dma_get_curr_lli(struct k3_dma_phy *phy)
  {
  	return readl_relaxed(phy->base + CX_LLI);
  }
  
  static u32 k3_dma_get_chan_stat(struct k3_dma_dev *d)
  {
  	return readl_relaxed(d->base + CH_STAT);
  }
  
  static void k3_dma_enable_dma(struct k3_dma_dev *d, bool on)
  {
  	if (on) {
  		/* set same priority */
  		writel_relaxed(0x0, d->base + CH_PRI);
  
  		/* unmask irq */
  		writel_relaxed(0xffff, d->base + INT_TC1_MASK);
  		writel_relaxed(0xffff, d->base + INT_TC2_MASK);
  		writel_relaxed(0xffff, d->base + INT_ERR1_MASK);
  		writel_relaxed(0xffff, d->base + INT_ERR2_MASK);
  	} else {
  		/* mask irq */
  		writel_relaxed(0x0, d->base + INT_TC1_MASK);
  		writel_relaxed(0x0, d->base + INT_TC2_MASK);
  		writel_relaxed(0x0, d->base + INT_ERR1_MASK);
  		writel_relaxed(0x0, d->base + INT_ERR2_MASK);
  	}
  }
  
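/*
 * Per the handling below, TC1 appears to flag completion of a whole
 * descriptor chain, while TC2 (requested by setting CX_CFG_NODEIRQ in an
 * LLI's config) drives the per-period callback of cyclic transfers. Raw
 * status is acked here; channel bookkeeping is left to the tasklet.
 */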
  static irqreturn_t k3_dma_int_handler(int irq, void *dev_id)
  {
  	struct k3_dma_dev *d = (struct k3_dma_dev *)dev_id;
  	struct k3_dma_phy *p;
  	struct k3_dma_chan *c;
  	u32 stat = readl_relaxed(d->base + INT_STAT);
  	u32 tc1  = readl_relaxed(d->base + INT_TC1);
  	u32 tc2  = readl_relaxed(d->base + INT_TC2);
  	u32 err1 = readl_relaxed(d->base + INT_ERR1);
  	u32 err2 = readl_relaxed(d->base + INT_ERR2);
  	u32 i, irq_chan = 0;
  
  	while (stat) {
  		i = __ffs(stat);
  		stat &= ~BIT(i);
  		if (likely(tc1 & BIT(i)) || (tc2 & BIT(i))) {
  			unsigned long flags;

			p = &d->phy[i];
  			c = p->vchan;
  			if (c && (tc1 & BIT(i))) {
  				spin_lock_irqsave(&c->vc.lock, flags);
  				if (p->ds_run != NULL) {
  					vchan_cookie_complete(&p->ds_run->vd);
  					p->ds_done = p->ds_run;
  					p->ds_run = NULL;
  				}
  				spin_unlock_irqrestore(&c->vc.lock, flags);
  			}
  			if (c && (tc2 & BIT(i))) {
  				spin_lock_irqsave(&c->vc.lock, flags);
  				if (p->ds_run != NULL)
  					vchan_cyclic_callback(&p->ds_run->vd);
  				spin_unlock_irqrestore(&c->vc.lock, flags);
  			}
  			irq_chan |= BIT(i);
  		}
  		if (unlikely((err1 & BIT(i)) || (err2 & BIT(i))))
			dev_warn(d->slave.dev, "DMA ERR\n");
  	}
  
  	writel_relaxed(irq_chan, d->base + INT_TC1_RAW);
  	writel_relaxed(irq_chan, d->base + INT_TC2_RAW);
  	writel_relaxed(err1, d->base + INT_ERR1_RAW);
  	writel_relaxed(err2, d->base + INT_ERR2_RAW);

	if (irq_chan)
  		tasklet_schedule(&d->task);
  
  	if (irq_chan || err1 || err2)
  		return IRQ_HANDLED;
  
  	return IRQ_NONE;
  }
  
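/*
 * Fetch the next issued descriptor and program it into the physical
 * channel. Called with c->vc.lock held; returns -EAGAIN when no pchan is
 * attached, a transfer is still in flight, or nothing is queued.
 */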
  static int k3_dma_start_txd(struct k3_dma_chan *c)
  {
  	struct k3_dma_dev *d = to_k3_dma(c->vc.chan.device);
  	struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
  
  	if (!c->phy)
  		return -EAGAIN;
  
  	if (BIT(c->phy->idx) & k3_dma_get_chan_stat(d))
  		return -EAGAIN;

	/* Avoid losing track of ds_run if a transaction is in flight */
  	if (c->phy->ds_run)
  		return -EAGAIN;

	if (vd) {
  		struct k3_dma_desc_sw *ds =
  			container_of(vd, struct k3_dma_desc_sw, vd);
  		/*
  		 * fetch and remove request from vc->desc_issued
  		 * so vc->desc_issued only contains desc pending
  		 */
  		list_del(&ds->vd.node);

  		c->phy->ds_run = ds;
  		c->phy->ds_done = NULL;
  		/* start dma */
  		k3_dma_set_desc(c->phy, &ds->desc_hw[0]);
  		return 0;
  	}
  	c->phy->ds_run = NULL;
  	c->phy->ds_done = NULL;
  	return -EAGAIN;
  }
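
/*
 * Deferred work: complete vchans whose pchan has finished, release pchans
 * that have nothing left to run, then hand free pchans (honouring
 * dma_channel_mask) to vchans waiting on d->chan_pending and start them.
 */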
  static void k3_dma_tasklet(struct tasklet_struct *t)
  {
  	struct k3_dma_dev *d = from_tasklet(d, t, task);
  	struct k3_dma_phy *p;
  	struct k3_dma_chan *c, *cn;
  	unsigned pch, pch_alloc = 0;
  
  	/* check new dma request of running channel in vc->desc_issued */
  	list_for_each_entry_safe(c, cn, &d->slave.channels, vc.chan.device_node) {
  		spin_lock_irq(&c->vc.lock);
  		p = c->phy;
  		if (p && p->ds_done) {
  			if (k3_dma_start_txd(c)) {
  				/* No current txd associated with this channel */
				dev_dbg(d->slave.dev, "pchan %u: free\n", p->idx);
  				/* Mark this channel free */
  				c->phy = NULL;
  				p->vchan = NULL;
  			}
  		}
  		spin_unlock_irq(&c->vc.lock);
  	}
  
  	/* check new channel request in d->chan_pending */
  	spin_lock_irq(&d->lock);
  	for (pch = 0; pch < d->dma_channels; pch++) {
  		if (!(d->dma_channel_mask & (1 << pch)))
  			continue;

		p = &d->phy[pch];
  
  		if (p->vchan == NULL && !list_empty(&d->chan_pending)) {
  			c = list_first_entry(&d->chan_pending,
  				struct k3_dma_chan, node);
  			/* remove from d->chan_pending */
  			list_del_init(&c->node);
  			pch_alloc |= 1 << pch;
  			/* Mark this channel allocated */
  			p->vchan = c;
  			c->phy = p;
			dev_dbg(d->slave.dev, "pchan %u: alloc vchan %p\n", pch, &c->vc);
  		}
  	}
  	spin_unlock_irq(&d->lock);
  
  	for (pch = 0; pch < d->dma_channels; pch++) {
  		if (!(d->dma_channel_mask & (1 << pch)))
  			continue;

		if (pch_alloc & (1 << pch)) {
  			p = &d->phy[pch];
  			c = p->vchan;
  			if (c) {
  				spin_lock_irq(&c->vc.lock);
  				k3_dma_start_txd(c);
  				spin_unlock_irq(&c->vc.lock);
  			}
  		}
  	}
  }

static void k3_dma_free_chan_resources(struct dma_chan *chan)
  {
  	struct k3_dma_chan *c = to_k3_chan(chan);
  	struct k3_dma_dev *d = to_k3_dma(chan->device);
  	unsigned long flags;
  
  	spin_lock_irqsave(&d->lock, flags);
  	list_del_init(&c->node);
  	spin_unlock_irqrestore(&d->lock, flags);
  
  	vchan_free_chan_resources(&c->vc);
  	c->ccfg = 0;
  }
  
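/*
 * Residue: a descriptor still on the issue queue reports its full size;
 * the running descriptor reports the remaining count of the current LLI
 * (CX_CUR_CNT) plus the counts of all LLIs not yet fetched.
 */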
  static enum dma_status k3_dma_tx_status(struct dma_chan *chan,
  	dma_cookie_t cookie, struct dma_tx_state *state)
  {
  	struct k3_dma_chan *c = to_k3_chan(chan);
  	struct k3_dma_dev *d = to_k3_dma(chan->device);
  	struct k3_dma_phy *p;
  	struct virt_dma_desc *vd;
  	unsigned long flags;
  	enum dma_status ret;
  	size_t bytes = 0;
  
  	ret = dma_cookie_status(&c->vc.chan, cookie, state);
  	if (ret == DMA_COMPLETE)
  		return ret;
  
  	spin_lock_irqsave(&c->vc.lock, flags);
  	p = c->phy;
  	ret = c->status;
  
  	/*
  	 * If the cookie is on our issue queue, then the residue is
  	 * its total size.
  	 */
  	vd = vchan_find_desc(&c->vc, cookie);
  	if (vd && !c->cyclic) {
  		bytes = container_of(vd, struct k3_dma_desc_sw, vd)->size;
  	} else if ((!p) || (!p->ds_run)) {
  		bytes = 0;
  	} else {
  		struct k3_dma_desc_sw *ds = p->ds_run;
  		u32 clli = 0, index = 0;
  
  		bytes = k3_dma_get_curr_cnt(d, p);
  		clli = k3_dma_get_curr_lli(p);
  		index = ((clli - ds->desc_hw_lli) /
  				sizeof(struct k3_desc_hw)) + 1;
  		for (; index < ds->desc_num; index++) {
  			bytes += ds->desc_hw[index].count;
  			/* end of lli */
  			if (!ds->desc_hw[index].lli)
  				break;
  		}
  	}
  	spin_unlock_irqrestore(&c->vc.lock, flags);
  	dma_set_residue(state, bytes);
  	return ret;
  }
  
  static void k3_dma_issue_pending(struct dma_chan *chan)
  {
  	struct k3_dma_chan *c = to_k3_chan(chan);
  	struct k3_dma_dev *d = to_k3_dma(chan->device);
  	unsigned long flags;
  
  	spin_lock_irqsave(&c->vc.lock, flags);
  	/* add request to vc->desc_issued */
  	if (vchan_issue_pending(&c->vc)) {
  		spin_lock(&d->lock);
  		if (!c->phy) {
  			if (list_empty(&c->node)) {
  				/* if new channel, add chan_pending */
  				list_add_tail(&c->node, &d->chan_pending);
  				/* check in tasklet */
  				tasklet_schedule(&d->task);
				dev_dbg(d->slave.dev, "vchan %p: issued\n", &c->vc);
  			}
  		}
  		spin_unlock(&d->lock);
  	} else
		dev_dbg(d->slave.dev, "vchan %p: nothing to issue\n", &c->vc);
  	spin_unlock_irqrestore(&c->vc.lock, flags);
  }
  
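/*
 * Fill LLI @num and pre-link it to @num + 1. Callers fix up the last
 * entry: memcpy and slave_sg terminate the chain with 0, cyclic points
 * it back at the first LLI.
 */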
  static void k3_dma_fill_desc(struct k3_dma_desc_sw *ds, dma_addr_t dst,
  			dma_addr_t src, size_t len, u32 num, u32 ccfg)
  {
  	if (num != ds->desc_num - 1)
  		ds->desc_hw[num].lli = ds->desc_hw_lli + (num + 1) *
  			sizeof(struct k3_desc_hw);

  	ds->desc_hw[num].lli |= CX_LLI_CHAIN_EN;
  	ds->desc_hw[num].count = len;
  	ds->desc_hw[num].saddr = src;
  	ds->desc_hw[num].daddr = dst;
  	ds->desc_hw[num].config = ccfg;
  }
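
/*
 * Allocate a software descriptor plus a block of @num hardware LLIs from
 * the 32-byte-aligned dma_pool; @num is bounded by LLI_BLOCK_SIZE.
 */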
  static struct k3_dma_desc_sw *k3_dma_alloc_desc_resource(int num,
  							struct dma_chan *chan)
  {
  	struct k3_dma_chan *c = to_k3_chan(chan);
  	struct k3_dma_desc_sw *ds;
  	struct k3_dma_dev *d = to_k3_dma(chan->device);
  	int lli_limit = LLI_BLOCK_SIZE / sizeof(struct k3_desc_hw);
  
  	if (num > lli_limit) {
		dev_dbg(chan->device->dev, "vch %p: sg num %d exceed max %d\n",
			&c->vc, num, lli_limit);
  		return NULL;
  	}
  
  	ds = kzalloc(sizeof(*ds), GFP_NOWAIT);
  	if (!ds)
  		return NULL;
  	ds->desc_hw = dma_pool_zalloc(d->pool, GFP_NOWAIT, &ds->desc_hw_lli);
  	if (!ds->desc_hw) {
		dev_dbg(chan->device->dev, "vch %p: dma alloc fail\n", &c->vc);
  		kfree(ds);
  		return NULL;
  	}
  	ds->desc_num = num;
  	return ds;
  }

static struct dma_async_tx_descriptor *k3_dma_prep_memcpy(
  	struct dma_chan *chan,	dma_addr_t dst, dma_addr_t src,
  	size_t len, unsigned long flags)
  {
  	struct k3_dma_chan *c = to_k3_chan(chan);
  	struct k3_dma_desc_sw *ds;
  	size_t copy = 0;
  	int num = 0;
  
  	if (!len)
  		return NULL;
  
  	num = DIV_ROUND_UP(len, DMA_MAX_SIZE);
  
  	ds = k3_dma_alloc_desc_resource(num, chan);
  	if (!ds)
  		return NULL;

  	c->cyclic = 0;
  	ds->size = len;
  	num = 0;
  
  	if (!c->ccfg) {
  		/* default is memtomem, without calling device_config */
  		c->ccfg = CX_CFG_SRCINCR | CX_CFG_DSTINCR | CX_CFG_EN;
  		c->ccfg |= (0xf << 20) | (0xf << 24);	/* burst = 16 */
  		c->ccfg |= (0x3 << 12) | (0x3 << 16);	/* width = 64 bit */
  	}
  
  	do {
  		copy = min_t(size_t, len, DMA_MAX_SIZE);
  		k3_dma_fill_desc(ds, dst, src, copy, num++, c->ccfg);
  		src += copy;
  		dst += copy;
  		len -= copy;
  	} while (len);
  
  	ds->desc_hw[num-1].lli = 0;	/* end of link */
  	return vchan_tx_prep(&c->vc, &ds->vd, flags);
  }
  
  static struct dma_async_tx_descriptor *k3_dma_prep_slave_sg(
  	struct dma_chan *chan, struct scatterlist *sgl, unsigned int sglen,
  	enum dma_transfer_direction dir, unsigned long flags, void *context)
  {
  	struct k3_dma_chan *c = to_k3_chan(chan);
  	struct k3_dma_desc_sw *ds;
  	size_t len, avail, total = 0;
  	struct scatterlist *sg;
  	dma_addr_t addr, src = 0, dst = 0;
  	int num = sglen, i;

	if (sgl == NULL)
  		return NULL;

	c->cyclic = 0;

	for_each_sg(sgl, sg, sglen, i) {
  		avail = sg_dma_len(sg);
  		if (avail > DMA_MAX_SIZE)
  			num += DIV_ROUND_UP(avail, DMA_MAX_SIZE) - 1;
  	}

	ds = k3_dma_alloc_desc_resource(num, chan);
  	if (!ds)
  		return NULL;

	num = 0;
  	k3_dma_config_write(chan, dir, &c->slave_config);
  
  	for_each_sg(sgl, sg, sglen, i) {
  		addr = sg_dma_address(sg);
  		avail = sg_dma_len(sg);
  		total += avail;
  
  		do {
  			len = min_t(size_t, avail, DMA_MAX_SIZE);
  
  			if (dir == DMA_MEM_TO_DEV) {
  				src = addr;
  				dst = c->dev_addr;
  			} else if (dir == DMA_DEV_TO_MEM) {
  				src = c->dev_addr;
  				dst = addr;
  			}
  
  			k3_dma_fill_desc(ds, dst, src, len, num++, c->ccfg);
  
  			addr += len;
  			avail -= len;
  		} while (avail);
  	}
  
  	ds->desc_hw[num-1].lli = 0;	/* end of link */
  	ds->size = total;
  	return vchan_tx_prep(&c->vc, &ds->vd, flags);
  }
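
/*
 * Cyclic prep: the buffer is split into chunks of at most
 * DMA_CYCLIC_MAX_PERIOD (or period_len, if smaller). Each LLI that closes
 * a period carries CX_CFG_NODEIRQ so TC2 fires the per-period callback,
 * and the final LLI links back to the first.
 */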
  static struct dma_async_tx_descriptor *
  k3_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
  		       size_t buf_len, size_t period_len,
  		       enum dma_transfer_direction dir,
  		       unsigned long flags)
  {
  	struct k3_dma_chan *c = to_k3_chan(chan);
  	struct k3_dma_desc_sw *ds;
  	size_t len, avail, total = 0;
  	dma_addr_t addr, src = 0, dst = 0;
  	int num = 1, since = 0;
  	size_t modulo = DMA_CYCLIC_MAX_PERIOD;
  	u32 en_tc2 = 0;

	dev_dbg(chan->device->dev, "%s: buf %pad, dst %pad, buf len %zu, period_len = %zu, dir %d\n",
  	       __func__, &buf_addr, &to_k3_chan(chan)->dev_addr,
  	       buf_len, period_len, (int)dir);
  
  	avail = buf_len;
  	if (avail > modulo)
  		num += DIV_ROUND_UP(avail, modulo) - 1;
  
  	ds = k3_dma_alloc_desc_resource(num, chan);
  	if (!ds)
  		return NULL;
  
  	c->cyclic = 1;
  	addr = buf_addr;
  	avail = buf_len;
  	total = avail;
  	num = 0;
  	k3_dma_config_write(chan, dir, &c->slave_config);
  
  	if (period_len < modulo)
  		modulo = period_len;
  
  	do {
  		len = min_t(size_t, avail, modulo);
  
  		if (dir == DMA_MEM_TO_DEV) {
  			src = addr;
  			dst = c->dev_addr;
  		} else if (dir == DMA_DEV_TO_MEM) {
  			src = c->dev_addr;
  			dst = addr;
  		}
  		since += len;
  		if (since >= period_len) {
  			/* descriptor asks for TC2 interrupt on completion */
  			en_tc2 = CX_CFG_NODEIRQ;
  			since -= period_len;
  		} else
  			en_tc2 = 0;
  
  		k3_dma_fill_desc(ds, dst, src, len, num++, c->ccfg | en_tc2);
  
  		addr += len;
  		avail -= len;
  	} while (avail);
  
  	/* "Cyclic" == end of link points back to start of link */
  	ds->desc_hw[num - 1].lli |= ds->desc_hw_lli;
  
  	ds->size = total;
  
  	return vchan_tx_prep(&c->vc, &ds->vd, flags);
  }
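
/*
 * Only cache the slave config here; it is applied per direction by
 * k3_dma_config_write() when a transfer is prepared.
 */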
  static int k3_dma_config(struct dma_chan *chan,
  			 struct dma_slave_config *cfg)
  {
  	struct k3_dma_chan *c = to_k3_chan(chan);
  
  	memcpy(&c->slave_config, cfg, sizeof(*cfg));
  
  	return 0;
  }
  
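/*
 * Translate the cached slave config into CX_CFG bits: address-increment
 * flags, bus width (bits 12/16), burst length (bits 20/24), and the
 * request line taken from the channel id shifted to bit 4.
 */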
  static int k3_dma_config_write(struct dma_chan *chan,
  			       enum dma_transfer_direction dir,
  			       struct dma_slave_config *cfg)
  {
  	struct k3_dma_chan *c = to_k3_chan(chan);
  	u32 maxburst = 0, val = 0;
  	enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;

	if (dir == DMA_DEV_TO_MEM) {
  		c->ccfg = CX_CFG_DSTINCR;
  		c->dev_addr = cfg->src_addr;
  		maxburst = cfg->src_maxburst;
  		width = cfg->src_addr_width;
  	} else if (dir == DMA_MEM_TO_DEV) {
  		c->ccfg = CX_CFG_SRCINCR;
  		c->dev_addr = cfg->dst_addr;
  		maxburst = cfg->dst_maxburst;
  		width = cfg->dst_addr_width;
  	}
  	switch (width) {
  	case DMA_SLAVE_BUSWIDTH_1_BYTE:
  	case DMA_SLAVE_BUSWIDTH_2_BYTES:
  	case DMA_SLAVE_BUSWIDTH_4_BYTES:
  	case DMA_SLAVE_BUSWIDTH_8_BYTES:
		val = __ffs(width);
  		break;
  	default:
  		val = 3;
  		break;
  	}
  	c->ccfg |= (val << 12) | (val << 16);
  
  	if ((maxburst == 0) || (maxburst > 16))
  		val = 15;
  	else
  		val = maxburst - 1;
  	c->ccfg |= (val << 20) | (val << 24);
  	c->ccfg |= CX_CFG_MEM2PER | CX_CFG_EN;
  
  	/* specific request line */
  	c->ccfg |= c->vc.chan.chan_id << 4;
  
  	return 0;
  }

static void k3_dma_free_desc(struct virt_dma_desc *vd)
  {
  	struct k3_dma_desc_sw *ds =
  		container_of(vd, struct k3_dma_desc_sw, vd);
  	struct k3_dma_dev *d = to_k3_dma(vd->tx.chan->device);
  
  	dma_pool_free(d->pool, ds->desc_hw, ds->desc_hw_lli);
  	kfree(ds);
  }
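
/*
 * Stop and detach the pchan, if any, then move every descriptor,
 * including the in-flight one (via vchan_terminate_vdesc()), onto a
 * local list and free it outside the lock.
 */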
  static int k3_dma_terminate_all(struct dma_chan *chan)
  {
  	struct k3_dma_chan *c = to_k3_chan(chan);
  	struct k3_dma_dev *d = to_k3_dma(chan->device);
  	struct k3_dma_phy *p = c->phy;
  	unsigned long flags;
  	LIST_HEAD(head);

	dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc);

  	/* Prevent this channel being scheduled */
  	spin_lock(&d->lock);
  	list_del_init(&c->node);
  	spin_unlock(&d->lock);

  	/* Clear the tx descriptor lists */
  	spin_lock_irqsave(&c->vc.lock, flags);
  	vchan_get_all_descriptors(&c->vc, &head);
  	if (p) {
  		/* vchan is assigned to a pchan - stop the channel */
  		k3_dma_terminate_chan(p, d);
  		c->phy = NULL;
  		p->vchan = NULL;
  		if (p->ds_run) {
  			vchan_terminate_vdesc(&p->ds_run->vd);
  			p->ds_run = NULL;
  		}
  		p->ds_done = NULL;
  	}
  	spin_unlock_irqrestore(&c->vc.lock, flags);
  	vchan_dma_desc_free_list(&c->vc, &head);

  	return 0;
  }

  static void k3_dma_synchronize(struct dma_chan *chan)
  {
  	struct k3_dma_chan *c = to_k3_chan(chan);
  
  	vchan_synchronize(&c->vc);
  }

static int k3_dma_transfer_pause(struct dma_chan *chan)
  {
  	struct k3_dma_chan *c = to_k3_chan(chan);
  	struct k3_dma_dev *d = to_k3_dma(chan->device);
  	struct k3_dma_phy *p = c->phy;

	dev_dbg(d->slave.dev, "vchan %p: pause\n", &c->vc);
  	if (c->status == DMA_IN_PROGRESS) {
  		c->status = DMA_PAUSED;
  		if (p) {
  			k3_dma_pause_dma(p, false);
  		} else {
  			spin_lock(&d->lock);
  			list_del_init(&c->node);
  			spin_unlock(&d->lock);
  		}
  	}

  	return 0;
  }

  static int k3_dma_transfer_resume(struct dma_chan *chan)
  {
  	struct k3_dma_chan *c = to_k3_chan(chan);
  	struct k3_dma_dev *d = to_k3_dma(chan->device);
  	struct k3_dma_phy *p = c->phy;
  	unsigned long flags;
  
	dev_dbg(d->slave.dev, "vchan %p: resume\n", &c->vc);
  	spin_lock_irqsave(&c->vc.lock, flags);
  	if (c->status == DMA_PAUSED) {
  		c->status = DMA_IN_PROGRESS;
  		if (p) {
  			k3_dma_pause_dma(p, true);
  		} else if (!list_empty(&c->vc.desc_issued)) {
  			spin_lock(&d->lock);
  			list_add_tail(&c->node, &d->chan_pending);
  			spin_unlock(&d->lock);
  		}
  	}
  	spin_unlock_irqrestore(&c->vc.lock, flags);
  	return 0;
  }

static const struct k3dma_soc_data k3_v1_dma_data = {
  	.flags = 0,
  };
  
  static const struct k3dma_soc_data asp_v1_dma_data = {
  	.flags = K3_FLAG_NOCLK,
  };

static const struct of_device_id k3_pdma_dt_ids[] = {
  	{ .compatible = "hisilicon,k3-dma-1.0",
  	  .data = &k3_v1_dma_data
  	},
  	{ .compatible = "hisilicon,hisi-pcm-asp-dma-1.0",
  	  .data = &asp_v1_dma_data
  	},
  	{}
  };
  MODULE_DEVICE_TABLE(of, k3_pdma_dt_ids);
  
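/*
 * dma_spec->args[0] is the request line number; after a bounds check
 * against dma-requests it indexes d->chans directly.
 */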
  static struct dma_chan *k3_of_dma_simple_xlate(struct of_phandle_args *dma_spec,
  						struct of_dma *ofdma)
  {
  	struct k3_dma_dev *d = ofdma->of_dma_data;
  	unsigned int request = dma_spec->args[0];

	if (request >= d->dma_requests)
  		return NULL;
  
  	return dma_get_slave_channel(&(d->chans[request].vc.chan));
  }
  
  static int k3_dma_probe(struct platform_device *op)
  {
  	const struct k3dma_soc_data *soc_data;
  	struct k3_dma_dev *d;
  	const struct of_device_id *of_id;
  	int i, ret, irq = 0;

	d = devm_kzalloc(&op->dev, sizeof(*d), GFP_KERNEL);
  	if (!d)
  		return -ENOMEM;

	soc_data = device_get_match_data(&op->dev);
  	if (!soc_data)
  		return -EINVAL;

	d->base = devm_platform_ioremap_resource(op, 0);
  	if (IS_ERR(d->base))
  		return PTR_ERR(d->base);
  
  	of_id = of_match_device(k3_pdma_dt_ids, &op->dev);
  	if (of_id) {
  		of_property_read_u32((&op->dev)->of_node,
  				"dma-channels", &d->dma_channels);
  		of_property_read_u32((&op->dev)->of_node,
  				"dma-requests", &d->dma_requests);
  		ret = of_property_read_u32((&op->dev)->of_node,
  				"dma-channel-mask", &d->dma_channel_mask);
  		if (ret) {
			dev_warn(&op->dev,
				 "dma-channel-mask doesn't exist, considering all as available.\n");
  			d->dma_channel_mask = (u32)~0UL;
  		}
  	}

	if (!(soc_data->flags & K3_FLAG_NOCLK)) {
  		d->clk = devm_clk_get(&op->dev, NULL);
  		if (IS_ERR(d->clk)) {
			dev_err(&op->dev, "no dma clk\n");
  			return PTR_ERR(d->clk);
  		}
  	}
  
  	irq = platform_get_irq(op, 0);
  	ret = devm_request_irq(&op->dev, irq,
  			k3_dma_int_handler, 0, DRIVER_NAME, d);
  	if (ret)
  		return ret;

	d->irq = irq;

	/* A DMA memory pool for LLIs, align on 32-byte boundary */
  	d->pool = dmam_pool_create(DRIVER_NAME, &op->dev,
  					LLI_BLOCK_SIZE, 32, 0);
  	if (!d->pool)
  		return -ENOMEM;

	/* init phy channel */
  	d->phy = devm_kcalloc(&op->dev,
  		d->dma_channels, sizeof(struct k3_dma_phy), GFP_KERNEL);
  	if (d->phy == NULL)
  		return -ENOMEM;
  
  	for (i = 0; i < d->dma_channels; i++) {
  		struct k3_dma_phy *p;
  
  		if (!(d->dma_channel_mask & BIT(i)))
  			continue;

  		p = &d->phy[i];
  		p->idx = i;
  		p->base = d->base + i * 0x40;
  	}
  
  	INIT_LIST_HEAD(&d->slave.channels);
  	dma_cap_set(DMA_SLAVE, d->slave.cap_mask);
  	dma_cap_set(DMA_MEMCPY, d->slave.cap_mask);
  	dma_cap_set(DMA_CYCLIC, d->slave.cap_mask);
  	d->slave.dev = &op->dev;
  	d->slave.device_free_chan_resources = k3_dma_free_chan_resources;
  	d->slave.device_tx_status = k3_dma_tx_status;
  	d->slave.device_prep_dma_memcpy = k3_dma_prep_memcpy;
  	d->slave.device_prep_slave_sg = k3_dma_prep_slave_sg;
  	d->slave.device_prep_dma_cyclic = k3_dma_prep_dma_cyclic;
  	d->slave.device_issue_pending = k3_dma_issue_pending;
  	d->slave.device_config = k3_dma_config;
  	d->slave.device_pause = k3_dma_transfer_pause;
  	d->slave.device_resume = k3_dma_transfer_resume;
  	d->slave.device_terminate_all = k3_dma_terminate_all;
  	d->slave.device_synchronize = k3_dma_synchronize;
  	d->slave.copy_align = DMAENGINE_ALIGN_8_BYTES;
  
  	/* init virtual channel */
  	d->chans = devm_kcalloc(&op->dev,
  		d->dma_requests, sizeof(struct k3_dma_chan), GFP_KERNEL);
  	if (d->chans == NULL)
  		return -ENOMEM;
  
  	for (i = 0; i < d->dma_requests; i++) {
  		struct k3_dma_chan *c = &d->chans[i];
  
  		c->status = DMA_IN_PROGRESS;
  		INIT_LIST_HEAD(&c->node);
  		c->vc.desc_free = k3_dma_free_desc;
  		vchan_init(&c->vc, &d->slave);
  	}
  
  	/* Enable clock before accessing registers */
  	ret = clk_prepare_enable(d->clk);
  	if (ret < 0) {
		dev_err(&op->dev, "clk_prepare_enable failed: %d\n", ret);
  		return ret;
  	}
  
  	k3_dma_enable_dma(d, true);
  
  	ret = dma_async_device_register(&d->slave);
  	if (ret)
  		goto dma_async_register_fail;
  
  	ret = of_dma_controller_register((&op->dev)->of_node,
  					k3_of_dma_simple_xlate, d);
  	if (ret)
  		goto of_dma_register_fail;
  
  	spin_lock_init(&d->lock);
  	INIT_LIST_HEAD(&d->chan_pending);
  	tasklet_setup(&d->task, k3_dma_tasklet);
  	platform_set_drvdata(op, d);
	dev_info(&op->dev, "initialized\n");
  
  	return 0;
  
  of_dma_register_fail:
  	dma_async_device_unregister(&d->slave);
  dma_async_register_fail:
  	clk_disable_unprepare(d->clk);
  	return ret;
  }
  
  static int k3_dma_remove(struct platform_device *op)
  {
  	struct k3_dma_chan *c, *cn;
  	struct k3_dma_dev *d = platform_get_drvdata(op);
  
  	dma_async_device_unregister(&d->slave);
  	of_dma_controller_free((&op->dev)->of_node);

	devm_free_irq(&op->dev, d->irq, d);

	list_for_each_entry_safe(c, cn, &d->slave.channels, vc.chan.device_node) {
  		list_del(&c->vc.chan.device_node);
  		tasklet_kill(&c->vc.task);
  	}
  	tasklet_kill(&d->task);
  	clk_disable_unprepare(d->clk);
  	return 0;
  }

#ifdef CONFIG_PM_SLEEP
  static int k3_dma_suspend_dev(struct device *dev)
  {
  	struct k3_dma_dev *d = dev_get_drvdata(dev);
  	u32 stat = 0;
  
  	stat = k3_dma_get_chan_stat(d);
  	if (stat) {
		dev_warn(d->slave.dev,
			 "chan %d is running, cannot suspend\n", stat);
  		return -1;
  	}
  	k3_dma_enable_dma(d, false);
  	clk_disable_unprepare(d->clk);
  	return 0;
  }

static int k3_dma_resume_dev(struct device *dev)
  {
  	struct k3_dma_dev *d = dev_get_drvdata(dev);
  	int ret = 0;
  
  	ret = clk_prepare_enable(d->clk);
  	if (ret < 0) {
		dev_err(d->slave.dev, "clk_prepare_enable failed: %d\n", ret);
  		return ret;
  	}
  	k3_dma_enable_dma(d, true);
  	return 0;
  }

#endif

  static SIMPLE_DEV_PM_OPS(k3_dma_pmops, k3_dma_suspend_dev, k3_dma_resume_dev);
  
  static struct platform_driver k3_pdma_driver = {
  	.driver		= {
  		.name	= DRIVER_NAME,
  		.pm	= &k3_dma_pmops,
  		.of_match_table = k3_pdma_dt_ids,
  	},
  	.probe		= k3_dma_probe,
  	.remove		= k3_dma_remove,
  };
  
  module_platform_driver(k3_pdma_driver);
  
  MODULE_DESCRIPTION("Hisilicon k3 DMA Driver");
  MODULE_ALIAS("platform:k3dma");
  MODULE_LICENSE("GPL v2");