Blame view

drivers/dma/tegra20-apb-dma.c 44.8 KB
9952f6918   Thomas Gleixner   treewide: Replace...
1
  // SPDX-License-Identifier: GPL-2.0-only
ec8a15867   Laxman Dewangan   dma: tegra: add d...
2
3
4
  /*
   * DMA driver for Nvidia's Tegra20 APB DMA controller.
   *
996556c92   Stephen Warren   dma: tegra: regis...
5
   * Copyright (c) 2012-2013, NVIDIA CORPORATION.  All rights reserved.
ec8a15867   Laxman Dewangan   dma: tegra: add d...
6
7
8
9
10
11
12
   */
  
  #include <linux/bitops.h>
  #include <linux/clk.h>
  #include <linux/delay.h>
  #include <linux/dmaengine.h>
  #include <linux/dma-mapping.h>
7331205a9   Thierry Reding   dma: Convert to d...
13
  #include <linux/err.h>
ec8a15867   Laxman Dewangan   dma: tegra: add d...
14
15
16
17
18
19
20
  #include <linux/init.h>
  #include <linux/interrupt.h>
  #include <linux/io.h>
  #include <linux/mm.h>
  #include <linux/module.h>
  #include <linux/of.h>
  #include <linux/of_device.h>
996556c92   Stephen Warren   dma: tegra: regis...
21
  #include <linux/of_dma.h>
ec8a15867   Laxman Dewangan   dma: tegra: add d...
22
  #include <linux/platform_device.h>
3065c1946   Laxman Dewangan   dma: tegra: imple...
23
  #include <linux/pm.h>
ec8a15867   Laxman Dewangan   dma: tegra: add d...
24
  #include <linux/pm_runtime.h>
9aa433d2a   Stephen Warren   dma: tegra: use r...
25
  #include <linux/reset.h>
ec8a15867   Laxman Dewangan   dma: tegra: add d...
26
  #include <linux/slab.h>
6697255f2   Dmitry Osipenko   dmaengine: tegra-...
27
  #include <linux/wait.h>
ec8a15867   Laxman Dewangan   dma: tegra: add d...
28

ec8a15867   Laxman Dewangan   dma: tegra: add d...
29
  #include "dmaengine.h"
95f295f9f   Ben Dooks   dmaengine: tegra:...
30
31
  #define CREATE_TRACE_POINTS
  #include <trace/events/tegra_apb_dma.h>
ec8a15867   Laxman Dewangan   dma: tegra: add d...
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
  #define TEGRA_APBDMA_GENERAL			0x0
  #define TEGRA_APBDMA_GENERAL_ENABLE		BIT(31)
  
  #define TEGRA_APBDMA_CONTROL			0x010
  #define TEGRA_APBDMA_IRQ_MASK			0x01c
  #define TEGRA_APBDMA_IRQ_MASK_SET		0x020
  
  /* CSR register */
  #define TEGRA_APBDMA_CHAN_CSR			0x00
  #define TEGRA_APBDMA_CSR_ENB			BIT(31)
  #define TEGRA_APBDMA_CSR_IE_EOC			BIT(30)
  #define TEGRA_APBDMA_CSR_HOLD			BIT(29)
  #define TEGRA_APBDMA_CSR_DIR			BIT(28)
  #define TEGRA_APBDMA_CSR_ONCE			BIT(27)
  #define TEGRA_APBDMA_CSR_FLOW			BIT(21)
  #define TEGRA_APBDMA_CSR_REQ_SEL_SHIFT		16
00ef4490e   Shardar Shariff Md   dmaengine: tegra-...
48
  #define TEGRA_APBDMA_CSR_REQ_SEL_MASK		0x1F
ec8a15867   Laxman Dewangan   dma: tegra: add d...
49
50
51
52
53
54
55
56
57
58
  #define TEGRA_APBDMA_CSR_WCOUNT_MASK		0xFFFC
  
  /* STATUS register */
  #define TEGRA_APBDMA_CHAN_STATUS		0x004
  #define TEGRA_APBDMA_STATUS_BUSY		BIT(31)
  #define TEGRA_APBDMA_STATUS_ISE_EOC		BIT(30)
  #define TEGRA_APBDMA_STATUS_HALT		BIT(29)
  #define TEGRA_APBDMA_STATUS_PING_PONG		BIT(28)
  #define TEGRA_APBDMA_STATUS_COUNT_SHIFT		2
  #define TEGRA_APBDMA_STATUS_COUNT_MASK		0xFFFC
1b140908c   Laxman Dewangan   dma: tegra: add s...
59
  #define TEGRA_APBDMA_CHAN_CSRE			0x00C
3964293ae   Dmitry Osipenko   dmaengine: tegra-...
60
  #define TEGRA_APBDMA_CHAN_CSRE_PAUSE		BIT(31)
1b140908c   Laxman Dewangan   dma: tegra: add s...
61

ec8a15867   Laxman Dewangan   dma: tegra: add d...
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
  /* AHB memory address */
  #define TEGRA_APBDMA_CHAN_AHBPTR		0x010
  
  /* AHB sequence register */
  #define TEGRA_APBDMA_CHAN_AHBSEQ		0x14
  #define TEGRA_APBDMA_AHBSEQ_INTR_ENB		BIT(31)
  #define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_8		(0 << 28)
  #define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_16	(1 << 28)
  #define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32	(2 << 28)
  #define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_64	(3 << 28)
  #define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_128	(4 << 28)
  #define TEGRA_APBDMA_AHBSEQ_DATA_SWAP		BIT(27)
  #define TEGRA_APBDMA_AHBSEQ_BURST_1		(4 << 24)
  #define TEGRA_APBDMA_AHBSEQ_BURST_4		(5 << 24)
  #define TEGRA_APBDMA_AHBSEQ_BURST_8		(6 << 24)
  #define TEGRA_APBDMA_AHBSEQ_DBL_BUF		BIT(19)
  #define TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT		16
  #define TEGRA_APBDMA_AHBSEQ_WRAP_NONE		0
  
  /* APB address */
  #define TEGRA_APBDMA_CHAN_APBPTR		0x018
  
  /* APB sequence register */
  #define TEGRA_APBDMA_CHAN_APBSEQ		0x01c
  #define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_8		(0 << 28)
  #define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_16	(1 << 28)
  #define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_32	(2 << 28)
  #define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_64	(3 << 28)
  #define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_128	(4 << 28)
  #define TEGRA_APBDMA_APBSEQ_DATA_SWAP		BIT(27)
  #define TEGRA_APBDMA_APBSEQ_WRAP_WORD_1		(1 << 16)
911daccc8   Laxman Dewangan   dma: tegra: add s...
93
94
95
96
  /* Tegra148 specific registers */
  #define TEGRA_APBDMA_CHAN_WCOUNT		0x20
  
  #define TEGRA_APBDMA_CHAN_WORD_TRANSFER		0x24
ec8a15867   Laxman Dewangan   dma: tegra: add d...
97
98
99
100
101
102
103
104
  /*
   * If any burst is in flight and DMA paused then this is the time to complete
   * on-flight burst and update DMA status register.
   */
  #define TEGRA_APBDMA_BURST_COMPLETE_TIME	20
  
  /* Channel base address offset from APBDMA base address */
  #define TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET	0x1000
00ef4490e   Shardar Shariff Md   dmaengine: tegra-...
105
  #define TEGRA_APBDMA_SLAVE_ID_INVALID	(TEGRA_APBDMA_CSR_REQ_SEL_MASK + 1)
ec8a15867   Laxman Dewangan   dma: tegra: add d...
106
107
108
109
110
  struct tegra_dma;
  
/*
 * tegra_dma_chip_data Tegra chip specific DMA data
 * @nr_channels: Number of channels available in the controller.
 * @channel_reg_size: Channel register size/stride.
 * @max_dma_count: Maximum DMA transfer count supported by DMA controller.
 * @support_channel_pause: Support channel wise pause of dma.
 * @support_separate_wcount_reg: Support separate word count register.
 */
struct tegra_dma_chip_data {
	unsigned int nr_channels;
	unsigned int channel_reg_size;
	unsigned int max_dma_count;
	bool support_channel_pause;
	bool support_separate_wcount_reg;
};
  
/*
 * DMA channel registers: software shadow of the per-channel hardware
 * registers that tegra_dma_start() programs before enabling a transfer.
 */
struct tegra_dma_channel_regs {
	u32 csr;	/* control/status (TEGRA_APBDMA_CHAN_CSR) */
	u32 ahb_ptr;	/* AHB memory address (TEGRA_APBDMA_CHAN_AHBPTR) */
	u32 apb_ptr;	/* APB address (TEGRA_APBDMA_CHAN_APBPTR) */
	u32 ahb_seq;	/* AHB sequence config (TEGRA_APBDMA_CHAN_AHBSEQ) */
	u32 apb_seq;	/* APB sequence config (TEGRA_APBDMA_CHAN_APBSEQ) */
	u32 wcount;	/* word count; written only when the chip has a
			 * separate WCOUNT register (support_separate_wcount_reg)
			 */
};
  
/*
 * tegra_dma_sg_req: DMA request details to configure hardware. This
 * contains the details for one transfer to configure DMA hw.
 * The client's request for data transfer can be broken into multiple
 * sub-transfer as per requester details and hw support.
 * This sub transfer get added in the list of transfer and point to Tegra
 * DMA descriptor which manages the transfer details.
 */
struct tegra_dma_sg_req {
	struct tegra_dma_channel_regs	ch_regs;	/* hw register values for this sub-transfer */
	unsigned int			req_len;	/* length in bytes of this sub-transfer */
	bool				configured;	/* true once programmed into the hw */
	bool				last_sg;	/* last sub-transfer of the descriptor */
	struct list_head		node;		/* link on free_sg_req/pending_sg_req */
	struct tegra_dma_desc		*dma_desc;	/* owning descriptor */
	unsigned int			words_xferred;	/* progress counter; reset when (re)started */
};
  
/*
 * tegra_dma_desc: Tegra DMA descriptors which manages the client requests.
 * This descriptor keep track of transfer status, callbacks and request
 * counts etc.
 */
struct tegra_dma_desc {
	struct dma_async_tx_descriptor	txd;		/* dmaengine cookie/callback carrier */
	unsigned int			bytes_requested;	/* total bytes the client asked for */
	unsigned int			bytes_transferred;	/* running byte count (wraps for cyclic) */
	enum dma_status			dma_status;	/* DMA_COMPLETE / DMA_ERROR / in progress */
	struct list_head		node;		/* link on tdc->free_dma_desc */
	struct list_head		tx_list;	/* sg_reqs belonging to this descriptor */
	struct list_head		cb_node;	/* link on tdc->cb_desc when callback pending */
	unsigned int			cb_count;	/* number of pending callback invocations */
};
  
  struct tegra_dma_channel;
  
  typedef void (*dma_isr_handler)(struct tegra_dma_channel *tdc,
  				bool to_terminate);
  
/* tegra_dma_channel: Channel specific information */
struct tegra_dma_channel {
	struct dma_chan		dma_chan;	/* dmaengine channel handle */
	char			name[12];
	bool			config_init;	/* set once tegra_dma_slave_config() ran */
	unsigned int		id;
	void __iomem		*chan_addr;	/* base of this channel's register window */
	spinlock_t		lock;		/* protects the lists and busy state below */
	bool			busy;		/* a transfer is currently running */
	struct tegra_dma	*tdma;		/* parent controller */
	bool			cyclic;

	/* Different lists for managing the requests */
	struct list_head	free_sg_req;
	struct list_head	pending_sg_req;
	struct list_head	free_dma_desc;
	struct list_head	cb_desc;	/* descriptors with callbacks to invoke */

	/* ISR handler and tasklet for bottom half of isr handling */
	dma_isr_handler		isr_handler;
	struct tasklet_struct	tasklet;

	/* Channel-slave specific configuration */
	unsigned int slave_id;
	struct dma_slave_config dma_sconfig;
	struct tegra_dma_channel_regs channel_reg;

	struct wait_queue_head wq;
};
  
/* tegra_dma: Tegra DMA specific information */
struct tegra_dma {
	struct dma_device		dma_dev;
	struct device			*dev;
	struct clk			*dma_clk;
	struct reset_control		*rst;
	spinlock_t			global_lock;	/* serializes global pause/resume */
	void __iomem			*base_addr;	/* controller register base */
	const struct tegra_dma_chip_data *chip_data;

	/*
	 * Counter for managing global pausing of the DMA controller.
	 * Only applicable for devices that don't support individual
	 * channel pausing.
	 */
	u32				global_pause_count;

	/* Last member of the structure */
	struct tegra_dma_channel channels[];
};
  
/* Write @val to controller-global register @reg. */
static inline void tdma_write(struct tegra_dma *tdma, u32 reg, u32 val)
{
	writel(val, tdma->base_addr + reg);
}
  
/* Read controller-global register @reg. */
static inline u32 tdma_read(struct tegra_dma *tdma, u32 reg)
{
	return readl(tdma->base_addr + reg);
}
  
/* Write @val to per-channel register @reg of channel @tdc. */
static inline void tdc_write(struct tegra_dma_channel *tdc,
			     u32 reg, u32 val)
{
	writel(val, tdc->chan_addr + reg);
}
  
/* Read per-channel register @reg of channel @tdc. */
static inline u32 tdc_read(struct tegra_dma_channel *tdc, u32 reg)
{
	return readl(tdc->chan_addr + reg);
}
  
/* Convert a generic dmaengine channel to its tegra container. */
static inline struct tegra_dma_channel *to_tegra_dma_chan(struct dma_chan *dc)
{
	return container_of(dc, struct tegra_dma_channel, dma_chan);
}
3964293ae   Dmitry Osipenko   dmaengine: tegra-...
248
249
/* Convert an async_tx descriptor to its tegra DMA descriptor container. */
static inline struct tegra_dma_desc *
txd_to_tegra_dma_desc(struct dma_async_tx_descriptor *td)
{
	return container_of(td, struct tegra_dma_desc, txd);
}
  
/* Device to use for channel-scoped dev_err()/dev_dbg() messages. */
static inline struct device *tdc2dev(struct tegra_dma_channel *tdc)
{
	return &tdc->dma_chan.dev->device;
}
  
  static dma_cookie_t tegra_dma_tx_submit(struct dma_async_tx_descriptor *tx);
ec8a15867   Laxman Dewangan   dma: tegra: add d...
260
261
  
  /* Get DMA desc from free list, if not there then allocate it.  */
3964293ae   Dmitry Osipenko   dmaengine: tegra-...
262
/*
 * Get a DMA descriptor for @tdc: reuse a free one whose previous
 * completion has been acked and whose callbacks have all run, otherwise
 * allocate a fresh one (GFP_NOWAIT — may be called from atomic context).
 * Returns NULL on allocation failure.
 */
static struct tegra_dma_desc *tegra_dma_desc_get(struct tegra_dma_channel *tdc)
{
	struct tegra_dma_desc *dma_desc;
	unsigned long flags;

	spin_lock_irqsave(&tdc->lock, flags);

	/* Do not allocate if desc are waiting for ack */
	list_for_each_entry(dma_desc, &tdc->free_dma_desc, node) {
		if (async_tx_test_ack(&dma_desc->txd) && !dma_desc->cb_count) {
			list_del(&dma_desc->node);
			spin_unlock_irqrestore(&tdc->lock, flags);
			dma_desc->txd.flags = 0;
			return dma_desc;
		}
	}

	spin_unlock_irqrestore(&tdc->lock, flags);

	/* Allocate DMA desc */
	dma_desc = kzalloc(sizeof(*dma_desc), GFP_NOWAIT);
	if (!dma_desc)
		return NULL;

	dma_async_tx_descriptor_init(&dma_desc->txd, &tdc->dma_chan);
	dma_desc->txd.tx_submit = tegra_dma_tx_submit;
	dma_desc->txd.flags = 0;

	return dma_desc;
}
  
/*
 * Return @dma_desc to the channel's free pool, recycling any sg requests
 * still hanging off its tx_list onto the free_sg_req list.
 */
static void tegra_dma_desc_put(struct tegra_dma_channel *tdc,
			       struct tegra_dma_desc *dma_desc)
{
	unsigned long flags;

	spin_lock_irqsave(&tdc->lock, flags);
	if (!list_empty(&dma_desc->tx_list))
		list_splice_init(&dma_desc->tx_list, &tdc->free_sg_req);
	list_add_tail(&dma_desc->node, &tdc->free_dma_desc);
	spin_unlock_irqrestore(&tdc->lock, flags);
}
3964293ae   Dmitry Osipenko   dmaengine: tegra-...
304
305
/*
 * Get an sg request structure: pop one from the channel's free list if
 * available, otherwise allocate with GFP_NOWAIT (atomic-safe).
 * Returns NULL only when allocation fails.
 */
static struct tegra_dma_sg_req *
tegra_dma_sg_req_get(struct tegra_dma_channel *tdc)
{
	struct tegra_dma_sg_req *sg_req;
	unsigned long flags;

	spin_lock_irqsave(&tdc->lock, flags);
	if (!list_empty(&tdc->free_sg_req)) {
		sg_req = list_first_entry(&tdc->free_sg_req, typeof(*sg_req),
					  node);
		list_del(&sg_req->node);
		spin_unlock_irqrestore(&tdc->lock, flags);
		return sg_req;
	}
	spin_unlock_irqrestore(&tdc->lock, flags);

	sg_req = kzalloc(sizeof(*sg_req), GFP_NOWAIT);

	return sg_req;
}
  
  static int tegra_dma_slave_config(struct dma_chan *dc,
3964293ae   Dmitry Osipenko   dmaengine: tegra-...
325
  				  struct dma_slave_config *sconfig)
ec8a15867   Laxman Dewangan   dma: tegra: add d...
326
327
328
329
330
331
332
333
334
335
  {
  	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
  
  	if (!list_empty(&tdc->pending_sg_req)) {
  		dev_err(tdc2dev(tdc), "Configuration not allowed
  ");
  		return -EBUSY;
  	}
  
  	memcpy(&tdc->dma_sconfig, sconfig, sizeof(*sconfig));
f6160f359   Dmitry Osipenko   dmaengine: tegra-...
336
337
  	if (tdc->slave_id == TEGRA_APBDMA_SLAVE_ID_INVALID &&
  	    sconfig->device_fc) {
00ef4490e   Shardar Shariff Md   dmaengine: tegra-...
338
339
  		if (sconfig->slave_id > TEGRA_APBDMA_CSR_REQ_SEL_MASK)
  			return -EINVAL;
996556c92   Stephen Warren   dma: tegra: regis...
340
  		tdc->slave_id = sconfig->slave_id;
00ef4490e   Shardar Shariff Md   dmaengine: tegra-...
341
  	}
ec8a15867   Laxman Dewangan   dma: tegra: add d...
342
  	tdc->config_init = true;
3964293ae   Dmitry Osipenko   dmaengine: tegra-...
343

ec8a15867   Laxman Dewangan   dma: tegra: add d...
344
345
346
347
  	return 0;
  }
  
/*
 * Pause the whole DMA controller (used on chips without per-channel
 * pause). Reference-counted via global_pause_count so nested pauses from
 * different channels are safe; the GENERAL enable bit is only cleared on
 * the first pause. Optionally waits for an in-flight burst to drain.
 */
static void tegra_dma_global_pause(struct tegra_dma_channel *tdc,
				   bool wait_for_burst_complete)
{
	struct tegra_dma *tdma = tdc->tdma;

	spin_lock(&tdma->global_lock);

	if (tdc->tdma->global_pause_count == 0) {
		tdma_write(tdma, TEGRA_APBDMA_GENERAL, 0);
		if (wait_for_burst_complete)
			udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME);
	}

	tdc->tdma->global_pause_count++;

	spin_unlock(&tdma->global_lock);
}
  
/*
 * Counterpart of tegra_dma_global_pause(): drop one pause reference and
 * re-enable the controller when the count returns to zero. WARNs (and
 * does nothing) on an unbalanced resume.
 */
static void tegra_dma_global_resume(struct tegra_dma_channel *tdc)
{
	struct tegra_dma *tdma = tdc->tdma;

	spin_lock(&tdma->global_lock);

	if (WARN_ON(tdc->tdma->global_pause_count == 0))
		goto out;

	if (--tdc->tdma->global_pause_count == 0)
		tdma_write(tdma, TEGRA_APBDMA_GENERAL,
			   TEGRA_APBDMA_GENERAL_ENABLE);

out:
	spin_unlock(&tdma->global_lock);
}
1b140908c   Laxman Dewangan   dma: tegra: add s...
380
/*
 * Pause DMA on @tdc: per-channel CSRE pause when the chip supports it,
 * otherwise fall back to pausing the whole controller. Optionally waits
 * TEGRA_APBDMA_BURST_COMPLETE_TIME for an in-flight burst to finish.
 */
static void tegra_dma_pause(struct tegra_dma_channel *tdc,
			    bool wait_for_burst_complete)
{
	struct tegra_dma *tdma = tdc->tdma;

	if (tdma->chip_data->support_channel_pause) {
		tdc_write(tdc, TEGRA_APBDMA_CHAN_CSRE,
			  TEGRA_APBDMA_CHAN_CSRE_PAUSE);
		if (wait_for_burst_complete)
			udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME);
	} else {
		tegra_dma_global_pause(tdc, wait_for_burst_complete);
	}
}
  
/* Undo tegra_dma_pause(): clear the channel pause bit or drop the global pause. */
static void tegra_dma_resume(struct tegra_dma_channel *tdc)
{
	struct tegra_dma *tdma = tdc->tdma;

	if (tdma->chip_data->support_channel_pause)
		tdc_write(tdc, TEGRA_APBDMA_CHAN_CSRE, 0);
	else
		tegra_dma_global_resume(tdc);
}
ec8a15867   Laxman Dewangan   dma: tegra: add d...
403
404
  static void tegra_dma_stop(struct tegra_dma_channel *tdc)
  {
3964293ae   Dmitry Osipenko   dmaengine: tegra-...
405
  	u32 csr, status;
ec8a15867   Laxman Dewangan   dma: tegra: add d...
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
  
  	/* Disable interrupts */
  	csr = tdc_read(tdc, TEGRA_APBDMA_CHAN_CSR);
  	csr &= ~TEGRA_APBDMA_CSR_IE_EOC;
  	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, csr);
  
  	/* Disable DMA */
  	csr &= ~TEGRA_APBDMA_CSR_ENB;
  	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, csr);
  
  	/* Clear interrupt status if it is there */
  	status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
  	if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
  		dev_dbg(tdc2dev(tdc), "%s():clearing interrupt
  ", __func__);
  		tdc_write(tdc, TEGRA_APBDMA_CHAN_STATUS, status);
  	}
  	tdc->busy = false;
  }
  
/*
 * Program the channel registers from @sg_req's shadow copy and kick off
 * the transfer by setting the CSR enable bit last. The WCOUNT register
 * is written only on chips that have it as a separate register.
 */
static void tegra_dma_start(struct tegra_dma_channel *tdc,
			    struct tegra_dma_sg_req *sg_req)
{
	struct tegra_dma_channel_regs *ch_regs = &sg_req->ch_regs;

	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, ch_regs->csr);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_APBSEQ, ch_regs->apb_seq);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, ch_regs->apb_ptr);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBSEQ, ch_regs->ahb_seq);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, ch_regs->ahb_ptr);
	if (tdc->tdma->chip_data->support_separate_wcount_reg)
		tdc_write(tdc, TEGRA_APBDMA_CHAN_WCOUNT, ch_regs->wcount);

	/* Start DMA */
	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR,
		  ch_regs->csr | TEGRA_APBDMA_CSR_ENB);
}
  
  static void tegra_dma_configure_for_next(struct tegra_dma_channel *tdc,
3964293ae   Dmitry Osipenko   dmaengine: tegra-...
445
  					 struct tegra_dma_sg_req *nsg_req)
ec8a15867   Laxman Dewangan   dma: tegra: add d...
446
447
448
449
450
451
452
453
454
455
456
457
458
459
  {
  	unsigned long status;
  
  	/*
  	 * The DMA controller reloads the new configuration for next transfer
  	 * after last burst of current transfer completes.
  	 * If there is no IEC status then this makes sure that last burst
  	 * has not be completed. There may be case that last burst is on
  	 * flight and so it can complete but because DMA is paused, it
  	 * will not generates interrupt as well as not reload the new
  	 * configuration.
  	 * If there is already IEC status then interrupt handler need to
  	 * load new configuration.
  	 */
1b140908c   Laxman Dewangan   dma: tegra: add s...
460
  	tegra_dma_pause(tdc, false);
7b0e00d91   Thierry Reding   dmaengine: tegra:...
461
  	status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
ec8a15867   Laxman Dewangan   dma: tegra: add d...
462
463
464
465
466
467
468
469
470
  
  	/*
  	 * If interrupt is pending then do nothing as the ISR will handle
  	 * the programing for new request.
  	 */
  	if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
  		dev_err(tdc2dev(tdc),
  			"Skipping new configuration as interrupt is pending
  ");
1b140908c   Laxman Dewangan   dma: tegra: add s...
471
  		tegra_dma_resume(tdc);
ec8a15867   Laxman Dewangan   dma: tegra: add d...
472
473
474
475
476
477
  		return;
  	}
  
  	/* Safe to program new configuration */
  	tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, nsg_req->ch_regs.apb_ptr);
  	tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, nsg_req->ch_regs.ahb_ptr);
911daccc8   Laxman Dewangan   dma: tegra: add s...
478
479
  	if (tdc->tdma->chip_data->support_separate_wcount_reg)
  		tdc_write(tdc, TEGRA_APBDMA_CHAN_WCOUNT,
3964293ae   Dmitry Osipenko   dmaengine: tegra-...
480
  			  nsg_req->ch_regs.wcount);
ec8a15867   Laxman Dewangan   dma: tegra: add d...
481
  	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR,
3964293ae   Dmitry Osipenko   dmaengine: tegra-...
482
  		  nsg_req->ch_regs.csr | TEGRA_APBDMA_CSR_ENB);
ec8a15867   Laxman Dewangan   dma: tegra: add d...
483
  	nsg_req->configured = true;
156a599b0   Dmitry Osipenko   dmaengine: tegra-...
484
  	nsg_req->words_xferred = 0;
ec8a15867   Laxman Dewangan   dma: tegra: add d...
485

1b140908c   Laxman Dewangan   dma: tegra: add s...
486
  	tegra_dma_resume(tdc);
ec8a15867   Laxman Dewangan   dma: tegra: add d...
487
488
489
490
491
  }
  
/*
 * Start the first request on the pending list and mark the channel busy.
 * Caller must hold tdc->lock and guarantee the pending list is non-empty.
 */
static void tdc_start_head_req(struct tegra_dma_channel *tdc)
{
	struct tegra_dma_sg_req *sg_req;

	sg_req = list_first_entry(&tdc->pending_sg_req, typeof(*sg_req), node);
	tegra_dma_start(tdc, sg_req);
	sg_req->configured = true;
	sg_req->words_xferred = 0;
	tdc->busy = true;
}
  
/*
 * If there is a request queued after the current head, pre-program it
 * into the hardware (via tegra_dma_configure_for_next) so the controller
 * can reload it when the head transfer finishes. No-op when the head is
 * the only pending request.
 */
static void tdc_configure_next_head_desc(struct tegra_dma_channel *tdc)
{
	struct tegra_dma_sg_req *hsgreq, *hnsgreq;

	hsgreq = list_first_entry(&tdc->pending_sg_req, typeof(*hsgreq), node);
	if (!list_is_last(&hsgreq->node, &tdc->pending_sg_req)) {
		hnsgreq = list_first_entry(&hsgreq->node, typeof(*hnsgreq),
					   node);
		tegra_dma_configure_for_next(tdc, hnsgreq);
	}
}
3964293ae   Dmitry Osipenko   dmaengine: tegra-...
510
511
512
513
/*
 * Bytes already transferred for @sg_req, derived from the hardware COUNT
 * field in @status (which counts down). The extra -4 accounts for the
 * word currently in flight — NOTE(review): inferred from the arithmetic;
 * confirm against the APB DMA STATUS register spec.
 */
static inline unsigned int
get_current_xferred_count(struct tegra_dma_channel *tdc,
			  struct tegra_dma_sg_req *sg_req,
			  unsigned long status)
{
	return sg_req->req_len - (status & TEGRA_APBDMA_STATUS_COUNT_MASK) - 4;
}
  
/*
 * Abort every pending request on @tdc: move all sg requests to the free
 * list, mark each affected descriptor DMA_ERROR, queue its callback, and
 * clear the ISR handler. Caller must hold tdc->lock.
 */
static void tegra_dma_abort_all(struct tegra_dma_channel *tdc)
{
	struct tegra_dma_desc *dma_desc;
	struct tegra_dma_sg_req *sgreq;

	while (!list_empty(&tdc->pending_sg_req)) {
		sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq),
					 node);
		list_move_tail(&sgreq->node, &tdc->free_sg_req);
		if (sgreq->last_sg) {
			dma_desc = sgreq->dma_desc;
			dma_desc->dma_status = DMA_ERROR;
			list_add_tail(&dma_desc->node, &tdc->free_dma_desc);

			/* Add in cb list if it is not there. */
			if (!dma_desc->cb_count)
				list_add_tail(&dma_desc->cb_node,
					      &tdc->cb_desc);
			dma_desc->cb_count++;
		}
	}
	tdc->isr_handler = NULL;
}
  
  static bool handle_continuous_head_request(struct tegra_dma_channel *tdc,
3964293ae   Dmitry Osipenko   dmaengine: tegra-...
543
  					   bool to_terminate)
ec8a15867   Laxman Dewangan   dma: tegra: add d...
544
  {
3964293ae   Dmitry Osipenko   dmaengine: tegra-...
545
  	struct tegra_dma_sg_req *hsgreq;
ec8a15867   Laxman Dewangan   dma: tegra: add d...
546
547
548
549
550
551
552
553
554
  
  	/*
  	 * Check that head req on list should be in flight.
  	 * If it is not in flight then abort transfer as
  	 * looping of transfer can not continue.
  	 */
  	hsgreq = list_first_entry(&tdc->pending_sg_req, typeof(*hsgreq), node);
  	if (!hsgreq->configured) {
  		tegra_dma_stop(tdc);
84a3f375e   Dmitry Osipenko   dmaengine: tegra-...
555
  		pm_runtime_put(tdc->tdma->dev);
01b66a752   Dmitry Osipenko   dmaengine: tegra-...
556
557
  		dev_err(tdc2dev(tdc), "DMA transfer underflow, aborting DMA
  ");
ec8a15867   Laxman Dewangan   dma: tegra: add d...
558
559
560
561
562
563
564
  		tegra_dma_abort_all(tdc);
  		return false;
  	}
  
  	/* Configure next request */
  	if (!to_terminate)
  		tdc_configure_next_head_desc(tdc);
3964293ae   Dmitry Osipenko   dmaengine: tegra-...
565

ec8a15867   Laxman Dewangan   dma: tegra: add d...
566
567
568
569
  	return true;
  }
  
/*
 * Completion handling for one-shot (non-cyclic) transfers. Called with
 * tdc->lock held. Retires the head sg request, completes the descriptor
 * cookie and queues its callback when this was the last sg, then either
 * starts the next pending request or drops the runtime-PM reference when
 * the queue is empty. Does not restart DMA when terminating.
 */
static void handle_once_dma_done(struct tegra_dma_channel *tdc,
				 bool to_terminate)
{
	struct tegra_dma_desc *dma_desc;
	struct tegra_dma_sg_req *sgreq;

	tdc->busy = false;
	sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq), node);
	dma_desc = sgreq->dma_desc;
	dma_desc->bytes_transferred += sgreq->req_len;

	list_del(&sgreq->node);
	if (sgreq->last_sg) {
		dma_desc->dma_status = DMA_COMPLETE;
		dma_cookie_complete(&dma_desc->txd);
		if (!dma_desc->cb_count)
			list_add_tail(&dma_desc->cb_node, &tdc->cb_desc);
		dma_desc->cb_count++;
		list_add_tail(&dma_desc->node, &tdc->free_dma_desc);
	}
	list_add_tail(&sgreq->node, &tdc->free_sg_req);

	/* Do not start DMA if it is going to be terminate */
	if (to_terminate)
		return;

	if (list_empty(&tdc->pending_sg_req)) {
		pm_runtime_put(tdc->tdma->dev);
		return;
	}

	tdc_start_head_req(tdc);
}
  
/*
 * Completion handling for continuous (cyclic) single-buffer transfers.
 * Called with tdc->lock held. Advances the byte counter (modulo
 * bytes_requested, since a long-running cyclic transfer wraps), queues
 * the period callback, rotates the finished sg request to the tail of
 * the pending list, and validates/pre-programs the new head; marks the
 * descriptor DMA_ERROR if that validation fails.
 */
static void handle_cont_sngl_cycle_dma_done(struct tegra_dma_channel *tdc,
					    bool to_terminate)
{
	struct tegra_dma_desc *dma_desc;
	struct tegra_dma_sg_req *sgreq;
	bool st;

	sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq), node);
	dma_desc = sgreq->dma_desc;
	/* if we dma for long enough the transfer count will wrap */
	dma_desc->bytes_transferred =
		(dma_desc->bytes_transferred + sgreq->req_len) %
		dma_desc->bytes_requested;

	/* Callback need to be call */
	if (!dma_desc->cb_count)
		list_add_tail(&dma_desc->cb_node, &tdc->cb_desc);
	dma_desc->cb_count++;

	sgreq->words_xferred = 0;

	/* If not last req then put at end of pending list */
	if (!list_is_last(&sgreq->node, &tdc->pending_sg_req)) {
		list_move_tail(&sgreq->node, &tdc->pending_sg_req);
		sgreq->configured = false;
		st = handle_continuous_head_request(tdc, to_terminate);
		if (!st)
			dma_desc->dma_status = DMA_ERROR;
	}
}
86fc54fa2   Allen Pais   dmaengine: tegra2...
630
/*
 * Tasklet that invokes client completion callbacks for finished
 * descriptors queued on tdc->cb_desc by the ISR handlers.
 *
 * The channel lock is dropped around the actual callback invocation so
 * clients may call back into the dmaengine API; cb_count is snapshotted
 * and cleared under the lock first, so a concurrent completion re-queues
 * the descriptor rather than racing with the running callbacks.
 */
static void tegra_dma_tasklet(struct tasklet_struct *t)
{
	struct tegra_dma_channel *tdc = from_tasklet(tdc, t, tasklet);
	struct dmaengine_desc_callback cb;
	struct tegra_dma_desc *dma_desc;
	unsigned int cb_count;
	unsigned long flags;

	spin_lock_irqsave(&tdc->lock, flags);
	while (!list_empty(&tdc->cb_desc)) {
		dma_desc = list_first_entry(&tdc->cb_desc, typeof(*dma_desc),
					    cb_node);
		list_del(&dma_desc->cb_node);
		dmaengine_desc_get_callback(&dma_desc->txd, &cb);
		/* Snapshot and reset the pending-callback count under lock */
		cb_count = dma_desc->cb_count;
		dma_desc->cb_count = 0;
		trace_tegra_dma_complete_cb(&tdc->dma_chan, cb_count,
					    cb.callback);
		/* Drop the lock while running client code */
		spin_unlock_irqrestore(&tdc->lock, flags);
		while (cb_count--)
			dmaengine_desc_callback_invoke(&cb, NULL);
		spin_lock_irqsave(&tdc->lock, flags);
	}
	spin_unlock_irqrestore(&tdc->lock, flags);
}
  
/*
 * Per-channel interrupt handler.
 *
 * On end-of-conversion (EOC): acknowledges the status by writing it back,
 * runs the mode-specific completion handler (once vs. cyclic), schedules
 * the callback tasklet, and wakes waiters in tegra_dma_synchronize().
 * Returns IRQ_NONE if the EOC bit was not set (interrupt already served).
 */
static irqreturn_t tegra_dma_isr(int irq, void *dev_id)
{
	struct tegra_dma_channel *tdc = dev_id;
	u32 status;

	spin_lock(&tdc->lock);

	trace_tegra_dma_isr(&tdc->dma_chan, irq);
	status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
	if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
		/* Write-one-to-clear the interrupt status */
		tdc_write(tdc, TEGRA_APBDMA_CHAN_STATUS, status);
		tdc->isr_handler(tdc, false);
		tasklet_schedule(&tdc->tasklet);
		wake_up_all(&tdc->wq);
		spin_unlock(&tdc->lock);
		return IRQ_HANDLED;
	}

	spin_unlock(&tdc->lock);
	dev_info(tdc2dev(tdc), "Interrupt already served status 0x%08x\n",
		 status);

	return IRQ_NONE;
}
  
/*
 * dmaengine tx_submit hook: assigns a cookie to the descriptor and moves
 * its prepared sg requests onto the channel's pending queue.  The actual
 * hardware start is deferred to tegra_dma_issue_pending().
 *
 * Returns the newly assigned cookie.
 */
static dma_cookie_t tegra_dma_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct tegra_dma_desc *dma_desc = txd_to_tegra_dma_desc(txd);
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(txd->chan);
	unsigned long flags;
	dma_cookie_t cookie;

	spin_lock_irqsave(&tdc->lock, flags);
	dma_desc->dma_status = DMA_IN_PROGRESS;
	cookie = dma_cookie_assign(&dma_desc->txd);
	list_splice_tail_init(&dma_desc->tx_list, &tdc->pending_sg_req);
	spin_unlock_irqrestore(&tdc->lock, flags);

	return cookie;
}
  
  static void tegra_dma_issue_pending(struct dma_chan *dc)
  {
  	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
  	unsigned long flags;
84a3f375e   Dmitry Osipenko   dmaengine: tegra-...
700
  	int err;
ec8a15867   Laxman Dewangan   dma: tegra: add d...
701
702
703
704
705
706
707
708
  
  	spin_lock_irqsave(&tdc->lock, flags);
  	if (list_empty(&tdc->pending_sg_req)) {
  		dev_err(tdc2dev(tdc), "No DMA request
  ");
  		goto end;
  	}
  	if (!tdc->busy) {
84a3f375e   Dmitry Osipenko   dmaengine: tegra-...
709
710
711
712
713
714
  		err = pm_runtime_get_sync(tdc->tdma->dev);
  		if (err < 0) {
  			dev_err(tdc2dev(tdc), "Failed to enable DMA
  ");
  			goto end;
  		}
ec8a15867   Laxman Dewangan   dma: tegra: add d...
715
716
717
718
719
720
721
722
723
724
725
726
727
728
  		tdc_start_head_req(tdc);
  
  		/* Continuous single mode: Configure next req */
  		if (tdc->cyclic) {
  			/*
  			 * Wait for 1 burst time for configure DMA for
  			 * next transfer.
  			 */
  			udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME);
  			tdc_configure_next_head_desc(tdc);
  		}
  	}
  end:
  	spin_unlock_irqrestore(&tdc->lock, flags);
ec8a15867   Laxman Dewangan   dma: tegra: add d...
729
  }
a7c439a45   Vinod Koul   dmaengine: tegra:...
730
/*
 * dmaengine terminate_all hook.
 *
 * Sequence: pause the channel, drain a pending EOC (so the completion
 * accounting stays consistent), read the transfer counter, stop the
 * channel, credit the partially transferred bytes to the head request,
 * then resume (un-pause) the now-stopped channel.  Finally all queued
 * requests are aborted and pending callbacks are cancelled.  The
 * runtime-PM reference taken in issue_pending() is dropped here.
 *
 * Always returns 0.
 */
static int tegra_dma_terminate_all(struct dma_chan *dc)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma_desc *dma_desc;
	struct tegra_dma_sg_req *sgreq;
	unsigned long flags;
	u32 status, wcount;
	bool was_busy;

	spin_lock_irqsave(&tdc->lock, flags);

	if (!tdc->busy)
		goto skip_dma_stop;

	/* Pause DMA before checking the queue status */
	tegra_dma_pause(tdc, true);

	status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
	if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
		/* A completion raced with termination: account it first */
		dev_dbg(tdc2dev(tdc), "%s():handling isr\n", __func__);
		tdc->isr_handler(tdc, true);
		status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
	}
	/* Newer chips expose the word counter in a separate register */
	if (tdc->tdma->chip_data->support_separate_wcount_reg)
		wcount = tdc_read(tdc, TEGRA_APBDMA_CHAN_WORD_TRANSFER);
	else
		wcount = status;

	was_busy = tdc->busy;
	tegra_dma_stop(tdc);

	if (!list_empty(&tdc->pending_sg_req) && was_busy) {
		/* Credit the partial transfer of the interrupted request */
		sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq),
					 node);
		sgreq->dma_desc->bytes_transferred +=
				get_current_xferred_count(tdc, sgreq, wcount);
	}
	tegra_dma_resume(tdc);

	/* Balance the reference taken in tegra_dma_issue_pending() */
	pm_runtime_put(tdc->tdma->dev);
	wake_up_all(&tdc->wq);

skip_dma_stop:
	tegra_dma_abort_all(tdc);

	/* Cancel any completion callbacks still queued for the tasklet */
	while (!list_empty(&tdc->cb_desc)) {
		dma_desc = list_first_entry(&tdc->cb_desc, typeof(*dma_desc),
					    cb_node);
		list_del(&dma_desc->cb_node);
		dma_desc->cb_count = 0;
	}
	spin_unlock_irqrestore(&tdc->lock, flags);

	return 0;
}
6697255f2   Dmitry Osipenko   dmaengine: tegra-...
786
787
788
789
790
791
792
793
794
795
796
  static bool tegra_dma_eoc_interrupt_deasserted(struct tegra_dma_channel *tdc)
  {
  	unsigned long flags;
  	u32 status;
  
  	spin_lock_irqsave(&tdc->lock, flags);
  	status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
  	spin_unlock_irqrestore(&tdc->lock, flags);
  
  	return !(status & TEGRA_APBDMA_STATUS_ISE_EOC);
  }
dda5e35a7   Dmitry Osipenko   dmaengine: tegra-...
797
798
799
  static void tegra_dma_synchronize(struct dma_chan *dc)
  {
  	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
664475cff   Dmitry Osipenko   dmaengine: tegra-...
800
801
802
803
804
805
806
807
  	int err;
  
  	err = pm_runtime_get_sync(tdc->tdma->dev);
  	if (err < 0) {
  		dev_err(tdc2dev(tdc), "Failed to synchronize DMA: %d
  ", err);
  		return;
  	}
dda5e35a7   Dmitry Osipenko   dmaengine: tegra-...
808

6697255f2   Dmitry Osipenko   dmaengine: tegra-...
809
810
811
812
813
814
  	/*
  	 * CPU, which handles interrupt, could be busy in
  	 * uninterruptible state, in this case sibling CPU
  	 * should wait until interrupt is handled.
  	 */
  	wait_event(tdc->wq, tegra_dma_eoc_interrupt_deasserted(tdc));
dda5e35a7   Dmitry Osipenko   dmaengine: tegra-...
815
  	tasklet_kill(&tdc->tasklet);
664475cff   Dmitry Osipenko   dmaengine: tegra-...
816
817
  
  	pm_runtime_put(tdc->tdma->dev);
dda5e35a7   Dmitry Osipenko   dmaengine: tegra-...
818
  }
156a599b0   Dmitry Osipenko   dmaengine: tegra-...
819
820
821
/*
 * Return the number of bytes already transferred for @sg_req, reading
 * the hardware word counter.  Only the head of the pending queue can be
 * in flight, so any other request reports 0.  Called with tdc->lock held.
 *
 * The status register is read after the word counter so an EOC that
 * lands between the two reads is still observed and reported as a full
 * transfer.
 */
static unsigned int tegra_dma_sg_bytes_xferred(struct tegra_dma_channel *tdc,
					       struct tegra_dma_sg_req *sg_req)
{
	u32 status, wcount = 0;

	if (!list_is_first(&sg_req->node, &tdc->pending_sg_req))
		return 0;

	if (tdc->tdma->chip_data->support_separate_wcount_reg)
		wcount = tdc_read(tdc, TEGRA_APBDMA_CHAN_WORD_TRANSFER);

	status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);

	if (!tdc->tdma->chip_data->support_separate_wcount_reg)
		wcount = status;

	if (status & TEGRA_APBDMA_STATUS_ISE_EOC)
		return sg_req->req_len;

	wcount = get_current_xferred_count(tdc, sg_req, wcount);

	if (!wcount) {
		/*
		 * If wcount wasn't ever polled for this SG before, then
		 * simply assume that transfer hasn't started yet.
		 *
		 * Otherwise it's the end of the transfer.
		 *
		 * The alternative would be to poll the status register
		 * until EOC bit is set or wcount goes UP. That's so
		 * because EOC bit is getting set only after the last
		 * burst's completion and counter is less than the actual
		 * transfer size by 4 bytes. The counter value wraps around
		 * in a cyclic mode before EOC is set(!), so we can't easily
		 * distinguish start of transfer from its end.
		 */
		if (sg_req->words_xferred)
			wcount = sg_req->req_len - 4;

	} else if (wcount < sg_req->words_xferred) {
		/*
		 * This case will never happen for a non-cyclic transfer.
		 *
		 * For a cyclic transfer, although it is possible for the
		 * next transfer to have already started (resetting the word
		 * count), this case should still not happen because we should
		 * have detected that the EOC bit is set and hence the transfer
		 * was completed.
		 */
		WARN_ON_ONCE(1);

		wcount = sg_req->req_len - 4;
	} else {
		sg_req->words_xferred = wcount;
	}

	return wcount;
}
ec8a15867   Laxman Dewangan   dma: tegra: add d...
877
/*
 * dmaengine tx_status hook: reports the status and residue for @cookie.
 *
 * After the generic cookie check, the descriptor is looked up first on
 * the free list (completed but waiting for client ACK) and then on the
 * pending queue, where the in-flight byte count is added into the
 * residue calculation.  An unknown cookie leaves the generic status and
 * reports no residue.
 */
static enum dma_status tegra_dma_tx_status(struct dma_chan *dc,
					   dma_cookie_t cookie,
					   struct dma_tx_state *txstate)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma_desc *dma_desc;
	struct tegra_dma_sg_req *sg_req;
	enum dma_status ret;
	unsigned long flags;
	unsigned int residual;
	unsigned int bytes = 0;

	ret = dma_cookie_status(dc, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	spin_lock_irqsave(&tdc->lock, flags);

	/* Check on wait_ack desc status */
	list_for_each_entry(dma_desc, &tdc->free_dma_desc, node) {
		if (dma_desc->txd.cookie == cookie) {
			ret = dma_desc->dma_status;
			goto found;
		}
	}

	/* Check in pending list */
	list_for_each_entry(sg_req, &tdc->pending_sg_req, node) {
		dma_desc = sg_req->dma_desc;
		if (dma_desc->txd.cookie == cookie) {
			bytes = tegra_dma_sg_bytes_xferred(tdc, sg_req);
			ret = dma_desc->dma_status;
			goto found;
		}
	}

	dev_dbg(tdc2dev(tdc), "cookie %d not found\n", cookie);
	dma_desc = NULL;

found:
	if (dma_desc && txstate) {
		/* Modulo handles the cyclic-mode wrap of bytes_transferred */
		residual = dma_desc->bytes_requested -
			   ((dma_desc->bytes_transferred + bytes) %
			    dma_desc->bytes_requested);
		dma_set_residue(txstate, residual);
	}

	trace_tegra_dma_tx_status(&tdc->dma_chan, cookie, txstate);
	spin_unlock_irqrestore(&tdc->lock, flags);

	return ret;
}
3964293ae   Dmitry Osipenko   dmaengine: tegra-...
928
929
  static inline unsigned int get_bus_width(struct tegra_dma_channel *tdc,
  					 enum dma_slave_buswidth slave_bw)
ec8a15867   Laxman Dewangan   dma: tegra: add d...
930
931
932
933
934
935
936
937
938
939
940
941
  {
  	switch (slave_bw) {
  	case DMA_SLAVE_BUSWIDTH_1_BYTE:
  		return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_8;
  	case DMA_SLAVE_BUSWIDTH_2_BYTES:
  		return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_16;
  	case DMA_SLAVE_BUSWIDTH_4_BYTES:
  		return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_32;
  	case DMA_SLAVE_BUSWIDTH_8_BYTES:
  		return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_64;
  	default:
  		dev_warn(tdc2dev(tdc),
3964293ae   Dmitry Osipenko   dmaengine: tegra-...
942
943
  			 "slave bw is not supported, using 32bits
  ");
ec8a15867   Laxman Dewangan   dma: tegra: add d...
944
945
946
  		return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_32;
  	}
  }
3964293ae   Dmitry Osipenko   dmaengine: tegra-...
947
948
949
950
  static inline unsigned int get_burst_size(struct tegra_dma_channel *tdc,
  					  u32 burst_size,
  					  enum dma_slave_buswidth slave_bw,
  					  u32 len)
ec8a15867   Laxman Dewangan   dma: tegra: add d...
951
  {
3964293ae   Dmitry Osipenko   dmaengine: tegra-...
952
  	unsigned int burst_byte, burst_ahb_width;
ec8a15867   Laxman Dewangan   dma: tegra: add d...
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
  
  	/*
  	 * burst_size from client is in terms of the bus_width.
  	 * convert them into AHB memory width which is 4 byte.
  	 */
  	burst_byte = burst_size * slave_bw;
  	burst_ahb_width = burst_byte / 4;
  
  	/* If burst size is 0 then calculate the burst size based on length */
  	if (!burst_ahb_width) {
  		if (len & 0xF)
  			return TEGRA_APBDMA_AHBSEQ_BURST_1;
  		else if ((len >> 4) & 0x1)
  			return TEGRA_APBDMA_AHBSEQ_BURST_4;
  		else
  			return TEGRA_APBDMA_AHBSEQ_BURST_8;
  	}
  	if (burst_ahb_width < 4)
  		return TEGRA_APBDMA_AHBSEQ_BURST_1;
  	else if (burst_ahb_width < 8)
  		return TEGRA_APBDMA_AHBSEQ_BURST_4;
  	else
  		return TEGRA_APBDMA_AHBSEQ_BURST_8;
  }
  
  static int get_transfer_param(struct tegra_dma_channel *tdc,
3964293ae   Dmitry Osipenko   dmaengine: tegra-...
979
980
981
982
983
984
  			      enum dma_transfer_direction direction,
  			      u32 *apb_addr,
  			      u32 *apb_seq,
  			      u32 *csr,
  			      unsigned int *burst_size,
  			      enum dma_slave_buswidth *slave_bw)
ec8a15867   Laxman Dewangan   dma: tegra: add d...
985
  {
ec8a15867   Laxman Dewangan   dma: tegra: add d...
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
1001
1002
1003
  	switch (direction) {
  	case DMA_MEM_TO_DEV:
  		*apb_addr = tdc->dma_sconfig.dst_addr;
  		*apb_seq = get_bus_width(tdc, tdc->dma_sconfig.dst_addr_width);
  		*burst_size = tdc->dma_sconfig.dst_maxburst;
  		*slave_bw = tdc->dma_sconfig.dst_addr_width;
  		*csr = TEGRA_APBDMA_CSR_DIR;
  		return 0;
  
  	case DMA_DEV_TO_MEM:
  		*apb_addr = tdc->dma_sconfig.src_addr;
  		*apb_seq = get_bus_width(tdc, tdc->dma_sconfig.src_addr_width);
  		*burst_size = tdc->dma_sconfig.src_maxburst;
  		*slave_bw = tdc->dma_sconfig.src_addr_width;
  		*csr = 0;
  		return 0;
  
  	default:
547b311cf   Ben Dooks   dmaengine: tegra:...
1004
1005
  		dev_err(tdc2dev(tdc), "DMA direction is not supported
  ");
3964293ae   Dmitry Osipenko   dmaengine: tegra-...
1006
  		break;
ec8a15867   Laxman Dewangan   dma: tegra: add d...
1007
  	}
3964293ae   Dmitry Osipenko   dmaengine: tegra-...
1008

ec8a15867   Laxman Dewangan   dma: tegra: add d...
1009
1010
  	return -EINVAL;
  }
911daccc8   Laxman Dewangan   dma: tegra: add s...
1011
  static void tegra_dma_prep_wcount(struct tegra_dma_channel *tdc,
3964293ae   Dmitry Osipenko   dmaengine: tegra-...
1012
1013
  				  struct tegra_dma_channel_regs *ch_regs,
  				  u32 len)
911daccc8   Laxman Dewangan   dma: tegra: add s...
1014
1015
1016
1017
1018
1019
1020
1021
  {
  	u32 len_field = (len - 4) & 0xFFFC;
  
  	if (tdc->tdma->chip_data->support_separate_wcount_reg)
  		ch_regs->wcount = len_field;
  	else
  		ch_regs->csr |= len_field;
  }
3964293ae   Dmitry Osipenko   dmaengine: tegra-...
1022
1023
1024
1025
1026
1027
1028
/*
 * dmaengine prep_slave_sg hook: builds a one-shot (CSR_ONCE) descriptor
 * covering @sgl.  Each scatterlist entry becomes one sg request with its
 * own register snapshot; all entries share the CSR/APB settings derived
 * from the channel config and direction.  Returns the descriptor or NULL
 * on any validation/allocation failure.
 */
static struct dma_async_tx_descriptor *
tegra_dma_prep_slave_sg(struct dma_chan *dc,
			struct scatterlist *sgl,
			unsigned int sg_len,
			enum dma_transfer_direction direction,
			unsigned long flags,
			void *context)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma_sg_req *sg_req = NULL;
	u32 csr, ahb_seq, apb_ptr, apb_seq;
	enum dma_slave_buswidth slave_bw;
	struct tegra_dma_desc *dma_desc;
	struct list_head req_list;
	struct scatterlist *sg;
	unsigned int burst_size;
	unsigned int i;

	if (!tdc->config_init) {
		dev_err(tdc2dev(tdc), "DMA channel is not configured\n");
		return NULL;
	}
	if (sg_len < 1) {
		dev_err(tdc2dev(tdc), "Invalid segment length %d\n", sg_len);
		return NULL;
	}

	if (get_transfer_param(tdc, direction, &apb_ptr, &apb_seq, &csr,
			       &burst_size, &slave_bw) < 0)
		return NULL;

	INIT_LIST_HEAD(&req_list);

	ahb_seq = TEGRA_APBDMA_AHBSEQ_INTR_ENB;
	ahb_seq |= TEGRA_APBDMA_AHBSEQ_WRAP_NONE <<
					TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT;
	ahb_seq |= TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32;

	/* One-shot transfer: channel stops after each request */
	csr |= TEGRA_APBDMA_CSR_ONCE;

	if (tdc->slave_id != TEGRA_APBDMA_SLAVE_ID_INVALID) {
		csr |= TEGRA_APBDMA_CSR_FLOW;
		csr |= tdc->slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT;
	}

	/* Completion interrupts are required for queue bookkeeping */
	if (flags & DMA_PREP_INTERRUPT) {
		csr |= TEGRA_APBDMA_CSR_IE_EOC;
	} else {
		WARN_ON_ONCE(1);
		return NULL;
	}

	apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1;

	dma_desc = tegra_dma_desc_get(tdc);
	if (!dma_desc) {
		dev_err(tdc2dev(tdc), "DMA descriptors not available\n");
		return NULL;
	}
	INIT_LIST_HEAD(&dma_desc->tx_list);
	INIT_LIST_HEAD(&dma_desc->cb_node);
	dma_desc->cb_count = 0;
	dma_desc->bytes_requested = 0;
	dma_desc->bytes_transferred = 0;
	dma_desc->dma_status = DMA_IN_PROGRESS;

	/* Make transfer requests */
	for_each_sg(sgl, sg, sg_len, i) {
		u32 len, mem;

		mem = sg_dma_address(sg);
		len = sg_dma_len(sg);

		/* Hardware needs word-aligned address/length within limits */
		if ((len & 3) || (mem & 3) ||
		    len > tdc->tdma->chip_data->max_dma_count) {
			dev_err(tdc2dev(tdc),
				"DMA length/memory address is not supported\n");
			tegra_dma_desc_put(tdc, dma_desc);
			return NULL;
		}

		sg_req = tegra_dma_sg_req_get(tdc);
		if (!sg_req) {
			dev_err(tdc2dev(tdc), "DMA sg-req not available\n");
			tegra_dma_desc_put(tdc, dma_desc);
			return NULL;
		}

		ahb_seq |= get_burst_size(tdc, burst_size, slave_bw, len);
		dma_desc->bytes_requested += len;

		sg_req->ch_regs.apb_ptr = apb_ptr;
		sg_req->ch_regs.ahb_ptr = mem;
		sg_req->ch_regs.csr = csr;
		tegra_dma_prep_wcount(tdc, &sg_req->ch_regs, len);
		sg_req->ch_regs.apb_seq = apb_seq;
		sg_req->ch_regs.ahb_seq = ahb_seq;
		sg_req->configured = false;
		sg_req->last_sg = false;
		sg_req->dma_desc = dma_desc;
		sg_req->req_len = len;

		list_add_tail(&sg_req->node, &dma_desc->tx_list);
	}
	sg_req->last_sg = true;
	if (flags & DMA_CTRL_ACK)
		dma_desc->txd.flags = DMA_CTRL_ACK;

	/*
	 * Make sure that mode should not be conflicting with currently
	 * configured mode.
	 */
	if (!tdc->isr_handler) {
		tdc->isr_handler = handle_once_dma_done;
		tdc->cyclic = false;
	} else {
		if (tdc->cyclic) {
			dev_err(tdc2dev(tdc), "DMA configured in cyclic mode\n");
			tegra_dma_desc_put(tdc, dma_desc);
			return NULL;
		}
	}

	return &dma_desc->txd;
}
3964293ae   Dmitry Osipenko   dmaengine: tegra-...
1149
1150
1151
1152
1153
1154
/*
 * dmaengine prep_dma_cyclic hook: builds a cyclic descriptor by splitting
 * @buf_len into @period_len-sized sg requests that the completion handler
 * keeps rotating on the pending queue.  @buf_len must be a multiple of
 * @period_len and the channel must be idle.  Returns the descriptor or
 * NULL on any validation/allocation failure.
 */
static struct dma_async_tx_descriptor *
tegra_dma_prep_dma_cyclic(struct dma_chan *dc, dma_addr_t buf_addr,
			  size_t buf_len,
			  size_t period_len,
			  enum dma_transfer_direction direction,
			  unsigned long flags)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma_sg_req *sg_req = NULL;
	u32 csr, ahb_seq, apb_ptr, apb_seq;
	enum dma_slave_buswidth slave_bw;
	struct tegra_dma_desc *dma_desc;
	dma_addr_t mem = buf_addr;
	unsigned int burst_size;
	size_t len, remain_len;

	if (!buf_len || !period_len) {
		dev_err(tdc2dev(tdc), "Invalid buffer/period len\n");
		return NULL;
	}

	if (!tdc->config_init) {
		dev_err(tdc2dev(tdc), "DMA slave is not configured\n");
		return NULL;
	}

	/*
	 * We allow to take more number of requests till DMA is
	 * not started. The driver will loop over all requests.
	 * Once DMA is started then new requests can be queued only after
	 * terminating the DMA.
	 */
	if (tdc->busy) {
		dev_err(tdc2dev(tdc), "Request not allowed when DMA running\n");
		return NULL;
	}

	/*
	 * We only support cycle transfer when buf_len is multiple of
	 * period_len.
	 */
	if (buf_len % period_len) {
		dev_err(tdc2dev(tdc), "buf_len is not multiple of period_len\n");
		return NULL;
	}

	len = period_len;
	/* Hardware needs word-aligned address/length within limits */
	if ((len & 3) || (buf_addr & 3) ||
	    len > tdc->tdma->chip_data->max_dma_count) {
		dev_err(tdc2dev(tdc), "Req len/mem address is not correct\n");
		return NULL;
	}

	if (get_transfer_param(tdc, direction, &apb_ptr, &apb_seq, &csr,
			       &burst_size, &slave_bw) < 0)
		return NULL;

	ahb_seq = TEGRA_APBDMA_AHBSEQ_INTR_ENB;
	ahb_seq |= TEGRA_APBDMA_AHBSEQ_WRAP_NONE <<
					TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT;
	ahb_seq |= TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32;

	if (tdc->slave_id != TEGRA_APBDMA_SLAVE_ID_INVALID) {
		csr |= TEGRA_APBDMA_CSR_FLOW;
		csr |= tdc->slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT;
	}

	/* Completion interrupts are required to advance the cycle */
	if (flags & DMA_PREP_INTERRUPT) {
		csr |= TEGRA_APBDMA_CSR_IE_EOC;
	} else {
		WARN_ON_ONCE(1);
		return NULL;
	}

	apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1;

	dma_desc = tegra_dma_desc_get(tdc);
	if (!dma_desc) {
		dev_err(tdc2dev(tdc), "not enough descriptors available\n");
		return NULL;
	}

	INIT_LIST_HEAD(&dma_desc->tx_list);
	INIT_LIST_HEAD(&dma_desc->cb_node);
	dma_desc->cb_count = 0;

	dma_desc->bytes_transferred = 0;
	dma_desc->bytes_requested = buf_len;
	remain_len = buf_len;

	/* Split transfer equal to period size */
	while (remain_len) {
		sg_req = tegra_dma_sg_req_get(tdc);
		if (!sg_req) {
			dev_err(tdc2dev(tdc), "DMA sg-req not available\n");
			tegra_dma_desc_put(tdc, dma_desc);
			return NULL;
		}

		ahb_seq |= get_burst_size(tdc, burst_size, slave_bw, len);
		sg_req->ch_regs.apb_ptr = apb_ptr;
		sg_req->ch_regs.ahb_ptr = mem;
		sg_req->ch_regs.csr = csr;
		tegra_dma_prep_wcount(tdc, &sg_req->ch_regs, len);
		sg_req->ch_regs.apb_seq = apb_seq;
		sg_req->ch_regs.ahb_seq = ahb_seq;
		sg_req->configured = false;
		sg_req->last_sg = false;
		sg_req->dma_desc = dma_desc;
		sg_req->req_len = len;

		list_add_tail(&sg_req->node, &dma_desc->tx_list);
		remain_len -= len;
		mem += len;
	}
	sg_req->last_sg = true;
	if (flags & DMA_CTRL_ACK)
		dma_desc->txd.flags = DMA_CTRL_ACK;

	/*
	 * Make sure that mode should not be conflicting with currently
	 * configured mode.
	 */
	if (!tdc->isr_handler) {
		tdc->isr_handler = handle_cont_sngl_cycle_dma_done;
		tdc->cyclic = true;
	} else {
		if (!tdc->cyclic) {
			dev_err(tdc2dev(tdc), "DMA configuration conflict\n");
			tegra_dma_desc_put(tdc, dma_desc);
			return NULL;
		}
	}

	return &dma_desc->txd;
}
  
/*
 * dmaengine alloc_chan_resources hook: resets the channel's cookie
 * counter.  Descriptors and sg requests are allocated lazily on demand.
 * Always returns 0.
 */
static int tegra_dma_alloc_chan_resources(struct dma_chan *dc)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);

	dma_cookie_init(&tdc->dma_chan);

	return 0;
}
  
/*
 * dmaengine free_chan_resources hook: terminates any activity, kills the
 * tasklet, detaches all descriptor/sg-request lists from the channel and
 * frees them, then resets the channel back to its unconfigured state
 * with an invalid slave id.
 */
static void tegra_dma_free_chan_resources(struct dma_chan *dc)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma_desc *dma_desc;
	struct tegra_dma_sg_req *sg_req;
	struct list_head dma_desc_list;
	struct list_head sg_req_list;

	INIT_LIST_HEAD(&dma_desc_list);
	INIT_LIST_HEAD(&sg_req_list);

	dev_dbg(tdc2dev(tdc), "Freeing channel %d\n", tdc->id);

	tegra_dma_terminate_all(dc);
	tasklet_kill(&tdc->tasklet);

	/* Detach everything from the channel before freeing */
	list_splice_init(&tdc->pending_sg_req, &sg_req_list);
	list_splice_init(&tdc->free_sg_req, &sg_req_list);
	list_splice_init(&tdc->free_dma_desc, &dma_desc_list);
	INIT_LIST_HEAD(&tdc->cb_desc);
	tdc->config_init = false;
	tdc->isr_handler = NULL;

	while (!list_empty(&dma_desc_list)) {
		dma_desc = list_first_entry(&dma_desc_list, typeof(*dma_desc),
					    node);
		list_del(&dma_desc->node);
		kfree(dma_desc);
	}

	while (!list_empty(&sg_req_list)) {
		sg_req = list_first_entry(&sg_req_list, typeof(*sg_req), node);
		list_del(&sg_req->node);
		kfree(sg_req);
	}

	tdc->slave_id = TEGRA_APBDMA_SLAVE_ID_INVALID;
}
  
  static struct dma_chan *tegra_dma_of_xlate(struct of_phandle_args *dma_spec,
  					   struct of_dma *ofdma)
  {
  	struct tegra_dma *tdma = ofdma->of_dma_data;
996556c92   Stephen Warren   dma: tegra: regis...
1342
  	struct tegra_dma_channel *tdc;
3964293ae   Dmitry Osipenko   dmaengine: tegra-...
1343
  	struct dma_chan *chan;
996556c92   Stephen Warren   dma: tegra: regis...
1344

00ef4490e   Shardar Shariff Md   dmaengine: tegra-...
1345
1346
1347
1348
1349
  	if (dma_spec->args[0] > TEGRA_APBDMA_CSR_REQ_SEL_MASK) {
  		dev_err(tdma->dev, "Invalid slave id: %d
  ", dma_spec->args[0]);
  		return NULL;
  	}
996556c92   Stephen Warren   dma: tegra: regis...
1350
1351
1352
1353
1354
1355
1356
1357
  	chan = dma_get_any_slave_channel(&tdma->dma_dev);
  	if (!chan)
  		return NULL;
  
  	tdc = to_tegra_dma_chan(chan);
  	tdc->slave_id = dma_spec->args[0];
  
  	return chan;
ec8a15867   Laxman Dewangan   dma: tegra: add d...
1358
1359
1360
  }
  
/*
 * Per-SoC controller parameters: channel count, per-channel register
 * stride, maximum transfer count, and which optional hardware features
 * (channel pause, separate word-count register) are available.
 */

/* Tegra20 specific DMA controller information */
static const struct tegra_dma_chip_data tegra20_dma_chip_data = {
	.nr_channels		= 16,
	.channel_reg_size	= 0x20,
	.max_dma_count		= 1024UL * 64,
	.support_channel_pause	= false,
	.support_separate_wcount_reg = false,
};

/* Tegra30 specific DMA controller information */
static const struct tegra_dma_chip_data tegra30_dma_chip_data = {
	.nr_channels		= 32,
	.channel_reg_size	= 0x20,
	.max_dma_count		= 1024UL * 64,
	.support_channel_pause	= false,
	.support_separate_wcount_reg = false,
};

/* Tegra114 specific DMA controller information */
static const struct tegra_dma_chip_data tegra114_dma_chip_data = {
	.nr_channels		= 32,
	.channel_reg_size	= 0x20,
	.max_dma_count		= 1024UL * 64,
	.support_channel_pause	= true,
	.support_separate_wcount_reg = false,
};

/* Tegra148 specific DMA controller information */
static const struct tegra_dma_chip_data tegra148_dma_chip_data = {
	.nr_channels		= 32,
	.channel_reg_size	= 0x40,
	.max_dma_count		= 1024UL * 64,
	.support_channel_pause	= true,
	.support_separate_wcount_reg = true,
};
dcb394b6b   Dmitry Osipenko   dmaengine: tegra-...
1393
1394
1395
1396
1397
1398
1399
1400
1401
1402
1403
1404
1405
1406
1407
1408
1409
1410
1411
1412
1413
1414
1415
1416
1417
1418
1419
1420
1421
1422
1423
/*
 * tegra_dma_init_hw - put the controller into a known-good state.
 * @tdma: controller instance.
 *
 * Holds the block in reset over a short delay, releases it, then
 * programs the global enable/control registers and writes all-ones to
 * the IRQ mask-set register.  The clock is only enabled for the
 * duration of the register accesses.
 *
 * Return: 0 on success or a negative errno from the reset/clk API.
 */
static int tegra_dma_init_hw(struct tegra_dma *tdma)
{
	int err;

	err = reset_control_assert(tdma->rst);
	if (err) {
		dev_err(tdma->dev, "failed to assert reset: %d\n", err);
		return err;
	}

	err = clk_enable(tdma->dma_clk);
	if (err) {
		dev_err(tdma->dev, "failed to enable clk: %d\n", err);
		return err;
	}

	/* reset DMA controller */
	udelay(2);
	reset_control_deassert(tdma->rst);

	/* enable global DMA registers */
	tdma_write(tdma, TEGRA_APBDMA_GENERAL, TEGRA_APBDMA_GENERAL_ENABLE);
	tdma_write(tdma, TEGRA_APBDMA_CONTROL, 0);
	tdma_write(tdma, TEGRA_APBDMA_IRQ_MASK_SET, 0xFFFFFFFF);

	clk_disable(tdma->dma_clk);

	return 0;
}
463a1f8b3   Bill Pemberton   dma: remove use o...
1424
  static int tegra_dma_probe(struct platform_device *pdev)
ec8a15867   Laxman Dewangan   dma: tegra: add d...
1425
  {
3964293ae   Dmitry Osipenko   dmaengine: tegra-...
1426
  	const struct tegra_dma_chip_data *cdata;
ec8a15867   Laxman Dewangan   dma: tegra: add d...
1427
  	struct tegra_dma *tdma;
3964293ae   Dmitry Osipenko   dmaengine: tegra-...
1428
1429
  	unsigned int i;
  	size_t size;
ec8a15867   Laxman Dewangan   dma: tegra: add d...
1430
  	int ret;
ec8a15867   Laxman Dewangan   dma: tegra: add d...
1431

333f16ec6   Laxman Dewangan   dmaengine: tegra:...
1432
  	cdata = of_device_get_match_data(&pdev->dev);
3964293ae   Dmitry Osipenko   dmaengine: tegra-...
1433
  	size = struct_size(tdma, channels, cdata->nr_channels);
ec8a15867   Laxman Dewangan   dma: tegra: add d...
1434

3964293ae   Dmitry Osipenko   dmaengine: tegra-...
1435
  	tdma = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
aef94fea9   Peter Griffin   dmaengine: Remove...
1436
  	if (!tdma)
ec8a15867   Laxman Dewangan   dma: tegra: add d...
1437
  		return -ENOMEM;
ec8a15867   Laxman Dewangan   dma: tegra: add d...
1438
1439
1440
1441
  
  	tdma->dev = &pdev->dev;
  	tdma->chip_data = cdata;
  	platform_set_drvdata(pdev, tdma);
c55c745e6   Dmitry Osipenko   dmaengine: tegra-...
1442
  	tdma->base_addr = devm_platform_ioremap_resource(pdev, 0);
7331205a9   Thierry Reding   dma: Convert to d...
1443
1444
  	if (IS_ERR(tdma->base_addr))
  		return PTR_ERR(tdma->base_addr);
ec8a15867   Laxman Dewangan   dma: tegra: add d...
1445
1446
1447
1448
1449
1450
1451
  
  	tdma->dma_clk = devm_clk_get(&pdev->dev, NULL);
  	if (IS_ERR(tdma->dma_clk)) {
  		dev_err(&pdev->dev, "Error: Missing controller clock
  ");
  		return PTR_ERR(tdma->dma_clk);
  	}
9aa433d2a   Stephen Warren   dma: tegra: use r...
1452
1453
1454
1455
1456
1457
  	tdma->rst = devm_reset_control_get(&pdev->dev, "dma");
  	if (IS_ERR(tdma->rst)) {
  		dev_err(&pdev->dev, "Error: Missing reset
  ");
  		return PTR_ERR(tdma->rst);
  	}
ec8a15867   Laxman Dewangan   dma: tegra: add d...
1458
  	spin_lock_init(&tdma->global_lock);
84a3f375e   Dmitry Osipenko   dmaengine: tegra-...
1459
1460
  	ret = clk_prepare(tdma->dma_clk);
  	if (ret)
edd3bdbe9   Jon Hunter   dmaengine: tegra-...
1461
  		return ret;
ec8a15867   Laxman Dewangan   dma: tegra: add d...
1462

dcb394b6b   Dmitry Osipenko   dmaengine: tegra-...
1463
1464
1465
  	ret = tegra_dma_init_hw(tdma);
  	if (ret)
  		goto err_clk_unprepare;
ec8a15867   Laxman Dewangan   dma: tegra: add d...
1466

84a3f375e   Dmitry Osipenko   dmaengine: tegra-...
1467
  	pm_runtime_irq_safe(&pdev->dev);
ec8a15867   Laxman Dewangan   dma: tegra: add d...
1468
  	pm_runtime_enable(&pdev->dev);
ffc493062   Laxman Dewangan   dma: tegra: enabl...
1469

ec8a15867   Laxman Dewangan   dma: tegra: add d...
1470
1471
1472
  	INIT_LIST_HEAD(&tdma->dma_dev.channels);
  	for (i = 0; i < cdata->nr_channels; i++) {
  		struct tegra_dma_channel *tdc = &tdma->channels[i];
2cd3d13cb   Dmitry Osipenko   dmaengine: tegra-...
1473
  		int irq;
ec8a15867   Laxman Dewangan   dma: tegra: add d...
1474

13a332863   Jon Hunter   dmaengine: tegra-...
1475
1476
1477
  		tdc->chan_addr = tdma->base_addr +
  				 TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET +
  				 (i * cdata->channel_reg_size);
ec8a15867   Laxman Dewangan   dma: tegra: add d...
1478

2cd3d13cb   Dmitry Osipenko   dmaengine: tegra-...
1479
1480
1481
  		irq = platform_get_irq(pdev, i);
  		if (irq < 0) {
  			ret = irq;
2cd3d13cb   Dmitry Osipenko   dmaengine: tegra-...
1482
  			goto err_pm_disable;
ec8a15867   Laxman Dewangan   dma: tegra: add d...
1483
  		}
2cd3d13cb   Dmitry Osipenko   dmaengine: tegra-...
1484

d0fc90542   Laxman Dewangan   dma: tegra: fix i...
1485
  		snprintf(tdc->name, sizeof(tdc->name), "apbdma.%d", i);
2cd3d13cb   Dmitry Osipenko   dmaengine: tegra-...
1486
1487
  		ret = devm_request_irq(&pdev->dev, irq, tegra_dma_isr, 0,
  				       tdc->name, tdc);
ec8a15867   Laxman Dewangan   dma: tegra: add d...
1488
1489
1490
1491
  		if (ret) {
  			dev_err(&pdev->dev,
  				"request_irq failed with err %d channel %d
  ",
ac7ae754d   Dmitry Osipenko   dma: tegra20-apbd...
1492
  				ret, i);
2cd3d13cb   Dmitry Osipenko   dmaengine: tegra-...
1493
  			goto err_pm_disable;
ec8a15867   Laxman Dewangan   dma: tegra: add d...
1494
1495
1496
1497
1498
  		}
  
  		tdc->dma_chan.device = &tdma->dma_dev;
  		dma_cookie_init(&tdc->dma_chan);
  		list_add_tail(&tdc->dma_chan.device_node,
3964293ae   Dmitry Osipenko   dmaengine: tegra-...
1499
  			      &tdma->dma_dev.channels);
ec8a15867   Laxman Dewangan   dma: tegra: add d...
1500
1501
  		tdc->tdma = tdma;
  		tdc->id = i;
00ef4490e   Shardar Shariff Md   dmaengine: tegra-...
1502
  		tdc->slave_id = TEGRA_APBDMA_SLAVE_ID_INVALID;
ec8a15867   Laxman Dewangan   dma: tegra: add d...
1503

86fc54fa2   Allen Pais   dmaengine: tegra2...
1504
  		tasklet_setup(&tdc->tasklet, tegra_dma_tasklet);
ec8a15867   Laxman Dewangan   dma: tegra: add d...
1505
  		spin_lock_init(&tdc->lock);
6697255f2   Dmitry Osipenko   dmaengine: tegra-...
1506
  		init_waitqueue_head(&tdc->wq);
ec8a15867   Laxman Dewangan   dma: tegra: add d...
1507
1508
1509
1510
1511
1512
1513
1514
1515
  
  		INIT_LIST_HEAD(&tdc->pending_sg_req);
  		INIT_LIST_HEAD(&tdc->free_sg_req);
  		INIT_LIST_HEAD(&tdc->free_dma_desc);
  		INIT_LIST_HEAD(&tdc->cb_desc);
  	}
  
  	dma_cap_set(DMA_SLAVE, tdma->dma_dev.cap_mask);
  	dma_cap_set(DMA_PRIVATE, tdma->dma_dev.cap_mask);
46fb3f8ef   Laxman Dewangan   dma: tegra: set D...
1516
  	dma_cap_set(DMA_CYCLIC, tdma->dma_dev.cap_mask);
23a1ec304   Jon Hunter   dmaengine: tegra-...
1517
  	tdma->global_pause_count = 0;
ec8a15867   Laxman Dewangan   dma: tegra: add d...
1518
1519
1520
1521
1522
1523
1524
  	tdma->dma_dev.dev = &pdev->dev;
  	tdma->dma_dev.device_alloc_chan_resources =
  					tegra_dma_alloc_chan_resources;
  	tdma->dma_dev.device_free_chan_resources =
  					tegra_dma_free_chan_resources;
  	tdma->dma_dev.device_prep_slave_sg = tegra_dma_prep_slave_sg;
  	tdma->dma_dev.device_prep_dma_cyclic = tegra_dma_prep_dma_cyclic;
891653ab8   Paul Walmsley   dmaengine: tegra:...
1525
1526
1527
1528
1529
1530
1531
1532
1533
  	tdma->dma_dev.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
  		BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
  		BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |
  		BIT(DMA_SLAVE_BUSWIDTH_8_BYTES);
  	tdma->dma_dev.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
  		BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
  		BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |
  		BIT(DMA_SLAVE_BUSWIDTH_8_BYTES);
  	tdma->dma_dev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
156a599b0   Dmitry Osipenko   dmaengine: tegra-...
1534
  	tdma->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
662f1ac31   Maxime Ripard   dmaengine: tegra2...
1535
1536
  	tdma->dma_dev.device_config = tegra_dma_slave_config;
  	tdma->dma_dev.device_terminate_all = tegra_dma_terminate_all;
dda5e35a7   Dmitry Osipenko   dmaengine: tegra-...
1537
  	tdma->dma_dev.device_synchronize = tegra_dma_synchronize;
ec8a15867   Laxman Dewangan   dma: tegra: add d...
1538
1539
1540
1541
1542
1543
1544
1545
  	tdma->dma_dev.device_tx_status = tegra_dma_tx_status;
  	tdma->dma_dev.device_issue_pending = tegra_dma_issue_pending;
  
  	ret = dma_async_device_register(&tdma->dma_dev);
  	if (ret < 0) {
  		dev_err(&pdev->dev,
  			"Tegra20 APB DMA driver registration failed %d
  ", ret);
2cd3d13cb   Dmitry Osipenko   dmaengine: tegra-...
1546
  		goto err_pm_disable;
ec8a15867   Laxman Dewangan   dma: tegra: add d...
1547
  	}
996556c92   Stephen Warren   dma: tegra: regis...
1548
1549
1550
1551
1552
1553
1554
1555
  	ret = of_dma_controller_register(pdev->dev.of_node,
  					 tegra_dma_of_xlate, tdma);
  	if (ret < 0) {
  		dev_err(&pdev->dev,
  			"Tegra20 APB DMA OF registration failed %d
  ", ret);
  		goto err_unregister_dma_dev;
  	}
3964293ae   Dmitry Osipenko   dmaengine: tegra-...
1556
1557
1558
  	dev_info(&pdev->dev, "Tegra20 APB DMA driver registered %u channels
  ",
  		 cdata->nr_channels);
ec8a15867   Laxman Dewangan   dma: tegra: add d...
1559
  	return 0;
996556c92   Stephen Warren   dma: tegra: regis...
1560
1561
  err_unregister_dma_dev:
  	dma_async_device_unregister(&tdma->dma_dev);
ec8a15867   Laxman Dewangan   dma: tegra: add d...
1562

2cd3d13cb   Dmitry Osipenko   dmaengine: tegra-...
1563
  err_pm_disable:
ec8a15867   Laxman Dewangan   dma: tegra: add d...
1564
  	pm_runtime_disable(&pdev->dev);
dcb394b6b   Dmitry Osipenko   dmaengine: tegra-...
1565
1566
  
  err_clk_unprepare:
84a3f375e   Dmitry Osipenko   dmaengine: tegra-...
1567
  	clk_unprepare(tdma->dma_clk);
3964293ae   Dmitry Osipenko   dmaengine: tegra-...
1568

ec8a15867   Laxman Dewangan   dma: tegra: add d...
1569
1570
  	return ret;
  }
4bf27b8b3   Greg Kroah-Hartman   Drivers: dma: rem...
1571
/*
 * tegra_dma_remove - undo probe in reverse order.
 * @pdev: the platform device being removed.
 *
 * Unregisters the OF translation and the dmaengine device, disables
 * runtime PM and unprepares the clock.  devm-managed resources (memory,
 * register mapping, IRQs) are released automatically afterwards.
 */
static int tegra_dma_remove(struct platform_device *pdev)
{
	struct tegra_dma *tdma = platform_get_drvdata(pdev);

	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&tdma->dma_dev);
	pm_runtime_disable(&pdev->dev);
	clk_unprepare(tdma->dma_clk);

	return 0;
}
a48d44c80   YueHaibing   dmaengine: tegra-...
1582
/*
 * tegra_dma_runtime_suspend - gate the module clock while idle.
 * @dev: controller device.
 *
 * The clock stays prepared (done once in probe); runtime PM only
 * toggles the enable count.
 */
static int __maybe_unused tegra_dma_runtime_suspend(struct device *dev)
{
	struct tegra_dma *tdma = dev_get_drvdata(dev);

	clk_disable(tdma->dma_clk);

	return 0;
}
a48d44c80   YueHaibing   dmaengine: tegra-...
1590
/*
 * tegra_dma_runtime_resume - ungate the module clock.
 * @dev: controller device.
 *
 * Return: 0 on success or a negative errno from clk_enable().
 */
static int __maybe_unused tegra_dma_runtime_resume(struct device *dev)
{
	struct tegra_dma *tdma = dev_get_drvdata(dev);

	return clk_enable(tdma->dma_clk);
}
3065c1946   Laxman Dewangan   dma: tegra: imple...
1596

dcb394b6b   Dmitry Osipenko   dmaengine: tegra-...
1597
1598
1599
1600
1601
1602
  static int __maybe_unused tegra_dma_dev_suspend(struct device *dev)
  {
  	struct tegra_dma *tdma = dev_get_drvdata(dev);
  	unsigned long flags;
  	unsigned int i;
  	bool busy;
3065c1946   Laxman Dewangan   dma: tegra: imple...
1603
1604
1605
  
  	for (i = 0; i < tdma->chip_data->nr_channels; i++) {
  		struct tegra_dma_channel *tdc = &tdma->channels[i];
dcb394b6b   Dmitry Osipenko   dmaengine: tegra-...
1606
1607
1608
1609
1610
1611
1612
1613
1614
1615
1616
1617
  
  		tasklet_kill(&tdc->tasklet);
  
  		spin_lock_irqsave(&tdc->lock, flags);
  		busy = tdc->busy;
  		spin_unlock_irqrestore(&tdc->lock, flags);
  
  		if (busy) {
  			dev_err(tdma->dev, "channel %u busy
  ", i);
  			return -EBUSY;
  		}
3065c1946   Laxman Dewangan   dma: tegra: imple...
1618
  	}
dcb394b6b   Dmitry Osipenko   dmaengine: tegra-...
1619
1620
1621
1622
1623
1624
1625
1626
1627
1628
1629
1630
1631
  	return pm_runtime_force_suspend(dev);
  }
  
  static int __maybe_unused tegra_dma_dev_resume(struct device *dev)
  {
  	struct tegra_dma *tdma = dev_get_drvdata(dev);
  	int err;
  
  	err = tegra_dma_init_hw(tdma);
  	if (err)
  		return err;
  
  	return pm_runtime_force_resume(dev);
3065c1946   Laxman Dewangan   dma: tegra: imple...
1632
  }
3065c1946   Laxman Dewangan   dma: tegra: imple...
1633

4bf27b8b3   Greg Kroah-Hartman   Drivers: dma: rem...
1634
/*
 * Runtime PM gates the module clock; system sleep additionally checks
 * for busy channels and re-initializes the hardware on resume.
 */
static const struct dev_pm_ops tegra_dma_dev_pm_ops = {
	SET_RUNTIME_PM_OPS(tegra_dma_runtime_suspend, tegra_dma_runtime_resume,
			   NULL)
	SET_SYSTEM_SLEEP_PM_OPS(tegra_dma_dev_suspend, tegra_dma_dev_resume)
};
242637bac   Laxman Dewangan   dmaengine: tegra:...
1639
1640
1641
1642
1643
1644
1645
1646
1647
1648
1649
1650
1651
1652
1653
1654
1655
  static const struct of_device_id tegra_dma_of_match[] = {
  	{
  		.compatible = "nvidia,tegra148-apbdma",
  		.data = &tegra148_dma_chip_data,
  	}, {
  		.compatible = "nvidia,tegra114-apbdma",
  		.data = &tegra114_dma_chip_data,
  	}, {
  		.compatible = "nvidia,tegra30-apbdma",
  		.data = &tegra30_dma_chip_data,
  	}, {
  		.compatible = "nvidia,tegra20-apbdma",
  		.data = &tegra20_dma_chip_data,
  	}, {
  	},
  };
  MODULE_DEVICE_TABLE(of, tegra_dma_of_match);
ec8a15867   Laxman Dewangan   dma: tegra: add d...
1656
1657
/* Platform driver glue: probed from DT via the match table above. */
static struct platform_driver tegra_dmac_driver = {
	.driver = {
		.name	= "tegra-apbdma",
		.pm	= &tegra_dma_dev_pm_ops,
		.of_match_table = tegra_dma_of_match,
	},
	.probe		= tegra_dma_probe,
	.remove		= tegra_dma_remove,
};

module_platform_driver(tegra_dmac_driver);

MODULE_DESCRIPTION("NVIDIA Tegra APB DMA Controller driver");
MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
MODULE_LICENSE("GPL v2");