drivers/dma/shdma.c

  /*
   * Renesas SuperH DMA Engine support
   *
   * base is drivers/dma/fsldma.c
   *
   * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
   * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
   * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
   *
   * This is free software; you can redistribute it and/or modify
   * it under the terms of the GNU General Public License as published by
   * the Free Software Foundation; either version 2 of the License, or
   * (at your option) any later version.
   *
   * - The SuperH DMAC has no hardware DMA chain mode.
   * - The maximum DMA transfer size is 16MB.
   *
   */
  
  #include <linux/init.h>
  #include <linux/module.h>
  #include <linux/slab.h>
  #include <linux/interrupt.h>
  #include <linux/dmaengine.h>
  #include <linux/delay.h>
  #include <linux/dma-mapping.h>
  #include <linux/platform_device.h>
  #include <linux/pm_runtime.h>
  #include <linux/sh_dma.h>
  #include <linux/notifier.h>
  #include <linux/kdebug.h>
  #include <linux/spinlock.h>
  #include <linux/rculist.h>
  #include "shdma.h"
  
  /* DMA descriptor control */
  enum sh_dmae_desc_status {
  	DESC_IDLE,
  	DESC_PREPARED,
  	DESC_SUBMITTED,
  	DESC_COMPLETED,	/* completed, have to call callback */
  	DESC_WAITING,	/* callback called, waiting for ack / re-submit */
  };
  
  #define NR_DESCS_PER_CHANNEL 32
  /* Default MEMCPY transfer size = 2^2 = 4 bytes */
  #define LOG2_DEFAULT_XFER_SIZE	2

  /*
   * Used for write-side mutual exclusion for the global device list,
   * read-side synchronization by way of RCU, and per-controller data.
   */
  static DEFINE_SPINLOCK(sh_dmae_lock);
  static LIST_HEAD(sh_dmae_devices);
  /* A bitmask with bits enough for enum sh_dmae_slave_chan_id */
  static unsigned long sh_dmae_slave_used[BITS_TO_LONGS(SH_DMA_SLAVE_NUMBER)];

  static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all);
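
  /*
   * Register access helpers: sh_dc->base and shdev->chan_reg are u32 __iomem
   * pointers, so the byte offsets of the register definitions (SAR, DAR, TCR,
   * DMAOR, ...) are converted to word indices by dividing by sizeof(u32).
   */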
  static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg)
  {
  	__raw_writel(data, sh_dc->base + reg / sizeof(u32));
  }
  
  static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg)
  {
  	return __raw_readl(sh_dc->base + reg / sizeof(u32));
  }
  
  static u16 dmaor_read(struct sh_dmae_device *shdev)
  {
  	u32 __iomem *addr = shdev->chan_reg + DMAOR / sizeof(u32);
  
  	if (shdev->pdata->dmaor_is_32bit)
  		return __raw_readl(addr);
  	else
  		return __raw_readw(addr);
  }
  
  static void dmaor_write(struct sh_dmae_device *shdev, u16 data)
  {
  	u32 __iomem *addr = shdev->chan_reg + DMAOR / sizeof(u32);
  
  	if (shdev->pdata->dmaor_is_32bit)
  		__raw_writel(data, addr);
  	else
  		__raw_writew(data, addr);
  }
  static void chcr_write(struct sh_dmae_chan *sh_dc, u32 data)
  {
  	struct sh_dmae_device *shdev = to_sh_dev(sh_dc);
  
  	__raw_writel(data, sh_dc->base + shdev->chcr_offset / sizeof(u32));
  }
  
  static u32 chcr_read(struct sh_dmae_chan *sh_dc)
  {
  	struct sh_dmae_device *shdev = to_sh_dev(sh_dc);
  
  	return __raw_readl(sh_dc->base + shdev->chcr_offset / sizeof(u32));
  }
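
  /*
   * Unlike SAR/DAR/TCR, CHCR is not at the same offset on every DMAC variant,
   * so chcr_read()/chcr_write() go through the per-device chcr_offset (set up
   * from platform data when the controller is probed, falling back to the
   * default CHCR offset).
   */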
  /*
   * Reset DMA controller
   *
   * SH7780 has two DMAOR registers
   */
  static void sh_dmae_ctl_stop(struct sh_dmae_device *shdev)
  {
  	unsigned short dmaor;
  	unsigned long flags;
  
  	spin_lock_irqsave(&sh_dmae_lock, flags);

  	dmaor = dmaor_read(shdev);
  	dmaor_write(shdev, dmaor & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME));
  
  	spin_unlock_irqrestore(&sh_dmae_lock, flags);
  }
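
  /*
   * sh_dmae_rst() re-enables the controller: the error (AE), NMI (NMIF) and
   * master-enable (DME) bits are cleared first, then the platform-provided
   * dmaor_init value is written and DMAOR is read back to check that neither
   * error flag is still set (if one is, the controller could not be brought
   * up and -EIO is returned).
   */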
  static int sh_dmae_rst(struct sh_dmae_device *shdev)
  {
  	unsigned short dmaor;
  	unsigned long flags;

  	spin_lock_irqsave(&sh_dmae_lock, flags);

  	dmaor = dmaor_read(shdev) & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME);
  
  	dmaor_write(shdev, dmaor | shdev->pdata->dmaor_init);
  
  	dmaor = dmaor_read(shdev);
  
  	spin_unlock_irqrestore(&sh_dmae_lock, flags);
  
  	if (dmaor & (DMAOR_AE | DMAOR_NMIF)) {
  		dev_warn(shdev->common.dev, "Can't initialize DMAOR.\n");
  		return -EIO;
  	}
  	return 0;
  }
  static bool dmae_is_busy(struct sh_dmae_chan *sh_chan)
  {
  	u32 chcr = chcr_read(sh_chan);
  
  	if ((chcr & (CHCR_DE | CHCR_TE)) == CHCR_DE)
  		return true; /* working */
  
  	return false; /* waiting */
  }
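
  /*
   * The CHCR transfer-size (TS) field can be split into a low and a high part;
   * which bits it occupies and which sizes it encodes differ between DMAC
   * variants, so the masks, shifts and the ts_shift[] table all come from
   * platform data.  calc_xmit_shift() maps a CHCR value to log2(transfer size
   * in bytes), log2size_to_chcr() is the inverse; e.g. ts_shift[i] == 2 means
   * that encoding i transfers in 4-byte units.
   */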
  static unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan, u32 chcr)
  {
  	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
  	struct sh_dmae_pdata *pdata = shdev->pdata;
  	int cnt = ((chcr & pdata->ts_low_mask) >> pdata->ts_low_shift) |
  		((chcr & pdata->ts_high_mask) >> pdata->ts_high_shift);
  
  	if (cnt >= pdata->ts_shift_num)
  		cnt = 0;

  	return pdata->ts_shift[cnt];
  }
  
  static u32 log2size_to_chcr(struct sh_dmae_chan *sh_chan, int l2size)
  {
  	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
  	struct sh_dmae_pdata *pdata = shdev->pdata;
  	int i;
  
  	for (i = 0; i < pdata->ts_shift_num; i++)
  		if (pdata->ts_shift[i] == l2size)
  			break;
  
  	if (i == pdata->ts_shift_num)
  		i = 0;
  
  	return ((i << pdata->ts_low_shift) & pdata->ts_low_mask) |
  		((i << pdata->ts_high_shift) & pdata->ts_high_mask);
  }
  static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw)
  {
  	sh_dmae_writel(sh_chan, hw->sar, SAR);
  	sh_dmae_writel(sh_chan, hw->dar, DAR);
  	sh_dmae_writel(sh_chan, hw->tcr >> sh_chan->xmit_shift, TCR);
  }
  
  static void dmae_start(struct sh_dmae_chan *sh_chan)
  {
  	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
  	u32 chcr = chcr_read(sh_chan);

  	if (shdev->pdata->needs_tend_set)
  		sh_dmae_writel(sh_chan, 0xFFFFFFFF, TEND);
  	chcr |= CHCR_DE | shdev->chcr_ie_bit;
  	chcr_write(sh_chan, chcr & ~CHCR_TE);
  }
  
  static void dmae_halt(struct sh_dmae_chan *sh_chan)
  {
  	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
  	u32 chcr = chcr_read(sh_chan);

  	chcr &= ~(CHCR_DE | CHCR_TE | shdev->chcr_ie_bit);
  	chcr_write(sh_chan, chcr);
  }
  static void dmae_init(struct sh_dmae_chan *sh_chan)
  {
  	/*
  	 * Default configuration for dual address memory-memory transfer.
  	 * 0x400 represents auto-request.
  	 */
  	u32 chcr = DM_INC | SM_INC | 0x400 | log2size_to_chcr(sh_chan,
  						   LOG2_DEFAULT_XFER_SIZE);
  	sh_chan->xmit_shift = calc_xmit_shift(sh_chan, chcr);
  	chcr_write(sh_chan, chcr);
  }
  static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val)
  {
  	/* If DMA is active, cannot set CHCR. TODO: remove this superfluous check */
  	if (dmae_is_busy(sh_chan))
  		return -EBUSY;

  	sh_chan->xmit_shift = calc_xmit_shift(sh_chan, val);
  	chcr_write(sh_chan, val);

  	return 0;
  }
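
  /*
   * DMARS holds the MID/RID request-source code for a slave channel.  Two
   * channels share one 16-bit DMARS word, so the new value is placed at
   * dmars_bit (typically 0 or 8) with a read-modify-write that preserves the
   * other channel's half.
   */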
  static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
  {
  	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
  	struct sh_dmae_pdata *pdata = shdev->pdata;
  	const struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->id];
  	u16 __iomem *addr = shdev->dmars;
  	unsigned int shift = chan_pdata->dmars_bit;
  
  	if (dmae_is_busy(sh_chan))
  		return -EBUSY;

  	if (pdata->no_dmars)
  		return 0;
  	/* in the case of a missing DMARS resource use first memory window */
  	if (!addr)
  		addr = (u16 __iomem *)shdev->chan_reg;
  	addr += chan_pdata->dmars / sizeof(u16);
  	__raw_writew((__raw_readw(addr) & (0xff00 >> shift)) | (val << shift),
  		     addr);
  
  	return 0;
  }
  static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan);
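
  /*
   * tx_submit: mark every chunk of the descriptor chain DESC_SUBMITTED, assign
   * the cookie and move the chunks onto ld_queue.  If the queue was empty the
   * channel is also brought out of runtime suspend and (re)configured here;
   * the transfer itself is only started from issue_pending, or directly below
   * if issue_pending already ran while the channel was powering up
   * (pm_state == DMAE_PM_PENDING).
   */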
  static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx)
  {
  	struct sh_desc *desc = tx_to_sh_desc(tx), *chunk, *last = desc, *c;
  	struct sh_dmae_chan *sh_chan = to_sh_chan(tx->chan);
  	struct sh_dmae_slave *param = tx->chan->private;
  	dma_async_tx_callback callback = tx->callback;
  	dma_cookie_t cookie;
  	bool power_up;

  	spin_lock_irq(&sh_chan->desc_lock);
  
  	if (list_empty(&sh_chan->ld_queue))
  		power_up = true;
  	else
  		power_up = false;
  
  	cookie = sh_chan->common.cookie;
  	cookie++;
  	if (cookie < 0)
  		cookie = 1;
  	sh_chan->common.cookie = cookie;
  	tx->cookie = cookie;
  
  	/* Mark all chunks of this descriptor as submitted, move to the queue */
  	list_for_each_entry_safe(chunk, c, desc->node.prev, node) {
  		/*
  		 * All chunks are on the global ld_free, so, we have to find
  		 * the end of the chain ourselves
  		 */
  		if (chunk != desc && (chunk->mark == DESC_IDLE ||
  				      chunk->async_tx.cookie > 0 ||
  				      chunk->async_tx.cookie == -EBUSY ||
  				      &chunk->node == &sh_chan->ld_free))
  			break;
  		chunk->mark = DESC_SUBMITTED;
  		/* Callback goes to the last chunk */
  		chunk->async_tx.callback = NULL;
  		chunk->cookie = cookie;
  		list_move_tail(&chunk->node, &sh_chan->ld_queue);
  		last = chunk;
  	}

  	last->async_tx.callback = callback;
  	last->async_tx.callback_param = tx->callback_param;
  
  	dev_dbg(sh_chan->dev, "submit #%d@%p on %d: %x[%d] -> %x\n",
  		tx->cookie, &last->async_tx, sh_chan->id,
  		desc->hw.sar, desc->hw.tcr, desc->hw.dar);

  	if (power_up) {
  		sh_chan->pm_state = DMAE_PM_BUSY;
  
  		pm_runtime_get(sh_chan->dev);
  
  		spin_unlock_irq(&sh_chan->desc_lock);
  
  		pm_runtime_barrier(sh_chan->dev);
  
  		spin_lock_irq(&sh_chan->desc_lock);
  
  		/* Have we been reset, while waiting? */
  		if (sh_chan->pm_state != DMAE_PM_ESTABLISHED) {
  			dev_dbg(sh_chan->dev, "Bring up channel %d\n",
  				sh_chan->id);
  			if (param) {
  				const struct sh_dmae_slave_config *cfg =
  					param->config;
  
  				dmae_set_dmars(sh_chan, cfg->mid_rid);
  				dmae_set_chcr(sh_chan, cfg->chcr);
  			} else {
  				dmae_init(sh_chan);
  			}
  
  			if (sh_chan->pm_state == DMAE_PM_PENDING)
  				sh_chan_xfer_ld_queue(sh_chan);
  			sh_chan->pm_state = DMAE_PM_ESTABLISHED;
  		}
  	}
  
  	spin_unlock_irq(&sh_chan->desc_lock);
  
  	return cookie;
  }
  /* Called with desc_lock held */
  static struct sh_desc *sh_dmae_get_desc(struct sh_dmae_chan *sh_chan)
  {
  	struct sh_desc *desc;

  	list_for_each_entry(desc, &sh_chan->ld_free, node)
  		if (desc->mark != DESC_PREPARED) {
  			BUG_ON(desc->mark != DESC_IDLE);
  			list_del(&desc->node);
  			return desc;
  		}

  	return NULL;
  }
  static const struct sh_dmae_slave_config *sh_dmae_find_slave(
  	struct sh_dmae_chan *sh_chan, struct sh_dmae_slave *param)
  {
  	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
  	struct sh_dmae_pdata *pdata = shdev->pdata;
  	int i;
  	if (param->slave_id >= SH_DMA_SLAVE_NUMBER)
  		return NULL;
  	for (i = 0; i < pdata->slave_num; i++)
  		if (pdata->slave[i].slave_id == param->slave_id)
  			return pdata->slave + i;
  
  	return NULL;
  }
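
  /*
   * How a client typically requests a slave channel (a sketch only; the filter
   * callback lives in the client driver, "slave" is a struct sh_dmae_slave
   * pointer, and SHDMA_SLAVE_SCIF0_TX stands in for whatever slave ID the
   * platform actually defines):
   *
   *	static bool filter(struct dma_chan *chan, void *slave)
   *	{
   *		chan->private = slave;
   *		return true;
   *	}
   *
   *	struct sh_dmae_slave param = { .slave_id = SHDMA_SLAVE_SCIF0_TX };
   *	dma_cap_mask_t mask;
   *
   *	dma_cap_zero(mask);
   *	dma_cap_set(DMA_SLAVE, mask);
   *	chan = dma_request_channel(mask, filter, &param);
   *
   * chan->private is then picked up by sh_dmae_alloc_chan_resources() below to
   * look up the slave configuration and to reserve the slave ID.
   */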
  static int sh_dmae_alloc_chan_resources(struct dma_chan *chan)
  {
  	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
  	struct sh_desc *desc;
  	struct sh_dmae_slave *param = chan->private;
  	int ret;
  
  	/*
  	 * This relies on the guarantee from dmaengine that alloc_chan_resources
  	 * never runs concurrently with itself or free_chan_resources.
  	 */
  	if (param) {
  		const struct sh_dmae_slave_config *cfg;

  		cfg = sh_dmae_find_slave(sh_chan, param);
  		if (!cfg) {
  			ret = -EINVAL;
  			goto efindslave;
  		}

  		if (test_and_set_bit(param->slave_id, sh_dmae_slave_used)) {
  			ret = -EBUSY;
  			goto etestused;
  		}
  
  		param->config = cfg;
  	}

  	while (sh_chan->descs_allocated < NR_DESCS_PER_CHANNEL) {
  		desc = kzalloc(sizeof(struct sh_desc), GFP_KERNEL);
  		if (!desc)
  			break;
  		dma_async_tx_descriptor_init(&desc->async_tx,
  					&sh_chan->common);
  		desc->async_tx.tx_submit = sh_dmae_tx_submit;
  		desc->mark = DESC_IDLE;

  		list_add(&desc->node, &sh_chan->ld_free);
  		sh_chan->descs_allocated++;
  	}

  	if (!sh_chan->descs_allocated) {
  		ret = -ENOMEM;
  		goto edescalloc;
  	}

  	return sh_chan->descs_allocated;
  
  edescalloc:
  	if (param)
  		clear_bit(param->slave_id, sh_dmae_slave_used);
  etestused:
  efindslave:
  	chan->private = NULL;
  	return ret;
  }
  
  /*
   * sh_dma_free_chan_resources - Free all resources of the channel.
   */
  static void sh_dmae_free_chan_resources(struct dma_chan *chan)
  {
  	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
  	struct sh_desc *desc, *_desc;
  	LIST_HEAD(list);
  	/* Protect against ISR */
  	spin_lock_irq(&sh_chan->desc_lock);
  	dmae_halt(sh_chan);
  	spin_unlock_irq(&sh_chan->desc_lock);
  
  	/* Now no new interrupts will occur */

  	/* Prepared and not submitted descriptors can still be on the queue */
  	if (!list_empty(&sh_chan->ld_queue))
  		sh_dmae_chan_ld_cleanup(sh_chan, true);
  	if (chan->private) {
  		/* The caller is holding dma_list_mutex */
  		struct sh_dmae_slave *param = chan->private;
  		clear_bit(param->slave_id, sh_dmae_slave_used);
  		chan->private = NULL;
  	}
  	spin_lock_irq(&sh_chan->desc_lock);
  
  	list_splice_init(&sh_chan->ld_free, &list);
  	sh_chan->descs_allocated = 0;
  	spin_unlock_irq(&sh_chan->desc_lock);
  
  	list_for_each_entry_safe(desc, _desc, &list, node)
  		kfree(desc);
  }
  /**
   * sh_dmae_add_desc - get, set up and return one transfer descriptor
   * @sh_chan:	DMA channel
   * @flags:	DMA transfer flags
   * @dest:	destination DMA address, incremented when direction equals
   *		DMA_FROM_DEVICE or DMA_BIDIRECTIONAL
   * @src:	source DMA address, incremented when direction equals
   *		DMA_TO_DEVICE or DMA_BIDIRECTIONAL
   * @len:	DMA transfer length
   * @first:	if NULL, set to the current descriptor and cookie set to -EBUSY
   * @direction:	needed for slave DMA to decide which address to keep constant,
   *		equals DMA_BIDIRECTIONAL for MEMCPY
   * Returns 0 or an error
   * Locks: called with desc_lock held
   */
  static struct sh_desc *sh_dmae_add_desc(struct sh_dmae_chan *sh_chan,
  	unsigned long flags, dma_addr_t *dest, dma_addr_t *src, size_t *len,
  	struct sh_desc **first, enum dma_data_direction direction)
  {
  	struct sh_desc *new;
  	size_t copy_size;
  	if (!*len)
  		return NULL;
  	/* Allocate the link descriptor from the free list */
  	new = sh_dmae_get_desc(sh_chan);
  	if (!new) {
  		dev_err(sh_chan->dev, "No free link descriptor available\n");
  		return NULL;
  	}

  	copy_size = min(*len, (size_t)SH_DMA_TCR_MAX + 1);
  
  	new->hw.sar = *src;
  	new->hw.dar = *dest;
  	new->hw.tcr = copy_size;
  
  	if (!*first) {
  		/* First desc */
  		new->async_tx.cookie = -EBUSY;
  		*first = new;
  	} else {
  		/* Other desc - invisible to the user */
  		new->async_tx.cookie = -EINVAL;
  	}
  	dev_dbg(sh_chan->dev,
  		"chaining (%u/%u)@%x -> %x with %p, cookie %d, shift %d\n",
  		copy_size, *len, *src, *dest, &new->async_tx,
  		new->async_tx.cookie, sh_chan->xmit_shift);
  
  	new->mark = DESC_PREPARED;
  	new->async_tx.flags = flags;
  	new->direction = direction;
  
  	*len -= copy_size;
  	if (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE)
  		*src += copy_size;
  	if (direction == DMA_BIDIRECTIONAL || direction == DMA_FROM_DEVICE)
  		*dest += copy_size;
  
  	return new;
  }
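
  /*
   * A single hardware descriptor can move at most SH_DMA_TCR_MAX + 1 bytes
   * (the 16MB limit mentioned at the top of the file), so sh_dmae_prep_sg()
   * below counts up front how many chunks each scatterlist element will need
   * and splits it accordingly via repeated sh_dmae_add_desc() calls.
   */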
  
  /*
   * sh_dmae_prep_sg - prepare transfer descriptors from an SG list
   *
   * Common routine for public (MEMCPY) and slave DMA. The MEMCPY case is also
   * converted to scatter-gather to guarantee consistent locking and a correct
   * list manipulation. For slave DMA direction carries the usual meaning, and,
   * logically, the SG list is RAM and the addr variable contains slave address,
   * e.g., the FIFO I/O register. For MEMCPY direction equals DMA_BIDIRECTIONAL
   * and the SG list contains only one element and points at the source buffer.
   */
  static struct dma_async_tx_descriptor *sh_dmae_prep_sg(struct sh_dmae_chan *sh_chan,
  	struct scatterlist *sgl, unsigned int sg_len, dma_addr_t *addr,
  	enum dma_data_direction direction, unsigned long flags)
  {
  	struct scatterlist *sg;
  	struct sh_desc *first = NULL, *new = NULL /* compiler... */;
  	LIST_HEAD(tx_list);
  	int chunks = 0;
  	unsigned long irq_flags;
  	int i;
  
  	if (!sg_len)
  		return NULL;
  
  	for_each_sg(sgl, sg, sg_len, i)
  		chunks += (sg_dma_len(sg) + SH_DMA_TCR_MAX) /
  			(SH_DMA_TCR_MAX + 1);

  	/* Have to lock the whole loop to protect against concurrent release */
  	spin_lock_irqsave(&sh_chan->desc_lock, irq_flags);
  
  	/*
  	 * Chaining:
  	 * first descriptor is what user is dealing with in all API calls, its
  	 *	cookie is at first set to -EBUSY, at tx-submit to a positive
  	 *	number
  	 * if more than one chunk is needed further chunks have cookie = -EINVAL
  	 * the last chunk, if not equal to the first, has cookie = -ENOSPC
  	 * all chunks are linked onto the tx_list head with their .node heads
  	 *	only during this function, then they are immediately spliced
  	 *	back onto the free list in form of a chain
  	 */
  	for_each_sg(sgl, sg, sg_len, i) {
  		dma_addr_t sg_addr = sg_dma_address(sg);
  		size_t len = sg_dma_len(sg);
  
  		if (!len)
  			goto err_get_desc;
  
  		do {
  			dev_dbg(sh_chan->dev, "Add SG #%d@%p[%d], dma %llx\n",
  				i, sg, len, (unsigned long long)sg_addr);
  
  			if (direction == DMA_FROM_DEVICE)
  				new = sh_dmae_add_desc(sh_chan, flags,
  						&sg_addr, addr, &len, &first,
  						direction);
  			else
  				new = sh_dmae_add_desc(sh_chan, flags,
  						addr, &sg_addr, &len, &first,
  						direction);
  			if (!new)
  				goto err_get_desc;
  
  			new->chunks = chunks--;
  			list_add_tail(&new->node, &tx_list);
  		} while (len);
  	}

  	if (new != first)
  		new->async_tx.cookie = -ENOSPC;

  	/* Put them back on the free list, so, they don't get lost */
  	list_splice_tail(&tx_list, &sh_chan->ld_free);

  	spin_unlock_irqrestore(&sh_chan->desc_lock, irq_flags);

  	return &first->async_tx;
  
  err_get_desc:
  	list_for_each_entry(new, &tx_list, node)
  		new->mark = DESC_IDLE;
  	list_splice(&tx_list, &sh_chan->ld_free);
  	spin_unlock_irqrestore(&sh_chan->desc_lock, irq_flags);
  
  	return NULL;
  }
  
  static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy(
  	struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src,
  	size_t len, unsigned long flags)
  {
  	struct sh_dmae_chan *sh_chan;
  	struct scatterlist sg;
  
  	if (!chan || !len)
  		return NULL;
  
  	sh_chan = to_sh_chan(chan);
  
  	sg_init_table(&sg, 1);
  	sg_set_page(&sg, pfn_to_page(PFN_DOWN(dma_src)), len,
  		    offset_in_page(dma_src));
  	sg_dma_address(&sg) = dma_src;
  	sg_dma_len(&sg) = len;
  
  	return sh_dmae_prep_sg(sh_chan, &sg, 1, &dma_dest, DMA_BIDIRECTIONAL,
  			       flags);
  }
  static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg(
  	struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
  	enum dma_data_direction direction, unsigned long flags)
  {
  	struct sh_dmae_slave *param;
  	struct sh_dmae_chan *sh_chan;
  	dma_addr_t slave_addr;
  
  	if (!chan)
  		return NULL;
  
  	sh_chan = to_sh_chan(chan);
  	param = chan->private;
  
  	/* Someone calling slave DMA on a public channel? */
  	if (!param || !sg_len) {
  		dev_warn(sh_chan->dev, "%s: bad parameter: %p, %d, %d\n",
  			 __func__, param, sg_len, param ? param->slave_id : -1);
  		return NULL;
  	}
  	slave_addr = param->config->addr;
  	/*
  	 * if (param != NULL), this is a successfully requested slave channel,
  	 * therefore param->config != NULL too.
  	 */
  	return sh_dmae_prep_sg(sh_chan, sgl, sg_len, &slave_addr,
  			       direction, flags);
  }
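
  /*
   * Only DMA_TERMINATE_ALL is implemented here: the channel is halted, the
   * amount already transferred for the descriptor at the head of ld_queue is
   * recorded in desc->partial, and every queued descriptor is then released
   * through sh_dmae_chan_ld_cleanup(sh_chan, true).
   */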
  static int sh_dmae_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
  			   unsigned long arg)
  {
  	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
  	unsigned long flags;

  	/* Only supports DMA_TERMINATE_ALL */
  	if (cmd != DMA_TERMINATE_ALL)
  		return -ENXIO;
  	if (!chan)
  		return -EINVAL;

  	spin_lock_irqsave(&sh_chan->desc_lock, flags);
  	dmae_halt(sh_chan);
  	if (!list_empty(&sh_chan->ld_queue)) {
  		/* Record partial transfer */
  		struct sh_desc *desc = list_entry(sh_chan->ld_queue.next,
  						  struct sh_desc, node);
  		desc->partial = (desc->hw.tcr - sh_dmae_readl(sh_chan, TCR)) <<
  			sh_chan->xmit_shift;
  	}
  	spin_unlock_irqrestore(&sh_chan->desc_lock, flags);

  	sh_dmae_chan_ld_cleanup(sh_chan, true);
  
  	return 0;
  }
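
  /*
   * Descriptor life cycle: DESC_PREPARED (after a prep_* call) ->
   * DESC_SUBMITTED (tx_submit) -> DESC_COMPLETED (completion tasklet) ->
   * DESC_WAITING (callback delivered, waiting for the ack) -> DESC_IDLE
   * (back on ld_free).  __ld_cleanup() walks ld_queue once, delivers at most
   * one callback per call and returns it, so sh_dmae_chan_ld_cleanup() simply
   * loops until there is no callback left to run.
   */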
  static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
  {
  	struct sh_desc *desc, *_desc;
  	/* Is the "exposed" head of a chain acked? */
  	bool head_acked = false;
  	dma_cookie_t cookie = 0;
  	dma_async_tx_callback callback = NULL;
  	void *param = NULL;
  	unsigned long flags;

  	spin_lock_irqsave(&sh_chan->desc_lock, flags);
  	list_for_each_entry_safe(desc, _desc, &sh_chan->ld_queue, node) {
  		struct dma_async_tx_descriptor *tx = &desc->async_tx;
  
  		BUG_ON(tx->cookie > 0 && tx->cookie != desc->cookie);
  		BUG_ON(desc->mark != DESC_SUBMITTED &&
  		       desc->mark != DESC_COMPLETED &&
  		       desc->mark != DESC_WAITING);
  
  		/*
  		 * queue is ordered, and we use this loop to (1) clean up all
  		 * completed descriptors, and to (2) update descriptor flags of
  		 * any chunks in a (partially) completed chain
  		 */
  		if (!all && desc->mark == DESC_SUBMITTED &&
  		    desc->cookie != cookie)
  			break;
  		if (tx->cookie > 0)
  			cookie = tx->cookie;

  		if (desc->mark == DESC_COMPLETED && desc->chunks == 1) {
  			if (sh_chan->completed_cookie != desc->cookie - 1)
  				dev_dbg(sh_chan->dev,
  					"Completing cookie %d, expected %d\n",
  					desc->cookie,
  					sh_chan->completed_cookie + 1);
  			sh_chan->completed_cookie = desc->cookie;
  		}

  		/* Call callback on the last chunk */
  		if (desc->mark == DESC_COMPLETED && tx->callback) {
  			desc->mark = DESC_WAITING;
  			callback = tx->callback;
  			param = tx->callback_param;
  			dev_dbg(sh_chan->dev, "descriptor #%d@%p on %d callback\n",
  				tx->cookie, tx, sh_chan->id);
  			BUG_ON(desc->chunks != 1);
  			break;
  		}

  		if (tx->cookie > 0 || tx->cookie == -EBUSY) {
  			if (desc->mark == DESC_COMPLETED) {
  				BUG_ON(tx->cookie < 0);
  				desc->mark = DESC_WAITING;
  			}
  			head_acked = async_tx_test_ack(tx);
  		} else {
  			switch (desc->mark) {
  			case DESC_COMPLETED:
  				desc->mark = DESC_WAITING;
  				/* Fall through */
  			case DESC_WAITING:
  				if (head_acked)
  					async_tx_ack(&desc->async_tx);
  			}
  		}
  
  		dev_dbg(sh_chan->dev, "descriptor %p #%d completed.\n",
  			tx, tx->cookie);
  
  		if (((desc->mark == DESC_COMPLETED ||
  		      desc->mark == DESC_WAITING) &&
  		     async_tx_test_ack(&desc->async_tx)) || all) {
  			/* Remove from ld_queue list */
  			desc->mark = DESC_IDLE;

  			list_move(&desc->node, &sh_chan->ld_free);
  
  			if (list_empty(&sh_chan->ld_queue)) {
  				dev_dbg(sh_chan->dev, "Bring down channel %d\n", sh_chan->id);
  				pm_runtime_put(sh_chan->dev);
  			}
  		}
  	}
  
  	if (all && !callback)
  		/*
  		 * Terminating and the loop completed normally: forgive
  		 * uncompleted cookies
  		 */
  		sh_chan->completed_cookie = sh_chan->common.cookie;
  	spin_unlock_irqrestore(&sh_chan->desc_lock, flags);
  
  	if (callback)
  		callback(param);
  
  	return callback;
  }
  
  /*
   * sh_chan_ld_cleanup - Clean up link descriptors
   *
   * This function cleans up the ld_queue of DMA channel.
   */
  static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
  {
  	while (__ld_cleanup(sh_chan, all))
  		;
  }
  /* Called under spin_lock_irq(&sh_chan->desc_lock) */
  static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan)
  {
  	struct sh_desc *desc;
  
  	/* DMA work check */
  	if (dmae_is_busy(sh_chan))
  		return;

  	/* Find the first descriptor not yet transferred */
  	list_for_each_entry(desc, &sh_chan->ld_queue, node)
  		if (desc->mark == DESC_SUBMITTED) {
  			dev_dbg(sh_chan->dev, "Queue #%d to %d: %u@%x -> %x\n",
  				desc->async_tx.cookie, sh_chan->id,
  				desc->hw.tcr, desc->hw.sar, desc->hw.dar);
  			/* Get the ld start address from ld_queue */
  			dmae_set_reg(sh_chan, &desc->hw);
  			dmae_start(sh_chan);
  			break;
  		}
  }
  
  static void sh_dmae_memcpy_issue_pending(struct dma_chan *chan)
  {
  	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
  
  	spin_lock_irq(&sh_chan->desc_lock);
  	if (sh_chan->pm_state == DMAE_PM_ESTABLISHED)
  		sh_chan_xfer_ld_queue(sh_chan);
  	else
  		sh_chan->pm_state = DMAE_PM_PENDING;
  	spin_unlock_irq(&sh_chan->desc_lock);
  }
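
  /*
   * tx_status first runs a cleanup pass so that completed_cookie is up to
   * date.  A cookie that is not yet complete should still be on ld_queue; one
   * that is neither completed nor queued was dropped by a terminate or a
   * controller reset and is therefore reported as DMA_ERROR.
   */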
  static enum dma_status sh_dmae_tx_status(struct dma_chan *chan,
  					dma_cookie_t cookie,
  					struct dma_tx_state *txstate)
  {
  	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
  	dma_cookie_t last_used;
  	dma_cookie_t last_complete;
  	enum dma_status status;
  	unsigned long flags;

  	sh_dmae_chan_ld_cleanup(sh_chan, false);

  	/* First read completed cookie to avoid a skew */
  	last_complete = sh_chan->completed_cookie;
  	rmb();
  	last_used = chan->cookie;
  	BUG_ON(last_complete < 0);
  	dma_set_tx_state(txstate, last_complete, last_used, 0);

  	spin_lock_irqsave(&sh_chan->desc_lock, flags);
  
  	status = dma_async_is_complete(cookie, last_complete, last_used);
  
  	/*
  	 * If we don't find cookie on the queue, it has been aborted and we have
  	 * to report error
  	 */
  	if (status != DMA_SUCCESS) {
  		struct sh_desc *desc;
  		status = DMA_ERROR;
  		list_for_each_entry(desc, &sh_chan->ld_queue, node)
  			if (desc->cookie == cookie) {
  				status = DMA_IN_PROGRESS;
  				break;
  			}
  	}
  	spin_unlock_irqrestore(&sh_chan->desc_lock, flags);
  
  	return status;
  }
  
  static irqreturn_t sh_dmae_interrupt(int irq, void *data)
  {
  	irqreturn_t ret = IRQ_NONE;
  	struct sh_dmae_chan *sh_chan = data;
  	u32 chcr;
  
  	spin_lock(&sh_chan->desc_lock);
  	chcr = chcr_read(sh_chan);
  
  	if (chcr & CHCR_TE) {
  		/* DMA stop */
  		dmae_halt(sh_chan);
  
  		ret = IRQ_HANDLED;
  		tasklet_schedule(&sh_chan->tasklet);
  	}
  	spin_unlock(&sh_chan->desc_lock);
  	return ret;
  }
  /* Called from error IRQ or NMI */
  static bool sh_dmae_reset(struct sh_dmae_device *shdev)
  {
  	unsigned int handled = 0;
  	int i;

  	/* halt the dma controller */
  	sh_dmae_ctl_stop(shdev);
  
  	/* We cannot detect which channel caused the error, so we have to reset them all */
  	for (i = 0; i < SH_DMAC_MAX_CHANNELS; i++) {
  		struct sh_dmae_chan *sh_chan = shdev->chan[i];
  		struct sh_desc *desc;
  		LIST_HEAD(dl);
  
  		if (!sh_chan)
  			continue;
  		spin_lock(&sh_chan->desc_lock);
  		/* Stop the channel */
  		dmae_halt(sh_chan);
  		list_splice_init(&sh_chan->ld_queue, &dl);
  		if (!list_empty(&dl)) {
  			dev_dbg(sh_chan->dev, "Bring down channel %d\n", sh_chan->id);
  			pm_runtime_put(sh_chan->dev);
  		}
  		sh_chan->pm_state = DMAE_PM_ESTABLISHED;
  		spin_unlock(&sh_chan->desc_lock);
  		/* Complete all descriptors */
  		list_for_each_entry(desc, &dl, node) {
  			struct dma_async_tx_descriptor *tx = &desc->async_tx;
  			desc->mark = DESC_IDLE;
  			if (tx->callback)
  				tx->callback(tx->callback_param);
  		}

  		spin_lock(&sh_chan->desc_lock);
  		list_splice(&dl, &sh_chan->ld_free);
  		spin_unlock(&sh_chan->desc_lock);
  		handled++;
  	}

  	sh_dmae_rst(shdev);

  	return !!handled;
  }
  
  static irqreturn_t sh_dmae_err(int irq, void *data)
  {
  	struct sh_dmae_device *shdev = data;
  	if (!(dmaor_read(shdev) & DMAOR_AE))
  		return IRQ_NONE;
  
  	sh_dmae_reset(data);
  	return IRQ_HANDLED;
  }
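
  /*
   * Completion tasklet: the channel IRQ handler above only stops the channel
   * and schedules this tasklet.  The finished descriptor is identified by
   * comparing the current SAR/DAR with the expected end address (hw.sar +
   * hw.tcr, or hw.dar + hw.tcr for DMA_FROM_DEVICE), then the next submitted
   * descriptor is started and completed ones are cleaned up outside the lock.
   */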
  
  static void dmae_do_tasklet(unsigned long data)
  {
  	struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data;
  	struct sh_desc *desc;
  	u32 sar_buf = sh_dmae_readl(sh_chan, SAR);
  	u32 dar_buf = sh_dmae_readl(sh_chan, DAR);

  	spin_lock_irq(&sh_chan->desc_lock);
  	list_for_each_entry(desc, &sh_chan->ld_queue, node) {
  		if (desc->mark == DESC_SUBMITTED &&
  		    ((desc->direction == DMA_FROM_DEVICE &&
  		      (desc->hw.dar + desc->hw.tcr) == dar_buf) ||
  		     (desc->hw.sar + desc->hw.tcr) == sar_buf)) {
  			dev_dbg(sh_chan->dev, "done #%d@%p dst %u\n",
  				desc->async_tx.cookie, &desc->async_tx,
  				desc->hw.dar);
  			desc->mark = DESC_COMPLETED;
  			break;
  		}
  	}
  	/* Next desc */
  	sh_chan_xfer_ld_queue(sh_chan);
  	spin_unlock_irq(&sh_chan->desc_lock);
  	sh_dmae_chan_ld_cleanup(sh_chan, false);
  }
  static bool sh_dmae_nmi_notify(struct sh_dmae_device *shdev)
  {
  	/* Fast path out if NMIF is not asserted for this controller */
  	if ((dmaor_read(shdev) & DMAOR_NMIF) == 0)
  		return false;
  	return sh_dmae_reset(shdev);
  }
  
  static int sh_dmae_nmi_handler(struct notifier_block *self,
  			       unsigned long cmd, void *data)
  {
  	struct sh_dmae_device *shdev;
  	int ret = NOTIFY_DONE;
  	bool triggered;
  
  	/*
  	 * Only concern ourselves with NMI events.
  	 *
  	 * Normally we would check the die chain value, but as this needs
  	 * to be architecture independent, check for NMI context instead.
  	 */
  	if (!in_nmi())
  		return NOTIFY_DONE;
  
  	rcu_read_lock();
  	list_for_each_entry_rcu(shdev, &sh_dmae_devices, node) {
  		/*
  		 * Only stop if one of the controllers has NMIF asserted,
  		 * we do not want to interfere with regular address error
  		 * handling or NMI events that don't concern the DMACs.
  		 */
  		triggered = sh_dmae_nmi_notify(shdev);
  		if (triggered == true)
  			ret = NOTIFY_OK;
  	}
  	rcu_read_unlock();
  
  	return ret;
  }
  
  static struct notifier_block sh_dmae_nmi_notifier __read_mostly = {
  	.notifier_call	= sh_dmae_nmi_handler,
  
  	/* Run before NMI debug handler and KGDB */
  	.priority	= 1,
  };
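
  /*
   * Per-channel setup: each channel gets its own sh_dmae_chan with its register
   * window (chan_reg plus the per-channel offset from platform data), a
   * completion tasklet and an IRQ.  The dev_id string ("sh-dmaeX.Y", or
   * "sh-dmaN" when the platform device has no id) is what request_irq()
   * registers and what therefore shows up in /proc/interrupts.
   */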
  static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id,
  					int irq, unsigned long flags)
  {
  	int err;
  	const struct sh_dmae_channel *chan_pdata = &shdev->pdata->channel[id];
  	struct platform_device *pdev = to_platform_device(shdev->common.dev);
  	struct sh_dmae_chan *new_sh_chan;
  
  	/* alloc channel */
  	new_sh_chan = kzalloc(sizeof(struct sh_dmae_chan), GFP_KERNEL);
  	if (!new_sh_chan) {
  		dev_err(shdev->common.dev,
  			"No free memory for allocating dma channels!\n");
  		return -ENOMEM;
  	}
  	new_sh_chan->pm_state = DMAE_PM_ESTABLISHED;
  
  	/* reference struct dma_device */
  	new_sh_chan->common.device = &shdev->common;
  	new_sh_chan->dev = shdev->common.dev;
  	new_sh_chan->id = id;
  	new_sh_chan->irq = irq;
  	new_sh_chan->base = shdev->chan_reg + chan_pdata->offset / sizeof(u32);
  
  	/* Init DMA tasklet */
  	tasklet_init(&new_sh_chan->tasklet, dmae_do_tasklet,
  			(unsigned long)new_sh_chan);
  	spin_lock_init(&new_sh_chan->desc_lock);
  
  	/* Init descriptor management lists */
  	INIT_LIST_HEAD(&new_sh_chan->ld_queue);
  	INIT_LIST_HEAD(&new_sh_chan->ld_free);
  	/* Add the channel to DMA device channel list */
  	list_add_tail(&new_sh_chan->common.device_node,
  			&shdev->common.channels);
  	shdev->common.chancnt++;
  	if (pdev->id >= 0)
  		snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id),
  			 "sh-dmae%d.%d", pdev->id, new_sh_chan->id);
  	else
  		snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id),
  			 "sh-dma%d", new_sh_chan->id);
  
  	/* set up channel irq */
  	err = request_irq(irq, &sh_dmae_interrupt, flags,
  			  new_sh_chan->dev_id, new_sh_chan);
  	if (err) {
  		dev_err(shdev->common.dev, "DMA channel %d request_irq error "
  			"with return %d\n", id, err);
  		goto err_no_irq;
  	}
  	shdev->chan[id] = new_sh_chan;
  	return 0;
  
  err_no_irq:
  	/* remove from dmaengine device node */
  	list_del(&new_sh_chan->common.device_node);
  	kfree(new_sh_chan);
  	return err;
  }
  
  static void sh_dmae_chan_remove(struct sh_dmae_device *shdev)
  {
  	int i;
  
  	for (i = shdev->common.chancnt - 1 ; i >= 0 ; i--) {
  		if (shdev->chan[i]) {
027811b9b   Guennadi Liakhovetski   dmaengine: shdma:...
1088
1089
1090
  			struct sh_dmae_chan *sh_chan = shdev->chan[i];
  
  			free_irq(sh_chan->irq, sh_chan);
d8902adcc   Nobuhiro Iwamatsu   dmaengine: sh: Ad...
1091

027811b9b   Guennadi Liakhovetski   dmaengine: shdma:...
1092
1093
  			list_del(&sh_chan->common.device_node);
  			kfree(sh_chan);
d8902adcc   Nobuhiro Iwamatsu   dmaengine: sh: Ad...
1094
1095
1096
1097
1098
1099
1100
1101
  			shdev->chan[i] = NULL;
  		}
  	}
  	shdev->common.chancnt = 0;
  }
  
  static int __init sh_dmae_probe(struct platform_device *pdev)
  {
027811b9b   Guennadi Liakhovetski   dmaengine: shdma:...
1102
1103
  	struct sh_dmae_pdata *pdata = pdev->dev.platform_data;
  	unsigned long irqflags = IRQF_DISABLED,
8b1935e6a   Guennadi Liakhovetski   dmaengine: shdma:...
1104
1105
  		chan_flag[SH_DMAC_MAX_CHANNELS] = {};
  	int errirq, chan_irq[SH_DMAC_MAX_CHANNELS];
300e5f97d   Magnus Damm   dmaengine: shdma:...
1106
  	int err, i, irq_cnt = 0, irqres = 0, irq_cap = 0;
d8902adcc   Nobuhiro Iwamatsu   dmaengine: sh: Ad...
1107
  	struct sh_dmae_device *shdev;
027811b9b   Guennadi Liakhovetski   dmaengine: shdma:...
1108
  	struct resource *chan, *dmars, *errirq_res, *chanirq_res;
d8902adcc   Nobuhiro Iwamatsu   dmaengine: sh: Ad...
1109

56adf7e81   Dan Williams   shdma: fix initia...
1110
  	/* get platform data */
027811b9b   Guennadi Liakhovetski   dmaengine: shdma:...
1111
  	if (!pdata || !pdata->channel_num)
56adf7e81   Dan Williams   shdma: fix initia...
1112
  		return -ENODEV;
027811b9b   Guennadi Liakhovetski   dmaengine: shdma:...
1113
  	chan = platform_get_resource(pdev, IORESOURCE_MEM, 0);
26fc02ab5   Magnus Damm   dmaengine: shdma:...
1114
  	/* DMARS area is optional */
027811b9b   Guennadi Liakhovetski   dmaengine: shdma:...
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
  	dmars = platform_get_resource(pdev, IORESOURCE_MEM, 1);
  	/*
  	 * IRQ resources:
  	 * 1. there must always be at least one IRQ IO-resource. On SH4 it is
  	 *    the error IRQ, in which case it is the only IRQ in this resource:
  	 *    start == end. If it is the only IRQ resource, all channels also
  	 *    use the same IRQ.
  	 * 2. DMA channel IRQ resources can be specified one per resource or in
  	 *    ranges (start != end)
  	 * 3. iff all events (channels and, optionally, error) on this
  	 *    controller use the same IRQ, only one IRQ resource can be
  	 *    specified, otherwise there must be one IRQ per channel, even if
  	 *    some of them are equal
  	 * 4. if all IRQs on this controller are equal or if some specific IRQs
  	 *    specify IORESOURCE_IRQ_SHAREABLE in their resources, they will be
  	 *    requested with the IRQF_SHARED flag (an illustrative layout is
  	 *    sketched in the comment below)
  	 */
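  	/*
  	 * Illustration only: a hypothetical board description following the
  	 * rules above (all addresses and IRQ numbers are made up and do not
  	 * describe any real SoC):
  	 *
  	 *	static struct resource example_dmae_resources[] = {
  	 *		{ .start = 0xfe008020, .end = 0xfe00808f,
  	 *		  .flags = IORESOURCE_MEM },	-- channel registers
  	 *		{ .start = 0xfe009000, .end = 0xfe00900b,
  	 *		  .flags = IORESOURCE_MEM },	-- DMARS (optional)
  	 *		{ .start = 34, .end = 34,
  	 *		  .flags = IORESOURCE_IRQ },	-- error IRQ, start == end
  	 *		{ .start = 48, .end = 53,
  	 *		  .flags = IORESOURCE_IRQ },	-- channels 0..5 as one range
  	 *	};
  	 *
  	 * If only the error IRQ resource were present, all channels would be
  	 * multiplexed onto it and requested with IRQF_SHARED.
  	 */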
  	errirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
  	if (!chan || !errirq_res)
  		return -ENODEV;
  
  	if (!request_mem_region(chan->start, resource_size(chan), pdev->name)) {
  		dev_err(&pdev->dev, "DMAC register region already claimed
  ");
  		return -EBUSY;
  	}
  
  	if (dmars && !request_mem_region(dmars->start, resource_size(dmars), pdev->name)) {
  		dev_err(&pdev->dev, "DMAC DMARS region already claimed
  ");
  		err = -EBUSY;
  		goto ermrdmars;
  	}
  
  	err = -ENOMEM;
d8902adcc   Nobuhiro Iwamatsu   dmaengine: sh: Ad...
1150
1151
  	shdev = kzalloc(sizeof(struct sh_dmae_device), GFP_KERNEL);
  	if (!shdev) {
027811b9b   Guennadi Liakhovetski   dmaengine: shdma:...
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
  		dev_err(&pdev->dev, "Not enough memory
  ");
  		goto ealloc;
  	}
  
  	shdev->chan_reg = ioremap(chan->start, resource_size(chan));
  	if (!shdev->chan_reg)
  		goto emapchan;
  	if (dmars) {
  		shdev->dmars = ioremap(dmars->start, resource_size(dmars));
  		if (!shdev->dmars)
  			goto emapdmars;
d8902adcc   Nobuhiro Iwamatsu   dmaengine: sh: Ad...
1164
  	}
d8902adcc   Nobuhiro Iwamatsu   dmaengine: sh: Ad...
1165
  	/* platform data */
027811b9b   Guennadi Liakhovetski   dmaengine: shdma:...
1166
  	shdev->pdata = pdata;
d8902adcc   Nobuhiro Iwamatsu   dmaengine: sh: Ad...
1167

5899a723b   Kuninori Morimoto   dmaengine: shdma:...
1168
1169
1170
1171
  	if (pdata->chcr_offset)
  		shdev->chcr_offset = pdata->chcr_offset;
  	else
  		shdev->chcr_offset = CHCR;
67c6269e5   Kuninori Morimoto   dmaengine: shdma:...
1172
1173
1174
1175
  	if (pdata->chcr_ie_bit)
  		shdev->chcr_ie_bit = pdata->chcr_ie_bit;
  	else
  		shdev->chcr_ie_bit = CHCR_IE;
5c2de4441   Paul Mundt   dmaengine: shdma:...
1176
  	platform_set_drvdata(pdev, shdev);
20f2a3b5d   Guennadi Liakhovetski   dmaengine: shdma:...
1177
1178
  	pm_runtime_enable(&pdev->dev);
  	pm_runtime_get_sync(&pdev->dev);
31705e21f   Guennadi Liakhovetski   dmaengine: shdma:...
1179
  	spin_lock_irq(&sh_dmae_lock);
03aa18f55   Paul Mundt   dma: shdma: NMI s...
1180
  	list_add_tail_rcu(&shdev->node, &sh_dmae_devices);
31705e21f   Guennadi Liakhovetski   dmaengine: shdma:...
1181
  	spin_unlock_irq(&sh_dmae_lock);
03aa18f55   Paul Mundt   dma: shdma: NMI s...
1182

2dc666673   Guennadi Liakhovetski   dmaengine: shdma:...
1183
  	/* reset dma controller - only needed as a test */
027811b9b   Guennadi Liakhovetski   dmaengine: shdma:...
1184
  	err = sh_dmae_rst(shdev);
d8902adcc   Nobuhiro Iwamatsu   dmaengine: sh: Ad...
1185
1186
  	if (err)
  		goto rst_err;
d8902adcc   Nobuhiro Iwamatsu   dmaengine: sh: Ad...
1187
1188
1189
  	INIT_LIST_HEAD(&shdev->common.channels);
  
  	dma_cap_set(DMA_MEMCPY, shdev->common.cap_mask);
26fc02ab5   Magnus Damm   dmaengine: shdma:...
1190
  	if (pdata->slave && pdata->slave_num)
027811b9b   Guennadi Liakhovetski   dmaengine: shdma:...
1191
  		dma_cap_set(DMA_SLAVE, shdev->common.cap_mask);
cfefe9979   Guennadi Liakhovetski   sh: implement DMA...
1192

d8902adcc   Nobuhiro Iwamatsu   dmaengine: sh: Ad...
1193
1194
1195
1196
  	shdev->common.device_alloc_chan_resources
  		= sh_dmae_alloc_chan_resources;
  	shdev->common.device_free_chan_resources = sh_dmae_free_chan_resources;
  	shdev->common.device_prep_dma_memcpy = sh_dmae_prep_memcpy;
079344818   Linus Walleij   DMAENGINE: generi...
1197
  	shdev->common.device_tx_status = sh_dmae_tx_status;
d8902adcc   Nobuhiro Iwamatsu   dmaengine: sh: Ad...
1198
  	shdev->common.device_issue_pending = sh_dmae_memcpy_issue_pending;
cfefe9979   Guennadi Liakhovetski   sh: implement DMA...
1199
1200
1201
  
  	/* Compulsory fields for DMA_SLAVE */
  	shdev->common.device_prep_slave_sg = sh_dmae_prep_slave_sg;
c3635c78e   Linus Walleij   DMAENGINE: generi...
1202
  	shdev->common.device_control = sh_dmae_control;
cfefe9979   Guennadi Liakhovetski   sh: implement DMA...
1203

d8902adcc   Nobuhiro Iwamatsu   dmaengine: sh: Ad...
1204
  	shdev->common.dev = &pdev->dev;
ddb4f0f0e   Guennadi Liakhovetski   sh: DMA driver ha...
1205
  	/* The default MEMCPY transfer size of 4 bytes requires 4-byte alignment */
8b1935e6a   Guennadi Liakhovetski   dmaengine: shdma:...
1206
  	shdev->common.copy_align = LOG2_DEFAULT_XFER_SIZE;
d8902adcc   Nobuhiro Iwamatsu   dmaengine: sh: Ad...
1207

927a7c9c1   Magnus Damm   dmaengine: shdma:...
1208
  #if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
027811b9b   Guennadi Liakhovetski   dmaengine: shdma:...
1209
1210
1211
1212
1213
1214
1215
1216
1217
  	chanirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
  
  	if (!chanirq_res)
  		chanirq_res = errirq_res;
  	else
  		irqres++;
  
  	if (chanirq_res == errirq_res ||
  	    (errirq_res->flags & IORESOURCE_BITS) == IORESOURCE_IRQ_SHAREABLE)
d8902adcc   Nobuhiro Iwamatsu   dmaengine: sh: Ad...
1218
  		irqflags = IRQF_SHARED;
027811b9b   Guennadi Liakhovetski   dmaengine: shdma:...
1219
1220
1221
1222
1223
1224
1225
1226
1227
1228
1229
  
  	errirq = errirq_res->start;
  
  	err = request_irq(errirq, sh_dmae_err, irqflags,
  			  "DMAC Address Error", shdev);
  	if (err) {
  		dev_err(&pdev->dev,
  			"DMA failed requesting irq #%d, error %d
  ",
  			errirq, err);
  		goto eirq_err;
d8902adcc   Nobuhiro Iwamatsu   dmaengine: sh: Ad...
1230
  	}
027811b9b   Guennadi Liakhovetski   dmaengine: shdma:...
1231
1232
  #else
  	chanirq_res = errirq_res;
927a7c9c1   Magnus Damm   dmaengine: shdma:...
1233
  #endif /* CONFIG_CPU_SH4 || CONFIG_ARCH_SHMOBILE */
027811b9b   Guennadi Liakhovetski   dmaengine: shdma:...
1234
1235
1236
1237
1238
  
  	if (chanirq_res->start == chanirq_res->end &&
  	    !platform_get_resource(pdev, IORESOURCE_IRQ, 1)) {
  		/* Special case - all multiplexed */
  		for (; irq_cnt < pdata->channel_num; irq_cnt++) {
300e5f97d   Magnus Damm   dmaengine: shdma:...
1239
1240
1241
1242
1243
1244
1245
  			if (irq_cnt < SH_DMAC_MAX_CHANNELS) {
  				chan_irq[irq_cnt] = chanirq_res->start;
  				chan_flag[irq_cnt] = IRQF_SHARED;
  			} else {
  				irq_cap = 1;
  				break;
  			}
d8902adcc   Nobuhiro Iwamatsu   dmaengine: sh: Ad...
1246
  		}
027811b9b   Guennadi Liakhovetski   dmaengine: shdma:...
1247
1248
1249
  	} else {
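  		/*
  		 * One (or more) IRQ resources with one IRQ per channel: walk
  		 * every remaining IRQ resource and expand ranges (start != end)
  		 * into individual channel IRQs.
  		 */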
  		do {
  			for (i = chanirq_res->start; i <= chanirq_res->end; i++) {
dcee0bb71   Magnus Damm   dmaengine: shdma:...
1250
1251
1252
1253
  				if (irq_cnt >= SH_DMAC_MAX_CHANNELS) {
  					irq_cap = 1;
  					break;
  				}
027811b9b   Guennadi Liakhovetski   dmaengine: shdma:...
1254
1255
1256
1257
1258
1259
1260
1261
1262
1263
  				if ((errirq_res->flags & IORESOURCE_BITS) ==
  				    IORESOURCE_IRQ_SHAREABLE)
  					chan_flag[irq_cnt] = IRQF_SHARED;
  				else
  					chan_flag[irq_cnt] = IRQF_DISABLED;
  				dev_dbg(&pdev->dev,
  					"Found IRQ %d for channel %d
  ",
  					i, irq_cnt);
  				chan_irq[irq_cnt++] = i;
300e5f97d   Magnus Damm   dmaengine: shdma:...
1264
  			}
dcee0bb71   Magnus Damm   dmaengine: shdma:...
1265
  			if (irq_cnt >= SH_DMAC_MAX_CHANNELS)
300e5f97d   Magnus Damm   dmaengine: shdma:...
1266
  				break;
dcee0bb71   Magnus Damm   dmaengine: shdma:...
1267

027811b9b   Guennadi Liakhovetski   dmaengine: shdma:...
1268
1269
1270
  			chanirq_res = platform_get_resource(pdev,
  						IORESOURCE_IRQ, ++irqres);
  		} while (irq_cnt < pdata->channel_num && chanirq_res);
d8902adcc   Nobuhiro Iwamatsu   dmaengine: sh: Ad...
1271
  	}
027811b9b   Guennadi Liakhovetski   dmaengine: shdma:...
1272

d8902adcc   Nobuhiro Iwamatsu   dmaengine: sh: Ad...
1273
  	/* Create DMA channels */
300e5f97d   Magnus Damm   dmaengine: shdma:...
1274
  	for (i = 0; i < irq_cnt; i++) {
027811b9b   Guennadi Liakhovetski   dmaengine: shdma:...
1275
  		err = sh_dmae_chan_probe(shdev, i, chan_irq[i], chan_flag[i]);
d8902adcc   Nobuhiro Iwamatsu   dmaengine: sh: Ad...
1276
1277
1278
  		if (err)
  			goto chan_probe_err;
  	}
300e5f97d   Magnus Damm   dmaengine: shdma:...
1279
1280
1281
1282
1283
  	if (irq_cap)
  		dev_notice(&pdev->dev, "Attempting to register %d DMA "
  			   "channels when a maximum of %d are supported.
  ",
  			   pdata->channel_num, SH_DMAC_MAX_CHANNELS);
20f2a3b5d   Guennadi Liakhovetski   dmaengine: shdma:...
1284
  	pm_runtime_put(&pdev->dev);
d8902adcc   Nobuhiro Iwamatsu   dmaengine: sh: Ad...
1285
1286
1287
1288
1289
1290
  	dma_async_device_register(&shdev->common);
  
  	return err;
  
  chan_probe_err:
  	sh_dmae_chan_remove(shdev);
300e5f97d   Magnus Damm   dmaengine: shdma:...
1291

927a7c9c1   Magnus Damm   dmaengine: shdma:...
1292
  #if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
027811b9b   Guennadi Liakhovetski   dmaengine: shdma:...
1293
  	free_irq(errirq, shdev);
d8902adcc   Nobuhiro Iwamatsu   dmaengine: sh: Ad...
1294
  eirq_err:
027811b9b   Guennadi Liakhovetski   dmaengine: shdma:...
1295
  #endif
d8902adcc   Nobuhiro Iwamatsu   dmaengine: sh: Ad...
1296
  rst_err:
31705e21f   Guennadi Liakhovetski   dmaengine: shdma:...
1297
  	spin_lock_irq(&sh_dmae_lock);
03aa18f55   Paul Mundt   dma: shdma: NMI s...
1298
  	list_del_rcu(&shdev->node);
31705e21f   Guennadi Liakhovetski   dmaengine: shdma:...
1299
  	spin_unlock_irq(&sh_dmae_lock);
03aa18f55   Paul Mundt   dma: shdma: NMI s...
1300

20f2a3b5d   Guennadi Liakhovetski   dmaengine: shdma:...
1301
  	pm_runtime_put(&pdev->dev);
467017b83   Guennadi Liakhovetski   dmaengine: shdma:...
1302
  	pm_runtime_disable(&pdev->dev);
027811b9b   Guennadi Liakhovetski   dmaengine: shdma:...
1303
1304
  	if (dmars)
  		iounmap(shdev->dmars);
5c2de4441   Paul Mundt   dmaengine: shdma:...
1305
1306
  
  	platform_set_drvdata(pdev, NULL);
027811b9b   Guennadi Liakhovetski   dmaengine: shdma:...
1307
1308
  emapdmars:
  	iounmap(shdev->chan_reg);
31705e21f   Guennadi Liakhovetski   dmaengine: shdma:...
1309
  	synchronize_rcu();
027811b9b   Guennadi Liakhovetski   dmaengine: shdma:...
1310
  emapchan:
d8902adcc   Nobuhiro Iwamatsu   dmaengine: sh: Ad...
1311
  	kfree(shdev);
027811b9b   Guennadi Liakhovetski   dmaengine: shdma:...
1312
1313
1314
1315
1316
  ealloc:
  	if (dmars)
  		release_mem_region(dmars->start, resource_size(dmars));
  ermrdmars:
  	release_mem_region(chan->start, resource_size(chan));
d8902adcc   Nobuhiro Iwamatsu   dmaengine: sh: Ad...
1317

d8902adcc   Nobuhiro Iwamatsu   dmaengine: sh: Ad...
1318
1319
1320
1321
1322
1323
  	return err;
  }
  
  static int __exit sh_dmae_remove(struct platform_device *pdev)
  {
  	struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
027811b9b   Guennadi Liakhovetski   dmaengine: shdma:...
1324
1325
  	struct resource *res;
  	int errirq = platform_get_irq(pdev, 0);
d8902adcc   Nobuhiro Iwamatsu   dmaengine: sh: Ad...
1326
1327
  
  	dma_async_device_unregister(&shdev->common);
027811b9b   Guennadi Liakhovetski   dmaengine: shdma:...
1328
1329
  	if (errirq > 0)
  		free_irq(errirq, shdev);
d8902adcc   Nobuhiro Iwamatsu   dmaengine: sh: Ad...
1330

31705e21f   Guennadi Liakhovetski   dmaengine: shdma:...
1331
  	spin_lock_irq(&sh_dmae_lock);
03aa18f55   Paul Mundt   dma: shdma: NMI s...
1332
  	list_del_rcu(&shdev->node);
31705e21f   Guennadi Liakhovetski   dmaengine: shdma:...
1333
  	spin_unlock_irq(&sh_dmae_lock);
03aa18f55   Paul Mundt   dma: shdma: NMI s...
1334

d8902adcc   Nobuhiro Iwamatsu   dmaengine: sh: Ad...
1335
1336
  	/* remove channel data */
  	sh_dmae_chan_remove(shdev);
20f2a3b5d   Guennadi Liakhovetski   dmaengine: shdma:...
1337
  	pm_runtime_disable(&pdev->dev);
027811b9b   Guennadi Liakhovetski   dmaengine: shdma:...
1338
1339
1340
  	if (shdev->dmars)
  		iounmap(shdev->dmars);
  	iounmap(shdev->chan_reg);
5c2de4441   Paul Mundt   dmaengine: shdma:...
1341
  	platform_set_drvdata(pdev, NULL);
31705e21f   Guennadi Liakhovetski   dmaengine: shdma:...
1342
  	synchronize_rcu();
d8902adcc   Nobuhiro Iwamatsu   dmaengine: sh: Ad...
1343
  	kfree(shdev);
027811b9b   Guennadi Liakhovetski   dmaengine: shdma:...
1344
1345
1346
1347
1348
1349
  	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  	if (res)
  		release_mem_region(res->start, resource_size(res));
  	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
  	if (res)
  		release_mem_region(res->start, resource_size(res));
d8902adcc   Nobuhiro Iwamatsu   dmaengine: sh: Ad...
1350
1351
1352
1353
1354
1355
  	return 0;
  }
  
  static void sh_dmae_shutdown(struct platform_device *pdev)
  {
  	struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
027811b9b   Guennadi Liakhovetski   dmaengine: shdma:...
1356
  	sh_dmae_ctl_stop(shdev);
d8902adcc   Nobuhiro Iwamatsu   dmaengine: sh: Ad...
1357
  }
467017b83   Guennadi Liakhovetski   dmaengine: shdma:...
1358
1359
1360
1361
1362
1363
1364
1365
1366
1367
1368
1369
1370
1371
1372
1373
1374
1375
1376
1377
1378
1379
1380
1381
1382
1383
1384
1385
1386
1387
1388
1389
1390
1391
1392
1393
1394
1395
1396
1397
1398
1399
1400
1401
1402
1403
1404
1405
1406
1407
1408
1409
1410
1411
1412
1413
1414
1415
1416
1417
1418
1419
1420
1421
  static int sh_dmae_runtime_suspend(struct device *dev)
  {
  	return 0;
  }
  
  static int sh_dmae_runtime_resume(struct device *dev)
  {
  	struct sh_dmae_device *shdev = dev_get_drvdata(dev);
  
  	return sh_dmae_rst(shdev);
  }
  
  #ifdef CONFIG_PM
  static int sh_dmae_suspend(struct device *dev)
  {
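  	/*
  	 * Drop the runtime PM reference of every channel that still has
  	 * descriptors allocated, remembering any error so that
  	 * sh_dmae_resume() knows whether the reference has to be re-taken.
  	 */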
  	struct sh_dmae_device *shdev = dev_get_drvdata(dev);
  	int i;
  
  	for (i = 0; i < shdev->pdata->channel_num; i++) {
  		struct sh_dmae_chan *sh_chan = shdev->chan[i];
  		if (sh_chan->descs_allocated)
  			sh_chan->pm_error = pm_runtime_put_sync(dev);
  	}
  
  	return 0;
  }
  
  static int sh_dmae_resume(struct device *dev)
  {
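  	/*
  	 * Take back the runtime PM reference dropped in sh_dmae_suspend() and
  	 * reprogram every in-use channel: slave channels get their DMARS and
  	 * CHCR settings restored from the slave configuration, memcpy-only
  	 * channels are simply re-initialised.
  	 */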
  	struct sh_dmae_device *shdev = dev_get_drvdata(dev);
  	int i;
  
  	for (i = 0; i < shdev->pdata->channel_num; i++) {
  		struct sh_dmae_chan *sh_chan = shdev->chan[i];
  		struct sh_dmae_slave *param = sh_chan->common.private;
  
  		if (!sh_chan->descs_allocated)
  			continue;
  
  		if (!sh_chan->pm_error)
  			pm_runtime_get_sync(dev);
  
  		if (param) {
  			const struct sh_dmae_slave_config *cfg = param->config;
  			dmae_set_dmars(sh_chan, cfg->mid_rid);
  			dmae_set_chcr(sh_chan, cfg->chcr);
  		} else {
  			dmae_init(sh_chan);
  		}
  	}
  
  	return 0;
  }
  #else
  #define sh_dmae_suspend NULL
  #define sh_dmae_resume NULL
  #endif
  
  const struct dev_pm_ops sh_dmae_pm = {
  	.suspend		= sh_dmae_suspend,
  	.resume			= sh_dmae_resume,
  	.runtime_suspend	= sh_dmae_runtime_suspend,
  	.runtime_resume		= sh_dmae_runtime_resume,
  };
d8902adcc   Nobuhiro Iwamatsu   dmaengine: sh: Ad...
1422
1423
1424
1425
  static struct platform_driver sh_dmae_driver = {
  	.remove		= __exit_p(sh_dmae_remove),
  	.shutdown	= sh_dmae_shutdown,
  	.driver = {
7a5c106a0   Guennadi Liakhovetski   sh: prevent the D...
1426
  		.owner	= THIS_MODULE,
d8902adcc   Nobuhiro Iwamatsu   dmaengine: sh: Ad...
1427
  		.name	= "sh-dma-engine",
467017b83   Guennadi Liakhovetski   dmaengine: shdma:...
1428
  		.pm	= &sh_dmae_pm,
d8902adcc   Nobuhiro Iwamatsu   dmaengine: sh: Ad...
1429
1430
1431
1432
1433
  	},
  };
  
  static int __init sh_dmae_init(void)
  {
661382fe1   Guennadi Liakhovetski   dma: shdma: don't...
1434
1435
1436
1437
  	/* Wire up NMI handling */
  	int err = register_die_notifier(&sh_dmae_nmi_notifier);
  	if (err)
  		return err;
d8902adcc   Nobuhiro Iwamatsu   dmaengine: sh: Ad...
1438
1439
1440
1441
1442
1443
1444
  	return platform_driver_probe(&sh_dmae_driver, sh_dmae_probe);
  }
  module_init(sh_dmae_init);
  
  static void __exit sh_dmae_exit(void)
  {
  	platform_driver_unregister(&sh_dmae_driver);
661382fe1   Guennadi Liakhovetski   dma: shdma: don't...
1445
1446
  
  	unregister_die_notifier(&sh_dmae_nmi_notifier);
d8902adcc   Nobuhiro Iwamatsu   dmaengine: sh: Ad...
1447
1448
1449
1450
1451
1452
  }
  module_exit(sh_dmae_exit);
  
  MODULE_AUTHOR("Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>");
  MODULE_DESCRIPTION("Renesas SH DMA Engine driver");
  MODULE_LICENSE("GPL");
e5843341e   Guennadi Liakhovetski   dma: shdma: add a...
1453
  MODULE_ALIAS("platform:sh-dma-engine");