Blame view

drivers/dma/pch_dma.c 25.1 KB
1802d0bee   Thomas Gleixner   treewide: Replace...
1
  // SPDX-License-Identifier: GPL-2.0-only
0c42bd0e4   Yong Wang   dmaengine: Driver...
2
3
4
  /*
   * Topcliff PCH DMA controller driver
   * Copyright (c) 2010 Intel Corporation
e79e72be2   Tomoya MORINAGA   pch_dma: Change c...
5
   * Copyright (C) 2011 LAPIS Semiconductor Co., Ltd.
0c42bd0e4   Yong Wang   dmaengine: Driver...
6
7
8
9
10
11
   */
  
  #include <linux/dmaengine.h>
  #include <linux/dma-mapping.h>
  #include <linux/init.h>
  #include <linux/pci.h>
a15783c34   Vinod Koul   dmaengine: pch: f...
12
  #include <linux/slab.h>
0c42bd0e4   Yong Wang   dmaengine: Driver...
13
14
15
  #include <linux/interrupt.h>
  #include <linux/module.h>
  #include <linux/pch_dma.h>
d2ebfb335   Russell King - ARM Linux   dmaengine: add pr...
16
  #include "dmaengine.h"
0c42bd0e4   Yong Wang   dmaengine: Driver...
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
  #define DRV_NAME "pch-dma"
  
  #define DMA_CTL0_DISABLE		0x0
  #define DMA_CTL0_SG			0x1
  #define DMA_CTL0_ONESHOT		0x2
  #define DMA_CTL0_MODE_MASK_BITS		0x3
  #define DMA_CTL0_DIR_SHIFT_BITS		2
  #define DMA_CTL0_BITS_PER_CH		4
  
  #define DMA_CTL2_START_SHIFT_BITS	8
  #define DMA_CTL2_IRQ_ENABLE_MASK	((1UL << DMA_CTL2_START_SHIFT_BITS) - 1)
  
  #define DMA_STATUS_IDLE			0x0
  #define DMA_STATUS_DESC_READ		0x1
  #define DMA_STATUS_WAIT			0x2
  #define DMA_STATUS_ACCESS		0x3
  #define DMA_STATUS_BITS_PER_CH		2
  #define DMA_STATUS_MASK_BITS		0x3
  #define DMA_STATUS_SHIFT_BITS		16
  #define DMA_STATUS_IRQ(x)		(0x1 << (x))
c3d4913cd   Tomoya MORINAGA   pch_dma: fix DMA ...
37
38
  #define DMA_STATUS0_ERR(x)		(0x1 << ((x) + 8))
  #define DMA_STATUS2_ERR(x)		(0x1 << (x))
0c42bd0e4   Yong Wang   dmaengine: Driver...
39
40
41
42
43
44
45
46
47
48
49
50
  
  #define DMA_DESC_WIDTH_SHIFT_BITS	12
  #define DMA_DESC_WIDTH_1_BYTE		(0x3 << DMA_DESC_WIDTH_SHIFT_BITS)
  #define DMA_DESC_WIDTH_2_BYTES		(0x2 << DMA_DESC_WIDTH_SHIFT_BITS)
  #define DMA_DESC_WIDTH_4_BYTES		(0x0 << DMA_DESC_WIDTH_SHIFT_BITS)
  #define DMA_DESC_MAX_COUNT_1_BYTE	0x3FF
  #define DMA_DESC_MAX_COUNT_2_BYTES	0x3FF
  #define DMA_DESC_MAX_COUNT_4_BYTES	0x7FF
  #define DMA_DESC_END_WITHOUT_IRQ	0x0
  #define DMA_DESC_END_WITH_IRQ		0x1
  #define DMA_DESC_FOLLOW_WITHOUT_IRQ	0x2
  #define DMA_DESC_FOLLOW_WITH_IRQ	0x3
c43f15086   Tomoya MORINAGA   pch_dma: Fix susp...
51
  #define MAX_CHAN_NR			12
0c42bd0e4   Yong Wang   dmaengine: Driver...
52

0b052f4a0   Tomoya MORINAGA   pch_dma: Fix CTL ...
53
54
  #define DMA_MASK_CTL0_MODE	0x33333333
  #define DMA_MASK_CTL2_MODE	0x00003333
0c42bd0e4   Yong Wang   dmaengine: Driver...
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
/* Number of descriptors pre-allocated per channel; tunable at module load. */
static unsigned int init_nr_desc_per_channel = 64;
module_param(init_nr_desc_per_channel, uint, 0644);
MODULE_PARM_DESC(init_nr_desc_per_channel,
		 "initial descriptors per channel (default: 64)");
  
/* Hardware descriptor image, mirrored into the per-channel registers. */
struct pch_dma_desc_regs {
	u32	dev_addr;	/* device (peripheral) address */
	u32	mem_addr;	/* memory buffer address */
	u32	size;		/* transfer count ORed with width bits */
	u32	next;		/* next descriptor address or END/FOLLOW flags */
};
  
/*
 * Snapshot of the controller registers, used by pch_dma_save_regs() /
 * pch_dma_restore_regs() to preserve state across suspend/resume.
 */
struct pch_dma_regs {
	u32	dma_ctl0;
	u32	dma_ctl1;
	u32	dma_ctl2;
	u32	dma_ctl3;
	u32	dma_sts0;
	u32	dma_sts1;
	u32	dma_sts2;
	u32	reserved3;
	struct pch_dma_desc_regs desc[MAX_CHAN_NR];
};
  
/* Software descriptor: hardware register image plus dmaengine bookkeeping. */
struct pch_dma_desc {
	struct pch_dma_desc_regs regs;	/* values programmed into channel regs */
	struct dma_async_tx_descriptor txd;
	struct list_head	desc_node;	/* link in a channel list */
	struct list_head	tx_list;	/* follow-up descs of an SG chain */
};
  
/* Per-channel driver state. */
struct pch_dma_chan {
	struct dma_chan		chan;		/* generic dmaengine channel */
	void __iomem *membase;			/* this channel's descriptor regs */
	enum dma_transfer_direction dir;	/* direction of current transfers */
	struct tasklet_struct	tasklet;	/* bottom half scheduled by pd_irq() */
	unsigned long		err_status;	/* bit 0 set by IRQ handler on error */

	spinlock_t		lock;		/* protects the lists below */

	struct list_head	active_list;	/* descriptors in flight */
	struct list_head	queue;		/* descriptors waiting to start */
	struct list_head	free_list;	/* recycled descriptors */
	unsigned int		descs_allocated; /* total descriptors owned */
};
  
  #define PDC_DEV_ADDR	0x00
  #define PDC_MEM_ADDR	0x04
  #define PDC_SIZE	0x08
  #define PDC_NEXT	0x0C
  
  #define channel_readl(pdc, name) \
  	readl((pdc)->membase + PDC_##name)
  #define channel_writel(pdc, name, val) \
  	writel((val), (pdc)->membase + PDC_##name)
  
/* Device-wide driver state. */
struct pch_dma {
	struct dma_device	dma;	/* generic dmaengine device */
	void __iomem *membase;		/* controller register base */
	struct dma_pool		*pool;	/* DMA pool backing the descriptors */
	struct pch_dma_regs	regs;	/* register snapshot for suspend/resume */
	struct pch_dma_desc_regs ch_regs[MAX_CHAN_NR]; /* saved channel regs */
	struct pch_dma_chan	channels[MAX_CHAN_NR];
};
  
  #define PCH_DMA_CTL0	0x00
  #define PCH_DMA_CTL1	0x04
  #define PCH_DMA_CTL2	0x08
194f5f270   Tomoya MORINAGA   pch_dma: Support ...
122
  #define PCH_DMA_CTL3	0x0C
0c42bd0e4   Yong Wang   dmaengine: Driver...
123
124
  #define PCH_DMA_STS0	0x10
  #define PCH_DMA_STS1	0x14
c3d4913cd   Tomoya MORINAGA   pch_dma: fix DMA ...
125
  #define PCH_DMA_STS2	0x18
0c42bd0e4   Yong Wang   dmaengine: Driver...
126
127
  
  #define dma_readl(pd, name) \
61cd22037   Yong Wang   DMAENGINE: pch_dm...
128
  	readl((pd)->membase + PCH_DMA_##name)
0c42bd0e4   Yong Wang   dmaengine: Driver...
129
  #define dma_writel(pd, name, val) \
61cd22037   Yong Wang   DMAENGINE: pch_dm...
130
  	writel((val), (pd)->membase + PCH_DMA_##name)
0c42bd0e4   Yong Wang   dmaengine: Driver...
131

08645fdc7   Tomoya MORINAGA   pch_dma: modify f...
132
133
  static inline
  struct pch_dma_desc *to_pd_desc(struct dma_async_tx_descriptor *txd)
0c42bd0e4   Yong Wang   dmaengine: Driver...
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
  {
  	return container_of(txd, struct pch_dma_desc, txd);
  }
  
  static inline struct pch_dma_chan *to_pd_chan(struct dma_chan *chan)
  {
  	return container_of(chan, struct pch_dma_chan, chan);
  }
  
  static inline struct pch_dma *to_pd(struct dma_device *ddev)
  {
  	return container_of(ddev, struct pch_dma, dma);
  }
  
  static inline struct device *chan2dev(struct dma_chan *chan)
  {
  	return &chan->dev->device;
  }
  
  static inline struct device *chan2parent(struct dma_chan *chan)
  {
  	return chan->dev->device.parent;
  }
08645fdc7   Tomoya MORINAGA   pch_dma: modify f...
157
158
  static inline
  struct pch_dma_desc *pdc_first_active(struct pch_dma_chan *pd_chan)
0c42bd0e4   Yong Wang   dmaengine: Driver...
159
160
161
162
  {
  	return list_first_entry(&pd_chan->active_list,
  				struct pch_dma_desc, desc_node);
  }
08645fdc7   Tomoya MORINAGA   pch_dma: modify f...
163
164
  static inline
  struct pch_dma_desc *pdc_first_queued(struct pch_dma_chan *pd_chan)
0c42bd0e4   Yong Wang   dmaengine: Driver...
165
166
167
168
169
170
171
172
173
  {
  	return list_first_entry(&pd_chan->queue,
  				struct pch_dma_desc, desc_node);
  }
  
  static void pdc_enable_irq(struct dma_chan *chan, int enable)
  {
  	struct pch_dma *pd = to_pd(chan->device);
  	u32 val;
c3d4913cd   Tomoya MORINAGA   pch_dma: fix DMA ...
174
175
176
177
178
179
  	int pos;
  
  	if (chan->chan_id < 8)
  		pos = chan->chan_id;
  	else
  		pos = chan->chan_id + 8;
0c42bd0e4   Yong Wang   dmaengine: Driver...
180
181
182
183
  
  	val = dma_readl(pd, CTL2);
  
  	if (enable)
c3d4913cd   Tomoya MORINAGA   pch_dma: fix DMA ...
184
  		val |= 0x1 << pos;
0c42bd0e4   Yong Wang   dmaengine: Driver...
185
  	else
c3d4913cd   Tomoya MORINAGA   pch_dma: fix DMA ...
186
  		val &= ~(0x1 << pos);
0c42bd0e4   Yong Wang   dmaengine: Driver...
187
188
189
190
191
192
193
194
195
196
197
198
199
  
  	dma_writel(pd, CTL2, val);
  
  	dev_dbg(chan2dev(chan), "pdc_enable_irq: chan %d -> %x
  ",
  		chan->chan_id, val);
  }
  
  static void pdc_set_dir(struct dma_chan *chan)
  {
  	struct pch_dma_chan *pd_chan = to_pd_chan(chan);
  	struct pch_dma *pd = to_pd(chan->device);
  	u32 val;
0b052f4a0   Tomoya MORINAGA   pch_dma: Fix CTL ...
200
201
  	u32 mask_mode;
  	u32 mask_ctl;
0c42bd0e4   Yong Wang   dmaengine: Driver...
202

194f5f270   Tomoya MORINAGA   pch_dma: Support ...
203
204
  	if (chan->chan_id < 8) {
  		val = dma_readl(pd, CTL0);
0c42bd0e4   Yong Wang   dmaengine: Driver...
205

0b052f4a0   Tomoya MORINAGA   pch_dma: Fix CTL ...
206
207
208
209
210
  		mask_mode = DMA_CTL0_MODE_MASK_BITS <<
  					(DMA_CTL0_BITS_PER_CH * chan->chan_id);
  		mask_ctl = DMA_MASK_CTL0_MODE & ~(DMA_CTL0_MODE_MASK_BITS <<
  				       (DMA_CTL0_BITS_PER_CH * chan->chan_id));
  		val &= mask_mode;
db8196df4   Vinod Koul   dmaengine: move d...
211
  		if (pd_chan->dir == DMA_MEM_TO_DEV)
194f5f270   Tomoya MORINAGA   pch_dma: Support ...
212
213
214
215
216
  			val |= 0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
  				       DMA_CTL0_DIR_SHIFT_BITS);
  		else
  			val &= ~(0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
  					 DMA_CTL0_DIR_SHIFT_BITS));
0b052f4a0   Tomoya MORINAGA   pch_dma: Fix CTL ...
217
  		val |= mask_ctl;
194f5f270   Tomoya MORINAGA   pch_dma: Support ...
218
219
220
221
  		dma_writel(pd, CTL0, val);
  	} else {
  		int ch = chan->chan_id - 8; /* ch8-->0 ch9-->1 ... ch11->3 */
  		val = dma_readl(pd, CTL3);
0c42bd0e4   Yong Wang   dmaengine: Driver...
222

0b052f4a0   Tomoya MORINAGA   pch_dma: Fix CTL ...
223
224
225
226
227
  		mask_mode = DMA_CTL0_MODE_MASK_BITS <<
  						(DMA_CTL0_BITS_PER_CH * ch);
  		mask_ctl = DMA_MASK_CTL2_MODE & ~(DMA_CTL0_MODE_MASK_BITS <<
  						 (DMA_CTL0_BITS_PER_CH * ch));
  		val &= mask_mode;
db8196df4   Vinod Koul   dmaengine: move d...
228
  		if (pd_chan->dir == DMA_MEM_TO_DEV)
194f5f270   Tomoya MORINAGA   pch_dma: Support ...
229
230
231
232
233
  			val |= 0x1 << (DMA_CTL0_BITS_PER_CH * ch +
  				       DMA_CTL0_DIR_SHIFT_BITS);
  		else
  			val &= ~(0x1 << (DMA_CTL0_BITS_PER_CH * ch +
  					 DMA_CTL0_DIR_SHIFT_BITS));
0b052f4a0   Tomoya MORINAGA   pch_dma: Fix CTL ...
234
  		val |= mask_ctl;
194f5f270   Tomoya MORINAGA   pch_dma: Support ...
235
236
  		dma_writel(pd, CTL3, val);
  	}
0c42bd0e4   Yong Wang   dmaengine: Driver...
237
238
239
240
241
242
243
244
245
246
  
  	dev_dbg(chan2dev(chan), "pdc_set_dir: chan %d -> %x
  ",
  		chan->chan_id, val);
  }
  
  static void pdc_set_mode(struct dma_chan *chan, u32 mode)
  {
  	struct pch_dma *pd = to_pd(chan->device);
  	u32 val;
0b052f4a0   Tomoya MORINAGA   pch_dma: Fix CTL ...
247
248
  	u32 mask_ctl;
  	u32 mask_dir;
0c42bd0e4   Yong Wang   dmaengine: Driver...
249

194f5f270   Tomoya MORINAGA   pch_dma: Support ...
250
  	if (chan->chan_id < 8) {
0b052f4a0   Tomoya MORINAGA   pch_dma: Fix CTL ...
251
252
253
254
  		mask_ctl = DMA_MASK_CTL0_MODE & ~(DMA_CTL0_MODE_MASK_BITS <<
  			   (DMA_CTL0_BITS_PER_CH * chan->chan_id));
  		mask_dir = 1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +\
  				 DMA_CTL0_DIR_SHIFT_BITS);
194f5f270   Tomoya MORINAGA   pch_dma: Support ...
255
  		val = dma_readl(pd, CTL0);
0b052f4a0   Tomoya MORINAGA   pch_dma: Fix CTL ...
256
  		val &= mask_dir;
194f5f270   Tomoya MORINAGA   pch_dma: Support ...
257
  		val |= mode << (DMA_CTL0_BITS_PER_CH * chan->chan_id);
0b052f4a0   Tomoya MORINAGA   pch_dma: Fix CTL ...
258
  		val |= mask_ctl;
194f5f270   Tomoya MORINAGA   pch_dma: Support ...
259
260
261
  		dma_writel(pd, CTL0, val);
  	} else {
  		int ch = chan->chan_id - 8; /* ch8-->0 ch9-->1 ... ch11->3 */
0b052f4a0   Tomoya MORINAGA   pch_dma: Fix CTL ...
262
263
264
265
  		mask_ctl = DMA_MASK_CTL2_MODE & ~(DMA_CTL0_MODE_MASK_BITS <<
  						 (DMA_CTL0_BITS_PER_CH * ch));
  		mask_dir = 1 << (DMA_CTL0_BITS_PER_CH * ch +\
  				 DMA_CTL0_DIR_SHIFT_BITS);
194f5f270   Tomoya MORINAGA   pch_dma: Support ...
266
  		val = dma_readl(pd, CTL3);
0b052f4a0   Tomoya MORINAGA   pch_dma: Fix CTL ...
267
  		val &= mask_dir;
194f5f270   Tomoya MORINAGA   pch_dma: Support ...
268
  		val |= mode << (DMA_CTL0_BITS_PER_CH * ch);
0b052f4a0   Tomoya MORINAGA   pch_dma: Fix CTL ...
269
  		val |= mask_ctl;
194f5f270   Tomoya MORINAGA   pch_dma: Support ...
270
  		dma_writel(pd, CTL3, val);
194f5f270   Tomoya MORINAGA   pch_dma: Support ...
271
  	}
0c42bd0e4   Yong Wang   dmaengine: Driver...
272
273
274
275
276
  
  	dev_dbg(chan2dev(chan), "pdc_set_mode: chan %d -> %x
  ",
  		chan->chan_id, val);
  }
c3d4913cd   Tomoya MORINAGA   pch_dma: fix DMA ...
277
  static u32 pdc_get_status0(struct pch_dma_chan *pd_chan)
0c42bd0e4   Yong Wang   dmaengine: Driver...
278
279
280
281
282
283
284
285
  {
  	struct pch_dma *pd = to_pd(pd_chan->chan.device);
  	u32 val;
  
  	val = dma_readl(pd, STS0);
  	return DMA_STATUS_MASK_BITS & (val >> (DMA_STATUS_SHIFT_BITS +
  			DMA_STATUS_BITS_PER_CH * pd_chan->chan.chan_id));
  }
c3d4913cd   Tomoya MORINAGA   pch_dma: fix DMA ...
286
287
288
289
290
291
292
293
294
  static u32 pdc_get_status2(struct pch_dma_chan *pd_chan)
  {
  	struct pch_dma *pd = to_pd(pd_chan->chan.device);
  	u32 val;
  
  	val = dma_readl(pd, STS2);
  	return DMA_STATUS_MASK_BITS & (val >> (DMA_STATUS_SHIFT_BITS +
  			DMA_STATUS_BITS_PER_CH * (pd_chan->chan.chan_id - 8)));
  }
0c42bd0e4   Yong Wang   dmaengine: Driver...
295
296
  static bool pdc_is_idle(struct pch_dma_chan *pd_chan)
  {
c3d4913cd   Tomoya MORINAGA   pch_dma: fix DMA ...
297
298
299
300
301
302
303
304
305
  	u32 sts;
  
  	if (pd_chan->chan.chan_id < 8)
  		sts = pdc_get_status0(pd_chan);
  	else
  		sts = pdc_get_status2(pd_chan);
  
  
  	if (sts == DMA_STATUS_IDLE)
0c42bd0e4   Yong Wang   dmaengine: Driver...
306
307
308
309
310
311
312
  		return true;
  	else
  		return false;
  }
  
  static void pdc_dostart(struct pch_dma_chan *pd_chan, struct pch_dma_desc* desc)
  {
0c42bd0e4   Yong Wang   dmaengine: Driver...
313
314
315
316
317
318
  	if (!pdc_is_idle(pd_chan)) {
  		dev_err(chan2dev(&pd_chan->chan),
  			"BUG: Attempt to start non-idle channel
  ");
  		return;
  	}
0c42bd0e4   Yong Wang   dmaengine: Driver...
319
320
321
322
323
324
325
326
327
328
329
330
  	dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> dev_addr: %x
  ",
  		pd_chan->chan.chan_id, desc->regs.dev_addr);
  	dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> mem_addr: %x
  ",
  		pd_chan->chan.chan_id, desc->regs.mem_addr);
  	dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> size: %x
  ",
  		pd_chan->chan.chan_id, desc->regs.size);
  	dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> next: %x
  ",
  		pd_chan->chan.chan_id, desc->regs.next);
943d8d8bc   Tomoya MORINAGA   dma : EG20T PCH: ...
331
332
333
334
335
  	if (list_empty(&desc->tx_list)) {
  		channel_writel(pd_chan, DEV_ADDR, desc->regs.dev_addr);
  		channel_writel(pd_chan, MEM_ADDR, desc->regs.mem_addr);
  		channel_writel(pd_chan, SIZE, desc->regs.size);
  		channel_writel(pd_chan, NEXT, desc->regs.next);
0c42bd0e4   Yong Wang   dmaengine: Driver...
336
  		pdc_set_mode(&pd_chan->chan, DMA_CTL0_ONESHOT);
943d8d8bc   Tomoya MORINAGA   dma : EG20T PCH: ...
337
338
  	} else {
  		channel_writel(pd_chan, NEXT, desc->txd.phys);
0c42bd0e4   Yong Wang   dmaengine: Driver...
339
  		pdc_set_mode(&pd_chan->chan, DMA_CTL0_SG);
943d8d8bc   Tomoya MORINAGA   dma : EG20T PCH: ...
340
  	}
0c42bd0e4   Yong Wang   dmaengine: Driver...
341
342
343
344
345
346
  }
  
  static void pdc_chain_complete(struct pch_dma_chan *pd_chan,
  			       struct pch_dma_desc *desc)
  {
  	struct dma_async_tx_descriptor *txd = &desc->txd;
5c066f7d0   Dave Jiang   dmaengine: pch_dm...
347
  	struct dmaengine_desc_callback cb;
0c42bd0e4   Yong Wang   dmaengine: Driver...
348

5c066f7d0   Dave Jiang   dmaengine: pch_dm...
349
  	dmaengine_desc_get_callback(txd, &cb);
0c42bd0e4   Yong Wang   dmaengine: Driver...
350
351
  	list_splice_init(&desc->tx_list, &pd_chan->free_list);
  	list_move(&desc->desc_node, &pd_chan->free_list);
5c066f7d0   Dave Jiang   dmaengine: pch_dm...
352
  	dmaengine_desc_callback_invoke(&cb, NULL);
0c42bd0e4   Yong Wang   dmaengine: Driver...
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
  }
  
  static void pdc_complete_all(struct pch_dma_chan *pd_chan)
  {
  	struct pch_dma_desc *desc, *_d;
  	LIST_HEAD(list);
  
  	BUG_ON(!pdc_is_idle(pd_chan));
  
  	if (!list_empty(&pd_chan->queue))
  		pdc_dostart(pd_chan, pdc_first_queued(pd_chan));
  
  	list_splice_init(&pd_chan->active_list, &list);
  	list_splice_init(&pd_chan->queue, &pd_chan->active_list);
  
  	list_for_each_entry_safe(desc, _d, &list, desc_node)
  		pdc_chain_complete(pd_chan, desc);
  }
  
  static void pdc_handle_error(struct pch_dma_chan *pd_chan)
  {
  	struct pch_dma_desc *bad_desc;
  
  	bad_desc = pdc_first_active(pd_chan);
  	list_del(&bad_desc->desc_node);
  
  	list_splice_init(&pd_chan->queue, pd_chan->active_list.prev);
  
  	if (!list_empty(&pd_chan->active_list))
  		pdc_dostart(pd_chan, pdc_first_active(pd_chan));
  
  	dev_crit(chan2dev(&pd_chan->chan), "Bad descriptor submitted
  ");
  	dev_crit(chan2dev(&pd_chan->chan), "descriptor cookie: %d
  ",
  		 bad_desc->txd.cookie);
  
  	pdc_chain_complete(pd_chan, bad_desc);
  }
  
  static void pdc_advance_work(struct pch_dma_chan *pd_chan)
  {
  	if (list_empty(&pd_chan->active_list) ||
  		list_is_singular(&pd_chan->active_list)) {
  		pdc_complete_all(pd_chan);
  	} else {
  		pdc_chain_complete(pd_chan, pdc_first_active(pd_chan));
  		pdc_dostart(pd_chan, pdc_first_active(pd_chan));
  	}
  }
0c42bd0e4   Yong Wang   dmaengine: Driver...
403
404
405
406
  static dma_cookie_t pd_tx_submit(struct dma_async_tx_descriptor *txd)
  {
  	struct pch_dma_desc *desc = to_pd_desc(txd);
  	struct pch_dma_chan *pd_chan = to_pd_chan(txd->chan);
0c42bd0e4   Yong Wang   dmaengine: Driver...
407

c5a9f9d08   Tomoya MORINAGA   pch_dma: fix kern...
408
  	spin_lock(&pd_chan->lock);
0c42bd0e4   Yong Wang   dmaengine: Driver...
409
410
411
412
413
414
415
  
  	if (list_empty(&pd_chan->active_list)) {
  		list_add_tail(&desc->desc_node, &pd_chan->active_list);
  		pdc_dostart(pd_chan, desc);
  	} else {
  		list_add_tail(&desc->desc_node, &pd_chan->queue);
  	}
c5a9f9d08   Tomoya MORINAGA   pch_dma: fix kern...
416
  	spin_unlock(&pd_chan->lock);
0c42bd0e4   Yong Wang   dmaengine: Driver...
417
418
419
420
421
422
423
424
  	return 0;
  }
  
  static struct pch_dma_desc *pdc_alloc_desc(struct dma_chan *chan, gfp_t flags)
  {
  	struct pch_dma_desc *desc = NULL;
  	struct pch_dma *pd = to_pd(chan->device);
  	dma_addr_t addr;
10c191a11   Romain Perier   dmaengine: pch_dm...
425
  	desc = dma_pool_zalloc(pd->pool, flags, &addr);
0c42bd0e4   Yong Wang   dmaengine: Driver...
426
  	if (desc) {
0c42bd0e4   Yong Wang   dmaengine: Driver...
427
428
429
430
431
432
433
434
435
436
437
438
439
440
  		INIT_LIST_HEAD(&desc->tx_list);
  		dma_async_tx_descriptor_init(&desc->txd, chan);
  		desc->txd.tx_submit = pd_tx_submit;
  		desc->txd.flags = DMA_CTRL_ACK;
  		desc->txd.phys = addr;
  	}
  
  	return desc;
  }
  
  static struct pch_dma_desc *pdc_desc_get(struct pch_dma_chan *pd_chan)
  {
  	struct pch_dma_desc *desc, *_d;
  	struct pch_dma_desc *ret = NULL;
364de7783   Liu Yuan   drivers, pch_dma:...
441
  	int i = 0;
0c42bd0e4   Yong Wang   dmaengine: Driver...
442

c5a9f9d08   Tomoya MORINAGA   pch_dma: fix kern...
443
  	spin_lock(&pd_chan->lock);
0c42bd0e4   Yong Wang   dmaengine: Driver...
444
445
446
447
448
449
450
451
452
453
  	list_for_each_entry_safe(desc, _d, &pd_chan->free_list, desc_node) {
  		i++;
  		if (async_tx_test_ack(&desc->txd)) {
  			list_del(&desc->desc_node);
  			ret = desc;
  			break;
  		}
  		dev_dbg(chan2dev(&pd_chan->chan), "desc %p not ACKed
  ", desc);
  	}
c5a9f9d08   Tomoya MORINAGA   pch_dma: fix kern...
454
  	spin_unlock(&pd_chan->lock);
0c42bd0e4   Yong Wang   dmaengine: Driver...
455
456
457
458
  	dev_dbg(chan2dev(&pd_chan->chan), "scanned %d descriptors
  ", i);
  
  	if (!ret) {
5c1ef5916   Tomoya MORINAGA   pch_dma: Use GFP_...
459
  		ret = pdc_alloc_desc(&pd_chan->chan, GFP_ATOMIC);
0c42bd0e4   Yong Wang   dmaengine: Driver...
460
  		if (ret) {
c5a9f9d08   Tomoya MORINAGA   pch_dma: fix kern...
461
  			spin_lock(&pd_chan->lock);
0c42bd0e4   Yong Wang   dmaengine: Driver...
462
  			pd_chan->descs_allocated++;
c5a9f9d08   Tomoya MORINAGA   pch_dma: fix kern...
463
  			spin_unlock(&pd_chan->lock);
0c42bd0e4   Yong Wang   dmaengine: Driver...
464
465
466
467
468
469
470
471
472
473
474
475
476
477
  		} else {
  			dev_err(chan2dev(&pd_chan->chan),
  				"failed to alloc desc
  ");
  		}
  	}
  
  	return ret;
  }
  
  static void pdc_desc_put(struct pch_dma_chan *pd_chan,
  			 struct pch_dma_desc *desc)
  {
  	if (desc) {
c5a9f9d08   Tomoya MORINAGA   pch_dma: fix kern...
478
  		spin_lock(&pd_chan->lock);
0c42bd0e4   Yong Wang   dmaengine: Driver...
479
480
  		list_splice_init(&desc->tx_list, &pd_chan->free_list);
  		list_add(&desc->desc_node, &pd_chan->free_list);
c5a9f9d08   Tomoya MORINAGA   pch_dma: fix kern...
481
  		spin_unlock(&pd_chan->lock);
0c42bd0e4   Yong Wang   dmaengine: Driver...
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
  	}
  }
  
  static int pd_alloc_chan_resources(struct dma_chan *chan)
  {
  	struct pch_dma_chan *pd_chan = to_pd_chan(chan);
  	struct pch_dma_desc *desc;
  	LIST_HEAD(tmp_list);
  	int i;
  
  	if (!pdc_is_idle(pd_chan)) {
  		dev_dbg(chan2dev(chan), "DMA channel not idle ?
  ");
  		return -EIO;
  	}
  
  	if (!list_empty(&pd_chan->free_list))
  		return pd_chan->descs_allocated;
  
  	for (i = 0; i < init_nr_desc_per_channel; i++) {
  		desc = pdc_alloc_desc(chan, GFP_KERNEL);
  
  		if (!desc) {
  			dev_warn(chan2dev(chan),
  				"Only allocated %d initial descriptors
  ", i);
  			break;
  		}
  
  		list_add_tail(&desc->desc_node, &tmp_list);
  	}
70f189158   Alexander Stein   pch_dma: Fix chan...
513
  	spin_lock_irq(&pd_chan->lock);
0c42bd0e4   Yong Wang   dmaengine: Driver...
514
515
  	list_splice(&tmp_list, &pd_chan->free_list);
  	pd_chan->descs_allocated = i;
d3ee98cdc   Russell King - ARM Linux   dmaengine: consol...
516
  	dma_cookie_init(chan);
70f189158   Alexander Stein   pch_dma: Fix chan...
517
  	spin_unlock_irq(&pd_chan->lock);
0c42bd0e4   Yong Wang   dmaengine: Driver...
518
519
  
  	pdc_enable_irq(chan, 1);
0c42bd0e4   Yong Wang   dmaengine: Driver...
520
521
522
523
524
525
526
527
528
529
530
531
532
533
  
  	return pd_chan->descs_allocated;
  }
  
  static void pd_free_chan_resources(struct dma_chan *chan)
  {
  	struct pch_dma_chan *pd_chan = to_pd_chan(chan);
  	struct pch_dma *pd = to_pd(chan->device);
  	struct pch_dma_desc *desc, *_d;
  	LIST_HEAD(tmp_list);
  
  	BUG_ON(!pdc_is_idle(pd_chan));
  	BUG_ON(!list_empty(&pd_chan->active_list));
  	BUG_ON(!list_empty(&pd_chan->queue));
70f189158   Alexander Stein   pch_dma: Fix chan...
534
  	spin_lock_irq(&pd_chan->lock);
0c42bd0e4   Yong Wang   dmaengine: Driver...
535
536
  	list_splice_init(&pd_chan->free_list, &tmp_list);
  	pd_chan->descs_allocated = 0;
70f189158   Alexander Stein   pch_dma: Fix chan...
537
  	spin_unlock_irq(&pd_chan->lock);
0c42bd0e4   Yong Wang   dmaengine: Driver...
538
539
  
  	list_for_each_entry_safe(desc, _d, &tmp_list, desc_node)
10c191a11   Romain Perier   dmaengine: pch_dm...
540
  		dma_pool_free(pd->pool, desc, desc->txd.phys);
0c42bd0e4   Yong Wang   dmaengine: Driver...
541
542
543
544
545
546
547
  
  	pdc_enable_irq(chan, 0);
  }
  
  static enum dma_status pd_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
  				    struct dma_tx_state *txstate)
  {
da0a908ed   Andy Shevchenko   pch_dma: remove u...
548
  	return dma_cookie_status(chan, cookie, txstate);
0c42bd0e4   Yong Wang   dmaengine: Driver...
549
550
551
552
553
554
555
  }
  
  static void pd_issue_pending(struct dma_chan *chan)
  {
  	struct pch_dma_chan *pd_chan = to_pd_chan(chan);
  
  	if (pdc_is_idle(pd_chan)) {
c5a9f9d08   Tomoya MORINAGA   pch_dma: fix kern...
556
  		spin_lock(&pd_chan->lock);
0c42bd0e4   Yong Wang   dmaengine: Driver...
557
  		pdc_advance_work(pd_chan);
c5a9f9d08   Tomoya MORINAGA   pch_dma: fix kern...
558
  		spin_unlock(&pd_chan->lock);
0c42bd0e4   Yong Wang   dmaengine: Driver...
559
560
561
562
563
  	}
  }
  
  static struct dma_async_tx_descriptor *pd_prep_slave_sg(struct dma_chan *chan,
  			struct scatterlist *sgl, unsigned int sg_len,
185ecb5f4   Alexandre Bounine   dmaengine: add co...
564
565
  			enum dma_transfer_direction direction, unsigned long flags,
  			void *context)
0c42bd0e4   Yong Wang   dmaengine: Driver...
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
  {
  	struct pch_dma_chan *pd_chan = to_pd_chan(chan);
  	struct pch_dma_slave *pd_slave = chan->private;
  	struct pch_dma_desc *first = NULL;
  	struct pch_dma_desc *prev = NULL;
  	struct pch_dma_desc *desc = NULL;
  	struct scatterlist *sg;
  	dma_addr_t reg;
  	int i;
  
  	if (unlikely(!sg_len)) {
  		dev_info(chan2dev(chan), "prep_slave_sg: length is zero!
  ");
  		return NULL;
  	}
db8196df4   Vinod Koul   dmaengine: move d...
581
  	if (direction == DMA_DEV_TO_MEM)
0c42bd0e4   Yong Wang   dmaengine: Driver...
582
  		reg = pd_slave->rx_reg;
db8196df4   Vinod Koul   dmaengine: move d...
583
  	else if (direction == DMA_MEM_TO_DEV)
0c42bd0e4   Yong Wang   dmaengine: Driver...
584
585
586
  		reg = pd_slave->tx_reg;
  	else
  		return NULL;
c8fcba600   Tomoya MORINAGA   pch_dma: fix dma ...
587
588
  	pd_chan->dir = direction;
  	pdc_set_dir(chan);
0c42bd0e4   Yong Wang   dmaengine: Driver...
589
590
591
592
593
594
595
  	for_each_sg(sgl, sg, sg_len, i) {
  		desc = pdc_desc_get(pd_chan);
  
  		if (!desc)
  			goto err_desc_get;
  
  		desc->regs.dev_addr = reg;
cbb796ccd   Lars-Peter Clausen   dmaengine: Use sg...
596
  		desc->regs.mem_addr = sg_dma_address(sg);
0c42bd0e4   Yong Wang   dmaengine: Driver...
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
  		desc->regs.size = sg_dma_len(sg);
  		desc->regs.next = DMA_DESC_FOLLOW_WITHOUT_IRQ;
  
  		switch (pd_slave->width) {
  		case PCH_DMA_WIDTH_1_BYTE:
  			if (desc->regs.size > DMA_DESC_MAX_COUNT_1_BYTE)
  				goto err_desc_get;
  			desc->regs.size |= DMA_DESC_WIDTH_1_BYTE;
  			break;
  		case PCH_DMA_WIDTH_2_BYTES:
  			if (desc->regs.size > DMA_DESC_MAX_COUNT_2_BYTES)
  				goto err_desc_get;
  			desc->regs.size |= DMA_DESC_WIDTH_2_BYTES;
  			break;
  		case PCH_DMA_WIDTH_4_BYTES:
  			if (desc->regs.size > DMA_DESC_MAX_COUNT_4_BYTES)
  				goto err_desc_get;
  			desc->regs.size |= DMA_DESC_WIDTH_4_BYTES;
  			break;
  		default:
  			goto err_desc_get;
  		}
0c42bd0e4   Yong Wang   dmaengine: Driver...
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
  		if (!first) {
  			first = desc;
  		} else {
  			prev->regs.next |= desc->txd.phys;
  			list_add_tail(&desc->desc_node, &first->tx_list);
  		}
  
  		prev = desc;
  	}
  
  	if (flags & DMA_PREP_INTERRUPT)
  		desc->regs.next = DMA_DESC_END_WITH_IRQ;
  	else
  		desc->regs.next = DMA_DESC_END_WITHOUT_IRQ;
  
  	first->txd.cookie = -EBUSY;
  	desc->txd.flags = flags;
  
  	return &first->txd;
  
  err_desc_get:
  	dev_err(chan2dev(chan), "failed to get desc or wrong parameters
  ");
  	pdc_desc_put(pd_chan, first);
  	return NULL;
  }
c91781b44   Maxime Ripard   dmaengine: pch-dm...
645
  static int pd_device_terminate_all(struct dma_chan *chan)
0c42bd0e4   Yong Wang   dmaengine: Driver...
646
647
648
649
  {
  	struct pch_dma_chan *pd_chan = to_pd_chan(chan);
  	struct pch_dma_desc *desc, *_d;
  	LIST_HEAD(list);
70f189158   Alexander Stein   pch_dma: Fix chan...
650
  	spin_lock_irq(&pd_chan->lock);
0c42bd0e4   Yong Wang   dmaengine: Driver...
651
652
653
654
655
656
657
658
  
  	pdc_set_mode(&pd_chan->chan, DMA_CTL0_DISABLE);
  
  	list_splice_init(&pd_chan->active_list, &list);
  	list_splice_init(&pd_chan->queue, &list);
  
  	list_for_each_entry_safe(desc, _d, &list, desc_node)
  		pdc_chain_complete(pd_chan, desc);
70f189158   Alexander Stein   pch_dma: Fix chan...
659
  	spin_unlock_irq(&pd_chan->lock);
0c42bd0e4   Yong Wang   dmaengine: Driver...
660

0c42bd0e4   Yong Wang   dmaengine: Driver...
661
662
663
664
665
666
  	return 0;
  }
  
  static void pdc_tasklet(unsigned long data)
  {
  	struct pch_dma_chan *pd_chan = (struct pch_dma_chan *)data;
c5a9f9d08   Tomoya MORINAGA   pch_dma: fix kern...
667
  	unsigned long flags;
0c42bd0e4   Yong Wang   dmaengine: Driver...
668
669
670
671
672
673
674
  
  	if (!pdc_is_idle(pd_chan)) {
  		dev_err(chan2dev(&pd_chan->chan),
  			"BUG: handle non-idle channel in tasklet
  ");
  		return;
  	}
c5a9f9d08   Tomoya MORINAGA   pch_dma: fix kern...
675
  	spin_lock_irqsave(&pd_chan->lock, flags);
0c42bd0e4   Yong Wang   dmaengine: Driver...
676
677
678
679
  	if (test_and_clear_bit(0, &pd_chan->err_status))
  		pdc_handle_error(pd_chan);
  	else
  		pdc_advance_work(pd_chan);
c5a9f9d08   Tomoya MORINAGA   pch_dma: fix kern...
680
  	spin_unlock_irqrestore(&pd_chan->lock, flags);
0c42bd0e4   Yong Wang   dmaengine: Driver...
681
682
683
684
685
686
687
  }
  
  static irqreturn_t pd_irq(int irq, void *devid)
  {
  	struct pch_dma *pd = (struct pch_dma *)devid;
  	struct pch_dma_chan *pd_chan;
  	u32 sts0;
c3d4913cd   Tomoya MORINAGA   pch_dma: fix DMA ...
688
  	u32 sts2;
0c42bd0e4   Yong Wang   dmaengine: Driver...
689
  	int i;
c3d4913cd   Tomoya MORINAGA   pch_dma: fix DMA ...
690
691
  	int ret0 = IRQ_NONE;
  	int ret2 = IRQ_NONE;
0c42bd0e4   Yong Wang   dmaengine: Driver...
692
693
  
  	sts0 = dma_readl(pd, STS0);
c3d4913cd   Tomoya MORINAGA   pch_dma: fix DMA ...
694
  	sts2 = dma_readl(pd, STS2);
0c42bd0e4   Yong Wang   dmaengine: Driver...
695
696
697
698
699
700
  
  	dev_dbg(pd->dma.dev, "pd_irq sts0: %x
  ", sts0);
  
  	for (i = 0; i < pd->dma.chancnt; i++) {
  		pd_chan = &pd->channels[i];
c3d4913cd   Tomoya MORINAGA   pch_dma: fix DMA ...
701
702
703
704
  		if (i < 8) {
  			if (sts0 & DMA_STATUS_IRQ(i)) {
  				if (sts0 & DMA_STATUS0_ERR(i))
  					set_bit(0, &pd_chan->err_status);
0c42bd0e4   Yong Wang   dmaengine: Driver...
705

c3d4913cd   Tomoya MORINAGA   pch_dma: fix DMA ...
706
707
708
709
710
711
712
  				tasklet_schedule(&pd_chan->tasklet);
  				ret0 = IRQ_HANDLED;
  			}
  		} else {
  			if (sts2 & DMA_STATUS_IRQ(i - 8)) {
  				if (sts2 & DMA_STATUS2_ERR(i))
  					set_bit(0, &pd_chan->err_status);
0c42bd0e4   Yong Wang   dmaengine: Driver...
713

c3d4913cd   Tomoya MORINAGA   pch_dma: fix DMA ...
714
715
716
717
  				tasklet_schedule(&pd_chan->tasklet);
  				ret2 = IRQ_HANDLED;
  			}
  		}
0c42bd0e4   Yong Wang   dmaengine: Driver...
718
719
720
  	}
  
  	/* clear interrupt bits in status register */
c3d4913cd   Tomoya MORINAGA   pch_dma: fix DMA ...
721
722
723
724
  	if (ret0)
  		dma_writel(pd, STS0, sts0);
  	if (ret2)
  		dma_writel(pd, STS2, sts2);
0c42bd0e4   Yong Wang   dmaengine: Driver...
725

c3d4913cd   Tomoya MORINAGA   pch_dma: fix DMA ...
726
  	return ret0 | ret2;
0c42bd0e4   Yong Wang   dmaengine: Driver...
727
  }
0b863b333   Rakib Mullick   drivers, pch_dma:...
728
  #ifdef	CONFIG_PM
0c42bd0e4   Yong Wang   dmaengine: Driver...
729
730
731
732
733
734
735
736
737
  static void pch_dma_save_regs(struct pch_dma *pd)
  {
  	struct pch_dma_chan *pd_chan;
  	struct dma_chan *chan, *_c;
  	int i = 0;
  
  	pd->regs.dma_ctl0 = dma_readl(pd, CTL0);
  	pd->regs.dma_ctl1 = dma_readl(pd, CTL1);
  	pd->regs.dma_ctl2 = dma_readl(pd, CTL2);
194f5f270   Tomoya MORINAGA   pch_dma: Support ...
738
  	pd->regs.dma_ctl3 = dma_readl(pd, CTL3);
0c42bd0e4   Yong Wang   dmaengine: Driver...
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
  
  	list_for_each_entry_safe(chan, _c, &pd->dma.channels, device_node) {
  		pd_chan = to_pd_chan(chan);
  
  		pd->ch_regs[i].dev_addr = channel_readl(pd_chan, DEV_ADDR);
  		pd->ch_regs[i].mem_addr = channel_readl(pd_chan, MEM_ADDR);
  		pd->ch_regs[i].size = channel_readl(pd_chan, SIZE);
  		pd->ch_regs[i].next = channel_readl(pd_chan, NEXT);
  
  		i++;
  	}
  }
  
  static void pch_dma_restore_regs(struct pch_dma *pd)
  {
  	struct pch_dma_chan *pd_chan;
  	struct dma_chan *chan, *_c;
  	int i = 0;
  
  	dma_writel(pd, CTL0, pd->regs.dma_ctl0);
  	dma_writel(pd, CTL1, pd->regs.dma_ctl1);
  	dma_writel(pd, CTL2, pd->regs.dma_ctl2);
194f5f270   Tomoya MORINAGA   pch_dma: Support ...
761
  	dma_writel(pd, CTL3, pd->regs.dma_ctl3);
0c42bd0e4   Yong Wang   dmaengine: Driver...
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
  
  	list_for_each_entry_safe(chan, _c, &pd->dma.channels, device_node) {
  		pd_chan = to_pd_chan(chan);
  
  		channel_writel(pd_chan, DEV_ADDR, pd->ch_regs[i].dev_addr);
  		channel_writel(pd_chan, MEM_ADDR, pd->ch_regs[i].mem_addr);
  		channel_writel(pd_chan, SIZE, pd->ch_regs[i].size);
  		channel_writel(pd_chan, NEXT, pd->ch_regs[i].next);
  
  		i++;
  	}
  }
  
/*
 * pch_dma_suspend - legacy PCI power-management suspend hook.
 * @pdev:  PCI device being suspended
 * @state: requested system sleep state
 *
 * Snapshots the controller/channel registers to memory, then runs the
 * standard legacy PCI suspend sequence: save config space, disable the
 * device, and drop it to the sleep state chosen by pci_choose_state().
 * The order matters - registers must be read while the device is still
 * enabled and powered.  Always returns 0.
 */
static int pch_dma_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct pch_dma *pd = pci_get_drvdata(pdev);

	/* Save registers first, while the device is still accessible. */
	if (pd)
		pch_dma_save_regs(pd);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}
  
/*
 * pch_dma_resume - legacy PCI power-management resume hook.
 * @pdev: PCI device being resumed
 *
 * Reverses pch_dma_suspend(): bring the device back to D0, restore PCI
 * config space, re-enable it, and only then rewrite the saved
 * controller/channel registers.  Returns 0 on success or the errno from
 * pci_enable_device().
 */
static int pch_dma_resume(struct pci_dev *pdev)
{
	struct pch_dma *pd = pci_get_drvdata(pdev);
	int err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	err = pci_enable_device(pdev);
	if (err) {
		dev_dbg(&pdev->dev, "failed to enable device\n");
		return err;
	}

	/* Registers can only be rewritten once the device is enabled. */
	if (pd)
		pch_dma_restore_regs(pd);

	return 0;
}
0b863b333   Rakib Mullick   drivers, pch_dma:...
809
  #endif
0c42bd0e4   Yong Wang   dmaengine: Driver...
810

463a1f8b3   Bill Pemberton   dma: remove use o...
811
/*
 * pch_dma_probe - PCI probe: bring up the controller and register it
 *                 with the dmaengine core.
 * @pdev: PCI device handle
 * @id:   matched table entry; id->driver_data carries the channel count
 *
 * Bring-up order: enable the PCI device, verify/claim BAR1 (the MMIO
 * register window), set a 32-bit DMA mask, map the registers, request
 * the shared interrupt, create the descriptor DMA pool, initialise each
 * channel, then register the dmaengine device.  The goto ladder at the
 * bottom unwinds exactly the steps completed before the failure.
 *
 * Returns 0 on success or a negative errno.
 */
static int pch_dma_probe(struct pci_dev *pdev,
				   const struct pci_device_id *id)
{
	struct pch_dma *pd;
	struct pch_dma_regs *regs;
	unsigned int nr_channels;
	int err;
	int i;

	/* Channel count comes from the match table (4, 8 or 12). */
	nr_channels = id->driver_data;
	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return -ENOMEM;

	pci_set_drvdata(pdev, pd);

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device\n");
		goto err_free_mem;
	}

	/* The register block must be a memory resource in BAR 1. */
	if (!(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "Cannot find proper base address\n");
		err = -ENODEV;
		goto err_disable_pdev;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources\n");
		goto err_disable_pdev;
	}

	/* Controller is limited to 32-bit DMA addressing. */
	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (err) {
		dev_err(&pdev->dev, "Cannot set proper DMA config\n");
		goto err_free_res;
	}

	regs = pd->membase = pci_iomap(pdev, 1, 0);
	if (!pd->membase) {
		dev_err(&pdev->dev, "Cannot map MMIO registers\n");
		err = -ENOMEM;
		goto err_free_res;
	}

	pci_set_master(pdev);
	pd->dma.dev = &pdev->dev;

	/*
	 * NOTE(review): the shared IRQ is requested before the per-channel
	 * tasklets are initialised below, and pd_irq schedules those
	 * tasklets - confirm the hardware cannot raise an interrupt before
	 * the channels are set up.
	 */
	err = request_irq(pdev->irq, pd_irq, IRQF_SHARED, DRV_NAME, pd);
	if (err) {
		dev_err(&pdev->dev, "Failed to request IRQ\n");
		goto err_iounmap;
	}

	/* Pool of hardware descriptors, 4-byte aligned as required. */
	pd->pool = dma_pool_create("pch_dma_desc_pool", &pdev->dev,
				   sizeof(struct pch_dma_desc), 4, 0);
	if (!pd->pool) {
		dev_err(&pdev->dev, "Failed to alloc DMA descriptors\n");
		err = -ENOMEM;
		goto err_free_irq;
	}

	INIT_LIST_HEAD(&pd->dma.channels);

	/* Per-channel setup: cookie, register window, lists, tasklet. */
	for (i = 0; i < nr_channels; i++) {
		struct pch_dma_chan *pd_chan = &pd->channels[i];

		pd_chan->chan.device = &pd->dma;
		dma_cookie_init(&pd_chan->chan);

		/* Each channel owns one descriptor slot in the MMIO block. */
		pd_chan->membase = &regs->desc[i];

		spin_lock_init(&pd_chan->lock);

		INIT_LIST_HEAD(&pd_chan->active_list);
		INIT_LIST_HEAD(&pd_chan->queue);
		INIT_LIST_HEAD(&pd_chan->free_list);

		tasklet_init(&pd_chan->tasklet, pdc_tasklet,
			     (unsigned long)pd_chan);
		list_add_tail(&pd_chan->chan.device_node, &pd->dma.channels);
	}

	/* Slave-only engine; channels are handed out via DMA_PRIVATE. */
	dma_cap_zero(pd->dma.cap_mask);
	dma_cap_set(DMA_PRIVATE, pd->dma.cap_mask);
	dma_cap_set(DMA_SLAVE, pd->dma.cap_mask);

	pd->dma.device_alloc_chan_resources = pd_alloc_chan_resources;
	pd->dma.device_free_chan_resources = pd_free_chan_resources;
	pd->dma.device_tx_status = pd_tx_status;
	pd->dma.device_issue_pending = pd_issue_pending;
	pd->dma.device_prep_slave_sg = pd_prep_slave_sg;
	pd->dma.device_terminate_all = pd_device_terminate_all;

	err = dma_async_device_register(&pd->dma);
	if (err) {
		dev_err(&pdev->dev, "Failed to register DMA device\n");
		goto err_free_pool;
	}

	return 0;

	/* Unwind in reverse order of acquisition. */
err_free_pool:
	dma_pool_destroy(pd->pool);
err_free_irq:
	free_irq(pdev->irq, pd);
err_iounmap:
	pci_iounmap(pdev, pd->membase);
err_free_res:
	pci_release_regions(pdev);
err_disable_pdev:
	pci_disable_device(pdev);
err_free_mem:
	kfree(pd);
	return err;
}
4bf27b8b3   Greg Kroah-Hartman   Drivers: dma: rem...
935
/*
 * pch_dma_remove - PCI unbind: tear down in reverse order of probe.
 * @pdev: PCI device being removed
 *
 * Unregisters from the dmaengine core, frees the IRQ (so no new
 * interrupts can schedule work), kills every channel tasklet, then
 * releases the descriptor pool, MMIO mapping, PCI regions, the device
 * itself, and finally the driver context.
 */
static void pch_dma_remove(struct pci_dev *pdev)
{
	struct pch_dma *pd = pci_get_drvdata(pdev);
	struct pch_dma_chan *pd_chan;
	struct dma_chan *chan, *_c;

	if (pd) {
		dma_async_device_unregister(&pd->dma);

		/* Free the IRQ before killing tasklets it could schedule. */
		free_irq(pdev->irq, pd);

		list_for_each_entry_safe(chan, _c, &pd->dma.channels,
					 device_node) {
			pd_chan = to_pd_chan(chan);

			tasklet_kill(&pd_chan->tasklet);
		}

		dma_pool_destroy(pd->pool);
		pci_iounmap(pdev, pd->membase);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		kfree(pd);
	}
}
  
/* PCI Device ID of DMA device */
#define PCI_DEVICE_ID_EG20T_PCH_DMA_8CH        0x8810
#define PCI_DEVICE_ID_EG20T_PCH_DMA_4CH        0x8815
#define PCI_DEVICE_ID_ML7213_DMA1_8CH	0x8026
#define PCI_DEVICE_ID_ML7213_DMA2_8CH	0x802B
#define PCI_DEVICE_ID_ML7213_DMA3_4CH	0x8034
#define PCI_DEVICE_ID_ML7213_DMA4_12CH	0x8032
#define PCI_DEVICE_ID_ML7223_DMA1_4CH	0x800B
#define PCI_DEVICE_ID_ML7223_DMA2_4CH	0x800E
#define PCI_DEVICE_ID_ML7223_DMA3_4CH	0x8017
#define PCI_DEVICE_ID_ML7223_DMA4_4CH	0x803B
#define PCI_DEVICE_ID_ML7831_DMA1_8CH	0x8810
#define PCI_DEVICE_ID_ML7831_DMA2_4CH	0x8815

/*
 * Match table: driver_data is the number of DMA channels on the IOH.
 * Note the ML7831 reuses the EG20T device IDs (0x8810/0x8815) but under
 * the ROHM vendor ID, so both pairs appear.
 */
static const struct pci_device_id pch_dma_id_table[] = {
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_8CH), 8 },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_4CH), 4 },
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA1_8CH), 8}, /* UART Video */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA2_8CH), 8}, /* PCMIF SPI */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA3_4CH), 4}, /* FPGA */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA4_12CH), 12}, /* I2S */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA1_4CH), 4}, /* UART */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA2_4CH), 4}, /* Video SPI */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA3_4CH), 4}, /* Security */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA4_4CH), 4}, /* FPGA */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7831_DMA1_8CH), 8}, /* UART */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7831_DMA2_4CH), 4}, /* SPI */
	{ 0, },
};
  
static struct pci_driver pch_dma_driver = {
	.name		= DRV_NAME,
	.id_table	= pch_dma_id_table,
	.probe		= pch_dma_probe,
	.remove		= pch_dma_remove,
#ifdef CONFIG_PM
	/* Legacy PCI PM callbacks (pre-dev_pm_ops style). */
	.suspend	= pch_dma_suspend,
	.resume		= pch_dma_resume,
#endif
};
53b9989bc   Wei Yongjun   pch_dma: use modu...
997
/* Generates the module init/exit boilerplate for this PCI driver. */
module_pci_driver(pch_dma_driver);

MODULE_DESCRIPTION("Intel EG20T PCH / LAPIS Semicon ML7213/ML7223/ML7831 IOH "
		   "DMA controller driver");
MODULE_AUTHOR("Yong Wang <yong.y.wang@intel.com>");
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(pci, pch_dma_id_table);