drivers/dma/mv_xor.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * offload engine driver for the Marvell XOR engine
 * Copyright (C) 2007, 2008, Marvell International Ltd.
 */

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/memory.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/irqdomain.h>
#include <linux/cpumask.h>
#include <linux/platform_data/dma-mv_xor.h>

#include "dmaengine.h"
#include "mv_xor.h"

enum mv_xor_type {
	XOR_ORION,
	XOR_ARMADA_38X,
	XOR_ARMADA_37XX,
};

enum mv_xor_mode {
	XOR_MODE_IN_REG,
	XOR_MODE_IN_DESC,
};

static void mv_xor_issue_pending(struct dma_chan *chan);

#define to_mv_xor_chan(chan)		\
	container_of(chan, struct mv_xor_chan, dmachan)

#define to_mv_xor_slot(tx)		\
	container_of(tx, struct mv_xor_desc_slot, async_tx)

#define mv_chan_to_devp(chan)           \
	((chan)->dmadev.dev)

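/*
 * mv_desc_init - initialize a hardware descriptor: mark it as owned by the
 * DMA engine, clear the next-descriptor link, and program the destination
 * address and byte count. The end-of-descriptor interrupt is enabled only
 * when the client asked for DMA_PREP_INTERRUPT.
 */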
static void mv_desc_init(struct mv_xor_desc_slot *desc,
			 dma_addr_t addr, u32 byte_count,
			 enum dma_ctrl_flags flags)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;

	hw_desc->status = XOR_DESC_DMA_OWNED;
	hw_desc->phy_next_desc = 0;
	/* Enable end-of-descriptor interrupts only for DMA_PREP_INTERRUPT */
	hw_desc->desc_command = (flags & DMA_PREP_INTERRUPT) ?
				XOR_DESC_EOD_INT_EN : 0;
	hw_desc->phy_dest_addr = addr;
	hw_desc->byte_count = byte_count;
}
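
/*
 * mv_desc_set_mode - program the operation type (XOR or MEMCPY) into the
 * descriptor itself, for controllers that select the operation per
 * descriptor (XOR_MODE_IN_DESC) rather than per channel.
 */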
  static void mv_desc_set_mode(struct mv_xor_desc_slot *desc)
  {
  	struct mv_xor_desc *hw_desc = desc->hw_desc;
  
  	switch (desc->type) {
  	case DMA_XOR:
  	case DMA_INTERRUPT:
  		hw_desc->desc_command |= XOR_DESC_OPERATION_XOR;
  		break;
  	case DMA_MEMCPY:
  		hw_desc->desc_command |= XOR_DESC_OPERATION_MEMCPY;
  		break;
  	default:
  		BUG();
  		return;
  	}
  }

static void mv_desc_set_next_desc(struct mv_xor_desc_slot *desc,
				  u32 next_desc_addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	BUG_ON(hw_desc->phy_next_desc);
	hw_desc->phy_next_desc = next_desc_addr;
}

static void mv_desc_set_src_addr(struct mv_xor_desc_slot *desc,
				 int index, dma_addr_t addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->phy_src_addr[mv_phy_src_idx(index)] = addr;
	if (desc->type == DMA_XOR)
		hw_desc->desc_command |= (1 << index);
}

static u32 mv_chan_get_current_desc(struct mv_xor_chan *chan)
{
	return readl_relaxed(XOR_CURR_DESC(chan));
}

static void mv_chan_set_next_descriptor(struct mv_xor_chan *chan,
					u32 next_desc_addr)
{
	writel_relaxed(next_desc_addr, XOR_NEXT_DESC(chan));
}

static void mv_chan_unmask_interrupts(struct mv_xor_chan *chan)
{
	u32 val = readl_relaxed(XOR_INTR_MASK(chan));
	val |= XOR_INTR_MASK_VALUE << (chan->idx * 16);
	writel_relaxed(val, XOR_INTR_MASK(chan));
}

static u32 mv_chan_get_intr_cause(struct mv_xor_chan *chan)
{
	u32 intr_cause = readl_relaxed(XOR_INTR_CAUSE(chan));

	intr_cause = (intr_cause >> (chan->idx * 16)) & 0xFFFF;
	return intr_cause;
}

static void mv_chan_clear_eoc_cause(struct mv_xor_chan *chan)
{
	u32 val;

	val = XOR_INT_END_OF_DESC | XOR_INT_END_OF_CHAIN | XOR_INT_STOPPED;
	val = ~(val << (chan->idx * 16));
	dev_dbg(mv_chan_to_devp(chan), "%s, val 0x%08x\n", __func__, val);
	writel_relaxed(val, XOR_INTR_CAUSE(chan));
}

static void mv_chan_clear_err_status(struct mv_xor_chan *chan)
{
	u32 val = 0xFFFF0000 >> (chan->idx * 16);
	writel_relaxed(val, XOR_INTR_CAUSE(chan));
}

static void mv_chan_set_mode(struct mv_xor_chan *chan,
			     u32 op_mode)
{
	u32 config = readl_relaxed(XOR_CONFIG(chan));

	config &= ~0x7;
	config |= op_mode;
#if defined(__BIG_ENDIAN)
	config |= XOR_DESCRIPTOR_SWAP;
#else
	config &= ~XOR_DESCRIPTOR_SWAP;
#endif
	writel_relaxed(config, XOR_CONFIG(chan));
}

static void mv_chan_activate(struct mv_xor_chan *chan)
{
	dev_dbg(mv_chan_to_devp(chan), " activate chan.\n");

	/* writel ensures all descriptors are flushed before activation */
	writel(BIT(0), XOR_ACTIVATION(chan));
  }
  
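/*
 * mv_chan_is_busy - the channel status field (bits 5:4 of the activation
 * register) reads 1 while the engine is actively processing descriptors.
 */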
  static char mv_chan_is_busy(struct mv_xor_chan *chan)
  {
	u32 state = readl_relaxed(XOR_ACTIVATION(chan));

	state = (state >> 4) & 0x3;

	return (state == 1) ? 1 : 0;
}

/*
 * mv_chan_start_new_chain - program the engine to operate on a new
 * chain headed by sw_desc
 *
 * Caller must hold &mv_chan->lock while calling this function
 */
static void mv_chan_start_new_chain(struct mv_xor_chan *mv_chan,
				    struct mv_xor_desc_slot *sw_desc)
{
	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: sw_desc %p\n",
		__func__, __LINE__, sw_desc);

	/* set the hardware chain */
	mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys);

	mv_chan->pending++;
	mv_xor_issue_pending(&mv_chan->dmachan);
}

static dma_cookie_t
mv_desc_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
				struct mv_xor_chan *mv_chan,
				dma_cookie_t cookie)
{
	BUG_ON(desc->async_tx.cookie < 0);

	if (desc->async_tx.cookie > 0) {
		cookie = desc->async_tx.cookie;
		dma_descriptor_unmap(&desc->async_tx);
		/* call the callback (must not sleep or submit new
		 * operations to this channel)
		 */
		dmaengine_desc_get_callback_invoke(&desc->async_tx, NULL);
	}

	/* run dependent operations */
	dma_run_dependencies(&desc->async_tx);

	return cookie;
}

static int
mv_chan_clean_completed_slots(struct mv_xor_chan *mv_chan)
{
	struct mv_xor_desc_slot *iter, *_iter;

	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
				 node) {

		if (async_tx_test_ack(&iter->async_tx)) {
			list_move_tail(&iter->node, &mv_chan->free_slots);
			if (!list_empty(&iter->sg_tx_list)) {
				list_splice_tail_init(&iter->sg_tx_list,
							&mv_chan->free_slots);
			}
		}
	}
	return 0;
}

static int
mv_desc_clean_slot(struct mv_xor_desc_slot *desc,
		   struct mv_xor_chan *mv_chan)
{
	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: desc %p flags %d\n",
		__func__, __LINE__, desc, desc->async_tx.flags);

	/* the client is allowed to attach dependent operations
	 * until 'ack' is set
	 */
	if (!async_tx_test_ack(&desc->async_tx)) {
		/* move this slot to the completed_slots */
		list_move_tail(&desc->node, &mv_chan->completed_slots);
		if (!list_empty(&desc->sg_tx_list)) {
			list_splice_tail_init(&desc->sg_tx_list,
					      &mv_chan->completed_slots);
		}
	} else {
		list_move_tail(&desc->node, &mv_chan->free_slots);
		if (!list_empty(&desc->sg_tx_list)) {
			list_splice_tail_init(&desc->sg_tx_list,
					      &mv_chan->free_slots);
		}
	}

	return 0;
}
  /* This function must be called with the mv_xor_chan spinlock held */
  static void mv_chan_slot_cleanup(struct mv_xor_chan *mv_chan)
{
	struct mv_xor_desc_slot *iter, *_iter;
	dma_cookie_t cookie = 0;
	int busy = mv_chan_is_busy(mv_chan);
	u32 current_desc = mv_chan_get_current_desc(mv_chan);
	int current_cleaned = 0;
	struct mv_xor_desc *hw_desc;

	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
	dev_dbg(mv_chan_to_devp(mv_chan), "current_desc %x\n", current_desc);
	mv_chan_clean_completed_slots(mv_chan);

	/* free completed slots from the chain starting with
	 * the oldest descriptor
	 */

	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
				 node) {

		/* clean finished descriptors */
		hw_desc = iter->hw_desc;
		if (hw_desc->status & XOR_DESC_SUCCESS) {
			cookie = mv_desc_run_tx_complete_actions(iter, mv_chan,
								 cookie);

			/* done processing desc, clean slot */
			mv_desc_clean_slot(iter, mv_chan);

			/* break if we cleaned the current descriptor */
			if (iter->async_tx.phys == current_desc) {
				current_cleaned = 1;
				break;
			}
		} else {
			if (iter->async_tx.phys == current_desc) {
				current_cleaned = 0;
				break;
			}
		}
	}

	if ((busy == 0) && !list_empty(&mv_chan->chain)) {
		if (current_cleaned) {
			/*
			 * current descriptor cleaned and removed, run
			 * from list head
			 */
			iter = list_entry(mv_chan->chain.next,
					  struct mv_xor_desc_slot,
					  node);
			mv_chan_start_new_chain(mv_chan, iter);
		} else {
			if (!list_is_last(&iter->node, &mv_chan->chain)) {
				/*
				 * descriptors are still waiting after
				 * current, trigger them
				 */
				iter = list_entry(iter->node.next,
						  struct mv_xor_desc_slot,
						  node);
				mv_chan_start_new_chain(mv_chan, iter);
			} else {
				/*
				 * some descriptors are still waiting
				 * to be cleaned
				 */
				tasklet_schedule(&mv_chan->irq_tasklet);
			}
		}
	}

	if (cookie > 0)
		mv_chan->dmachan.completed_cookie = cookie;
}
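
/*
 * mv_xor_tasklet - deferred work: completes finished descriptors and
 * restarts the hardware chain as needed, under the channel lock.
 */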
static void mv_xor_tasklet(struct tasklet_struct *t)
{
	struct mv_xor_chan *chan = from_tasklet(chan, t, irq_tasklet);

	spin_lock(&chan->lock);
	mv_chan_slot_cleanup(chan);
	spin_unlock(&chan->lock);
}
  
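/*
 * mv_chan_alloc_slot - take a descriptor slot from the free list; if none
 * is available, schedule the tasklet to reclaim completed slots and let
 * the caller retry.
 */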
  static struct mv_xor_desc_slot *
mv_chan_alloc_slot(struct mv_xor_chan *mv_chan)
{
	struct mv_xor_desc_slot *iter;

	spin_lock_bh(&mv_chan->lock);

	if (!list_empty(&mv_chan->free_slots)) {
		iter = list_first_entry(&mv_chan->free_slots,
					struct mv_xor_desc_slot,
					node);

		list_move_tail(&iter->node, &mv_chan->allocated_slots);

		spin_unlock_bh(&mv_chan->lock);

		/* pre-ack descriptor */
		async_tx_ack(&iter->async_tx);
		iter->async_tx.cookie = -EBUSY;

		return iter;
	}

	spin_unlock_bh(&mv_chan->lock);

	/* try to free some slots if the allocation fails */
	tasklet_schedule(&mv_chan->irq_tasklet);

	return NULL;
}
  /************************ DMA engine API functions ****************************/
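/*
 * mv_xor_tx_submit - append the descriptor to the software chain and, when
 * the hardware is idle or has already consumed the previous chain tail,
 * start a new hardware chain.
 */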
  static dma_cookie_t
  mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
  {
  	struct mv_xor_desc_slot *sw_desc = to_mv_xor_slot(tx);
  	struct mv_xor_chan *mv_chan = to_mv_xor_chan(tx->chan);
	struct mv_xor_desc_slot *old_chain_tail;
	dma_cookie_t cookie;
	int new_hw_chain = 1;

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s sw_desc %p: async_tx %p\n",
		__func__, sw_desc, &sw_desc->async_tx);

	spin_lock_bh(&mv_chan->lock);
	cookie = dma_cookie_assign(tx);

	if (list_empty(&mv_chan->chain))
		list_move_tail(&sw_desc->node, &mv_chan->chain);
	else {
		new_hw_chain = 0;

		old_chain_tail = list_entry(mv_chan->chain.prev,
					    struct mv_xor_desc_slot,
					    node);
		list_move_tail(&sw_desc->node, &mv_chan->chain);

		dev_dbg(mv_chan_to_devp(mv_chan), "Append to last desc %pa\n",
			&old_chain_tail->async_tx.phys);

		/* fix up the hardware chain */
		mv_desc_set_next_desc(old_chain_tail, sw_desc->async_tx.phys);

		/* if the channel is not busy */
		if (!mv_chan_is_busy(mv_chan)) {
			u32 current_desc = mv_chan_get_current_desc(mv_chan);
			/*
			 * and the current desc is the end of the chain before
			 * the append, then we need to start the channel
			 */
			if (current_desc == old_chain_tail->async_tx.phys)
				new_hw_chain = 1;
		}
	}

	if (new_hw_chain)
		mv_chan_start_new_chain(mv_chan, sw_desc);

	spin_unlock_bh(&mv_chan->lock);

	return cookie;
}

/* returns the number of allocated descriptors */
static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
{
	void *virt_desc;
	dma_addr_t dma_desc;
	int idx;
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *slot = NULL;
	int num_descs_in_pool = MV_XOR_POOL_SIZE/MV_XOR_SLOT_SIZE;

	/* Allocate descriptor slots */
	idx = mv_chan->slots_allocated;
	while (idx < num_descs_in_pool) {
		slot = kzalloc(sizeof(*slot), GFP_KERNEL);
		if (!slot) {
			dev_info(mv_chan_to_devp(mv_chan),
				 "channel only initialized %d descriptor slots",
				 idx);
			break;
		}
		virt_desc = mv_chan->dma_desc_pool_virt;
		slot->hw_desc = virt_desc + idx * MV_XOR_SLOT_SIZE;

		dma_async_tx_descriptor_init(&slot->async_tx, chan);
		slot->async_tx.tx_submit = mv_xor_tx_submit;
		INIT_LIST_HEAD(&slot->node);
		INIT_LIST_HEAD(&slot->sg_tx_list);
		dma_desc = mv_chan->dma_desc_pool;
		slot->async_tx.phys = dma_desc + idx * MV_XOR_SLOT_SIZE;
		slot->idx = idx++;

		spin_lock_bh(&mv_chan->lock);
		mv_chan->slots_allocated = idx;
		list_add_tail(&slot->node, &mv_chan->free_slots);
		spin_unlock_bh(&mv_chan->lock);
	}
	dev_dbg(mv_chan_to_devp(mv_chan),
		"allocated %d descriptor slots\n",
		mv_chan->slots_allocated);

	return mv_chan->slots_allocated ? : -ENOMEM;
}
  /*
 * Check if source or destination is a PCIe/IO address (non-SDRAM) and add
 * a new MBus window if necessary. Use a cache for these checks so that
 * the MMIO mapped registers don't have to be accessed for every check,
 * to speed up this process.
   */
  static int mv_xor_add_io_win(struct mv_xor_chan *mv_chan, u32 addr)
  {
  	struct mv_xor_device *xordev = mv_chan->xordev;
  	void __iomem *base = mv_chan->mmr_high_base;
  	u32 win_enable;
  	u32 size;
  	u8 target, attr;
  	int ret;
  	int i;
  
  	/* Nothing needs to get done for the Armada 3700 */
  	if (xordev->xor_type == XOR_ARMADA_37XX)
  		return 0;
  
  	/*
	 * Loop over the cached windows to check if the requested area
	 * is already mapped. If this is the case, nothing needs to be done
	 * and we can return.
  	 */
  	for (i = 0; i < WINDOW_COUNT; i++) {
  		if (addr >= xordev->win_start[i] &&
  		    addr <= xordev->win_end[i]) {
  			/* Window is already mapped */
  			return 0;
  		}
  	}
  
  	/*
  	 * The window is not mapped, so we need to create the new mapping
  	 */
  
	/* If no IO window is found, the addr has to be located in SDRAM */
  	ret = mvebu_mbus_get_io_win_info(addr, &size, &target, &attr);
  	if (ret < 0)
  		return 0;
  
  	/*
  	 * Mask the base addr 'addr' according to 'size' read back from the
  	 * MBus window. Otherwise we might end up with an address located
  	 * somewhere in the middle of this area here.
  	 */
  	size -= 1;
  	addr &= ~size;
  
	/*
	 * Reading either of the two enable registers is enough, as they are
	 * always programmed with identical values.
	 */
  	win_enable = readl(base + WINDOW_BAR_ENABLE(0));
  
  	/* Set 'i' to the first free window to write the new values to */
  	i = ffs(~win_enable) - 1;
  	if (i >= WINDOW_COUNT)
  		return -ENOMEM;
  
  	writel((addr & 0xffff0000) | (attr << 8) | target,
  	       base + WINDOW_BASE(i));
  	writel(size & 0xffff0000, base + WINDOW_SIZE(i));
  
  	/* Fill the caching variables for later use */
  	xordev->win_start[i] = addr;
  	xordev->win_end[i] = addr + size;
  
  	win_enable |= (1 << i);
  	win_enable |= 3 << (16 + (2 * i));
  	writel(win_enable, base + WINDOW_BAR_ENABLE(0));
  	writel(win_enable, base + WINDOW_BAR_ENABLE(1));
  
  	return 0;
  }
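
/*
 * mv_xor_prep_dma_xor - prepare a hardware XOR descriptor: validate the
 * length against the engine's limits, make sure MBus windows cover the
 * destination and all sources, then fill in the descriptor fields.
 */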
static struct dma_async_tx_descriptor *
mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
		    unsigned int src_cnt, size_t len, unsigned long flags)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *sw_desc;
	int ret;

	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
		return NULL;
	BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s src_cnt: %d len: %zu dest %pad flags: %ld\n",
		__func__, src_cnt, len, &dest, flags);

	/* Check if a new window needs to get added for 'dest' */
	ret = mv_xor_add_io_win(mv_chan, dest);
	if (ret)
		return NULL;

	sw_desc = mv_chan_alloc_slot(mv_chan);
	if (sw_desc) {
		sw_desc->type = DMA_XOR;
		sw_desc->async_tx.flags = flags;
		mv_desc_init(sw_desc, dest, len, flags);
		if (mv_chan->op_in_desc == XOR_MODE_IN_DESC)
			mv_desc_set_mode(sw_desc);
		while (src_cnt--) {
			/* Check if a new window needs to get added for 'src' */
			ret = mv_xor_add_io_win(mv_chan, src[src_cnt]);
			if (ret)
				return NULL;
			mv_desc_set_src_addr(sw_desc, src_cnt, src[src_cnt]);
		}
	}

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s sw_desc %p async_tx %p\n",
  		__func__, sw_desc, &sw_desc->async_tx);
  	return sw_desc ? &sw_desc->async_tx : NULL;
  }
  static struct dma_async_tx_descriptor *
  mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
  		size_t len, unsigned long flags)
  {
  	/*
  	 * A MEMCPY operation is identical to an XOR operation with only
  	 * a single source address.
  	 */
  	return mv_xor_prep_dma_xor(chan, dest, &src, 1, len, flags);
  }
  static struct dma_async_tx_descriptor *
  mv_xor_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags)
  {
  	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
  	dma_addr_t src, dest;
  	size_t len;
  
  	src = mv_chan->dummy_src_addr;
  	dest = mv_chan->dummy_dst_addr;
  	len = MV_XOR_MIN_BYTE_COUNT;
  
  	/*
  	 * We implement the DMA_INTERRUPT operation as a minimum sized
  	 * XOR operation with a single dummy source address.
  	 */
  	return mv_xor_prep_dma_xor(chan, dest, &src, 1, len, flags);
  }
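
/*
 * mv_xor_free_chan_resources - move every descriptor back to the free list
 * (warning if any were still in use) and free all slot allocations.
 */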
static void mv_xor_free_chan_resources(struct dma_chan *chan)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *iter, *_iter;
	int in_use_descs = 0;

	spin_lock_bh(&mv_chan->lock);

	mv_chan_slot_cleanup(mv_chan);

	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
					node) {
		in_use_descs++;
		list_move_tail(&iter->node, &mv_chan->free_slots);
	}
	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
				 node) {
		in_use_descs++;
		list_move_tail(&iter->node, &mv_chan->free_slots);
	}
	list_for_each_entry_safe(iter, _iter, &mv_chan->allocated_slots,
				 node) {
		in_use_descs++;
		list_move_tail(&iter->node, &mv_chan->free_slots);
	}
	list_for_each_entry_safe_reverse(
		iter, _iter, &mv_chan->free_slots, node) {
		list_del(&iter->node);
		kfree(iter);
		mv_chan->slots_allocated--;
	}

	dev_dbg(mv_chan_to_devp(mv_chan), "%s slots_allocated %d\n",
  		__func__, mv_chan->slots_allocated);
  	spin_unlock_bh(&mv_chan->lock);
  
  	if (in_use_descs)
  		dev_err(mv_chan_to_devp(mv_chan),
			"freeing %d in use descriptors!\n", in_use_descs);
  }
  
  /**
 * mv_xor_status - poll the status of an XOR transaction
 * @chan: XOR channel handle
 * @cookie: XOR transaction identifier
 * @txstate: XOR transactions state holder (or NULL)
 */
static enum dma_status mv_xor_status(struct dma_chan *chan,
					  dma_cookie_t cookie,
					  struct dma_tx_state *txstate)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	enum dma_status ret;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	spin_lock_bh(&mv_chan->lock);
	mv_chan_slot_cleanup(mv_chan);
	spin_unlock_bh(&mv_chan->lock);

	return dma_cookie_status(chan, cookie, txstate);
  }
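
/* Dump the channel's main registers when an error is reported. */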
static void mv_chan_dump_regs(struct mv_xor_chan *chan)
{
	u32 val;

	val = readl_relaxed(XOR_CONFIG(chan));
	dev_err(mv_chan_to_devp(chan), "config       0x%08x\n", val);

	val = readl_relaxed(XOR_ACTIVATION(chan));
	dev_err(mv_chan_to_devp(chan), "activation   0x%08x\n", val);

	val = readl_relaxed(XOR_INTR_CAUSE(chan));
	dev_err(mv_chan_to_devp(chan), "intr cause   0x%08x\n", val);

	val = readl_relaxed(XOR_INTR_MASK(chan));
	dev_err(mv_chan_to_devp(chan), "intr mask    0x%08x\n", val);

	val = readl_relaxed(XOR_ERROR_CAUSE(chan));
	dev_err(mv_chan_to_devp(chan), "error cause  0x%08x\n", val);

	val = readl_relaxed(XOR_ERROR_ADDR(chan));
	dev_err(mv_chan_to_devp(chan), "error addr   0x%08x\n", val);
  }
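
/*
 * mv_chan_err_interrupt_handler - address decode errors are ignored; any
 * other error dumps the channel registers and triggers a WARN.
 */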
static void mv_chan_err_interrupt_handler(struct mv_xor_chan *chan,
					  u32 intr_cause)
{
	if (intr_cause & XOR_INT_ERR_DECODE) {
		dev_dbg(mv_chan_to_devp(chan), "ignoring address decode error\n");
		return;
	}

	dev_err(mv_chan_to_devp(chan), "error on chan %d. intr cause 0x%08x\n",
		chan->idx, intr_cause);

	mv_chan_dump_regs(chan);
	WARN_ON(1);
  }
  
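/*
 * mv_xor_interrupt_handler - top half: report error causes if any, defer
 * completion processing to the tasklet, then acknowledge the end-of-chain
 * interrupt causes.
 */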
  static irqreturn_t mv_xor_interrupt_handler(int irq, void *data)
  {
  	struct mv_xor_chan *chan = data;
  	u32 intr_cause = mv_chan_get_intr_cause(chan);

	dev_dbg(mv_chan_to_devp(chan), "intr cause %x\n", intr_cause);

	if (intr_cause & XOR_INTR_ERRORS)
		mv_chan_err_interrupt_handler(chan, intr_cause);

	tasklet_schedule(&chan->irq_tasklet);

	mv_chan_clear_eoc_cause(chan);
  
  	return IRQ_HANDLED;
  }
  
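/*
 * mv_xor_issue_pending - submissions are batched; the channel is only
 * (re)activated once MV_XOR_THRESHOLD pending requests have accumulated.
 */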
  static void mv_xor_issue_pending(struct dma_chan *chan)
  {
  	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
  
  	if (mv_chan->pending >= MV_XOR_THRESHOLD) {
  		mv_chan->pending = 0;
  		mv_chan_activate(mv_chan);
  	}
  }
  
  /*
   * Perform a transaction to verify the HW works.
   */
  static int mv_chan_memcpy_self_test(struct mv_xor_chan *mv_chan)
ff7b04796   Saeed Bishara   dmaengine: DMA en...
745
  {
b8c01d259   Ezequiel Garcia   dma: mv_xor: Add ...
746
  	int i, ret;
ff7b04796   Saeed Bishara   dmaengine: DMA en...
747
748
749
750
751
  	void *src, *dest;
  	dma_addr_t src_dma, dest_dma;
  	struct dma_chan *dma_chan;
  	dma_cookie_t cookie;
  	struct dma_async_tx_descriptor *tx;
d16695a75   Ezequiel Garcia   dma: mv_xor: Use ...
752
  	struct dmaengine_unmap_data *unmap;
ff7b04796   Saeed Bishara   dmaengine: DMA en...
753
  	int err = 0;
ff7b04796   Saeed Bishara   dmaengine: DMA en...
754

	src = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!src)
		return -ENOMEM;
	dest = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!dest) {
		kfree(src);
		return -ENOMEM;
	}

	/* Fill in src buffer */
	for (i = 0; i < PAGE_SIZE; i++)
		((u8 *) src)[i] = (u8)i;

	dma_chan = &mv_chan->dmachan;
	if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	unmap = dmaengine_get_unmap_data(dma_chan->device->dev, 2, GFP_KERNEL);
	if (!unmap) {
		err = -ENOMEM;
		goto free_resources;
	}

	src_dma = dma_map_page(dma_chan->device->dev, virt_to_page(src),
			       offset_in_page(src), PAGE_SIZE,
			       DMA_TO_DEVICE);
	unmap->addr[0] = src_dma;

	ret = dma_mapping_error(dma_chan->device->dev, src_dma);
	if (ret) {
		err = -ENOMEM;
		goto free_resources;
	}
	unmap->to_cnt = 1;

	dest_dma = dma_map_page(dma_chan->device->dev, virt_to_page(dest),
				offset_in_page(dest), PAGE_SIZE,
				DMA_FROM_DEVICE);
	unmap->addr[1] = dest_dma;

	ret = dma_mapping_error(dma_chan->device->dev, dest_dma);
	if (ret) {
		err = -ENOMEM;
		goto free_resources;
	}
	unmap->from_cnt = 1;
	unmap->len = PAGE_SIZE;

	tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
				    PAGE_SIZE, 0);
	if (!tx) {
		dev_err(dma_chan->device->dev,
			"Self-test cannot prepare operation, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	cookie = mv_xor_tx_submit(tx);
	if (dma_submit_error(cookie)) {
		dev_err(dma_chan->device->dev,
			"Self-test submit error, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	mv_xor_issue_pending(dma_chan);
	async_tx_ack(tx);
	msleep(1);

	if (mv_xor_status(dma_chan, cookie, NULL) !=
	    DMA_COMPLETE) {
		dev_err(dma_chan->device->dev,
			"Self-test copy timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
				PAGE_SIZE, DMA_FROM_DEVICE);
	if (memcmp(src, dest, PAGE_SIZE)) {
		dev_err(dma_chan->device->dev,
			"Self-test copy failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

free_resources:
	dmaengine_unmap_put(unmap);
	mv_xor_free_chan_resources(dma_chan);
out:
	kfree(src);
	kfree(dest);
	return err;
}

#define MV_XOR_NUM_SRC_TEST 4 /* must be <= 15 */
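
/*
 * mv_chan_xor_self_test - fill each source page with a distinct single-bit
 * pattern and verify that the XOR of all sources appears in the
 * destination page.
 */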
static int
mv_chan_xor_self_test(struct mv_xor_chan *mv_chan)
{
	int i, src_idx, ret;
	struct page *dest;
	struct page *xor_srcs[MV_XOR_NUM_SRC_TEST];
	dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST];
	dma_addr_t dest_dma;
	struct dma_async_tx_descriptor *tx;
	struct dmaengine_unmap_data *unmap;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	u8 cmp_byte = 0;
	u32 cmp_word;
	int err = 0;
	int src_count = MV_XOR_NUM_SRC_TEST;

	for (src_idx = 0; src_idx < src_count; src_idx++) {
		xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
		if (!xor_srcs[src_idx]) {
			while (src_idx--)
				__free_page(xor_srcs[src_idx]);
			return -ENOMEM;
		}
	}

	dest = alloc_page(GFP_KERNEL);
	if (!dest) {
		while (src_idx--)
			__free_page(xor_srcs[src_idx]);
		return -ENOMEM;
	}

	/* Fill in src buffers */
	for (src_idx = 0; src_idx < src_count; src_idx++) {
		u8 *ptr = page_address(xor_srcs[src_idx]);
		for (i = 0; i < PAGE_SIZE; i++)
			ptr[i] = (1 << src_idx);
	}

	for (src_idx = 0; src_idx < src_count; src_idx++)
		cmp_byte ^= (u8) (1 << src_idx);

	cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
		(cmp_byte << 8) | cmp_byte;

	memset(page_address(dest), 0, PAGE_SIZE);

	dma_chan = &mv_chan->dmachan;
	if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	unmap = dmaengine_get_unmap_data(dma_chan->device->dev, src_count + 1,
					 GFP_KERNEL);
	if (!unmap) {
		err = -ENOMEM;
		goto free_resources;
	}

	/* test xor */
	for (i = 0; i < src_count; i++) {
		unmap->addr[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
					      0, PAGE_SIZE, DMA_TO_DEVICE);
		dma_srcs[i] = unmap->addr[i];
		ret = dma_mapping_error(dma_chan->device->dev, unmap->addr[i]);
		if (ret) {
			err = -ENOMEM;
			goto free_resources;
		}
		unmap->to_cnt++;
	}

	unmap->addr[src_count] = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
				      DMA_FROM_DEVICE);
	dest_dma = unmap->addr[src_count];
	ret = dma_mapping_error(dma_chan->device->dev, unmap->addr[src_count]);
	if (ret) {
		err = -ENOMEM;
		goto free_resources;
	}
	unmap->from_cnt = 1;
	unmap->len = PAGE_SIZE;

	tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
				 src_count, PAGE_SIZE, 0);
	if (!tx) {
		dev_err(dma_chan->device->dev,
			"Self-test cannot prepare operation, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	cookie = mv_xor_tx_submit(tx);
	if (dma_submit_error(cookie)) {
		dev_err(dma_chan->device->dev,
			"Self-test submit error, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	mv_xor_issue_pending(dma_chan);
	async_tx_ack(tx);
	msleep(8);

	if (mv_xor_status(dma_chan, cookie, NULL) !=
	    DMA_COMPLETE) {
		dev_err(dma_chan->device->dev,
			"Self-test xor timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
				PAGE_SIZE, DMA_FROM_DEVICE);
	for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
		u32 *ptr = page_address(dest);
		if (ptr[i] != cmp_word) {
			dev_err(dma_chan->device->dev,
				"Self-test xor failed compare, disabling. index %d, data %x, expected %x\n",
				i, ptr[i], cmp_word);
			err = -ENODEV;
			goto free_resources;
		}
	}

free_resources:
	dmaengine_unmap_put(unmap);
	mv_xor_free_chan_resources(dma_chan);
out:
	src_idx = src_count;
	while (src_idx--)
		__free_page(xor_srcs[src_idx]);
	__free_page(dest);
	return err;
}
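
/*
 * mv_xor_channel_remove - unregister from the dmaengine core, free the
 * coherent descriptor pool and the dummy-buffer mappings, and release
 * the channel's IRQ.
 */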
static int mv_xor_channel_remove(struct mv_xor_chan *mv_chan)
{
	struct dma_chan *chan, *_chan;
	struct device *dev = mv_chan->dmadev.dev;

	dma_async_device_unregister(&mv_chan->dmadev);

	dma_free_coherent(dev, MV_XOR_POOL_SIZE,
			  mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
	dma_unmap_single(dev, mv_chan->dummy_src_addr,
			 MV_XOR_MIN_BYTE_COUNT, DMA_FROM_DEVICE);
	dma_unmap_single(dev, mv_chan->dummy_dst_addr,
			 MV_XOR_MIN_BYTE_COUNT, DMA_TO_DEVICE);

	list_for_each_entry_safe(chan, _chan, &mv_chan->dmadev.channels,
				 device_node) {
		list_del(&chan->device_node);
	}

	free_irq(mv_chan->irq, mv_chan);

	return 0;
}
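
/*
 * mv_xor_channel_add - set up one XOR channel: map the dummy buffers,
 * allocate the coherent descriptor pool, register the dmaengine
 * capabilities and callbacks, hook up the IRQ, and run self-tests for
 * each advertised capability.
 */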
static struct mv_xor_chan *
mv_xor_channel_add(struct mv_xor_device *xordev,
		   struct platform_device *pdev,
		   int idx, dma_cap_mask_t cap_mask, int irq)
{
	int ret = 0;
	struct mv_xor_chan *mv_chan;
	struct dma_device *dma_dev;

	mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL);
	if (!mv_chan)
		return ERR_PTR(-ENOMEM);

	mv_chan->idx = idx;
	mv_chan->irq = irq;
	if (xordev->xor_type == XOR_ORION)
		mv_chan->op_in_desc = XOR_MODE_IN_REG;
	else
		mv_chan->op_in_desc = XOR_MODE_IN_DESC;

	dma_dev = &mv_chan->dmadev;
	dma_dev->dev = &pdev->dev;
	mv_chan->xordev = xordev;

  	/*
  	 * These source and destination dummy buffers are used to implement
  	 * a DMA_INTERRUPT operation as a minimum-sized XOR operation.
  	 * Hence, we only need to map the buffers at initialization-time.
  	 */
  	mv_chan->dummy_src_addr = dma_map_single(dma_dev->dev,
  		mv_chan->dummy_src, MV_XOR_MIN_BYTE_COUNT, DMA_FROM_DEVICE);
  	mv_chan->dummy_dst_addr = dma_map_single(dma_dev->dev,
  		mv_chan->dummy_dst, MV_XOR_MIN_BYTE_COUNT, DMA_TO_DEVICE);
  	/* allocate coherent memory for hardware descriptors
  	 * note: writecombine gives slightly better performance, but
  	 * requires that we explicitly flush the writes
  	 */
	mv_chan->dma_desc_pool_virt =
	  dma_alloc_wc(&pdev->dev, MV_XOR_POOL_SIZE, &mv_chan->dma_desc_pool,
		       GFP_KERNEL);
	if (!mv_chan->dma_desc_pool_virt)
		return ERR_PTR(-ENOMEM);

	/* discover transaction capabilities from the platform data */
	dma_dev->cap_mask = cap_mask;

	INIT_LIST_HEAD(&dma_dev->channels);

	/* set base routines */
	dma_dev->device_alloc_chan_resources = mv_xor_alloc_chan_resources;
	dma_dev->device_free_chan_resources = mv_xor_free_chan_resources;
	dma_dev->device_tx_status = mv_xor_status;
	dma_dev->device_issue_pending = mv_xor_issue_pending;
ff7b04796   Saeed Bishara   dmaengine: DMA en...
1056
1057
  
  	/* set prep routines based on capability */
22843545b   Lior Amsalem   dma: mv_xor: Add ...
1058
1059
  	if (dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask))
  		dma_dev->device_prep_dma_interrupt = mv_xor_prep_dma_interrupt;
  	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
  		dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy;
  	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
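		/* The engine handles at most 8 source operands per XOR
		 * descriptor, hence the advertised limit.
		 */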
  		dma_dev->max_xor = 8;
  		dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor;
  	}
  	mv_chan->mmr_base = xordev->xor_base;
  	mv_chan->mmr_high_base = xordev->xor_high_base;
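	/* Completion handling is deferred to a tasklet so that the
	 * interrupt handler itself can stay short.
	 */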
  	tasklet_setup(&mv_chan->irq_tasklet, mv_xor_tasklet);
  
  	/* clear errors before enabling interrupts */
  	mv_chan_clear_err_status(mv_chan);

  	ret = request_irq(mv_chan->irq, mv_xor_interrupt_handler,
  			  0, dev_name(&pdev->dev), mv_chan);
  	if (ret)
  		goto err_free_dma;
  
  	mv_chan_unmask_interrupts(mv_chan);
  	if (mv_chan->op_in_desc == XOR_MODE_IN_DESC)
  		mv_chan_set_mode(mv_chan, XOR_OPERATION_MODE_IN_DESC);
  	else
  		mv_chan_set_mode(mv_chan, XOR_OPERATION_MODE_XOR);
  
  	spin_lock_init(&mv_chan->lock);
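	/* A descriptor slot is expected to migrate between these lists:
	 * free_slots -> allocated_slots at prep time, then chain once
	 * submitted, then completed_slots, before going back to
	 * free_slots.
	 */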
  	INIT_LIST_HEAD(&mv_chan->chain);
  	INIT_LIST_HEAD(&mv_chan->completed_slots);
  	INIT_LIST_HEAD(&mv_chan->free_slots);
  	INIT_LIST_HEAD(&mv_chan->allocated_slots);
  	mv_chan->dmachan.device = dma_dev;
  	dma_cookie_init(&mv_chan->dmachan);

  	list_add_tail(&mv_chan->dmachan.device_node, &dma_dev->channels);
  
  	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
  		ret = mv_chan_memcpy_self_test(mv_chan);
		dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret);
  		if (ret)
  			goto err_free_irq;
  	}
  
  	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
  		ret = mv_chan_xor_self_test(mv_chan);
		dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);
  		if (ret)
  			goto err_free_irq;
  	}
	dev_info(&pdev->dev, "Marvell XOR (%s): ( %s%s%s)\n",
  		 mv_chan->op_in_desc ? "Descriptor Mode" : "Registers Mode",
  		 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
  		 dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
  		 dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");

  	ret = dma_async_device_register(dma_dev);
  	if (ret)
  		goto err_free_irq;
  	return mv_chan;

  err_free_irq:
  	free_irq(mv_chan->irq, mv_chan);
  err_free_dma:
  	dma_free_coherent(&pdev->dev, MV_XOR_POOL_SIZE,
  			  mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
  	return ERR_PTR(ret);
  }
  
  static void
  mv_xor_conf_mbus_windows(struct mv_xor_device *xordev,
  			 const struct mbus_dram_target_info *dram)
  {
  	void __iomem *base = xordev->xor_high_base;
  	u32 win_enable = 0;
  	int i;
  
  	for (i = 0; i < 8; i++) {
  		writel(0, base + WINDOW_BASE(i));
  		writel(0, base + WINDOW_SIZE(i));
  		if (i < 4)
  			writel(0, base + WINDOW_REMAP_HIGH(i));
  	}
  
  	for (i = 0; i < dram->num_cs; i++) {
  		const struct mbus_dram_window *cs = dram->cs + i;
  
  		writel((cs->base & 0xffff0000) |
  		       (cs->mbus_attr << 8) |
  		       dram->mbus_dram_target_id, base + WINDOW_BASE(i));
  		writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));
  		/* Fill the caching variables for later use */
  		xordev->win_start[i] = cs->base;
  		xordev->win_end[i] = cs->base + cs->size - 1;
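		/* Enable window i and, through the two protection bits
		 * at 16 + 2 * i, grant it what appears to be full
		 * read/write access.
		 */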
  		win_enable |= (1 << i);
  		win_enable |= 3 << (16 + (2 * i));
  	}
  
  	writel(win_enable, base + WINDOW_BAR_ENABLE(0));
  	writel(win_enable, base + WINDOW_BAR_ENABLE(1));
  	writel(0, base + WINDOW_OVERRIDE_CTRL(0));
  	writel(0, base + WINDOW_OVERRIDE_CTRL(1));
  }
  static void
  mv_xor_conf_mbus_windows_a3700(struct mv_xor_device *xordev)
  {
  	void __iomem *base = xordev->xor_high_base;
  	u32 win_enable = 0;
  	int i;
  
  	for (i = 0; i < 8; i++) {
  		writel(0, base + WINDOW_BASE(i));
  		writel(0, base + WINDOW_SIZE(i));
  		if (i < 4)
  			writel(0, base + WINDOW_REMAP_HIGH(i));
  	}
	/*
	 * For the Armada 3700, open the default 4GB MBus window; the
	 * DRAM-related configuration is done at the AXI level.
	 */
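	/* The size register holds the top 16 bits of (size - 1), as in
	 * mv_xor_conf_mbus_windows() above, so 0xffff0000 opens the
	 * full 4GB window.
	 */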
  	writel(0xffff0000, base + WINDOW_SIZE(0));
  	win_enable |= 1;
  	win_enable |= 3 << 16;
  
  	writel(win_enable, base + WINDOW_BAR_ENABLE(0));
  	writel(win_enable, base + WINDOW_BAR_ENABLE(1));
  	writel(0, base + WINDOW_OVERRIDE_CTRL(0));
  	writel(0, base + WINDOW_OVERRIDE_CTRL(1));
  }
  /*
   * Since this XOR driver is basically used only for RAID5, we don't
   * need to care about synchronizing ->suspend with DMA activity,
   * because the DMA engine will naturally be quiet due to the block
   * devices being suspended.
   */
  static int mv_xor_suspend(struct platform_device *pdev, pm_message_t state)
  {
  	struct mv_xor_device *xordev = platform_get_drvdata(pdev);
  	int i;
  
  	for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
  		struct mv_xor_chan *mv_chan = xordev->channels[i];
  
  		if (!mv_chan)
  			continue;
  
  		mv_chan->saved_config_reg =
  			readl_relaxed(XOR_CONFIG(mv_chan));
  		mv_chan->saved_int_mask_reg =
  			readl_relaxed(XOR_INTR_MASK(mv_chan));
  	}
  
  	return 0;
  }
  
  static int mv_xor_resume(struct platform_device *dev)
  {
  	struct mv_xor_device *xordev = platform_get_drvdata(dev);
  	const struct mbus_dram_target_info *dram;
  	int i;
  
  	for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
  		struct mv_xor_chan *mv_chan = xordev->channels[i];
  
  		if (!mv_chan)
  			continue;
  
  		writel_relaxed(mv_chan->saved_config_reg,
  			       XOR_CONFIG(mv_chan));
  		writel_relaxed(mv_chan->saved_int_mask_reg,
  			       XOR_INTR_MASK(mv_chan));
  	}
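
	/* The MBus windows are not part of the per-channel state saved
	 * in ->suspend, so they are reprogrammed from scratch here.
	 */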
  	if (xordev->xor_type == XOR_ARMADA_37XX) {
  		mv_xor_conf_mbus_windows_a3700(xordev);
  		return 0;
  	}
  	dram = mv_mbus_dram_info();
  	if (dram)
  		mv_xor_conf_mbus_windows(xordev, dram);
  
  	return 0;
  }
  static const struct of_device_id mv_xor_dt_ids[] = {
  	{ .compatible = "marvell,orion-xor", .data = (void *)XOR_ORION },
  	{ .compatible = "marvell,armada-380-xor", .data = (void *)XOR_ARMADA_38X },
  	{ .compatible = "marvell,armada-3700-xor", .data = (void *)XOR_ARMADA_37XX },
  	{},
  };

  static unsigned int mv_xor_engine_count;

  static int mv_xor_probe(struct platform_device *pdev)
  {
  	const struct mbus_dram_target_info *dram;
  	struct mv_xor_device *xordev;
  	struct mv_xor_platform_data *pdata = dev_get_platdata(&pdev->dev);
  	struct resource *res;
  	unsigned int max_engines, max_channels;
  	int i, ret;

	dev_notice(&pdev->dev, "Marvell shared XOR driver\n");

  	xordev = devm_kzalloc(&pdev->dev, sizeof(*xordev), GFP_KERNEL);
  	if (!xordev)
  		return -ENOMEM;
  
  	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  	if (!res)
  		return -ENODEV;
  	xordev->xor_base = devm_ioremap(&pdev->dev, res->start,
  					resource_size(res));
  	if (!xordev->xor_base)
  		return -EBUSY;
  
  	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
  	if (!res)
  		return -ENODEV;
  	xordev->xor_high_base = devm_ioremap(&pdev->dev, res->start,
  					     resource_size(res));
  	if (!xordev->xor_high_base)
  		return -EBUSY;
  	platform_set_drvdata(pdev, xordev);

	/*
	 * We need to know which type of XOR device we are dealing with
	 * before setting it up. In the non-DT case it can only be the
	 * legacy one.
	 */
  	xordev->xor_type = XOR_ORION;
  	if (pdev->dev.of_node) {
  		const struct of_device_id *of_id =
  			of_match_device(mv_xor_dt_ids,
  					&pdev->dev);
  
  		xordev->xor_type = (uintptr_t)of_id->data;
  	}
  	/*
  	 * (Re-)program MBUS remapping windows if we are asked to.
  	 */
  	if (xordev->xor_type == XOR_ARMADA_37XX) {
  		mv_xor_conf_mbus_windows_a3700(xordev);
  	} else {
  		dram = mv_mbus_dram_info();
  		if (dram)
  			mv_xor_conf_mbus_windows(xordev, dram);
  	}

	/* Not all platforms can gate the clock, so it is not an
	 * error if the clock does not exist.
	 */
  	xordev->clk = clk_get(&pdev->dev, NULL);
  	if (!IS_ERR(xordev->clk))
  		clk_prepare_enable(xordev->clk);

	/*
	 * We don't want to have more than one channel per CPU, in
	 * order for async_tx to perform well. So we limit the number
	 * of engines and channels to take this constraint into
	 * account. Note that we also want to use channels from
	 * separate engines when possible. On the dual-CPU Armada 3700
	 * SoC, which has a single XOR engine, we allow using both of
	 * its channels.
	 */
  	max_engines = num_present_cpus();
  	if (xordev->xor_type == XOR_ARMADA_37XX)
		max_channels = num_present_cpus();
  	else
  		max_channels = min_t(unsigned int,
  				     MV_XOR_MAX_CHANNELS,
  				     DIV_ROUND_UP(num_present_cpus(), 2));
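	/* For example, with two present CPUs this yields at most one
	 * channel per engine, while the Armada 37xx case above lets the
	 * single engine expose one channel per CPU.
	 */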
  
  	if (mv_xor_engine_count >= max_engines)
  		return 0;
  	if (pdev->dev.of_node) {
  		struct device_node *np;
  		int i = 0;
  
  		for_each_child_of_node(pdev->dev.of_node, np) {
  			struct mv_xor_chan *chan;
  			dma_cap_mask_t cap_mask;
  			int irq;
  			if (i >= max_channels)
  				continue;
  			dma_cap_zero(cap_mask);
  			dma_cap_set(DMA_MEMCPY, cap_mask);
  			dma_cap_set(DMA_XOR, cap_mask);
  			dma_cap_set(DMA_INTERRUPT, cap_mask);
  
  			irq = irq_of_parse_and_map(np, 0);
  			if (!irq) {
  				ret = -ENODEV;
  				goto err_channel_add;
  			}
  			chan = mv_xor_channel_add(xordev, pdev, i,
  						  cap_mask, irq);
  			if (IS_ERR(chan)) {
  				ret = PTR_ERR(chan);
  				irq_dispose_mapping(irq);
  				goto err_channel_add;
  			}
  			xordev->channels[i] = chan;
  			i++;
  		}
  	} else if (pdata && pdata->channels) {
  		for (i = 0; i < max_channels; i++) {
  			struct mv_xor_channel_data *cd;
  			struct mv_xor_chan *chan;
  			int irq;
  
  			cd = &pdata->channels[i];
  			irq = platform_get_irq(pdev, i);
  			if (irq < 0) {
  				ret = irq;
  				goto err_channel_add;
  			}
  			chan = mv_xor_channel_add(xordev, pdev, i,
  						  cd->cap_mask, irq);
  			if (IS_ERR(chan)) {
  				ret = PTR_ERR(chan);
  				goto err_channel_add;
  			}
  
  			xordev->channels[i] = chan;
  		}
  	}

  	return 0;
  
  err_channel_add:
  	for (i = 0; i < MV_XOR_MAX_CHANNELS; i++)
  		if (xordev->channels[i]) {
  			mv_xor_channel_remove(xordev->channels[i]);
  			if (pdev->dev.of_node)
  				irq_dispose_mapping(xordev->channels[i]->irq);
  		}

  	if (!IS_ERR(xordev->clk)) {
  		clk_disable_unprepare(xordev->clk);
  		clk_put(xordev->clk);
  	}
  	return ret;
  }
  static struct platform_driver mv_xor_driver = {
  	.probe		= mv_xor_probe,
  	.suspend        = mv_xor_suspend,
  	.resume         = mv_xor_resume,
  	.driver		= {
  		.name	        = MV_XOR_NAME,
  		.of_match_table = of_match_ptr(mv_xor_dt_ids),
  	},
  };
  builtin_platform_driver(mv_xor_driver);

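/* Kept for reference only: the driver is built-in (see
 * builtin_platform_driver() above), so the MODULE_*() tags below are
 * intentionally commented out.
 */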
  /*
  MODULE_AUTHOR("Saeed Bishara <saeed@marvell.com>");
  MODULE_DESCRIPTION("DMA engine driver for Marvell's XOR engine");
  MODULE_LICENSE("GPL");
  */