/*
 * offload engine driver for the Marvell XOR engine
 * Copyright (C) 2007, 2008, Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
  
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/memory.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/irqdomain.h>
#include <linux/cpumask.h>
#include <linux/platform_data/dma-mv_xor.h>

#include "dmaengine.h"
#include "mv_xor.h"

enum mv_xor_type {
	XOR_ORION,
	XOR_ARMADA_38X,
	XOR_ARMADA_37XX,
};

enum mv_xor_mode {
	XOR_MODE_IN_REG,
	XOR_MODE_IN_DESC,
};

static void mv_xor_issue_pending(struct dma_chan *chan);

#define to_mv_xor_chan(chan)		\
	container_of(chan, struct mv_xor_chan, dmachan)

#define to_mv_xor_slot(tx)		\
	container_of(tx, struct mv_xor_desc_slot, async_tx)

#define mv_chan_to_devp(chan)           \
	((chan)->dmadev.dev)

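/*
 * mv_desc_init - initialize the fixed fields of a hardware descriptor:
 * mark it as owned by the DMA engine, clear the chain pointer, request an
 * end-of-descriptor interrupt only when the client asked for one, and
 * program the destination address and byte count.
 */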
static void mv_desc_init(struct mv_xor_desc_slot *desc,
			 dma_addr_t addr, u32 byte_count,
			 enum dma_ctrl_flags flags)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;

	hw_desc->status = XOR_DESC_DMA_OWNED;
	hw_desc->phy_next_desc = 0;
	/* Enable end-of-descriptor interrupts only for DMA_PREP_INTERRUPT */
	hw_desc->desc_command = (flags & DMA_PREP_INTERRUPT) ?
				XOR_DESC_EOD_INT_EN : 0;
	hw_desc->phy_dest_addr = addr;
	hw_desc->byte_count = byte_count;
}
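
/*
 * mv_desc_set_mode - on controllers that take the operation mode from the
 * descriptor itself (XOR_MODE_IN_DESC), encode the requested operation
 * (XOR or MEMCPY) into the descriptor command field.
 */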
static void mv_desc_set_mode(struct mv_xor_desc_slot *desc)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;

	switch (desc->type) {
	case DMA_XOR:
	case DMA_INTERRUPT:
		hw_desc->desc_command |= XOR_DESC_OPERATION_XOR;
		break;
	case DMA_MEMCPY:
		hw_desc->desc_command |= XOR_DESC_OPERATION_MEMCPY;
		break;
	default:
		BUG();
		return;
	}
}
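
/*
 * mv_desc_set_next_desc - chain another descriptor behind this one by
 * writing its DMA address into the hardware next-descriptor pointer;
 * the slot must not already have a successor.
 */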
static void mv_desc_set_next_desc(struct mv_xor_desc_slot *desc,
				  u32 next_desc_addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	BUG_ON(hw_desc->phy_next_desc);
	hw_desc->phy_next_desc = next_desc_addr;
}

static void mv_desc_set_src_addr(struct mv_xor_desc_slot *desc,
				 int index, dma_addr_t addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->phy_src_addr[mv_phy_src_idx(index)] = addr;
	if (desc->type == DMA_XOR)
		hw_desc->desc_command |= (1 << index);
}
  
static u32 mv_chan_get_current_desc(struct mv_xor_chan *chan)
{
	return readl_relaxed(XOR_CURR_DESC(chan));
}

static void mv_chan_set_next_descriptor(struct mv_xor_chan *chan,
					u32 next_desc_addr)
{
	writel_relaxed(next_desc_addr, XOR_NEXT_DESC(chan));
}
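
/*
 * Each channel owns a 16-bit field in the shared 32-bit interrupt mask
 * and cause registers, hence the (chan->idx * 16) shifts below.
 */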
static void mv_chan_unmask_interrupts(struct mv_xor_chan *chan)
{
	u32 val = readl_relaxed(XOR_INTR_MASK(chan));
	val |= XOR_INTR_MASK_VALUE << (chan->idx * 16);
	writel_relaxed(val, XOR_INTR_MASK(chan));
}

static u32 mv_chan_get_intr_cause(struct mv_xor_chan *chan)
{
	u32 intr_cause = readl_relaxed(XOR_INTR_CAUSE(chan));

	intr_cause = (intr_cause >> (chan->idx * 16)) & 0xFFFF;
	return intr_cause;
}

static void mv_chan_clear_eoc_cause(struct mv_xor_chan *chan)
{
	u32 val;

	val = XOR_INT_END_OF_DESC | XOR_INT_END_OF_CHAIN | XOR_INT_STOPPED;
	val = ~(val << (chan->idx * 16));
	dev_dbg(mv_chan_to_devp(chan), "%s, val 0x%08x\n", __func__, val);
	writel_relaxed(val, XOR_INTR_CAUSE(chan));
}

static void mv_chan_clear_err_status(struct mv_xor_chan *chan)
{
	u32 val = 0xFFFF0000 >> (chan->idx * 16);
	writel_relaxed(val, XOR_INTR_CAUSE(chan));
}
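
/*
 * mv_chan_set_mode - program the channel-wide operation mode into the
 * configuration register; on big-endian hosts the engine is also told
 * to byte-swap the descriptors it fetches (XOR_DESCRIPTOR_SWAP).
 */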
static void mv_chan_set_mode(struct mv_xor_chan *chan,
			     u32 op_mode)
{
	u32 config = readl_relaxed(XOR_CONFIG(chan));

	config &= ~0x7;
	config |= op_mode;

#if defined(__BIG_ENDIAN)
	config |= XOR_DESCRIPTOR_SWAP;
#else
	config &= ~XOR_DESCRIPTOR_SWAP;
#endif

	writel_relaxed(config, XOR_CONFIG(chan));
}
  
static void mv_chan_activate(struct mv_xor_chan *chan)
{
	dev_dbg(mv_chan_to_devp(chan), " activate chan.\n");

	/* writel ensures all descriptors are flushed before activation */
	writel(BIT(0), XOR_ACTIVATION(chan));
}
  
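/*
 * mv_chan_is_busy - the 2-bit state field at bits [5:4] of the
 * activation register reads 1 while the channel is still executing
 * a chain.
 */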
static char mv_chan_is_busy(struct mv_xor_chan *chan)
{
	u32 state = readl_relaxed(XOR_ACTIVATION(chan));

	state = (state >> 4) & 0x3;

	return (state == 1) ? 1 : 0;
}

/*
 * mv_chan_start_new_chain - program the engine to operate on a new
 * chain headed by sw_desc.
 * Caller must hold &mv_chan->lock while calling this function.
 */
static void mv_chan_start_new_chain(struct mv_xor_chan *mv_chan,
				    struct mv_xor_desc_slot *sw_desc)
{
	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: sw_desc %p\n",
		__func__, __LINE__, sw_desc);

	/* set the hardware chain */
	mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys);
	mv_chan->pending++;
	mv_xor_issue_pending(&mv_chan->dmachan);
}
  
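/*
 * mv_desc_run_tx_complete_actions - finish a completed descriptor: unmap
 * its buffers, invoke the client callback and kick off any dependent
 * async_tx operations, then return the last completed cookie.
 */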
static dma_cookie_t
mv_desc_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
				struct mv_xor_chan *mv_chan,
				dma_cookie_t cookie)
{
	BUG_ON(desc->async_tx.cookie < 0);

	if (desc->async_tx.cookie > 0) {
		cookie = desc->async_tx.cookie;
		dma_descriptor_unmap(&desc->async_tx);

		/* call the callback (must not sleep or submit new
		 * operations to this channel)
		 */
		dmaengine_desc_get_callback_invoke(&desc->async_tx, NULL);
	}

	/* run dependent operations */
	dma_run_dependencies(&desc->async_tx);

	return cookie;
}
  
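/*
 * mv_chan_clean_completed_slots - move every acknowledged descriptor on
 * the completed list (plus any sub-descriptors parked on its sg_tx_list)
 * back to the free list.
 */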
static int
mv_chan_clean_completed_slots(struct mv_xor_chan *mv_chan)
{
	struct mv_xor_desc_slot *iter, *_iter;
	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
				 node) {

		if (async_tx_test_ack(&iter->async_tx)) {
			list_move_tail(&iter->node, &mv_chan->free_slots);
			if (!list_empty(&iter->sg_tx_list)) {
				list_splice_tail_init(&iter->sg_tx_list,
						      &mv_chan->free_slots);
			}
		}
	}
	return 0;
}
  
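/*
 * mv_desc_clean_slot - retire a descriptor: slots that have not been
 * acknowledged yet wait on the completed list until the client acks
 * them, acknowledged ones go straight back to the free list.
 */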
static int
mv_desc_clean_slot(struct mv_xor_desc_slot *desc,
		   struct mv_xor_chan *mv_chan)
{
	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: desc %p flags %d\n",
		__func__, __LINE__, desc, desc->async_tx.flags);

	/* the client is allowed to attach dependent operations
	 * until 'ack' is set
	 */
	if (!async_tx_test_ack(&desc->async_tx)) {
		/* move this slot to the completed_slots */
		list_move_tail(&desc->node, &mv_chan->completed_slots);
		if (!list_empty(&desc->sg_tx_list)) {
			list_splice_tail_init(&desc->sg_tx_list,
					      &mv_chan->completed_slots);
		}
	} else {
		list_move_tail(&desc->node, &mv_chan->free_slots);
		if (!list_empty(&desc->sg_tx_list)) {
			list_splice_tail_init(&desc->sg_tx_list,
					      &mv_chan->free_slots);
		}
	}

	return 0;
}

/* This function must be called with the mv_xor_chan spinlock held */
static void mv_chan_slot_cleanup(struct mv_xor_chan *mv_chan)
{
	struct mv_xor_desc_slot *iter, *_iter;
	dma_cookie_t cookie = 0;
	int busy = mv_chan_is_busy(mv_chan);
	u32 current_desc = mv_chan_get_current_desc(mv_chan);
	int current_cleaned = 0;
	struct mv_xor_desc *hw_desc;

	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
	dev_dbg(mv_chan_to_devp(mv_chan), "current_desc %x\n", current_desc);
	mv_chan_clean_completed_slots(mv_chan);

	/* free completed slots from the chain starting with
	 * the oldest descriptor
	 */

	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
				 node) {

		/* clean finished descriptors */
		hw_desc = iter->hw_desc;
		if (hw_desc->status & XOR_DESC_SUCCESS) {
			cookie = mv_desc_run_tx_complete_actions(iter, mv_chan,
								 cookie);

			/* done processing desc, clean slot */
			mv_desc_clean_slot(iter, mv_chan);

			/* break if we cleaned the current descriptor */
			if (iter->async_tx.phys == current_desc) {
				current_cleaned = 1;
				break;
			}
		} else {
			if (iter->async_tx.phys == current_desc) {
				current_cleaned = 0;
				break;
			}
		}
	}

	if ((busy == 0) && !list_empty(&mv_chan->chain)) {
		if (current_cleaned) {
			/*
			 * current descriptor cleaned and removed, run
			 * from list head
			 */
			iter = list_entry(mv_chan->chain.next,
					  struct mv_xor_desc_slot,
					  node);
			mv_chan_start_new_chain(mv_chan, iter);
		} else {
			if (!list_is_last(&iter->node, &mv_chan->chain)) {
				/*
				 * descriptors are still waiting after
				 * current, trigger them
				 */
				iter = list_entry(iter->node.next,
						  struct mv_xor_desc_slot,
						  node);
				mv_chan_start_new_chain(mv_chan, iter);
			} else {
				/*
				 * some descriptors are still waiting
				 * to be cleaned
				 */
				tasklet_schedule(&mv_chan->irq_tasklet);
			}
		}
	}

	if (cookie > 0)
		mv_chan->dmachan.completed_cookie = cookie;
}

static void mv_xor_tasklet(unsigned long data)
{
	struct mv_xor_chan *chan = (struct mv_xor_chan *) data;

	spin_lock_bh(&chan->lock);
	mv_chan_slot_cleanup(chan);
	spin_unlock_bh(&chan->lock);
}
  
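/*
 * mv_chan_alloc_slot - take one descriptor slot off the free list. If
 * none is available, schedule the cleanup tasklet in the hope that it
 * returns some slots, and let the caller retry.
 */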
static struct mv_xor_desc_slot *
mv_chan_alloc_slot(struct mv_xor_chan *mv_chan)
{
	struct mv_xor_desc_slot *iter;

	spin_lock_bh(&mv_chan->lock);

	if (!list_empty(&mv_chan->free_slots)) {
		iter = list_first_entry(&mv_chan->free_slots,
					struct mv_xor_desc_slot,
					node);

		list_move_tail(&iter->node, &mv_chan->allocated_slots);

		spin_unlock_bh(&mv_chan->lock);

		/* pre-ack descriptor */
		async_tx_ack(&iter->async_tx);
		iter->async_tx.cookie = -EBUSY;

		return iter;
	}

	spin_unlock_bh(&mv_chan->lock);

	/* try to free some slots if the allocation fails */
	tasklet_schedule(&mv_chan->irq_tasklet);

	return NULL;
}

/************************ DMA engine API functions ****************************/
static dma_cookie_t
mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct mv_xor_desc_slot *sw_desc = to_mv_xor_slot(tx);
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(tx->chan);
	struct mv_xor_desc_slot *old_chain_tail;
	dma_cookie_t cookie;
	int new_hw_chain = 1;
	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s sw_desc %p: async_tx %p\n",
		__func__, sw_desc, &sw_desc->async_tx);

	spin_lock_bh(&mv_chan->lock);
	cookie = dma_cookie_assign(tx);

	if (list_empty(&mv_chan->chain))
		list_move_tail(&sw_desc->node, &mv_chan->chain);
	else {
		new_hw_chain = 0;

		old_chain_tail = list_entry(mv_chan->chain.prev,
					    struct mv_xor_desc_slot,
					    node);
		list_move_tail(&sw_desc->node, &mv_chan->chain);

		dev_dbg(mv_chan_to_devp(mv_chan), "Append to last desc %pa\n",
			&old_chain_tail->async_tx.phys);

		/* fix up the hardware chain */
		mv_desc_set_next_desc(old_chain_tail, sw_desc->async_tx.phys);

		/* if the channel is not busy */
		if (!mv_chan_is_busy(mv_chan)) {
			u32 current_desc = mv_chan_get_current_desc(mv_chan);
			/*
			 * and the current desc is the end of the chain before
			 * the append, then we need to start the channel
			 */
			if (current_desc == old_chain_tail->async_tx.phys)
				new_hw_chain = 1;
		}
	}

	if (new_hw_chain)
		mv_chan_start_new_chain(mv_chan, sw_desc);

	spin_unlock_bh(&mv_chan->lock);

	return cookie;
}
  
/* returns the number of allocated descriptors */
static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
{
	void *virt_desc;
	dma_addr_t dma_desc;
	int idx;
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *slot = NULL;
	int num_descs_in_pool = MV_XOR_POOL_SIZE/MV_XOR_SLOT_SIZE;

	/* Allocate descriptor slots */
	idx = mv_chan->slots_allocated;
	while (idx < num_descs_in_pool) {
		slot = kzalloc(sizeof(*slot), GFP_KERNEL);
		if (!slot) {
			dev_info(mv_chan_to_devp(mv_chan),
				 "channel only initialized %d descriptor slots",
				 idx);
			break;
		}
		virt_desc = mv_chan->dma_desc_pool_virt;
		slot->hw_desc = virt_desc + idx * MV_XOR_SLOT_SIZE;

		dma_async_tx_descriptor_init(&slot->async_tx, chan);
		slot->async_tx.tx_submit = mv_xor_tx_submit;
		INIT_LIST_HEAD(&slot->node);
		INIT_LIST_HEAD(&slot->sg_tx_list);
		dma_desc = mv_chan->dma_desc_pool;
		slot->async_tx.phys = dma_desc + idx * MV_XOR_SLOT_SIZE;
		slot->idx = idx++;

		spin_lock_bh(&mv_chan->lock);
		mv_chan->slots_allocated = idx;
		list_add_tail(&slot->node, &mv_chan->free_slots);
		spin_unlock_bh(&mv_chan->lock);
	}
	dev_dbg(mv_chan_to_devp(mv_chan),
		"allocated %d descriptor slots\n",
		mv_chan->slots_allocated);

	return mv_chan->slots_allocated ? : -ENOMEM;
}

/*
 * Check if source or destination is a PCIe/IO address (non-SDRAM) and add
 * a new MBus window if necessary. A cache is used for these checks so that
 * the MMIO-mapped registers don't have to be read every time, which speeds
 * up the check considerably.
 */
static int mv_xor_add_io_win(struct mv_xor_chan *mv_chan, u32 addr)
{
	struct mv_xor_device *xordev = mv_chan->xordev;
	void __iomem *base = mv_chan->mmr_high_base;
	u32 win_enable;
	u32 size;
	u8 target, attr;
	int ret;
	int i;

	/* Nothing needs to get done for the Armada 3700 */
	if (xordev->xor_type == XOR_ARMADA_37XX)
		return 0;

	/*
	 * Loop over the cached windows to check, if the requested area
	 * is already mapped. If this is the case, nothing needs to be done
	 * and we can return.
	 */
	for (i = 0; i < WINDOW_COUNT; i++) {
		if (addr >= xordev->win_start[i] &&
		    addr <= xordev->win_end[i]) {
			/* Window is already mapped */
			return 0;
		}
	}

	/*
	 * The window is not mapped, so we need to create the new mapping
	 */

	/* If no IO window is found, the address has to be located in SDRAM */
	ret = mvebu_mbus_get_io_win_info(addr, &size, &target, &attr);
	if (ret < 0)
		return 0;

	/*
	 * Mask the base addr 'addr' according to 'size' read back from the
	 * MBus window. Otherwise we might end up with an address located
	 * somewhere in the middle of this area here.
	 */
	size -= 1;
	addr &= ~size;

	/*
	 * Reading one of the two enable registers is enough, as they are
	 * always programmed to identical values
	 */
	win_enable = readl(base + WINDOW_BAR_ENABLE(0));

	/* Set 'i' to the first free window to write the new values to */
	i = ffs(~win_enable) - 1;
	if (i >= WINDOW_COUNT)
		return -ENOMEM;

	writel((addr & 0xffff0000) | (attr << 8) | target,
	       base + WINDOW_BASE(i));
	writel(size & 0xffff0000, base + WINDOW_SIZE(i));

	/* Fill the caching variables for later use */
	xordev->win_start[i] = addr;
	xordev->win_end[i] = addr + size;

	win_enable |= (1 << i);
	win_enable |= 3 << (16 + (2 * i));
	writel(win_enable, base + WINDOW_BAR_ENABLE(0));
	writel(win_enable, base + WINDOW_BAR_ENABLE(1));

	return 0;
}
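
/*
 * mv_xor_prep_dma_xor - prepare a descriptor that XORs src_cnt source
 * buffers into dest. Every address involved may need an MBus IO window,
 * so each one is passed through mv_xor_add_io_win() first.
 *
 * A dmaengine client would typically reach this through the generic API,
 * roughly (sketch, error handling omitted):
 *
 *	tx = chan->device->device_prep_dma_xor(chan, dest, srcs, src_cnt,
 *					       len, DMA_PREP_INTERRUPT);
 *	cookie = dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 */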
static struct dma_async_tx_descriptor *
mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
		    unsigned int src_cnt, size_t len, unsigned long flags)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *sw_desc;
	int ret;

	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
		return NULL;
	BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s src_cnt: %d len: %zu dest %pad flags: %ld\n",
		__func__, src_cnt, len, &dest, flags);

	/* Check if a new window needs to get added for 'dest' */
	ret = mv_xor_add_io_win(mv_chan, dest);
	if (ret)
		return NULL;
	sw_desc = mv_chan_alloc_slot(mv_chan);
	if (sw_desc) {
		sw_desc->type = DMA_XOR;
		sw_desc->async_tx.flags = flags;
		mv_desc_init(sw_desc, dest, len, flags);
		if (mv_chan->op_in_desc == XOR_MODE_IN_DESC)
			mv_desc_set_mode(sw_desc);
		while (src_cnt--) {
			/* Check if a new window needs to get added for 'src' */
			ret = mv_xor_add_io_win(mv_chan, src[src_cnt]);
			if (ret)
				return NULL;
			mv_desc_set_src_addr(sw_desc, src_cnt, src[src_cnt]);
		}
	}

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s sw_desc %p async_tx %p \n",
		__func__, sw_desc, &sw_desc->async_tx);
	return sw_desc ? &sw_desc->async_tx : NULL;
}

static struct dma_async_tx_descriptor *
mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	/*
	 * A MEMCPY operation is identical to an XOR operation with only
	 * a single source address.
	 */
	return mv_xor_prep_dma_xor(chan, dest, &src, 1, len, flags);
}

static struct dma_async_tx_descriptor *
mv_xor_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	dma_addr_t src, dest;
	size_t len;

	src = mv_chan->dummy_src_addr;
	dest = mv_chan->dummy_dst_addr;
	len = MV_XOR_MIN_BYTE_COUNT;

	/*
	 * We implement the DMA_INTERRUPT operation as a minimum sized
	 * XOR operation with a single dummy source address.
	 */
	return mv_xor_prep_dma_xor(chan, dest, &src, 1, len, flags);
}
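
/*
 * mv_xor_free_chan_resources - drain the chain, completed and allocated
 * lists back onto the free list, then kfree every slot. Descriptors
 * still in flight are counted and reported with a dev_err().
 */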
static void mv_xor_free_chan_resources(struct dma_chan *chan)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *iter, *_iter;
	int in_use_descs = 0;
	spin_lock_bh(&mv_chan->lock);

	mv_chan_slot_cleanup(mv_chan);

	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
					node) {
		in_use_descs++;
		list_move_tail(&iter->node, &mv_chan->free_slots);
	}
	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
				 node) {
		in_use_descs++;
		list_move_tail(&iter->node, &mv_chan->free_slots);
	}
	list_for_each_entry_safe(iter, _iter, &mv_chan->allocated_slots,
				 node) {
		in_use_descs++;
		list_move_tail(&iter->node, &mv_chan->free_slots);
	}
	list_for_each_entry_safe_reverse(
		iter, _iter, &mv_chan->free_slots, node) {
		list_del(&iter->node);
		kfree(iter);
		mv_chan->slots_allocated--;
	}

	dev_dbg(mv_chan_to_devp(mv_chan), "%s slots_allocated %d\n",
		__func__, mv_chan->slots_allocated);
	spin_unlock_bh(&mv_chan->lock);

	if (in_use_descs)
		dev_err(mv_chan_to_devp(mv_chan),
			"freeing %d in use descriptors!\n", in_use_descs);
}
  
/**
 * mv_xor_status - poll the status of an XOR transaction
 * @chan: XOR channel handle
 * @cookie: XOR transaction identifier
 * @txstate: XOR transactions state holder (or NULL)
 */
static enum dma_status mv_xor_status(struct dma_chan *chan,
					  dma_cookie_t cookie,
					  struct dma_tx_state *txstate)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	enum dma_status ret;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	spin_lock_bh(&mv_chan->lock);
	mv_chan_slot_cleanup(mv_chan);
	spin_unlock_bh(&mv_chan->lock);

	return dma_cookie_status(chan, cookie, txstate);
}

static void mv_chan_dump_regs(struct mv_xor_chan *chan)
{
	u32 val;

	val = readl_relaxed(XOR_CONFIG(chan));
	dev_err(mv_chan_to_devp(chan), "config       0x%08x\n", val);

	val = readl_relaxed(XOR_ACTIVATION(chan));
	dev_err(mv_chan_to_devp(chan), "activation   0x%08x\n", val);

	val = readl_relaxed(XOR_INTR_CAUSE(chan));
	dev_err(mv_chan_to_devp(chan), "intr cause   0x%08x\n", val);

	val = readl_relaxed(XOR_INTR_MASK(chan));
	dev_err(mv_chan_to_devp(chan), "intr mask    0x%08x\n", val);

	val = readl_relaxed(XOR_ERROR_CAUSE(chan));
	dev_err(mv_chan_to_devp(chan), "error cause  0x%08x\n", val);

	val = readl_relaxed(XOR_ERROR_ADDR(chan));
	dev_err(mv_chan_to_devp(chan), "error addr   0x%08x\n", val);
}
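
/*
 * Address decode errors are only reported at debug level; any other
 * error cause dumps the channel registers and triggers a WARN.
 */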
static void mv_chan_err_interrupt_handler(struct mv_xor_chan *chan,
					  u32 intr_cause)
{
	if (intr_cause & XOR_INT_ERR_DECODE) {
		dev_dbg(mv_chan_to_devp(chan), "ignoring address decode error\n");
		return;
	}

	dev_err(mv_chan_to_devp(chan), "error on chan %d. intr cause 0x%08x\n",
		chan->idx, intr_cause);

	mv_chan_dump_regs(chan);
	WARN_ON(1);
}
  
static irqreturn_t mv_xor_interrupt_handler(int irq, void *data)
{
	struct mv_xor_chan *chan = data;
	u32 intr_cause = mv_chan_get_intr_cause(chan);

	dev_dbg(mv_chan_to_devp(chan), "intr cause %x\n", intr_cause);

	if (intr_cause & XOR_INTR_ERRORS)
		mv_chan_err_interrupt_handler(chan, intr_cause);

	tasklet_schedule(&chan->irq_tasklet);

	mv_chan_clear_eoc_cause(chan);

	return IRQ_HANDLED;
}
  
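/*
 * mv_xor_issue_pending - activation is batched: the channel is only
 * (re)activated once at least MV_XOR_THRESHOLD descriptors are pending.
 */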
static void mv_xor_issue_pending(struct dma_chan *chan)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);

	if (mv_chan->pending >= MV_XOR_THRESHOLD) {
		mv_chan->pending = 0;
		mv_chan_activate(mv_chan);
	}
}

/*
 * Perform a transaction to verify the HW works.
 */
static int mv_chan_memcpy_self_test(struct mv_xor_chan *mv_chan)
{
	int i, ret;
	void *src, *dest;
	dma_addr_t src_dma, dest_dma;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	struct dma_async_tx_descriptor *tx;
	struct dmaengine_unmap_data *unmap;
	int err = 0;

	src = kmalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL);
	if (!src)
		return -ENOMEM;
	dest = kzalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL);
	if (!dest) {
		kfree(src);
		return -ENOMEM;
	}

	/* Fill in src buffer */
	for (i = 0; i < PAGE_SIZE; i++)
		((u8 *) src)[i] = (u8)i;

	dma_chan = &mv_chan->dmachan;
	if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}
	unmap = dmaengine_get_unmap_data(dma_chan->device->dev, 2, GFP_KERNEL);
	if (!unmap) {
		err = -ENOMEM;
		goto free_resources;
	}
	src_dma = dma_map_page(dma_chan->device->dev, virt_to_page(src),
			       offset_in_page(src), PAGE_SIZE,
			       DMA_TO_DEVICE);
	unmap->addr[0] = src_dma;

	ret = dma_mapping_error(dma_chan->device->dev, src_dma);
	if (ret) {
		err = -ENOMEM;
		goto free_resources;
	}
	unmap->to_cnt = 1;

	dest_dma = dma_map_page(dma_chan->device->dev, virt_to_page(dest),
				offset_in_page(dest), PAGE_SIZE,
				DMA_FROM_DEVICE);
	unmap->addr[1] = dest_dma;

	ret = dma_mapping_error(dma_chan->device->dev, dest_dma);
	if (ret) {
		err = -ENOMEM;
		goto free_resources;
	}
	unmap->from_cnt = 1;
	unmap->len = PAGE_SIZE;

	tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
				    PAGE_SIZE, 0);
	if (!tx) {
		dev_err(dma_chan->device->dev,
			"Self-test cannot prepare operation, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}
	cookie = mv_xor_tx_submit(tx);
	if (dma_submit_error(cookie)) {
		dev_err(dma_chan->device->dev,
			"Self-test submit error, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	mv_xor_issue_pending(dma_chan);
	async_tx_ack(tx);
	msleep(1);
	if (mv_xor_status(dma_chan, cookie, NULL) !=
	    DMA_COMPLETE) {
		dev_err(dma_chan->device->dev,
			"Self-test copy timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}
	dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
				PAGE_SIZE, DMA_FROM_DEVICE);
	if (memcmp(src, dest, PAGE_SIZE)) {
		dev_err(dma_chan->device->dev,
			"Self-test copy failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

free_resources:
	dmaengine_unmap_put(unmap);
	mv_xor_free_chan_resources(dma_chan);
out:
	kfree(src);
	kfree(dest);
	return err;
}
  
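/*
 * Each source page of the XOR self-test is filled with a distinct
 * one-hot byte (1 << src_idx), so every byte of the destination must
 * equal cmp_byte, the XOR of all the source patterns.
 */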
#define MV_XOR_NUM_SRC_TEST 4 /* must be <= 15 */
static int
mv_chan_xor_self_test(struct mv_xor_chan *mv_chan)
{
	int i, src_idx, ret;
	struct page *dest;
	struct page *xor_srcs[MV_XOR_NUM_SRC_TEST];
	dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST];
	dma_addr_t dest_dma;
	struct dma_async_tx_descriptor *tx;
	struct dmaengine_unmap_data *unmap;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	u8 cmp_byte = 0;
	u32 cmp_word;
	int err = 0;
	int src_count = MV_XOR_NUM_SRC_TEST;

	for (src_idx = 0; src_idx < src_count; src_idx++) {
		xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
		if (!xor_srcs[src_idx]) {
			while (src_idx--)
				__free_page(xor_srcs[src_idx]);
			return -ENOMEM;
		}
	}

	dest = alloc_page(GFP_KERNEL);
	if (!dest) {
		while (src_idx--)
			__free_page(xor_srcs[src_idx]);
		return -ENOMEM;
	}

	/* Fill in src buffers */
	for (src_idx = 0; src_idx < src_count; src_idx++) {
		u8 *ptr = page_address(xor_srcs[src_idx]);
		for (i = 0; i < PAGE_SIZE; i++)
			ptr[i] = (1 << src_idx);
	}

	for (src_idx = 0; src_idx < src_count; src_idx++)
		cmp_byte ^= (u8) (1 << src_idx);

	cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
		(cmp_byte << 8) | cmp_byte;

	memset(page_address(dest), 0, PAGE_SIZE);

	dma_chan = &mv_chan->dmachan;
	if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}
	unmap = dmaengine_get_unmap_data(dma_chan->device->dev, src_count + 1,
					 GFP_KERNEL);
	if (!unmap) {
		err = -ENOMEM;
		goto free_resources;
	}

	/* test xor */
	for (i = 0; i < src_count; i++) {
		unmap->addr[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
					      0, PAGE_SIZE, DMA_TO_DEVICE);
		dma_srcs[i] = unmap->addr[i];
		ret = dma_mapping_error(dma_chan->device->dev, unmap->addr[i]);
		if (ret) {
			err = -ENOMEM;
			goto free_resources;
		}
		unmap->to_cnt++;
	}

	unmap->addr[src_count] = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
				      DMA_FROM_DEVICE);
	dest_dma = unmap->addr[src_count];
	ret = dma_mapping_error(dma_chan->device->dev, unmap->addr[src_count]);
	if (ret) {
		err = -ENOMEM;
		goto free_resources;
	}
	unmap->from_cnt = 1;
	unmap->len = PAGE_SIZE;

	tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
				 src_count, PAGE_SIZE, 0);
	if (!tx) {
		dev_err(dma_chan->device->dev,
			"Self-test cannot prepare operation, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	cookie = mv_xor_tx_submit(tx);
	if (dma_submit_error(cookie)) {
		dev_err(dma_chan->device->dev,
			"Self-test submit error, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	mv_xor_issue_pending(dma_chan);
	async_tx_ack(tx);
	msleep(8);
	if (mv_xor_status(dma_chan, cookie, NULL) !=
	    DMA_COMPLETE) {
		dev_err(dma_chan->device->dev,
			"Self-test xor timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}
	dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
				PAGE_SIZE, DMA_FROM_DEVICE);
	for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
		u32 *ptr = page_address(dest);
		if (ptr[i] != cmp_word) {
			dev_err(dma_chan->device->dev,
				"Self-test xor failed compare, disabling. index %d, data %x, expected %x\n",
				i, ptr[i], cmp_word);
			err = -ENODEV;
			goto free_resources;
		}
	}

free_resources:
	dmaengine_unmap_put(unmap);
	mv_xor_free_chan_resources(dma_chan);
out:
	src_idx = src_count;
	while (src_idx--)
		__free_page(xor_srcs[src_idx]);
	__free_page(dest);
	return err;
}
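
/*
 * mv_xor_channel_remove - tear down a channel in roughly the reverse
 * order of mv_xor_channel_add: unregister from the dmaengine core, free
 * the descriptor pool and the dummy buffers, then release the IRQ.
 */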
static int mv_xor_channel_remove(struct mv_xor_chan *mv_chan)
{
	struct dma_chan *chan, *_chan;
	struct device *dev = mv_chan->dmadev.dev;

	dma_async_device_unregister(&mv_chan->dmadev);

	dma_free_coherent(dev, MV_XOR_POOL_SIZE,
			  mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
	dma_unmap_single(dev, mv_chan->dummy_src_addr,
			 MV_XOR_MIN_BYTE_COUNT, DMA_FROM_DEVICE);
	dma_unmap_single(dev, mv_chan->dummy_dst_addr,
			 MV_XOR_MIN_BYTE_COUNT, DMA_TO_DEVICE);

	list_for_each_entry_safe(chan, _chan, &mv_chan->dmadev.channels,
				 device_node) {
		list_del(&chan->device_node);
	}

	free_irq(mv_chan->irq, mv_chan);

	return 0;
}
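
/*
 * mv_xor_channel_add - allocate and register one XOR channel: map the
 * dummy buffers used by DMA_INTERRUPT, carve the descriptor pool out of
 * write-combining coherent memory, wire up the dmaengine callbacks and
 * run the memcpy/XOR self-tests before the channel goes live.
 */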
1ef48a262   Thomas Petazzoni   dma: mv_xor: merg...
1011
  static struct mv_xor_chan *
297eedbae   Thomas Petazzoni   dma: mv_xor: rena...
1012
  mv_xor_channel_add(struct mv_xor_device *xordev,
a6b4a9d2c   Thomas Petazzoni   dma: mv_xor: spli...
1013
  		   struct platform_device *pdev,
dd130c652   Gregory CLEMENT   dmaengine: mv_xor...
1014
  		   int idx, dma_cap_mask_t cap_mask, int irq)
ff7b04796   Saeed Bishara   dmaengine: DMA en...
1015
1016
  {
  	int ret = 0;
ff7b04796   Saeed Bishara   dmaengine: DMA en...
1017
1018
  	struct mv_xor_chan *mv_chan;
  	struct dma_device *dma_dev;
ff7b04796   Saeed Bishara   dmaengine: DMA en...
1019

1ef48a262   Thomas Petazzoni   dma: mv_xor: merg...
1020
  	mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL);
a577659f4   Sachin Kamat   dma: mv_xor: Fix ...
1021
1022
  	if (!mv_chan)
  		return ERR_PTR(-ENOMEM);
ff7b04796   Saeed Bishara   dmaengine: DMA en...
1023

9aedbdbab   Thomas Petazzoni   dma: mv_xor: remo...
1024
  	mv_chan->idx = idx;
88eb92cb4   Thomas Petazzoni   dma: mv_xor: add ...
1025
  	mv_chan->irq = irq;
dd130c652   Gregory CLEMENT   dmaengine: mv_xor...
1026
1027
1028
1029
  	if (xordev->xor_type == XOR_ORION)
  		mv_chan->op_in_desc = XOR_MODE_IN_REG;
  	else
  		mv_chan->op_in_desc = XOR_MODE_IN_DESC;
ff7b04796   Saeed Bishara   dmaengine: DMA en...
1030

1ef48a262   Thomas Petazzoni   dma: mv_xor: merg...
1031
  	dma_dev = &mv_chan->dmadev;
77ff7a706   Stefan Roese   dmaengine: mv_xor...
1032
  	mv_chan->xordev = xordev;
ff7b04796   Saeed Bishara   dmaengine: DMA en...
1033

22843545b   Lior Amsalem   dma: mv_xor: Add ...
1034
1035
1036
1037
1038
1039
1040
1041
1042
  	/*
  	 * These source and destination dummy buffers are used to implement
  	 * a DMA_INTERRUPT operation as a minimum-sized XOR operation.
  	 * Hence, we only need to map the buffers at initialization-time.
  	 */
  	mv_chan->dummy_src_addr = dma_map_single(dma_dev->dev,
  		mv_chan->dummy_src, MV_XOR_MIN_BYTE_COUNT, DMA_FROM_DEVICE);
  	mv_chan->dummy_dst_addr = dma_map_single(dma_dev->dev,
  		mv_chan->dummy_dst, MV_XOR_MIN_BYTE_COUNT, DMA_TO_DEVICE);

	/* allocate coherent memory for hardware descriptors
	 * note: writecombine gives slightly better performance, but
	 * requires that we explicitly flush the writes
	 */
	mv_chan->dma_desc_pool_virt =
	  dma_alloc_wc(&pdev->dev, MV_XOR_POOL_SIZE, &mv_chan->dma_desc_pool,
		       GFP_KERNEL);
	if (!mv_chan->dma_desc_pool_virt)
		return ERR_PTR(-ENOMEM);

	/* discover transaction capabilities from the platform data */
	dma_dev->cap_mask = cap_mask;

	INIT_LIST_HEAD(&dma_dev->channels);

	/* set base routines */
	dma_dev->device_alloc_chan_resources = mv_xor_alloc_chan_resources;
	dma_dev->device_free_chan_resources = mv_xor_free_chan_resources;
	dma_dev->device_tx_status = mv_xor_status;
	dma_dev->device_issue_pending = mv_xor_issue_pending;
	dma_dev->dev = &pdev->dev;

	/* set prep routines based on capability */
	if (dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask))
		dma_dev->device_prep_dma_interrupt = mv_xor_prep_dma_interrupt;
	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
		dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy;
	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		dma_dev->max_xor = 8;
		dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor;
	}
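
	/*
	 * Usage sketch (client side, assuming dst_dma/src_dma were already
	 * DMA-mapped by the caller): with DMA_MEMCPY set in cap_mask, a
	 * synchronous copy of len bytes would look roughly like:
	 *
	 *	tx = dmaengine_prep_dma_memcpy(chan, dst_dma, src_dma,
	 *				       len, DMA_PREP_INTERRUPT);
	 *	cookie = dmaengine_submit(tx);
	 *	dma_async_issue_pending(chan);
	 *	status = dma_sync_wait(chan, cookie);
	 */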
	mv_chan->mmr_base = xordev->xor_base;
	mv_chan->mmr_high_base = xordev->xor_high_base;
	tasklet_init(&mv_chan->irq_tasklet, mv_xor_tasklet, (unsigned long)
		     mv_chan);

	/* clear errors before enabling interrupts */
	mv_chan_clear_err_status(mv_chan);

	ret = request_irq(mv_chan->irq, mv_xor_interrupt_handler,
			  0, dev_name(&pdev->dev), mv_chan);
	if (ret)
		goto err_free_dma;

	mv_chan_unmask_interrupts(mv_chan);

	if (mv_chan->op_in_desc == XOR_MODE_IN_DESC)
		mv_chan_set_mode(mv_chan, XOR_OPERATION_MODE_IN_DESC);
	else
		mv_chan_set_mode(mv_chan, XOR_OPERATION_MODE_XOR);

	spin_lock_init(&mv_chan->lock);
	INIT_LIST_HEAD(&mv_chan->chain);
	INIT_LIST_HEAD(&mv_chan->completed_slots);
	INIT_LIST_HEAD(&mv_chan->free_slots);
	INIT_LIST_HEAD(&mv_chan->allocated_slots);
	mv_chan->dmachan.device = dma_dev;
	dma_cookie_init(&mv_chan->dmachan);

	list_add_tail(&mv_chan->dmachan.device_node, &dma_dev->channels);

	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
		ret = mv_chan_memcpy_self_test(mv_chan);
		dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret);
		if (ret)
			goto err_free_irq;
	}

	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		ret = mv_chan_xor_self_test(mv_chan);
		dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);
		if (ret)
			goto err_free_irq;
	}

	dev_info(&pdev->dev, "Marvell XOR (%s): ( %s%s%s)\n",
		 mv_chan->op_in_desc ? "Descriptor Mode" : "Registers Mode",
		 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
		 dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
		 dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");

	dma_async_device_register(dma_dev);
	return mv_chan;

err_free_irq:
	free_irq(mv_chan->irq, mv_chan);
err_free_dma:
	dma_free_coherent(&pdev->dev, MV_XOR_POOL_SIZE,
			  mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
	return ERR_PTR(ret);
}
  
  static void
mv_xor_conf_mbus_windows(struct mv_xor_device *xordev,
			 const struct mbus_dram_target_info *dram)
{
	void __iomem *base = xordev->xor_high_base;
	u32 win_enable = 0;
	int i;

	for (i = 0; i < 8; i++) {
		writel(0, base + WINDOW_BASE(i));
		writel(0, base + WINDOW_SIZE(i));
		if (i < 4)
			writel(0, base + WINDOW_REMAP_HIGH(i));
	}

	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;
  
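		/*
		 * Window register layout, as programmed below: bits 31:16
		 * of WINDOW_BASE hold the window base address, bits 15:8
		 * the mbus attribute and bits 7:0 the target id.  Worked
		 * example with illustrative values: a 256 MiB CS window
		 * based at 0x40000000 with attribute 0x0e and target 0x00
		 * gives WINDOW_BASE = 0x40000e00 and WINDOW_SIZE =
		 * (0x10000000 - 1) & 0xffff0000 = 0x0fff0000.
		 */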
		writel((cs->base & 0xffff0000) |
		       (cs->mbus_attr << 8) |
		       dram->mbus_dram_target_id, base + WINDOW_BASE(i));
		writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));

		/* Fill the caching variables for later use */
		xordev->win_start[i] = cs->base;
		xordev->win_end[i] = cs->base + cs->size - 1;

		win_enable |= (1 << i);
		win_enable |= 3 << (16 + (2 * i));
	}

	writel(win_enable, base + WINDOW_BAR_ENABLE(0));
	writel(win_enable, base + WINDOW_BAR_ENABLE(1));
	writel(0, base + WINDOW_OVERRIDE_CTRL(0));
	writel(0, base + WINDOW_OVERRIDE_CTRL(1));
}
  static void
  mv_xor_conf_mbus_windows_a3700(struct mv_xor_device *xordev)
  {
  	void __iomem *base = xordev->xor_high_base;
  	u32 win_enable = 0;
  	int i;
  
  	for (i = 0; i < 8; i++) {
  		writel(0, base + WINDOW_BASE(i));
  		writel(0, base + WINDOW_SIZE(i));
  		if (i < 4)
  			writel(0, base + WINDOW_REMAP_HIGH(i));
  	}
	/*
	 * For Armada3700, open a default 4GB Mbus window. The
	 * DRAM-related configuration is done at the AXIS level.
	 */
  	writel(0xffff0000, base + WINDOW_SIZE(0));
  	win_enable |= 1;
  	win_enable |= 3 << 16;
  
  	writel(win_enable, base + WINDOW_BAR_ENABLE(0));
  	writel(win_enable, base + WINDOW_BAR_ENABLE(1));
  	writel(0, base + WINDOW_OVERRIDE_CTRL(0));
  	writel(0, base + WINDOW_OVERRIDE_CTRL(1));
  }
  /*
   * Since this XOR driver is basically used only for RAID5, we don't
   * need to care about synchronizing ->suspend with DMA activity,
   * because the DMA engine will naturally be quiet due to the block
   * devices being suspended.
   */
  static int mv_xor_suspend(struct platform_device *pdev, pm_message_t state)
  {
  	struct mv_xor_device *xordev = platform_get_drvdata(pdev);
  	int i;
  
  	for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
  		struct mv_xor_chan *mv_chan = xordev->channels[i];
  
  		if (!mv_chan)
  			continue;
  
  		mv_chan->saved_config_reg =
  			readl_relaxed(XOR_CONFIG(mv_chan));
  		mv_chan->saved_int_mask_reg =
  			readl_relaxed(XOR_INTR_MASK(mv_chan));
  	}
  
  	return 0;
  }
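
/*
 * Note: only XOR_CONFIG and XOR_INTR_MASK are saved above; the MBUS
 * windows are not, because mv_xor_resume() below simply reprograms
 * them from scratch.
 */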
  
  static int mv_xor_resume(struct platform_device *dev)
  {
  	struct mv_xor_device *xordev = platform_get_drvdata(dev);
  	const struct mbus_dram_target_info *dram;
  	int i;
  
  	for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
  		struct mv_xor_chan *mv_chan = xordev->channels[i];
  
  		if (!mv_chan)
  			continue;
  
  		writel_relaxed(mv_chan->saved_config_reg,
  			       XOR_CONFIG(mv_chan));
  		writel_relaxed(mv_chan->saved_int_mask_reg,
  			       XOR_INTR_MASK(mv_chan));
  	}
  	if (xordev->xor_type == XOR_ARMADA_37XX) {
  		mv_xor_conf_mbus_windows_a3700(xordev);
  		return 0;
  	}
  	dram = mv_mbus_dram_info();
  	if (dram)
  		mv_xor_conf_mbus_windows(xordev, dram);
  
  	return 0;
  }
static const struct of_device_id mv_xor_dt_ids[] = {
	{ .compatible = "marvell,orion-xor", .data = (void *)XOR_ORION },
	{ .compatible = "marvell,armada-380-xor", .data = (void *)XOR_ARMADA_38X },
	{ .compatible = "marvell,armada-3700-xor", .data = (void *)XOR_ARMADA_37XX },
	{},
};
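
/*
 * Sketch of a matching device-tree node (illustrative addresses and
 * interrupt numbers, one child node per channel):
 *
 *	xor@60900 {
 *		compatible = "marvell,orion-xor";
 *		reg = <0x60900 0x100>, <0x60b00 0x100>;
 *		clocks = <&coreclk 0>;
 *
 *		channel0 {
 *			interrupts = <51>;
 *		};
 *		channel1 {
 *			interrupts = <52>;
 *		};
 *	};
 */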

static unsigned int mv_xor_engine_count;

static int mv_xor_probe(struct platform_device *pdev)
{
	const struct mbus_dram_target_info *dram;
	struct mv_xor_device *xordev;
	struct mv_xor_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct resource *res;
	unsigned int max_engines, max_channels;
	int i, ret;

	dev_notice(&pdev->dev, "Marvell shared XOR driver\n");

	xordev = devm_kzalloc(&pdev->dev, sizeof(*xordev), GFP_KERNEL);
	if (!xordev)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	xordev->xor_base = devm_ioremap(&pdev->dev, res->start,
					resource_size(res));
	if (!xordev->xor_base)
		return -EBUSY;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!res)
		return -ENODEV;

	xordev->xor_high_base = devm_ioremap(&pdev->dev, res->start,
					     resource_size(res));
	if (!xordev->xor_high_base)
		return -EBUSY;

	platform_set_drvdata(pdev, xordev);

	/*
	 * We need to know which type of XOR device we use before
	 * setting up. In the non-DT case it can only be the legacy one.
	 */
	xordev->xor_type = XOR_ORION;
	if (pdev->dev.of_node) {
		const struct of_device_id *of_id =
			of_match_device(mv_xor_dt_ids,
					&pdev->dev);

		xordev->xor_type = (uintptr_t)of_id->data;
	}

	/*
	 * (Re-)program MBUS remapping windows if we are asked to.
	 */
	if (xordev->xor_type == XOR_ARMADA_37XX) {
		mv_xor_conf_mbus_windows_a3700(xordev);
	} else {
		dram = mv_mbus_dram_info();
		if (dram)
			mv_xor_conf_mbus_windows(xordev, dram);
	}

	/* Not all platforms can gate the clock, so it is not
	 * an error if the clock does not exist.
	 */
	xordev->clk = clk_get(&pdev->dev, NULL);
	if (!IS_ERR(xordev->clk))
		clk_prepare_enable(xordev->clk);

	/*
	 * We don't want to have more than one channel per CPU in
	 * order for async_tx to perform well. So we limit the number
	 * of engines and channels so that we take into account this
	 * constraint. Note that we also want to use channels from
	 * separate engines when possible. For the dual-CPU Armada 3700
	 * SoC, which has a single XOR engine, allow using both of its
	 * channels.
	 */
	max_engines = num_present_cpus();
	if (xordev->xor_type == XOR_ARMADA_37XX)
		max_channels = num_present_cpus();
	else
		max_channels = min_t(unsigned int,
				     MV_XOR_MAX_CHANNELS,
				     DIV_ROUND_UP(num_present_cpus(), 2));
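
	/*
	 * Worked example, assuming MV_XOR_MAX_CHANNELS is 2 as defined in
	 * mv_xor.h: a quad-core non-37xx SoC gives max_engines = 4 and
	 * max_channels = min(2, DIV_ROUND_UP(4, 2)) = 2, i.e. at most two
	 * channels per engine and no more engines than CPUs.
	 */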

	if (mv_xor_engine_count >= max_engines)
		return 0;

	if (pdev->dev.of_node) {
		struct device_node *np;
		int i = 0;

		for_each_child_of_node(pdev->dev.of_node, np) {
			struct mv_xor_chan *chan;
			dma_cap_mask_t cap_mask;
			int irq;

			if (i >= max_channels)
				continue;

			dma_cap_zero(cap_mask);
			dma_cap_set(DMA_MEMCPY, cap_mask);
			dma_cap_set(DMA_XOR, cap_mask);
			dma_cap_set(DMA_INTERRUPT, cap_mask);

			irq = irq_of_parse_and_map(np, 0);
			if (!irq) {
				ret = -ENODEV;
				goto err_channel_add;
			}

			chan = mv_xor_channel_add(xordev, pdev, i,
						  cap_mask, irq);
			if (IS_ERR(chan)) {
				ret = PTR_ERR(chan);
				irq_dispose_mapping(irq);
				goto err_channel_add;
			}

			xordev->channels[i] = chan;
			i++;
		}
  	} else if (pdata && pdata->channels) {
		for (i = 0; i < max_channels; i++) {
			struct mv_xor_channel_data *cd;
			struct mv_xor_chan *chan;
			int irq;

			cd = &pdata->channels[i];

			irq = platform_get_irq(pdev, i);
			if (irq < 0) {
				ret = irq;
				goto err_channel_add;
			}

			chan = mv_xor_channel_add(xordev, pdev, i,
						  cd->cap_mask, irq);
			if (IS_ERR(chan)) {
				ret = PTR_ERR(chan);
				goto err_channel_add;
			}

			xordev->channels[i] = chan;
		}
	}

	return 0;

err_channel_add:
	for (i = 0; i < MV_XOR_MAX_CHANNELS; i++)
		if (xordev->channels[i]) {
			mv_xor_channel_remove(xordev->channels[i]);
			if (pdev->dev.of_node)
				irq_dispose_mapping(xordev->channels[i]->irq);
		}

	if (!IS_ERR(xordev->clk)) {
		clk_disable_unprepare(xordev->clk);
		clk_put(xordev->clk);
	}

	return ret;
}

static struct platform_driver mv_xor_driver = {
	.probe		= mv_xor_probe,
	.suspend        = mv_xor_suspend,
	.resume         = mv_xor_resume,
	.driver		= {
		.name	        = MV_XOR_NAME,
		.of_match_table = of_match_ptr(mv_xor_dt_ids),
	},
};
  builtin_platform_driver(mv_xor_driver);
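
/*
 * This driver is built-in only (builtin_platform_driver() above), which
 * is why the module metadata below is kept commented out rather than
 * registered.
 */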

  /*
  MODULE_AUTHOR("Saeed Bishara <saeed@marvell.com>");
  MODULE_DESCRIPTION("DMA engine driver for Marvell's XOR engine");
  MODULE_LICENSE("GPL");
  */