drivers/dma/mv_xor.c

/*
 * offload engine driver for the Marvell XOR engine
 * Copyright (C) 2007, 2008, Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/memory.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/irqdomain.h>
#include <linux/cpumask.h>
#include <linux/platform_data/dma-mv_xor.h>

#include "dmaengine.h"
#include "mv_xor.h"

enum mv_xor_type {
	XOR_ORION,
	XOR_ARMADA_38X,
	XOR_ARMADA_37XX,
};

enum mv_xor_mode {
	XOR_MODE_IN_REG,
	XOR_MODE_IN_DESC,
};

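/*
 * Older engines (XOR_ORION) select the operation through the channel
 * configuration register (XOR_MODE_IN_REG, see mv_chan_set_mode()),
 * while newer ones encode it in each hardware descriptor
 * (XOR_MODE_IN_DESC, see mv_desc_set_mode()); mv_xor_channel_add()
 * below picks the mode based on the detected mv_xor_type.
 */
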
static void mv_xor_issue_pending(struct dma_chan *chan);

#define to_mv_xor_chan(chan)		\
	container_of(chan, struct mv_xor_chan, dmachan)

#define to_mv_xor_slot(tx)		\
	container_of(tx, struct mv_xor_desc_slot, async_tx)

#define mv_chan_to_devp(chan)		\
	((chan)->dmadev.dev)

static void mv_desc_init(struct mv_xor_desc_slot *desc,
			 dma_addr_t addr, u32 byte_count,
			 enum dma_ctrl_flags flags)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;

	hw_desc->status = XOR_DESC_DMA_OWNED;
	hw_desc->phy_next_desc = 0;
	/* Enable end-of-descriptor interrupts only for DMA_PREP_INTERRUPT */
	hw_desc->desc_command = (flags & DMA_PREP_INTERRUPT) ?
				XOR_DESC_EOD_INT_EN : 0;
	hw_desc->phy_dest_addr = addr;
	hw_desc->byte_count = byte_count;
}

static void mv_desc_set_mode(struct mv_xor_desc_slot *desc)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;

	switch (desc->type) {
	case DMA_XOR:
	case DMA_INTERRUPT:
		hw_desc->desc_command |= XOR_DESC_OPERATION_XOR;
		break;
	case DMA_MEMCPY:
		hw_desc->desc_command |= XOR_DESC_OPERATION_MEMCPY;
		break;
	default:
		BUG();
		return;
	}
}

static void mv_desc_set_next_desc(struct mv_xor_desc_slot *desc,
				  u32 next_desc_addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;

	BUG_ON(hw_desc->phy_next_desc);
	hw_desc->phy_next_desc = next_desc_addr;
}

static void mv_desc_set_src_addr(struct mv_xor_desc_slot *desc,
				 int index, dma_addr_t addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;

	hw_desc->phy_src_addr[mv_phy_src_idx(index)] = addr;
	if (desc->type == DMA_XOR)
		hw_desc->desc_command |= (1 << index);
}

static u32 mv_chan_get_current_desc(struct mv_xor_chan *chan)
{
	return readl_relaxed(XOR_CURR_DESC(chan));
}

static void mv_chan_set_next_descriptor(struct mv_xor_chan *chan,
					u32 next_desc_addr)
{
	writel_relaxed(next_desc_addr, XOR_NEXT_DESC(chan));
}

static void mv_chan_unmask_interrupts(struct mv_xor_chan *chan)
{
	u32 val = readl_relaxed(XOR_INTR_MASK(chan));

	val |= XOR_INTR_MASK_VALUE << (chan->idx * 16);
	writel_relaxed(val, XOR_INTR_MASK(chan));
}

static u32 mv_chan_get_intr_cause(struct mv_xor_chan *chan)
{
	u32 intr_cause = readl_relaxed(XOR_INTR_CAUSE(chan));

	intr_cause = (intr_cause >> (chan->idx * 16)) & 0xFFFF;

	return intr_cause;
}

static void mv_chan_clear_eoc_cause(struct mv_xor_chan *chan)
{
	u32 val;

	val = XOR_INT_END_OF_DESC | XOR_INT_END_OF_CHAIN | XOR_INT_STOPPED;
	val = ~(val << (chan->idx * 16));
	dev_dbg(mv_chan_to_devp(chan), "%s, val 0x%08x\n", __func__, val);
	writel_relaxed(val, XOR_INTR_CAUSE(chan));
}

static void mv_chan_clear_err_status(struct mv_xor_chan *chan)
{
	u32 val = 0xFFFF0000 >> (chan->idx * 16);

	writel_relaxed(val, XOR_INTR_CAUSE(chan));
}

static void mv_chan_set_mode(struct mv_xor_chan *chan,
			     u32 op_mode)
{
	u32 config = readl_relaxed(XOR_CONFIG(chan));

	config &= ~0x7;
	config |= op_mode;

#if defined(__BIG_ENDIAN)
	config |= XOR_DESCRIPTOR_SWAP;
#else
	config &= ~XOR_DESCRIPTOR_SWAP;
#endif

	writel_relaxed(config, XOR_CONFIG(chan));
}

static void mv_chan_activate(struct mv_xor_chan *chan)
{
	dev_dbg(mv_chan_to_devp(chan), " activate chan.\n");

	/* writel ensures all descriptors are flushed before activation */
	writel(BIT(0), XOR_ACTIVATION(chan));
}

static char mv_chan_is_busy(struct mv_xor_chan *chan)
{
	u32 state = readl_relaxed(XOR_ACTIVATION(chan));

	state = (state >> 4) & 0x3;

	return (state == 1) ? 1 : 0;
}

/*
 * mv_chan_start_new_chain - program the engine to operate on new
 * chain headed by sw_desc
 * Caller must hold &mv_chan->lock while calling this function
 */
static void mv_chan_start_new_chain(struct mv_xor_chan *mv_chan,
				    struct mv_xor_desc_slot *sw_desc)
{
	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: sw_desc %p\n",
		__func__, __LINE__, sw_desc);

	/* set the hardware chain */
	mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys);

	mv_chan->pending++;
	mv_xor_issue_pending(&mv_chan->dmachan);
}

static dma_cookie_t
mv_desc_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
				struct mv_xor_chan *mv_chan,
				dma_cookie_t cookie)
{
	BUG_ON(desc->async_tx.cookie < 0);

	if (desc->async_tx.cookie > 0) {
		cookie = desc->async_tx.cookie;

		dma_descriptor_unmap(&desc->async_tx);
		/* call the callback (must not sleep or submit new
		 * operations to this channel)
		 */
		dmaengine_desc_get_callback_invoke(&desc->async_tx, NULL);
	}

	/* run dependent operations */
	dma_run_dependencies(&desc->async_tx);

	return cookie;
}

static int
mv_chan_clean_completed_slots(struct mv_xor_chan *mv_chan)
{
	struct mv_xor_desc_slot *iter, *_iter;

	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
				 node) {

		if (async_tx_test_ack(&iter->async_tx)) {
			list_move_tail(&iter->node, &mv_chan->free_slots);
			if (!list_empty(&iter->sg_tx_list)) {
				list_splice_tail_init(&iter->sg_tx_list,
						      &mv_chan->free_slots);
			}
		}
	}
	return 0;
}

static int
mv_desc_clean_slot(struct mv_xor_desc_slot *desc,
		   struct mv_xor_chan *mv_chan)
{
	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: desc %p flags %d\n",
		__func__, __LINE__, desc, desc->async_tx.flags);

	/* the client is allowed to attach dependent operations
	 * until 'ack' is set
	 */
	if (!async_tx_test_ack(&desc->async_tx)) {
		/* move this slot to the completed_slots */
		list_move_tail(&desc->node, &mv_chan->completed_slots);
		if (!list_empty(&desc->sg_tx_list)) {
			list_splice_tail_init(&desc->sg_tx_list,
					      &mv_chan->completed_slots);
		}
	} else {
		list_move_tail(&desc->node, &mv_chan->free_slots);
		if (!list_empty(&desc->sg_tx_list)) {
			list_splice_tail_init(&desc->sg_tx_list,
					      &mv_chan->free_slots);
		}
	}

	return 0;
}

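/*
 * Lifecycle note: descriptor slots travel through the per-channel lists
 * as free_slots -> allocated_slots (mv_chan_alloc_slot()) -> chain
 * (mv_xor_tx_submit()) -> completed_slots, or straight back to
 * free_slots once the client has acked the transaction
 * (mv_desc_clean_slot() above).
 */
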
/* This function must be called with the mv_xor_chan spinlock held */
static void mv_chan_slot_cleanup(struct mv_xor_chan *mv_chan)
{
	struct mv_xor_desc_slot *iter, *_iter;
	dma_cookie_t cookie = 0;
	int busy = mv_chan_is_busy(mv_chan);
	u32 current_desc = mv_chan_get_current_desc(mv_chan);
	int current_cleaned = 0;
	struct mv_xor_desc *hw_desc;

	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
	dev_dbg(mv_chan_to_devp(mv_chan), "current_desc %x\n", current_desc);
	mv_chan_clean_completed_slots(mv_chan);

	/* free completed slots from the chain starting with
	 * the oldest descriptor
	 */

	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
				 node) {

		/* clean finished descriptors */
		hw_desc = iter->hw_desc;
		if (hw_desc->status & XOR_DESC_SUCCESS) {
			cookie = mv_desc_run_tx_complete_actions(iter, mv_chan,
								 cookie);

			/* done processing desc, clean slot */
			mv_desc_clean_slot(iter, mv_chan);

			/* break if we cleaned the current descriptor */
			if (iter->async_tx.phys == current_desc) {
				current_cleaned = 1;
				break;
			}
		} else {
			if (iter->async_tx.phys == current_desc) {
				current_cleaned = 0;
				break;
			}
		}
	}

	if ((busy == 0) && !list_empty(&mv_chan->chain)) {
		if (current_cleaned) {
			/*
			 * current descriptor cleaned and removed, run
			 * from list head
			 */
			iter = list_entry(mv_chan->chain.next,
					  struct mv_xor_desc_slot,
					  node);
			mv_chan_start_new_chain(mv_chan, iter);
		} else {
			if (!list_is_last(&iter->node, &mv_chan->chain)) {
				/*
				 * descriptors are still waiting after
				 * current, trigger them
				 */
				iter = list_entry(iter->node.next,
						  struct mv_xor_desc_slot,
						  node);
				mv_chan_start_new_chain(mv_chan, iter);
			} else {
				/*
				 * some descriptors are still waiting
				 * to be cleaned
				 */
				tasklet_schedule(&mv_chan->irq_tasklet);
			}
		}
	}

	if (cookie > 0)
		mv_chan->dmachan.completed_cookie = cookie;
}

static void mv_xor_tasklet(unsigned long data)
{
	struct mv_xor_chan *chan = (struct mv_xor_chan *) data;

	spin_lock_bh(&chan->lock);
	mv_chan_slot_cleanup(chan);
	spin_unlock_bh(&chan->lock);
}

static struct mv_xor_desc_slot *
mv_chan_alloc_slot(struct mv_xor_chan *mv_chan)
{
	struct mv_xor_desc_slot *iter;

	spin_lock_bh(&mv_chan->lock);

	if (!list_empty(&mv_chan->free_slots)) {
		iter = list_first_entry(&mv_chan->free_slots,
					struct mv_xor_desc_slot,
					node);

		list_move_tail(&iter->node, &mv_chan->allocated_slots);

		spin_unlock_bh(&mv_chan->lock);

		/* pre-ack descriptor */
		async_tx_ack(&iter->async_tx);
		iter->async_tx.cookie = -EBUSY;

		return iter;
	}

	spin_unlock_bh(&mv_chan->lock);

	/* try to free some slots if the allocation fails */
	tasklet_schedule(&mv_chan->irq_tasklet);

	return NULL;
}

/************************ DMA engine API functions ****************************/

static dma_cookie_t
mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct mv_xor_desc_slot *sw_desc = to_mv_xor_slot(tx);
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(tx->chan);
	struct mv_xor_desc_slot *old_chain_tail;
	dma_cookie_t cookie;
	int new_hw_chain = 1;

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s sw_desc %p: async_tx %p\n",
		__func__, sw_desc, &sw_desc->async_tx);

	spin_lock_bh(&mv_chan->lock);
	cookie = dma_cookie_assign(tx);

	if (list_empty(&mv_chan->chain))
		list_move_tail(&sw_desc->node, &mv_chan->chain);
	else {
		new_hw_chain = 0;

		old_chain_tail = list_entry(mv_chan->chain.prev,
					    struct mv_xor_desc_slot,
					    node);
		list_move_tail(&sw_desc->node, &mv_chan->chain);

		dev_dbg(mv_chan_to_devp(mv_chan), "Append to last desc %pa\n",
			&old_chain_tail->async_tx.phys);

		/* fix up the hardware chain */
		mv_desc_set_next_desc(old_chain_tail, sw_desc->async_tx.phys);

		/* if the channel is not busy */
		if (!mv_chan_is_busy(mv_chan)) {
			u32 current_desc = mv_chan_get_current_desc(mv_chan);
			/*
			 * and the current desc is the end of the chain before
			 * the append, then we need to start the channel
			 */
			if (current_desc == old_chain_tail->async_tx.phys)
				new_hw_chain = 1;
		}
	}

	if (new_hw_chain)
		mv_chan_start_new_chain(mv_chan, sw_desc);

	spin_unlock_bh(&mv_chan->lock);

	return cookie;
}

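/*
 * Usage sketch (illustrative, not part of this driver): a dmaengine
 * client holding a channel served by this driver goes through the
 * standard prep/submit/issue sequence. The buffer handles below
 * (dest_dma, src_dmas, src_cnt, len) are hypothetical.
 *
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *	enum dma_status status;
 *
 *	tx = chan->device->device_prep_dma_xor(chan, dest_dma, src_dmas,
 *					       src_cnt, len,
 *					       DMA_PREP_INTERRUPT);
 *	cookie = dmaengine_submit(tx);	 -- lands in mv_xor_tx_submit()
 *	dma_async_issue_pending(chan);	 -- kicks mv_xor_issue_pending()
 *	status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
 */
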
/* returns the number of allocated descriptors */
static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
{
	void *virt_desc;
	dma_addr_t dma_desc;
	int idx;
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *slot = NULL;
	int num_descs_in_pool = MV_XOR_POOL_SIZE/MV_XOR_SLOT_SIZE;

	/* Allocate descriptor slots */
	idx = mv_chan->slots_allocated;
	while (idx < num_descs_in_pool) {
		slot = kzalloc(sizeof(*slot), GFP_KERNEL);
		if (!slot) {
			dev_info(mv_chan_to_devp(mv_chan),
				 "channel only initialized %d descriptor slots",
				 idx);
			break;
		}
		virt_desc = mv_chan->dma_desc_pool_virt;
		slot->hw_desc = virt_desc + idx * MV_XOR_SLOT_SIZE;

		dma_async_tx_descriptor_init(&slot->async_tx, chan);
		slot->async_tx.tx_submit = mv_xor_tx_submit;
		INIT_LIST_HEAD(&slot->node);
		INIT_LIST_HEAD(&slot->sg_tx_list);
		dma_desc = mv_chan->dma_desc_pool;
		slot->async_tx.phys = dma_desc + idx * MV_XOR_SLOT_SIZE;
		slot->idx = idx++;

		spin_lock_bh(&mv_chan->lock);
		mv_chan->slots_allocated = idx;
		list_add_tail(&slot->node, &mv_chan->free_slots);
		spin_unlock_bh(&mv_chan->lock);
	}

	dev_dbg(mv_chan_to_devp(mv_chan),
		"allocated %d descriptor slots\n",
		mv_chan->slots_allocated);

	return mv_chan->slots_allocated ? : -ENOMEM;
}

/*
 * Check if source or destination is a PCIe/IO address (non-SDRAM) and add
 * a new MBus window if necessary. Use a cache for these checks so that
 * the MMIO-mapped registers don't have to be accessed every time, which
 * speeds up this process.
 */
static int mv_xor_add_io_win(struct mv_xor_chan *mv_chan, u32 addr)
{
	struct mv_xor_device *xordev = mv_chan->xordev;
	void __iomem *base = mv_chan->mmr_high_base;
	u32 win_enable;
	u32 size;
	u8 target, attr;
	int ret;
	int i;

	/* Nothing needs to get done for the Armada 3700 */
	if (xordev->xor_type == XOR_ARMADA_37XX)
		return 0;

	/*
	 * Loop over the cached windows to check if the requested area
	 * is already mapped. If this is the case, nothing needs to be done
	 * and we can return.
	 */
	for (i = 0; i < WINDOW_COUNT; i++) {
		if (addr >= xordev->win_start[i] &&
		    addr <= xordev->win_end[i]) {
			/* Window is already mapped */
			return 0;
		}
	}

	/*
	 * The window is not mapped, so we need to create the new mapping
	 */

	/* If no IO window is found, the addr has to be located in SDRAM */
	ret = mvebu_mbus_get_io_win_info(addr, &size, &target, &attr);
	if (ret < 0)
		return 0;

	/*
	 * Mask the base addr 'addr' according to 'size' read back from the
	 * MBus window. Otherwise we might end up with an address located
	 * somewhere in the middle of this area here.
	 */
	size -= 1;
	addr &= ~size;

	/*
	 * Reading either of the two enable registers is enough, as they are
	 * always programmed to identical values
	 */
	win_enable = readl(base + WINDOW_BAR_ENABLE(0));

	/* Set 'i' to the first free window to write the new values to */
	i = ffs(~win_enable) - 1;
	if (i >= WINDOW_COUNT)
		return -ENOMEM;

	writel((addr & 0xffff0000) | (attr << 8) | target,
	       base + WINDOW_BASE(i));
	writel(size & 0xffff0000, base + WINDOW_SIZE(i));

	/* Fill the caching variables for later use */
	xordev->win_start[i] = addr;
	xordev->win_end[i] = addr + size;

	win_enable |= (1 << i);
	win_enable |= 3 << (16 + (2 * i));
	writel(win_enable, base + WINDOW_BAR_ENABLE(0));
	writel(win_enable, base + WINDOW_BAR_ENABLE(1));

	return 0;
}

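/*
 * Worked example for the masking above: for a hypothetical 64 MiB IO
 * window, mvebu_mbus_get_io_win_info() would return size = 0x04000000;
 * after size -= 1 the mask is 0x03ffffff, so an addr of 0xf1012345
 * becomes 0xf1012345 & ~0x03ffffff = 0xf0000000, i.e. the aligned base
 * of the window containing the original address.
 */
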
static struct dma_async_tx_descriptor *
mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
		    unsigned int src_cnt, size_t len, unsigned long flags)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *sw_desc;
	int ret;

	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
		return NULL;

	BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s src_cnt: %d len: %zu dest %pad flags: %ld\n",
		__func__, src_cnt, len, &dest, flags);

	/* Check if a new window needs to get added for 'dest' */
	ret = mv_xor_add_io_win(mv_chan, dest);
	if (ret)
		return NULL;

	sw_desc = mv_chan_alloc_slot(mv_chan);
	if (sw_desc) {
		sw_desc->type = DMA_XOR;
		sw_desc->async_tx.flags = flags;
		mv_desc_init(sw_desc, dest, len, flags);
		if (mv_chan->op_in_desc == XOR_MODE_IN_DESC)
			mv_desc_set_mode(sw_desc);
		while (src_cnt--) {
			/* Check if a new window needs to get added for 'src' */
			ret = mv_xor_add_io_win(mv_chan, src[src_cnt]);
			if (ret)
				return NULL;
			mv_desc_set_src_addr(sw_desc, src_cnt, src[src_cnt]);
		}
	}

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s sw_desc %p async_tx %p\n",
		__func__, sw_desc, &sw_desc->async_tx);
	return sw_desc ? &sw_desc->async_tx : NULL;
}

static struct dma_async_tx_descriptor *
mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	/*
	 * A MEMCPY operation is identical to an XOR operation with only
	 * a single source address.
	 */
	return mv_xor_prep_dma_xor(chan, dest, &src, 1, len, flags);
}

static struct dma_async_tx_descriptor *
mv_xor_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	dma_addr_t src, dest;
	size_t len;

	src = mv_chan->dummy_src_addr;
	dest = mv_chan->dummy_dst_addr;
	len = MV_XOR_MIN_BYTE_COUNT;

	/*
	 * We implement the DMA_INTERRUPT operation as a minimum sized
	 * XOR operation with a single dummy source address.
	 */
	return mv_xor_prep_dma_xor(chan, dest, &src, 1, len, flags);
}

static void mv_xor_free_chan_resources(struct dma_chan *chan)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *iter, *_iter;
	int in_use_descs = 0;

	spin_lock_bh(&mv_chan->lock);

	mv_chan_slot_cleanup(mv_chan);

	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
				 node) {
		in_use_descs++;
		list_move_tail(&iter->node, &mv_chan->free_slots);
	}
	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
				 node) {
		in_use_descs++;
		list_move_tail(&iter->node, &mv_chan->free_slots);
	}
	list_for_each_entry_safe(iter, _iter, &mv_chan->allocated_slots,
				 node) {
		in_use_descs++;
		list_move_tail(&iter->node, &mv_chan->free_slots);
	}
	list_for_each_entry_safe_reverse(
		iter, _iter, &mv_chan->free_slots, node) {
		list_del(&iter->node);
		kfree(iter);
		mv_chan->slots_allocated--;
	}

	dev_dbg(mv_chan_to_devp(mv_chan), "%s slots_allocated %d\n",
		__func__, mv_chan->slots_allocated);
	spin_unlock_bh(&mv_chan->lock);

	if (in_use_descs)
		dev_err(mv_chan_to_devp(mv_chan),
			"freeing %d in use descriptors!\n", in_use_descs);
}

/**
 * mv_xor_status - poll the status of an XOR transaction
 * @chan: XOR channel handle
 * @cookie: XOR transaction identifier
 * @txstate: XOR transactions state holder (or NULL)
 */
static enum dma_status mv_xor_status(struct dma_chan *chan,
				     dma_cookie_t cookie,
				     struct dma_tx_state *txstate)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	enum dma_status ret;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	spin_lock_bh(&mv_chan->lock);
	mv_chan_slot_cleanup(mv_chan);
	spin_unlock_bh(&mv_chan->lock);

	return dma_cookie_status(chan, cookie, txstate);
}

static void mv_chan_dump_regs(struct mv_xor_chan *chan)
{
	u32 val;

	val = readl_relaxed(XOR_CONFIG(chan));
	dev_err(mv_chan_to_devp(chan), "config       0x%08x\n", val);

	val = readl_relaxed(XOR_ACTIVATION(chan));
	dev_err(mv_chan_to_devp(chan), "activation   0x%08x\n", val);

	val = readl_relaxed(XOR_INTR_CAUSE(chan));
	dev_err(mv_chan_to_devp(chan), "intr cause   0x%08x\n", val);

	val = readl_relaxed(XOR_INTR_MASK(chan));
	dev_err(mv_chan_to_devp(chan), "intr mask    0x%08x\n", val);

	val = readl_relaxed(XOR_ERROR_CAUSE(chan));
	dev_err(mv_chan_to_devp(chan), "error cause  0x%08x\n", val);

	val = readl_relaxed(XOR_ERROR_ADDR(chan));
	dev_err(mv_chan_to_devp(chan), "error addr   0x%08x\n", val);
}

static void mv_chan_err_interrupt_handler(struct mv_xor_chan *chan,
					  u32 intr_cause)
{
	if (intr_cause & XOR_INT_ERR_DECODE) {
		dev_dbg(mv_chan_to_devp(chan), "ignoring address decode error\n");
		return;
	}

	dev_err(mv_chan_to_devp(chan), "error on chan %d. intr cause 0x%08x\n",
		chan->idx, intr_cause);

	mv_chan_dump_regs(chan);

	WARN_ON(1);
}

static irqreturn_t mv_xor_interrupt_handler(int irq, void *data)
{
	struct mv_xor_chan *chan = data;
	u32 intr_cause = mv_chan_get_intr_cause(chan);

	dev_dbg(mv_chan_to_devp(chan), "intr cause %x\n", intr_cause);

	if (intr_cause & XOR_INTR_ERRORS)
		mv_chan_err_interrupt_handler(chan, intr_cause);

	tasklet_schedule(&chan->irq_tasklet);

	mv_chan_clear_eoc_cause(chan);

	return IRQ_HANDLED;
}

static void mv_xor_issue_pending(struct dma_chan *chan)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);

	if (mv_chan->pending >= MV_XOR_THRESHOLD) {
		mv_chan->pending = 0;
		mv_chan_activate(mv_chan);
	}
}

/*
 * Perform a transaction to verify the HW works.
 */

static int mv_chan_memcpy_self_test(struct mv_xor_chan *mv_chan)
{
	int i, ret;
	void *src, *dest;
	dma_addr_t src_dma, dest_dma;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	struct dma_async_tx_descriptor *tx;
	struct dmaengine_unmap_data *unmap;
	int err = 0;

	src = kmalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL);
	if (!src)
		return -ENOMEM;

	dest = kzalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL);
	if (!dest) {
		kfree(src);
		return -ENOMEM;
	}

	/* Fill in src buffer */
	for (i = 0; i < PAGE_SIZE; i++)
		((u8 *) src)[i] = (u8)i;

	dma_chan = &mv_chan->dmachan;
	if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	unmap = dmaengine_get_unmap_data(dma_chan->device->dev, 2, GFP_KERNEL);
	if (!unmap) {
		err = -ENOMEM;
		goto free_resources;
	}

	src_dma = dma_map_page(dma_chan->device->dev, virt_to_page(src),
			       offset_in_page(src), PAGE_SIZE,
			       DMA_TO_DEVICE);
	unmap->addr[0] = src_dma;

	ret = dma_mapping_error(dma_chan->device->dev, src_dma);
	if (ret) {
		err = -ENOMEM;
		goto free_resources;
	}
	unmap->to_cnt = 1;

	dest_dma = dma_map_page(dma_chan->device->dev, virt_to_page(dest),
				offset_in_page(dest), PAGE_SIZE,
				DMA_FROM_DEVICE);
	unmap->addr[1] = dest_dma;

	ret = dma_mapping_error(dma_chan->device->dev, dest_dma);
	if (ret) {
		err = -ENOMEM;
		goto free_resources;
	}
	unmap->from_cnt = 1;
	unmap->len = PAGE_SIZE;

	tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
				    PAGE_SIZE, 0);
	if (!tx) {
		dev_err(dma_chan->device->dev,
			"Self-test cannot prepare operation, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	cookie = mv_xor_tx_submit(tx);
	if (dma_submit_error(cookie)) {
		dev_err(dma_chan->device->dev,
			"Self-test submit error, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	mv_xor_issue_pending(dma_chan);
	async_tx_ack(tx);
	msleep(1);

	if (mv_xor_status(dma_chan, cookie, NULL) !=
	    DMA_COMPLETE) {
		dev_err(dma_chan->device->dev,
			"Self-test copy timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
				PAGE_SIZE, DMA_FROM_DEVICE);
	if (memcmp(src, dest, PAGE_SIZE)) {
		dev_err(dma_chan->device->dev,
			"Self-test copy failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

free_resources:
	dmaengine_unmap_put(unmap);
	mv_xor_free_chan_resources(dma_chan);
out:
	kfree(src);
	kfree(dest);
	return err;
}

#define MV_XOR_NUM_SRC_TEST 4 /* must be <= 15 */

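/*
 * In the XOR self-test below, source page i is filled with (1 << i),
 * so the XOR of all MV_XOR_NUM_SRC_TEST = 4 sources is 0x0f in every
 * byte; cmp_word simply replicates that byte across a 32-bit word.
 */
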
static int
mv_chan_xor_self_test(struct mv_xor_chan *mv_chan)
{
	int i, src_idx, ret;
	struct page *dest;
	struct page *xor_srcs[MV_XOR_NUM_SRC_TEST];
	dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST];
	dma_addr_t dest_dma;
	struct dma_async_tx_descriptor *tx;
	struct dmaengine_unmap_data *unmap;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	u8 cmp_byte = 0;
	u32 cmp_word;
	int err = 0;
	int src_count = MV_XOR_NUM_SRC_TEST;

	for (src_idx = 0; src_idx < src_count; src_idx++) {
		xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
		if (!xor_srcs[src_idx]) {
			while (src_idx--)
				__free_page(xor_srcs[src_idx]);
			return -ENOMEM;
		}
	}

	dest = alloc_page(GFP_KERNEL);
	if (!dest) {
		while (src_idx--)
			__free_page(xor_srcs[src_idx]);
		return -ENOMEM;
	}

	/* Fill in src buffers */
	for (src_idx = 0; src_idx < src_count; src_idx++) {
		u8 *ptr = page_address(xor_srcs[src_idx]);
		for (i = 0; i < PAGE_SIZE; i++)
			ptr[i] = (1 << src_idx);
	}

	for (src_idx = 0; src_idx < src_count; src_idx++)
		cmp_byte ^= (u8) (1 << src_idx);

	cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
		(cmp_byte << 8) | cmp_byte;

	memset(page_address(dest), 0, PAGE_SIZE);

	dma_chan = &mv_chan->dmachan;
	if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	unmap = dmaengine_get_unmap_data(dma_chan->device->dev, src_count + 1,
					 GFP_KERNEL);
	if (!unmap) {
		err = -ENOMEM;
		goto free_resources;
	}

	/* test xor */
	for (i = 0; i < src_count; i++) {
		unmap->addr[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
					      0, PAGE_SIZE, DMA_TO_DEVICE);
		dma_srcs[i] = unmap->addr[i];
		ret = dma_mapping_error(dma_chan->device->dev, unmap->addr[i]);
		if (ret) {
			err = -ENOMEM;
			goto free_resources;
		}
		unmap->to_cnt++;
	}

	unmap->addr[src_count] = dma_map_page(dma_chan->device->dev, dest, 0,
					      PAGE_SIZE, DMA_FROM_DEVICE);
	dest_dma = unmap->addr[src_count];
	ret = dma_mapping_error(dma_chan->device->dev, unmap->addr[src_count]);
	if (ret) {
		err = -ENOMEM;
		goto free_resources;
	}
	unmap->from_cnt = 1;
	unmap->len = PAGE_SIZE;

	tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
				 src_count, PAGE_SIZE, 0);
	if (!tx) {
		dev_err(dma_chan->device->dev,
			"Self-test cannot prepare operation, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	cookie = mv_xor_tx_submit(tx);
	if (dma_submit_error(cookie)) {
		dev_err(dma_chan->device->dev,
			"Self-test submit error, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	mv_xor_issue_pending(dma_chan);
	async_tx_ack(tx);
	msleep(8);

	if (mv_xor_status(dma_chan, cookie, NULL) !=
	    DMA_COMPLETE) {
		dev_err(dma_chan->device->dev,
			"Self-test xor timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
				PAGE_SIZE, DMA_FROM_DEVICE);
	for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
		u32 *ptr = page_address(dest);
		if (ptr[i] != cmp_word) {
			dev_err(dma_chan->device->dev,
				"Self-test xor failed compare, disabling. index %d, data %x, expected %x\n",
				i, ptr[i], cmp_word);
			err = -ENODEV;
			goto free_resources;
		}
	}

free_resources:
	dmaengine_unmap_put(unmap);
	mv_xor_free_chan_resources(dma_chan);
out:
	src_idx = src_count;
	while (src_idx--)
		__free_page(xor_srcs[src_idx]);
	__free_page(dest);
	return err;
}

static int mv_xor_channel_remove(struct mv_xor_chan *mv_chan)
{
	struct dma_chan *chan, *_chan;
	struct device *dev = mv_chan->dmadev.dev;

	dma_async_device_unregister(&mv_chan->dmadev);

	dma_free_coherent(dev, MV_XOR_POOL_SIZE,
			  mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
	dma_unmap_single(dev, mv_chan->dummy_src_addr,
			 MV_XOR_MIN_BYTE_COUNT, DMA_FROM_DEVICE);
	dma_unmap_single(dev, mv_chan->dummy_dst_addr,
			 MV_XOR_MIN_BYTE_COUNT, DMA_TO_DEVICE);

	list_for_each_entry_safe(chan, _chan, &mv_chan->dmadev.channels,
				 device_node) {
		list_del(&chan->device_node);
	}

	free_irq(mv_chan->irq, mv_chan);

	return 0;
}

static struct mv_xor_chan *
mv_xor_channel_add(struct mv_xor_device *xordev,
		   struct platform_device *pdev,
		   int idx, dma_cap_mask_t cap_mask, int irq)
{
	int ret = 0;
	struct mv_xor_chan *mv_chan;
	struct dma_device *dma_dev;

	mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL);
	if (!mv_chan)
		return ERR_PTR(-ENOMEM);

	mv_chan->idx = idx;
	mv_chan->irq = irq;
	if (xordev->xor_type == XOR_ORION)
		mv_chan->op_in_desc = XOR_MODE_IN_REG;
	else
		mv_chan->op_in_desc = XOR_MODE_IN_DESC;

	dma_dev = &mv_chan->dmadev;
	mv_chan->xordev = xordev;

	/*
	 * These source and destination dummy buffers are used to implement
	 * a DMA_INTERRUPT operation as a minimum-sized XOR operation.
	 * Hence, we only need to map the buffers at initialization-time.
	 */
	mv_chan->dummy_src_addr = dma_map_single(dma_dev->dev,
		mv_chan->dummy_src, MV_XOR_MIN_BYTE_COUNT, DMA_FROM_DEVICE);
	mv_chan->dummy_dst_addr = dma_map_single(dma_dev->dev,
		mv_chan->dummy_dst, MV_XOR_MIN_BYTE_COUNT, DMA_TO_DEVICE);

	/* allocate coherent memory for hardware descriptors
	 * note: writecombine gives slightly better performance, but
	 * requires that we explicitly flush the writes
	 */
	mv_chan->dma_desc_pool_virt =
	  dma_alloc_wc(&pdev->dev, MV_XOR_POOL_SIZE, &mv_chan->dma_desc_pool,
		       GFP_KERNEL);
	if (!mv_chan->dma_desc_pool_virt)
		return ERR_PTR(-ENOMEM);

	/* discover transaction capabilities from the platform data */
	dma_dev->cap_mask = cap_mask;

	INIT_LIST_HEAD(&dma_dev->channels);

	/* set base routines */
	dma_dev->device_alloc_chan_resources = mv_xor_alloc_chan_resources;
	dma_dev->device_free_chan_resources = mv_xor_free_chan_resources;
	dma_dev->device_tx_status = mv_xor_status;
	dma_dev->device_issue_pending = mv_xor_issue_pending;
	dma_dev->dev = &pdev->dev;

	/* set prep routines based on capability */
	if (dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask))
		dma_dev->device_prep_dma_interrupt = mv_xor_prep_dma_interrupt;
	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
		dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy;
	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		dma_dev->max_xor = 8;
		dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor;
	}

	mv_chan->mmr_base = xordev->xor_base;
	mv_chan->mmr_high_base = xordev->xor_high_base;
	tasklet_init(&mv_chan->irq_tasklet, mv_xor_tasklet, (unsigned long)
		     mv_chan);

	/* clear errors before enabling interrupts */
	mv_chan_clear_err_status(mv_chan);

	ret = request_irq(mv_chan->irq, mv_xor_interrupt_handler,
			  0, dev_name(&pdev->dev), mv_chan);
	if (ret)
		goto err_free_dma;

	mv_chan_unmask_interrupts(mv_chan);

	if (mv_chan->op_in_desc == XOR_MODE_IN_DESC)
		mv_chan_set_mode(mv_chan, XOR_OPERATION_MODE_IN_DESC);
	else
		mv_chan_set_mode(mv_chan, XOR_OPERATION_MODE_XOR);

	spin_lock_init(&mv_chan->lock);
	INIT_LIST_HEAD(&mv_chan->chain);
	INIT_LIST_HEAD(&mv_chan->completed_slots);
	INIT_LIST_HEAD(&mv_chan->free_slots);
	INIT_LIST_HEAD(&mv_chan->allocated_slots);
	mv_chan->dmachan.device = dma_dev;
	dma_cookie_init(&mv_chan->dmachan);

	list_add_tail(&mv_chan->dmachan.device_node, &dma_dev->channels);

	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
		ret = mv_chan_memcpy_self_test(mv_chan);
		dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret);
		if (ret)
			goto err_free_irq;
	}

	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		ret = mv_chan_xor_self_test(mv_chan);
		dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);
		if (ret)
			goto err_free_irq;
	}

	dev_info(&pdev->dev, "Marvell XOR (%s): ( %s%s%s)\n",
		 mv_chan->op_in_desc ? "Descriptor Mode" : "Registers Mode",
		 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
		 dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
		 dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");

	dma_async_device_register(dma_dev);
	return mv_chan;

err_free_irq:
	free_irq(mv_chan->irq, mv_chan);
err_free_dma:
	dma_free_coherent(&pdev->dev, MV_XOR_POOL_SIZE,
			  mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
	return ERR_PTR(ret);
}

static void
mv_xor_conf_mbus_windows(struct mv_xor_device *xordev,
			 const struct mbus_dram_target_info *dram)
{
	void __iomem *base = xordev->xor_high_base;
	u32 win_enable = 0;
	int i;

	for (i = 0; i < 8; i++) {
		writel(0, base + WINDOW_BASE(i));
		writel(0, base + WINDOW_SIZE(i));
		if (i < 4)
			writel(0, base + WINDOW_REMAP_HIGH(i));
	}

	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;

		writel((cs->base & 0xffff0000) |
		       (cs->mbus_attr << 8) |
		       dram->mbus_dram_target_id, base + WINDOW_BASE(i));
		writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));

		/* Fill the caching variables for later use */
		xordev->win_start[i] = cs->base;
		xordev->win_end[i] = cs->base + cs->size - 1;

		win_enable |= (1 << i);
		win_enable |= 3 << (16 + (2 * i));
	}

	writel(win_enable, base + WINDOW_BAR_ENABLE(0));
	writel(win_enable, base + WINDOW_BAR_ENABLE(1));
	writel(0, base + WINDOW_OVERRIDE_CTRL(0));
	writel(0, base + WINDOW_OVERRIDE_CTRL(1));
}

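/*
 * Note on the WINDOW_BAR_ENABLE layout programmed above: bit i enables
 * window i, and the pair of bits at 16 + 2 * i appears to be that
 * window's access-control field, with 3 granting full read/write
 * access. This reading of the field is inferred from how the driver
 * programs it, not from the datasheet.
 */
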
static void
mv_xor_conf_mbus_windows_a3700(struct mv_xor_device *xordev)
{
	void __iomem *base = xordev->xor_high_base;
	u32 win_enable = 0;
	int i;

	for (i = 0; i < 8; i++) {
		writel(0, base + WINDOW_BASE(i));
		writel(0, base + WINDOW_SIZE(i));
		if (i < 4)
			writel(0, base + WINDOW_REMAP_HIGH(i));
	}
	/*
	 * For the Armada 3700, open a default 4GB MBus window. The
	 * DRAM-related configuration is done at the AXIS level.
	 */
	writel(0xffff0000, base + WINDOW_SIZE(0));
	win_enable |= 1;
	win_enable |= 3 << 16;

	writel(win_enable, base + WINDOW_BAR_ENABLE(0));
	writel(win_enable, base + WINDOW_BAR_ENABLE(1));
	writel(0, base + WINDOW_OVERRIDE_CTRL(0));
	writel(0, base + WINDOW_OVERRIDE_CTRL(1));
}

/*
 * Since this XOR driver is basically used only for RAID5, we don't
 * need to care about synchronizing ->suspend with DMA activity,
 * because the DMA engine will naturally be quiet due to the block
 * devices being suspended.
 */
static int mv_xor_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct mv_xor_device *xordev = platform_get_drvdata(pdev);
	int i;

	for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
		struct mv_xor_chan *mv_chan = xordev->channels[i];

		if (!mv_chan)
			continue;

		mv_chan->saved_config_reg =
			readl_relaxed(XOR_CONFIG(mv_chan));
		mv_chan->saved_int_mask_reg =
			readl_relaxed(XOR_INTR_MASK(mv_chan));
	}

	return 0;
}

static int mv_xor_resume(struct platform_device *dev)
{
	struct mv_xor_device *xordev = platform_get_drvdata(dev);
	const struct mbus_dram_target_info *dram;
	int i;

	for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
		struct mv_xor_chan *mv_chan = xordev->channels[i];

		if (!mv_chan)
			continue;

		writel_relaxed(mv_chan->saved_config_reg,
			       XOR_CONFIG(mv_chan));
		writel_relaxed(mv_chan->saved_int_mask_reg,
			       XOR_INTR_MASK(mv_chan));
	}

	if (xordev->xor_type == XOR_ARMADA_37XX) {
		mv_xor_conf_mbus_windows_a3700(xordev);
		return 0;
	}

	dram = mv_mbus_dram_info();
	if (dram)
		mv_xor_conf_mbus_windows(xordev, dram);

	return 0;
}

static const struct of_device_id mv_xor_dt_ids[] = {
	{ .compatible = "marvell,orion-xor", .data = (void *)XOR_ORION },
	{ .compatible = "marvell,armada-380-xor", .data = (void *)XOR_ARMADA_38X },
	{ .compatible = "marvell,armada-3700-xor", .data = (void *)XOR_ARMADA_37XX },
	{},
};

static unsigned int mv_xor_engine_count;

static int mv_xor_probe(struct platform_device *pdev)
{
	const struct mbus_dram_target_info *dram;
	struct mv_xor_device *xordev;
	struct mv_xor_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct resource *res;
	unsigned int max_engines, max_channels;
	int i, ret;

	dev_notice(&pdev->dev, "Marvell shared XOR driver\n");

	xordev = devm_kzalloc(&pdev->dev, sizeof(*xordev), GFP_KERNEL);
	if (!xordev)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	xordev->xor_base = devm_ioremap(&pdev->dev, res->start,
					resource_size(res));
	if (!xordev->xor_base)
		return -EBUSY;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!res)
		return -ENODEV;

	xordev->xor_high_base = devm_ioremap(&pdev->dev, res->start,
					     resource_size(res));
	if (!xordev->xor_high_base)
		return -EBUSY;

	platform_set_drvdata(pdev, xordev);

	/*
	 * We need to know which type of XOR device we use before
	 * setting up. In the non-DT case it can only be the legacy one.
	 */
	xordev->xor_type = XOR_ORION;
	if (pdev->dev.of_node) {
		const struct of_device_id *of_id =
			of_match_device(mv_xor_dt_ids,
					&pdev->dev);

		xordev->xor_type = (uintptr_t)of_id->data;
	}

	/*
	 * (Re-)program MBUS remapping windows if we are asked to.
	 */
	if (xordev->xor_type == XOR_ARMADA_37XX) {
		mv_xor_conf_mbus_windows_a3700(xordev);
	} else {
		dram = mv_mbus_dram_info();
		if (dram)
			mv_xor_conf_mbus_windows(xordev, dram);
	}

	/* Not all platforms can gate the clock, so it is not
	 * an error if the clock does not exist.
	 */
	xordev->clk = clk_get(&pdev->dev, NULL);
	if (!IS_ERR(xordev->clk))
		clk_prepare_enable(xordev->clk);

	/*
	 * We don't want to have more than one channel per CPU in
	 * order for async_tx to perform well. So we limit the number
	 * of engines and channels so that we take into account this
	 * constraint. Note that we also want to use channels from
	 * separate engines when possible. On the dual-CPU Armada 3700
	 * SoC, which has a single XOR engine, both of its channels
	 * may be used.
	 */
	max_engines = num_present_cpus();
	if (xordev->xor_type == XOR_ARMADA_37XX)
		max_channels = num_present_cpus();
	else
		max_channels = min_t(unsigned int,
				     MV_XOR_MAX_CHANNELS,
				     DIV_ROUND_UP(num_present_cpus(), 2));

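	/*
	 * Example: with four present CPUs this gives max_engines = 4
	 * and, for non-Armada-3700 SoCs, max_channels =
	 * min(MV_XOR_MAX_CHANNELS, DIV_ROUND_UP(4, 2)) = 2 channels
	 * per engine.
	 */
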
	if (mv_xor_engine_count >= max_engines)
		return 0;

	if (pdev->dev.of_node) {
		struct device_node *np;
		int i = 0;

		for_each_child_of_node(pdev->dev.of_node, np) {
			struct mv_xor_chan *chan;
			dma_cap_mask_t cap_mask;
			int irq;

			if (i >= max_channels)
				continue;

			dma_cap_zero(cap_mask);
			dma_cap_set(DMA_MEMCPY, cap_mask);
			dma_cap_set(DMA_XOR, cap_mask);
			dma_cap_set(DMA_INTERRUPT, cap_mask);

			irq = irq_of_parse_and_map(np, 0);
			if (!irq) {
				ret = -ENODEV;
				goto err_channel_add;
			}

			chan = mv_xor_channel_add(xordev, pdev, i,
						  cap_mask, irq);
			if (IS_ERR(chan)) {
				ret = PTR_ERR(chan);
				irq_dispose_mapping(irq);
				goto err_channel_add;
			}

			xordev->channels[i] = chan;
			i++;
		}
	} else if (pdata && pdata->channels) {
		for (i = 0; i < max_channels; i++) {
			struct mv_xor_channel_data *cd;
			struct mv_xor_chan *chan;
			int irq;

			cd = &pdata->channels[i];
			irq = platform_get_irq(pdev, i);
			if (irq < 0) {
				ret = irq;
				goto err_channel_add;
			}

			chan = mv_xor_channel_add(xordev, pdev, i,
						  cd->cap_mask, irq);
			if (IS_ERR(chan)) {
				ret = PTR_ERR(chan);
				goto err_channel_add;
			}

			xordev->channels[i] = chan;
		}
	}

	return 0;

err_channel_add:
	for (i = 0; i < MV_XOR_MAX_CHANNELS; i++)
		if (xordev->channels[i]) {
			mv_xor_channel_remove(xordev->channels[i]);
			if (pdev->dev.of_node)
				irq_dispose_mapping(xordev->channels[i]->irq);
		}

	if (!IS_ERR(xordev->clk)) {
		clk_disable_unprepare(xordev->clk);
		clk_put(xordev->clk);
	}

	return ret;
}

static struct platform_driver mv_xor_driver = {
	.probe		= mv_xor_probe,
	.suspend	= mv_xor_suspend,
	.resume		= mv_xor_resume,
	.driver		= {
		.name		= MV_XOR_NAME,
		.of_match_table	= of_match_ptr(mv_xor_dt_ids),
	},
};

builtin_platform_driver(mv_xor_driver);

/*
MODULE_AUTHOR("Saeed Bishara <saeed@marvell.com>");
MODULE_DESCRIPTION("DMA engine driver for Marvell's XOR engine");
MODULE_LICENSE("GPL");
*/