/*
 * drivers/dma/mpc512x_dma.c
 *
 * Copyright (C) Freescale Semiconductor, Inc. 2007, 2008.
 * Copyright (C) Semihalf 2009
 * Copyright (C) Ilya Yanok, Emcraft Systems 2010
 * Copyright (C) Alexander Popov, Promcontroller 2014
 * Copyright (C) Mario Six, Guntermann & Drunck GmbH, 2016
 *
 * Written by Piotr Ziecik <kosmo@semihalf.com>. Hardware description
 * (defines, structures and comments) was taken from MPC5121 DMA driver
 * written by Hongjun Chen <hong-jun.chen@freescale.com>.
 *
 * Approved as OSADL project by a majority of OSADL members and funded
 * by OSADL membership fees in 2009; for details see www.osadl.org.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */

/*
 * MPC512x and MPC8308 DMA driver. It supports memory to memory data transfers
 * (tested using dmatest module) and data transfers between memory and
 * peripheral I/O memory by means of slave scatter/gather with these
 * limitations:
 *  - chunked transfers (described by s/g lists with more than one item) are
 *    refused as long as proper support for scatter/gather is missing
 *  - transfers on MPC8308 always start from software as this SoC does not
 *    have external request lines for peripheral flow control
 *  - memory <-> I/O memory transfer chunks of sizes of 1, 2, 4, 16 (for
 *    MPC512x), and 32 bytes are supported, and, consequently, source
 *    addresses and destination addresses must be aligned accordingly;
 *    furthermore, for MPC512x SoCs, the transfer size must be aligned on
 *    (chunk size * maxburst)
 */

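/*
 * Illustrative client configuration (a sketch, not part of this driver):
 * a slave client doing DMA_DEV_TO_MEM transfers under the constraints
 * above might set up its channel as below; the FIFO address and burst
 * length are made-up values.
 *
 *	struct dma_slave_config cfg = {
 *		.src_addr	= 0x80000000,			// hypothetical FIFO
 *		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,	// chunk size
 *		.src_maxburst	= 16,				// units per request
 *	};
 *	dmaengine_slave_config(chan, &cfg);
 *
 * The memory buffer must then be 4-byte aligned and, on MPC512x, the
 * transfer length a multiple of 4 * 16 = 64 bytes.
 */
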
#include <linux/module.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/of_dma.h>
#include <linux/of_platform.h>

#include <linux/random.h>

#include "dmaengine.h"

/* Number of DMA Transfer descriptors allocated per channel */
#define MPC_DMA_DESCRIPTORS	64

/* Macro definitions */
#define MPC_DMA_TCD_OFFSET	0x1000

/*
 * Maximum channel counts for individual hardware variants
 * and the maximum channel count over all supported controllers,
 * used for data structure size
 */
#define MPC8308_DMACHAN_MAX	16
#define MPC512x_DMACHAN_MAX	64
#define MPC_DMA_CHANNELS	64

/* Arbitration mode of group and channel */
#define MPC_DMA_DMACR_EDCG	(1 << 31)
#define MPC_DMA_DMACR_ERGA	(1 << 3)
#define MPC_DMA_DMACR_ERCA	(1 << 2)

/* Error codes */
#define MPC_DMA_DMAES_VLD	(1 << 31)
#define MPC_DMA_DMAES_GPE	(1 << 15)
#define MPC_DMA_DMAES_CPE	(1 << 14)
#define MPC_DMA_DMAES_ERRCHN(err) \
				(((err) >> 8) & 0x3f)
#define MPC_DMA_DMAES_SAE	(1 << 7)
#define MPC_DMA_DMAES_SOE	(1 << 6)
#define MPC_DMA_DMAES_DAE	(1 << 5)
#define MPC_DMA_DMAES_DOE	(1 << 4)
#define MPC_DMA_DMAES_NCE	(1 << 3)
#define MPC_DMA_DMAES_SGE	(1 << 2)
#define MPC_DMA_DMAES_SBE	(1 << 1)
#define MPC_DMA_DMAES_DBE	(1 << 0)

#define MPC_DMA_DMAGPOR_SNOOP_ENABLE	(1 << 6)

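/*
 * Worked example (added for clarity): a DMAES value of 0x80000202 decodes
 * as MPC_DMA_DMAES_VLD (a valid error is latched), MPC_DMA_DMAES_ERRCHN()
 * = 2 (channel 2 faulted) and MPC_DMA_DMAES_SBE (source bus error).
 */
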
#define MPC_DMA_TSIZE_1		0x00
#define MPC_DMA_TSIZE_2		0x01
#define MPC_DMA_TSIZE_4		0x02
#define MPC_DMA_TSIZE_16	0x04
#define MPC_DMA_TSIZE_32	0x05

/* MPC5121 DMA engine registers */
struct __attribute__ ((__packed__)) mpc_dma_regs {
	/* 0x00 */
	u32 dmacr;		/* DMA control register */
	u32 dmaes;		/* DMA error status */
	/* 0x08 */
	u32 dmaerqh;		/* DMA enable request high(channels 63~32) */
	u32 dmaerql;		/* DMA enable request low(channels 31~0) */
	u32 dmaeeih;		/* DMA enable error interrupt high(ch63~32) */
	u32 dmaeeil;		/* DMA enable error interrupt low(ch31~0) */
	/* 0x18 */
	u8 dmaserq;		/* DMA set enable request */
	u8 dmacerq;		/* DMA clear enable request */
	u8 dmaseei;		/* DMA set enable error interrupt */
	u8 dmaceei;		/* DMA clear enable error interrupt */
	/* 0x1c */
	u8 dmacint;		/* DMA clear interrupt request */
	u8 dmacerr;		/* DMA clear error */
	u8 dmassrt;		/* DMA set start bit */
	u8 dmacdne;		/* DMA clear DONE status bit */
	/* 0x20 */
	u32 dmainth;		/* DMA interrupt request high(ch63~32) */
	u32 dmaintl;		/* DMA interrupt request low(ch31~0) */
	u32 dmaerrh;		/* DMA error high(ch63~32) */
	u32 dmaerrl;		/* DMA error low(ch31~0) */
	/* 0x30 */
	u32 dmahrsh;		/* DMA hw request status high(ch63~32) */
	u32 dmahrsl;		/* DMA hardware request status low(ch31~0) */
	union {
		u32 dmaihsa;	/* DMA interrupt high select AXE(ch63~32) */
		u32 dmagpor;	/* (General purpose register on MPC8308) */
	};
	u32 dmailsa;		/* DMA interrupt low select AXE(ch31~0) */
	/* 0x40 ~ 0xff */
	u32 reserve0[48];	/* Reserved */
	/* 0x100 */
	u8 dchpri[MPC_DMA_CHANNELS];	/* DMA channels(0~63) priority */
};

struct __attribute__ ((__packed__)) mpc_dma_tcd {
	/* 0x00 */
	u32 saddr;		/* Source address */

	u32 smod:5;		/* Source address modulo */
	u32 ssize:3;		/* Source data transfer size */
	u32 dmod:5;		/* Destination address modulo */
	u32 dsize:3;		/* Destination data transfer size */
	u32 soff:16;		/* Signed source address offset */

	/* 0x08 */
	u32 nbytes;		/* Inner "minor" byte count */
	u32 slast;		/* Last source address adjustment */
	u32 daddr;		/* Destination address */

	/* 0x14 */
	u32 citer_elink:1;	/* Enable channel-to-channel linking on
				 * minor loop complete
				 */
	u32 citer_linkch:6;	/* Link channel for minor loop complete */
	u32 citer:9;		/* Current "major" iteration count */
	u32 doff:16;		/* Signed destination address offset */

	/* 0x18 */
	u32 dlast_sga;		/* Last Destination address adjustment/scatter
				 * gather address
				 */

	/* 0x1c */
	u32 biter_elink:1;	/* Enable channel-to-channel linking on major
				 * loop complete
				 */
	u32 biter_linkch:6;
	u32 biter:9;		/* Beginning "major" iteration count */
	u32 bwc:2;		/* Bandwidth control */
	u32 major_linkch:6;	/* Link channel number */
	u32 done:1;		/* Channel done */
	u32 active:1;		/* Channel active */
	u32 major_elink:1;	/* Enable channel-to-channel linking on major
				 * loop complete
				 */
	u32 e_sg:1;		/* Enable scatter/gather processing */
	u32 d_req:1;		/* Disable request */
	u32 int_half:1;		/* Enable an interrupt when major counter is
				 * half complete
				 */
	u32 int_maj:1;		/* Enable an interrupt when major iteration
				 * count completes
				 */
	u32 start:1;		/* Channel start */
};

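/*
 * Worked example (illustrative, not used by the code): with nbytes = 64,
 * each hardware request moves one 64-byte "minor" loop, so a 512-byte
 * buffer needs biter = citer = 512 / 64 = 8 "major" iterations. Since
 * biter/citer are only 9 bits wide here, bits 9..14 of the iteration
 * count are stored in biter_linkch/citer_linkch; see
 * mpc_dma_prep_slave_sg() below.
 */
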
struct mpc_dma_desc {
	struct dma_async_tx_descriptor	desc;
	struct mpc_dma_tcd		*tcd;
	dma_addr_t			tcd_paddr;
	int				error;
	struct list_head		node;
	int				will_access_peripheral;
};

struct mpc_dma_chan {
	struct dma_chan			chan;
	struct list_head		free;
	struct list_head		prepared;
	struct list_head		queued;
	struct list_head		active;
	struct list_head		completed;
	struct mpc_dma_tcd		*tcd;
	dma_addr_t			tcd_paddr;

	/* Settings for access to peripheral FIFO */
	dma_addr_t			src_per_paddr;
	u32				src_tcd_nunits;
	u8				swidth;
	dma_addr_t			dst_per_paddr;
	u32				dst_tcd_nunits;
	u8				dwidth;

	/* Lock for this structure */
	spinlock_t			lock;
};

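/*
 * Note on descriptor flow (added for clarity): descriptors move between
 * the per-channel lists above as free -> prepared (after a prep call) ->
 * queued (after tx_submit) -> active (once written to hardware) ->
 * completed (from the interrupt handler) -> free again (from the tasklet,
 * after the client callbacks have run).
 */
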
struct mpc_dma {
	struct dma_device		dma;
	struct tasklet_struct		tasklet;
	struct mpc_dma_chan		channels[MPC_DMA_CHANNELS];
	struct mpc_dma_regs __iomem	*regs;
	struct mpc_dma_tcd __iomem	*tcd;
	int				irq;
	int				irq2;
	uint				error_status;
	int				is_mpc8308;

	/* Lock for error_status field in this structure */
	spinlock_t			error_status_lock;
};

#define DRV_NAME	"mpc512x_dma"

/* Convert struct dma_chan to struct mpc_dma_chan */
static inline struct mpc_dma_chan *dma_chan_to_mpc_dma_chan(struct dma_chan *c)
{
	return container_of(c, struct mpc_dma_chan, chan);
}

/* Convert struct dma_chan to struct mpc_dma */
static inline struct mpc_dma *dma_chan_to_mpc_dma(struct dma_chan *c)
{
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(c);

	return container_of(mchan, struct mpc_dma, channels[c->chan_id]);
}

/*
 * Execute all queued DMA descriptors.
 *
 * Following requirements must be met while calling mpc_dma_execute():
 *  a) mchan->lock is acquired,
 *  b) mchan->active list is empty,
 *  c) mchan->queued list contains at least one entry.
 */
static void mpc_dma_execute(struct mpc_dma_chan *mchan)
{
	struct mpc_dma *mdma = dma_chan_to_mpc_dma(&mchan->chan);
	struct mpc_dma_desc *first = NULL;
	struct mpc_dma_desc *prev = NULL;
	struct mpc_dma_desc *mdesc;
	int cid = mchan->chan.chan_id;

	while (!list_empty(&mchan->queued)) {
		mdesc = list_first_entry(&mchan->queued,
						struct mpc_dma_desc, node);
		/*
		 * Grab either several mem-to-mem transfer descriptors
		 * or one peripheral transfer descriptor,
		 * don't mix mem-to-mem and peripheral transfer descriptors
		 * within the same 'active' list.
		 */
		if (mdesc->will_access_peripheral) {
			if (list_empty(&mchan->active))
				list_move_tail(&mdesc->node, &mchan->active);
			break;
		} else {
			list_move_tail(&mdesc->node, &mchan->active);
		}
	}

	/* Chain descriptors into one transaction */
	list_for_each_entry(mdesc, &mchan->active, node) {
		if (!first)
			first = mdesc;

		if (!prev) {
			prev = mdesc;
			continue;
		}

		prev->tcd->dlast_sga = mdesc->tcd_paddr;
		prev->tcd->e_sg = 1;
		mdesc->tcd->start = 1;

		prev = mdesc;
	}

	prev->tcd->int_maj = 1;

	/* Send first descriptor in chain into hardware */
	memcpy_toio(&mdma->tcd[cid], first->tcd, sizeof(struct mpc_dma_tcd));

	if (first != prev)
		mdma->tcd[cid].e_sg = 1;

	if (mdma->is_mpc8308) {
		/* MPC8308, no request lines, software initiated start */
		out_8(&mdma->regs->dmassrt, cid);
	} else if (first->will_access_peripheral) {
		/* Peripherals involved, start by external request signal */
		out_8(&mdma->regs->dmaserq, cid);
	} else {
		/* Memory to memory transfer, software initiated start */
		out_8(&mdma->regs->dmassrt, cid);
	}
}

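/*
 * Note on the chaining above (added for clarity): each TCD's dlast_sga is
 * pointed at the physical address of the next descriptor's TCD and e_sg
 * is set, so the controller reloads the next TCD by itself once the
 * current major loop finishes; only the last TCD in the chain raises an
 * interrupt (int_maj).
 */
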
/* Handle interrupt on one half of DMA controller (32 channels) */
static void mpc_dma_irq_process(struct mpc_dma *mdma, u32 is, u32 es, int off)
{
	struct mpc_dma_chan *mchan;
	struct mpc_dma_desc *mdesc;
	u32 status = is | es;
	int ch;

	while ((ch = fls(status) - 1) >= 0) {
		status &= ~(1 << ch);
		mchan = &mdma->channels[ch + off];

		spin_lock(&mchan->lock);

		out_8(&mdma->regs->dmacint, ch + off);
		out_8(&mdma->regs->dmacerr, ch + off);

		/* Check error status */
		if (es & (1 << ch))
			list_for_each_entry(mdesc, &mchan->active, node)
				mdesc->error = -EIO;

		/* Execute queued descriptors */
		list_splice_tail_init(&mchan->active, &mchan->completed);
		if (!list_empty(&mchan->queued))
			mpc_dma_execute(mchan);

		spin_unlock(&mchan->lock);
	}
}

/* Interrupt handler */
static irqreturn_t mpc_dma_irq(int irq, void *data)
{
	struct mpc_dma *mdma = data;
	uint es;

	/* Save error status register */
	es = in_be32(&mdma->regs->dmaes);
	spin_lock(&mdma->error_status_lock);
	if ((es & MPC_DMA_DMAES_VLD) && mdma->error_status == 0)
		mdma->error_status = es;
	spin_unlock(&mdma->error_status_lock);

	/* Handle interrupt on each channel */
	if (mdma->dma.chancnt > 32) {
		mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmainth),
					in_be32(&mdma->regs->dmaerrh), 32);
	}
	mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmaintl),
					in_be32(&mdma->regs->dmaerrl), 0);

	/* Schedule tasklet */
	tasklet_schedule(&mdma->tasklet);

	return IRQ_HANDLED;
}

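/*
 * Note (added for clarity): the interrupt handler above only acknowledges
 * the hardware and moves finished descriptors to the 'completed' lists;
 * client callbacks and descriptor recycling run later in mpc_dma_tasklet()
 * so that the hard-IRQ path stays short.
 */
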
/* process completed descriptors */
static void mpc_dma_process_completed(struct mpc_dma *mdma)
{
	dma_cookie_t last_cookie = 0;
	struct mpc_dma_chan *mchan;
	struct mpc_dma_desc *mdesc;
	struct dma_async_tx_descriptor *desc;
	unsigned long flags;
	LIST_HEAD(list);
	int i;

	for (i = 0; i < mdma->dma.chancnt; i++) {
		mchan = &mdma->channels[i];

		/* Get all completed descriptors */
		spin_lock_irqsave(&mchan->lock, flags);
		if (!list_empty(&mchan->completed))
			list_splice_tail_init(&mchan->completed, &list);
		spin_unlock_irqrestore(&mchan->lock, flags);

		if (list_empty(&list))
			continue;

		/* Execute callbacks and run dependencies */
		list_for_each_entry(mdesc, &list, node) {
			desc = &mdesc->desc;

			dmaengine_desc_get_callback_invoke(desc, NULL);

			last_cookie = desc->cookie;
			dma_run_dependencies(desc);
		}

		/* Free descriptors */
		spin_lock_irqsave(&mchan->lock, flags);
		list_splice_tail_init(&list, &mchan->free);
		mchan->chan.completed_cookie = last_cookie;
		spin_unlock_irqrestore(&mchan->lock, flags);
	}
}

/* DMA Tasklet */
static void mpc_dma_tasklet(unsigned long data)
{
	struct mpc_dma *mdma = (void *)data;
	unsigned long flags;
	uint es;

	spin_lock_irqsave(&mdma->error_status_lock, flags);
	es = mdma->error_status;
	mdma->error_status = 0;
	spin_unlock_irqrestore(&mdma->error_status_lock, flags);

	/* Print nice error report */
	if (es) {
		dev_err(mdma->dma.dev,
			"Hardware reported following error(s) on channel %u:\n",
						MPC_DMA_DMAES_ERRCHN(es));

		if (es & MPC_DMA_DMAES_GPE)
			dev_err(mdma->dma.dev, "- Group Priority Error\n");
		if (es & MPC_DMA_DMAES_CPE)
			dev_err(mdma->dma.dev, "- Channel Priority Error\n");
		if (es & MPC_DMA_DMAES_SAE)
			dev_err(mdma->dma.dev, "- Source Address Error\n");
		if (es & MPC_DMA_DMAES_SOE)
			dev_err(mdma->dma.dev,
				"- Source Offset Configuration Error\n");
		if (es & MPC_DMA_DMAES_DAE)
			dev_err(mdma->dma.dev,
				"- Destination Address Error\n");
		if (es & MPC_DMA_DMAES_DOE)
			dev_err(mdma->dma.dev,
				"- Destination Offset Configuration Error\n");
		if (es & MPC_DMA_DMAES_NCE)
			dev_err(mdma->dma.dev,
				"- NBytes/Citer Configuration Error\n");
		if (es & MPC_DMA_DMAES_SGE)
			dev_err(mdma->dma.dev,
				"- Scatter/Gather Configuration Error\n");
		if (es & MPC_DMA_DMAES_SBE)
			dev_err(mdma->dma.dev, "- Source Bus Error\n");
		if (es & MPC_DMA_DMAES_DBE)
			dev_err(mdma->dma.dev, "- Destination Bus Error\n");
	}

	mpc_dma_process_completed(mdma);
}

/* Submit descriptor to hardware */
static dma_cookie_t mpc_dma_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(txd->chan);
	struct mpc_dma_desc *mdesc;
	unsigned long flags;
	dma_cookie_t cookie;

	mdesc = container_of(txd, struct mpc_dma_desc, desc);

	spin_lock_irqsave(&mchan->lock, flags);

	/* Move descriptor to queue */
	list_move_tail(&mdesc->node, &mchan->queued);

	/* If channel is idle, execute all queued descriptors */
	if (list_empty(&mchan->active))
		mpc_dma_execute(mchan);

	/* Update cookie */
	cookie = dma_cookie_assign(txd);
	spin_unlock_irqrestore(&mchan->lock, flags);

	return cookie;
}

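/*
 * Client-side view (illustrative sketch, not part of this driver): a
 * mem-to-mem user typically runs
 *
 *	desc = dmaengine_prep_dma_memcpy(chan, dst, src, len, 0);
 *	cookie = dmaengine_submit(desc);	// calls mpc_dma_tx_submit()
 *	dma_async_issue_pending(chan);		// a no-op here, see below
 *
 * because descriptors are pushed to the hardware already at submit time.
 */
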
/* Alloc channel resources */
static int mpc_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
	struct mpc_dma_desc *mdesc;
	struct mpc_dma_tcd *tcd;
	dma_addr_t tcd_paddr;
	unsigned long flags;
	LIST_HEAD(descs);
	int i;

	/* Alloc DMA memory for Transfer Control Descriptors */
	tcd = dma_alloc_coherent(mdma->dma.dev,
			MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd),
			&tcd_paddr, GFP_KERNEL);
	if (!tcd)
		return -ENOMEM;

	/* Alloc descriptors for this channel */
	for (i = 0; i < MPC_DMA_DESCRIPTORS; i++) {
		mdesc = kzalloc(sizeof(struct mpc_dma_desc), GFP_KERNEL);
		if (!mdesc) {
			dev_notice(mdma->dma.dev,
				"Memory allocation error. Allocated only %u descriptors\n",
				i);
			break;
		}

		dma_async_tx_descriptor_init(&mdesc->desc, chan);
		mdesc->desc.flags = DMA_CTRL_ACK;
		mdesc->desc.tx_submit = mpc_dma_tx_submit;

		mdesc->tcd = &tcd[i];
		mdesc->tcd_paddr = tcd_paddr + (i * sizeof(struct mpc_dma_tcd));

		list_add_tail(&mdesc->node, &descs);
	}

	/* Return error only if no descriptors were allocated */
	if (i == 0) {
		dma_free_coherent(mdma->dma.dev,
			MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd),
			tcd, tcd_paddr);
		return -ENOMEM;
	}

	spin_lock_irqsave(&mchan->lock, flags);
	mchan->tcd = tcd;
	mchan->tcd_paddr = tcd_paddr;
	list_splice_tail_init(&descs, &mchan->free);
	spin_unlock_irqrestore(&mchan->lock, flags);

	/* Enable Error Interrupt */
	out_8(&mdma->regs->dmaseei, chan->chan_id);

	return 0;
}

/* Free channel resources */
static void mpc_dma_free_chan_resources(struct dma_chan *chan)
{
	struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
	struct mpc_dma_desc *mdesc, *tmp;
	struct mpc_dma_tcd *tcd;
	dma_addr_t tcd_paddr;
	unsigned long flags;
	LIST_HEAD(descs);

	spin_lock_irqsave(&mchan->lock, flags);

	/* Channel must be idle */
	BUG_ON(!list_empty(&mchan->prepared));
	BUG_ON(!list_empty(&mchan->queued));
	BUG_ON(!list_empty(&mchan->active));
	BUG_ON(!list_empty(&mchan->completed));

	/* Move data */
	list_splice_tail_init(&mchan->free, &descs);
	tcd = mchan->tcd;
	tcd_paddr = mchan->tcd_paddr;

	spin_unlock_irqrestore(&mchan->lock, flags);

	/* Free DMA memory used by descriptors */
	dma_free_coherent(mdma->dma.dev,
			MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd),
			tcd, tcd_paddr);

	/* Free descriptors */
	list_for_each_entry_safe(mdesc, tmp, &descs, node)
		kfree(mdesc);

	/* Disable Error Interrupt */
	out_8(&mdma->regs->dmaceei, chan->chan_id);
}

/* Send all pending descriptors to hardware */
static void mpc_dma_issue_pending(struct dma_chan *chan)
{
	/*
	 * We are posting descriptors to the hardware as soon as
	 * they are ready, so this function does nothing.
	 */
}

/* Check request completion status */
static enum dma_status
mpc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
		  struct dma_tx_state *txstate)
{
	return dma_cookie_status(chan, cookie, txstate);
}

/* Prepare descriptor for memory to memory copy */
static struct dma_async_tx_descriptor *
mpc_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
		    size_t len, unsigned long flags)
{
	struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
	struct mpc_dma_desc *mdesc = NULL;
	struct mpc_dma_tcd *tcd;
	unsigned long iflags;

	/* Get free descriptor */
	spin_lock_irqsave(&mchan->lock, iflags);
	if (!list_empty(&mchan->free)) {
		mdesc = list_first_entry(&mchan->free, struct mpc_dma_desc,
				node);
		list_del(&mdesc->node);
	}
	spin_unlock_irqrestore(&mchan->lock, iflags);

	if (!mdesc) {
		/* try to free completed descriptors */
		mpc_dma_process_completed(mdma);
		return NULL;
	}

	mdesc->error = 0;
	mdesc->will_access_peripheral = 0;
	tcd = mdesc->tcd;

	/* Prepare Transfer Control Descriptor for this transaction */
	memset(tcd, 0, sizeof(struct mpc_dma_tcd));

	if (IS_ALIGNED(src | dst | len, 32)) {
		tcd->ssize = MPC_DMA_TSIZE_32;
		tcd->dsize = MPC_DMA_TSIZE_32;
		tcd->soff = 32;
		tcd->doff = 32;
	} else if (!mdma->is_mpc8308 && IS_ALIGNED(src | dst | len, 16)) {
		/* MPC8308 doesn't support 16 byte transfers */
		tcd->ssize = MPC_DMA_TSIZE_16;
		tcd->dsize = MPC_DMA_TSIZE_16;
		tcd->soff = 16;
		tcd->doff = 16;
	} else if (IS_ALIGNED(src | dst | len, 4)) {
		tcd->ssize = MPC_DMA_TSIZE_4;
		tcd->dsize = MPC_DMA_TSIZE_4;
		tcd->soff = 4;
		tcd->doff = 4;
	} else if (IS_ALIGNED(src | dst | len, 2)) {
		tcd->ssize = MPC_DMA_TSIZE_2;
		tcd->dsize = MPC_DMA_TSIZE_2;
		tcd->soff = 2;
		tcd->doff = 2;
	} else {
		tcd->ssize = MPC_DMA_TSIZE_1;
		tcd->dsize = MPC_DMA_TSIZE_1;
		tcd->soff = 1;
		tcd->doff = 1;
	}

	tcd->saddr = src;
	tcd->daddr = dst;
	tcd->nbytes = len;
	tcd->biter = 1;
	tcd->citer = 1;

	/* Place descriptor in prepared list */
	spin_lock_irqsave(&mchan->lock, iflags);
	list_add_tail(&mdesc->node, &mchan->prepared);
	spin_unlock_irqrestore(&mchan->lock, iflags);

	return &mdesc->desc;
}

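/*
 * The hardware encodes transfer sizes as log2 of the width in bytes,
 * which is what the helper below computes: 1 -> 0x00, 2 -> 0x01,
 * 4 -> 0x02, 16 -> 0x04 and 32 -> 0x05, matching the MPC_DMA_TSIZE_*
 * values above.
 */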
static inline u8 buswidth_to_dmatsize(u8 buswidth)
{
	u8 res;

	for (res = 0; buswidth > 1; buswidth /= 2)
		res++;
	return res;
}

static struct dma_async_tx_descriptor *
mpc_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
	struct mpc_dma_desc *mdesc = NULL;
	dma_addr_t per_paddr;
	u32 tcd_nunits;
	struct mpc_dma_tcd *tcd;
	unsigned long iflags;
	struct scatterlist *sg;
	size_t len;
	int iter, i;

	/* Currently there is no proper support for scatter/gather */
	if (sg_len != 1)
		return NULL;

	if (!is_slave_direction(direction))
		return NULL;

	for_each_sg(sgl, sg, sg_len, i) {
		spin_lock_irqsave(&mchan->lock, iflags);

		mdesc = list_first_entry_or_null(&mchan->free,
						struct mpc_dma_desc, node);
		if (!mdesc) {
			spin_unlock_irqrestore(&mchan->lock, iflags);
			/* Try to free completed descriptors */
			mpc_dma_process_completed(mdma);
			return NULL;
		}

		list_del(&mdesc->node);

		if (direction == DMA_DEV_TO_MEM) {
			per_paddr = mchan->src_per_paddr;
			tcd_nunits = mchan->src_tcd_nunits;
		} else {
			per_paddr = mchan->dst_per_paddr;
			tcd_nunits = mchan->dst_tcd_nunits;
		}

		spin_unlock_irqrestore(&mchan->lock, iflags);

		if (per_paddr == 0 || tcd_nunits == 0)
			goto err_prep;

		mdesc->error = 0;
		mdesc->will_access_peripheral = 1;

		/* Prepare Transfer Control Descriptor for this transaction */
		tcd = mdesc->tcd;

		memset(tcd, 0, sizeof(struct mpc_dma_tcd));

		if (direction == DMA_DEV_TO_MEM) {
			tcd->saddr = per_paddr;
			tcd->daddr = sg_dma_address(sg);

			if (!IS_ALIGNED(sg_dma_address(sg), mchan->dwidth))
				goto err_prep;

			tcd->soff = 0;
			tcd->doff = mchan->dwidth;
		} else {
			tcd->saddr = sg_dma_address(sg);
			tcd->daddr = per_paddr;

			if (!IS_ALIGNED(sg_dma_address(sg), mchan->swidth))
				goto err_prep;

			tcd->soff = mchan->swidth;
			tcd->doff = 0;
		}

		tcd->ssize = buswidth_to_dmatsize(mchan->swidth);
		tcd->dsize = buswidth_to_dmatsize(mchan->dwidth);

		if (mdma->is_mpc8308) {
			tcd->nbytes = sg_dma_len(sg);
			if (!IS_ALIGNED(tcd->nbytes, mchan->swidth))
				goto err_prep;

			/* No major loops for MPC8308 */
			tcd->biter = 1;
			tcd->citer = 1;
		} else {
			len = sg_dma_len(sg);
			tcd->nbytes = tcd_nunits * tcd->ssize;
			if (!IS_ALIGNED(len, tcd->nbytes))
				goto err_prep;

			iter = len / tcd->nbytes;
			if (iter >= 1 << 15) {
				/* len is too big */
				goto err_prep;
			}
			/* citer_linkch contains the high bits of iter */
			tcd->biter = iter & 0x1ff;
			tcd->biter_linkch = iter >> 9;
			tcd->citer = tcd->biter;
			tcd->citer_linkch = tcd->biter_linkch;
		}

		tcd->e_sg = 0;
		tcd->d_req = 1;

		/* Place descriptor in prepared list */
		spin_lock_irqsave(&mchan->lock, iflags);
		list_add_tail(&mdesc->node, &mchan->prepared);
		spin_unlock_irqrestore(&mchan->lock, iflags);
	}

	return &mdesc->desc;

err_prep:
	/* Put the descriptor back */
	spin_lock_irqsave(&mchan->lock, iflags);
	list_add_tail(&mdesc->node, &mchan->free);
	spin_unlock_irqrestore(&mchan->lock, iflags);

	return NULL;
}

static inline bool is_buswidth_valid(u8 buswidth, bool is_mpc8308)
{
	switch (buswidth) {
	case 16:
		if (is_mpc8308)
			return false;
		/* fall through */
	case 1:
	case 2:
	case 4:
	case 32:
		break;
	default:
		return false;
	}

	return true;
}

static int mpc_dma_device_config(struct dma_chan *chan,
				 struct dma_slave_config *cfg)
{
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
	struct mpc_dma *mdma = dma_chan_to_mpc_dma(&mchan->chan);
	unsigned long flags;

	/*
	 * Software constraints:
	 *  - only transfers between a peripheral device and memory are
	 *    supported
	 *  - transfer chunk sizes of 1, 2, 4, 16 (for MPC512x), and 32 bytes
	 *    are supported, and, consequently, source addresses and
	 *    destination addresses must be aligned accordingly; furthermore,
	 *    for MPC512x SoCs, the transfer size must be aligned on
	 *    (chunk size * maxburst)
	 *  - during the transfer, the RAM address is incremented by the size
	 *    of transfer chunk
	 *  - the peripheral port's address is constant during the transfer.
	 */

	if (!IS_ALIGNED(cfg->src_addr, cfg->src_addr_width) ||
	    !IS_ALIGNED(cfg->dst_addr, cfg->dst_addr_width))
		return -EINVAL;

	if (!is_buswidth_valid(cfg->src_addr_width, mdma->is_mpc8308) ||
	    !is_buswidth_valid(cfg->dst_addr_width, mdma->is_mpc8308))
		return -EINVAL;

	spin_lock_irqsave(&mchan->lock, flags);

	mchan->src_per_paddr = cfg->src_addr;
	mchan->src_tcd_nunits = cfg->src_maxburst;
	mchan->swidth = cfg->src_addr_width;
	mchan->dst_per_paddr = cfg->dst_addr;
	mchan->dst_tcd_nunits = cfg->dst_maxburst;
	mchan->dwidth = cfg->dst_addr_width;

	/* Apply defaults */
	if (mchan->src_tcd_nunits == 0)
		mchan->src_tcd_nunits = 1;
	if (mchan->dst_tcd_nunits == 0)
		mchan->dst_tcd_nunits = 1;

	spin_unlock_irqrestore(&mchan->lock, flags);

	return 0;
}

static int mpc_dma_device_terminate_all(struct dma_chan *chan)
{
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
	struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
	unsigned long flags;

	/* Disable channel requests */
	spin_lock_irqsave(&mchan->lock, flags);

	out_8(&mdma->regs->dmacerq, chan->chan_id);
	list_splice_tail_init(&mchan->prepared, &mchan->free);
	list_splice_tail_init(&mchan->queued, &mchan->free);
	list_splice_tail_init(&mchan->active, &mchan->free);

	spin_unlock_irqrestore(&mchan->lock, flags);

	return 0;
}

static int mpc_dma_probe(struct platform_device *op)
{
	struct device_node *dn = op->dev.of_node;
	struct device *dev = &op->dev;
	struct dma_device *dma;
	struct mpc_dma *mdma;
	struct mpc_dma_chan *mchan;
	struct resource res;
	ulong regs_start, regs_size;
	int retval, i;
	u8 chancnt;

	mdma = devm_kzalloc(dev, sizeof(struct mpc_dma), GFP_KERNEL);
	if (!mdma) {
		retval = -ENOMEM;
		goto err;
	}

	mdma->irq = irq_of_parse_and_map(dn, 0);
	if (!mdma->irq) {
		dev_err(dev, "Error mapping IRQ!\n");
		retval = -EINVAL;
		goto err;
	}

	if (of_device_is_compatible(dn, "fsl,mpc8308-dma")) {
		mdma->is_mpc8308 = 1;
		mdma->irq2 = irq_of_parse_and_map(dn, 1);
		if (!mdma->irq2) {
			dev_err(dev, "Error mapping IRQ!\n");
			retval = -EINVAL;
			goto err_dispose1;
		}
	}

	retval = of_address_to_resource(dn, 0, &res);
	if (retval) {
		dev_err(dev, "Error parsing memory region!\n");
		goto err_dispose2;
	}

	regs_start = res.start;
	regs_size = resource_size(&res);

	if (!devm_request_mem_region(dev, regs_start, regs_size, DRV_NAME)) {
		dev_err(dev, "Error requesting memory region!\n");
		retval = -EBUSY;
		goto err_dispose2;
	}

	mdma->regs = devm_ioremap(dev, regs_start, regs_size);
	if (!mdma->regs) {
		dev_err(dev, "Error mapping memory region!\n");
		retval = -ENOMEM;
		goto err_dispose2;
	}

	mdma->tcd = (struct mpc_dma_tcd *)((u8 *)(mdma->regs)
							+ MPC_DMA_TCD_OFFSET);

	retval = request_irq(mdma->irq, &mpc_dma_irq, 0, DRV_NAME, mdma);
	if (retval) {
		dev_err(dev, "Error requesting IRQ!\n");
		retval = -EINVAL;
		goto err_dispose2;
	}

	if (mdma->is_mpc8308) {
		retval = request_irq(mdma->irq2, &mpc_dma_irq, 0,
							DRV_NAME, mdma);
		if (retval) {
			dev_err(dev, "Error requesting IRQ2!\n");
			retval = -EINVAL;
			goto err_free1;
		}
	}

	spin_lock_init(&mdma->error_status_lock);

	dma = &mdma->dma;
	dma->dev = dev;
	dma->device_alloc_chan_resources = mpc_dma_alloc_chan_resources;
	dma->device_free_chan_resources = mpc_dma_free_chan_resources;
	dma->device_issue_pending = mpc_dma_issue_pending;
	dma->device_tx_status = mpc_dma_tx_status;
	dma->device_prep_dma_memcpy = mpc_dma_prep_memcpy;
	dma->device_prep_slave_sg = mpc_dma_prep_slave_sg;
	dma->device_config = mpc_dma_device_config;
	dma->device_terminate_all = mpc_dma_device_terminate_all;

	INIT_LIST_HEAD(&dma->channels);
	dma_cap_set(DMA_MEMCPY, dma->cap_mask);
	dma_cap_set(DMA_SLAVE, dma->cap_mask);

	if (mdma->is_mpc8308)
		chancnt = MPC8308_DMACHAN_MAX;
	else
		chancnt = MPC512x_DMACHAN_MAX;

	for (i = 0; i < chancnt; i++) {
		mchan = &mdma->channels[i];

		mchan->chan.device = dma;
		dma_cookie_init(&mchan->chan);

		INIT_LIST_HEAD(&mchan->free);
		INIT_LIST_HEAD(&mchan->prepared);
		INIT_LIST_HEAD(&mchan->queued);
		INIT_LIST_HEAD(&mchan->active);
		INIT_LIST_HEAD(&mchan->completed);

		spin_lock_init(&mchan->lock);
		list_add_tail(&mchan->chan.device_node, &dma->channels);
	}

	tasklet_init(&mdma->tasklet, mpc_dma_tasklet, (unsigned long)mdma);

	/*
	 * Configure DMA Engine:
	 * - Dynamic clock,
	 * - Round-robin group arbitration,
	 * - Round-robin channel arbitration.
	 */
	if (mdma->is_mpc8308) {
		/* MPC8308 has 16 channels and lacks some registers */
		out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_ERCA);

		/* enable snooping */
		out_be32(&mdma->regs->dmagpor, MPC_DMA_DMAGPOR_SNOOP_ENABLE);
		/* Disable error interrupts */
		out_be32(&mdma->regs->dmaeeil, 0);

		/* Clear interrupts status */
		out_be32(&mdma->regs->dmaintl, 0xFFFF);
		out_be32(&mdma->regs->dmaerrl, 0xFFFF);
	} else {
		out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_EDCG |
						MPC_DMA_DMACR_ERGA |
						MPC_DMA_DMACR_ERCA);

		/* Disable hardware DMA requests */
		out_be32(&mdma->regs->dmaerqh, 0);
		out_be32(&mdma->regs->dmaerql, 0);

		/* Disable error interrupts */
		out_be32(&mdma->regs->dmaeeih, 0);
		out_be32(&mdma->regs->dmaeeil, 0);

		/* Clear interrupts status */
		out_be32(&mdma->regs->dmainth, 0xFFFFFFFF);
		out_be32(&mdma->regs->dmaintl, 0xFFFFFFFF);
		out_be32(&mdma->regs->dmaerrh, 0xFFFFFFFF);
		out_be32(&mdma->regs->dmaerrl, 0xFFFFFFFF);

		/* Route interrupts to IPIC */
		out_be32(&mdma->regs->dmaihsa, 0);
		out_be32(&mdma->regs->dmailsa, 0);
	}

	/* Register DMA engine */
	dev_set_drvdata(dev, mdma);
	retval = dma_async_device_register(dma);
	if (retval)
		goto err_free2;

	/* Register with OF helpers for DMA lookups (nonfatal) */
	if (dev->of_node) {
		retval = of_dma_controller_register(dev->of_node,
						of_dma_xlate_by_chan_id, mdma);
		if (retval)
			dev_warn(dev, "Could not register for OF lookup\n");
	}

	return 0;

err_free2:
	if (mdma->is_mpc8308)
		free_irq(mdma->irq2, mdma);
err_free1:
	free_irq(mdma->irq, mdma);
err_dispose2:
	if (mdma->is_mpc8308)
		irq_dispose_mapping(mdma->irq2);
err_dispose1:
	irq_dispose_mapping(mdma->irq);
err:
	return retval;
}

static int mpc_dma_remove(struct platform_device *op)
{
	struct device *dev = &op->dev;
	struct mpc_dma *mdma = dev_get_drvdata(dev);

	if (dev->of_node)
		of_dma_controller_free(dev->of_node);
	dma_async_device_unregister(&mdma->dma);
	if (mdma->is_mpc8308) {
		free_irq(mdma->irq2, mdma);
		irq_dispose_mapping(mdma->irq2);
	}
	free_irq(mdma->irq, mdma);
	irq_dispose_mapping(mdma->irq);
	tasklet_kill(&mdma->tasklet);

	return 0;
}

static const struct of_device_id mpc_dma_match[] = {
	{ .compatible = "fsl,mpc5121-dma", },
	{ .compatible = "fsl,mpc8308-dma", },
	{},
};
MODULE_DEVICE_TABLE(of, mpc_dma_match);

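/*
 * A device tree node matched by this driver looks roughly like the
 * following (illustrative; the address, size and interrupt specifier are
 * hypothetical, see the mpc512x DMA binding for the authoritative format;
 * one dma-cell carries the channel id, as used by
 * of_dma_xlate_by_chan_id() above):
 *
 *	dma0: dma@14000 {
 *		compatible = "fsl,mpc5121-dma";
 *		reg = <0x14000 0x1800>;
 *		interrupts = <65 0x8>;
 *		#dma-cells = <1>;
 *	};
 */
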
static struct platform_driver mpc_dma_driver = {
	.probe		= mpc_dma_probe,
	.remove		= mpc_dma_remove,
	.driver = {
		.name		= DRV_NAME,
		.of_match_table	= mpc_dma_match,
	},
};

module_platform_driver(mpc_dma_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Piotr Ziecik <kosmo@semihalf.com>");