Commit 6ddeb6d844596cac13c4a3665c0bd61f074a81a7

Authored by Russell King
1 parent 596c471b69

dmaengine: omap-dma: move IRQ handling to omap-dma

Move the interrupt handling for OMAP2+ into omap-dma, rather than using
the legacy support in the platform code.

Acked-by: Tony Lindgren <tony@atomide.com>
Acked-by: Vinod Koul <vinod.koul@intel.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>

Showing 1 changed file with 115 additions and 6 deletions Side-by-side Diff

drivers/dma/omap-dma.c
... ... @@ -30,6 +30,10 @@
30 30 void __iomem *base;
31 31 const struct omap_dma_reg *reg_map;
32 32 struct omap_system_dma_plat_info *plat;
  33 + bool legacy;
  34 + spinlock_t irq_lock;
  35 + uint32_t irq_enable_mask;
  36 + struct omap_chan *lch_map[32];
33 37 };
34 38  
35 39 struct omap_chan {
36 40  
... ... @@ -254,10 +258,22 @@
254 258 omap_dma_chan_write(c, CSR, ~0);
255 259 }
256 260  
  261 +static unsigned omap_dma_get_csr(struct omap_chan *c)
  262 +{
	/*
	 * Read the channel status register (CSR).  On OMAP2+ the status
	 * bits are acknowledged by writing the value just read back to
	 * the register, so this is a read-and-clear; OMAP1 needs only the
	 * read.  NOTE(review): write-1-to-clear semantics presumed from
	 * the dma_omap1() special-case — confirm against the TRM.
	 */
  263 + unsigned val = omap_dma_chan_read(c, CSR);
  264 +
  265 + if (!dma_omap1())
  266 + omap_dma_chan_write(c, CSR, val);
  267 +
  268 + return val;
  269 +}
  270 +
257 271 static void omap_dma_assign(struct omap_dmadev *od, struct omap_chan *c,
258 272 unsigned lch)
259 273 {
	/* Derive this channel's register base from the controller base. */
260 274 c->channel_base = od->base + od->plat->channel_stride * lch;
  275 +
	/*
	 * Record the channel in the lch_map so the interrupt handler can
	 * translate an L1 status bit number back to its omap_chan.
	 */
  276 + od->lch_map[lch] = c;
261 277 }
262 278  
263 279 static void omap_dma_start(struct omap_chan *c, struct omap_desc *d)
264 280  
265 281  
266 282  
267 283  
268 284  
269 285  
270 286  
271 287  
... ... @@ -460,32 +476,103 @@
460 476 }
461 477 }
462 478  
  479 +static irqreturn_t omap_dma_irq(int irq, void *devid)
  480 +{
  481 + struct omap_dmadev *od = devid;
  482 + unsigned status, channel;
  483 +
	/* irq_lock serializes against channel alloc/free updating the
	 * IRQENABLE_L1 mask and lch_map. */
  484 + spin_lock(&od->irq_lock);
  485 +
	/*
	 * Only consider channels we enabled on the L1 line; anything else
	 * is not ours (the IRQ is requested IRQF_SHARED), so report
	 * IRQ_NONE.
	 */
  486 + status = omap_dma_glbl_read(od, IRQSTATUS_L1);
  487 + status &= od->irq_enable_mask;
  488 + if (status == 0) {
  489 + spin_unlock(&od->irq_lock);
  490 + return IRQ_NONE;
  491 + }
  492 +
	/* Service each pending channel, lowest set bit first. */
  493 + while ((channel = ffs(status)) != 0) {
  494 + unsigned mask, csr;
  495 + struct omap_chan *c;
  496 +
	/* ffs() is 1-based; convert to a 0-based channel number. */
  497 + channel -= 1;
  498 + mask = BIT(channel);
  499 + status &= ~mask;
  500 +
  501 + c = od->lch_map[channel];
  502 + if (c == NULL) {
  503 + /* This should never happen */
  504 + dev_err(od->ddev.dev, "invalid channel %u\n", channel);
  505 + continue;
  506 + }
  507 +
	/*
	 * Read/ack the per-channel CSR before acking the global L1
	 * status bit.  NOTE(review): ordering presumed to avoid losing
	 * an event raised between the two acks — confirm.
	 */
  508 + csr = omap_dma_get_csr(c);
  509 + omap_dma_glbl_write(od, IRQSTATUS_L1, mask);
  510 +
  511 + omap_dma_callback(channel, csr, c);
  512 + }
  513 +
  514 + spin_unlock(&od->irq_lock);
  515 +
  516 + return IRQ_HANDLED;
  517 +}
  518 +
463 519 static int omap_dma_alloc_chan_resources(struct dma_chan *chan)
464 520 {
465 521 struct omap_dmadev *od = to_omap_dma_dev(chan->device);
466 522 struct omap_chan *c = to_omap_dma_chan(chan);
467 523 int ret;
468 524  
	/*
	 * Legacy mode: the platform code dispatches interrupts, so hand it
	 * our callback.  Otherwise omap_dma_irq() dispatches and no
	 * per-channel callback is registered with the legacy layer.
	 */
469   - dev_dbg(od->ddev.dev, "allocating channel for %u\n", c->dma_sig);
  525 + if (od->legacy) {
  526 + ret = omap_request_dma(c->dma_sig, "DMA engine",
  527 + omap_dma_callback, c, &c->dma_ch);
  528 + } else {
  529 + ret = omap_request_dma(c->dma_sig, "DMA engine", NULL, NULL,
  530 + &c->dma_ch);
  531 + }
470 532  
	/*
	 * NOTE(review): this debug print reads c->dma_ch before ret is
	 * checked; if omap_request_dma() failed, dma_ch may not have been
	 * assigned — confirm omap_request_dma()'s failure contract.
	 */
471   - ret = omap_request_dma(c->dma_sig, "DMA engine", omap_dma_callback,
472   - c, &c->dma_ch);
  533 + dev_dbg(od->ddev.dev, "allocating channel %u for %u\n",
  534 + c->dma_ch, c->dma_sig);
473 535  
474   - if (ret >= 0)
  536 + if (ret >= 0) {
	/* Map registers and register the channel for IRQ dispatch. */
475 537 omap_dma_assign(od, c, c->dma_ch);
476 538  
  539 + if (!od->legacy) {
  540 + unsigned val;
  541 +
	/*
	 * Under irq_lock: clear any stale status for this channel,
	 * then route its interrupt to the L1 line (ours) and mask
	 * it on L0 (the legacy/other consumer's line).
	 */
  542 + spin_lock_irq(&od->irq_lock);
  543 + val = BIT(c->dma_ch);
  544 + omap_dma_glbl_write(od, IRQSTATUS_L1, val);
  545 + od->irq_enable_mask |= val;
  546 + omap_dma_glbl_write(od, IRQENABLE_L1, od->irq_enable_mask);
  547 +
  548 + val = omap_dma_glbl_read(od, IRQENABLE_L0);
  549 + val &= ~BIT(c->dma_ch);
  550 + omap_dma_glbl_write(od, IRQENABLE_L0, val);
  551 + spin_unlock_irq(&od->irq_lock);
  552 + }
  553 + }
  554 +
477 555 return ret;
478 556 }
479 557  
480 558 static void omap_dma_free_chan_resources(struct dma_chan *chan)
481 559 {
  560 + struct omap_dmadev *od = to_omap_dma_dev(chan->device);
482 561 struct omap_chan *c = to_omap_dma_chan(chan);
483 562  
	/*
	 * When we own the IRQ (non-legacy), mask this channel on the L1
	 * line under irq_lock before tearing the channel down, so the
	 * handler stops dispatching to it.
	 */
  563 + if (!od->legacy) {
  564 + spin_lock_irq(&od->irq_lock);
  565 + od->irq_enable_mask &= ~BIT(c->dma_ch);
  566 + omap_dma_glbl_write(od, IRQENABLE_L1, od->irq_enable_mask);
  567 + spin_unlock_irq(&od->irq_lock);
  568 + }
  569 +
	/* Drop the register mapping and the IRQ-dispatch entry, then
	 * release the channel back to the legacy allocator. */
484 570 c->channel_base = NULL;
  571 + od->lch_map[c->dma_ch] = NULL;
485 572 vchan_free_chan_resources(&c->vc);
486 573 omap_free_dma(c->dma_ch);
487 574  
488   - dev_dbg(c->vc.chan.device->dev, "freeing channel for %u\n", c->dma_sig);
  575 + dev_dbg(od->ddev.dev, "freeing channel for %u\n", c->dma_sig);
489 576 }
490 577  
491 578 static size_t omap_dma_sg_size(struct omap_sg *sg)
... ... @@ -1015,7 +1102,7 @@
1015 1102 {
1016 1103 struct omap_dmadev *od;
1017 1104 struct resource *res;
1018   - int rc, i;
  1105 + int rc, i, irq;
1019 1106  
1020 1107 od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL);
1021 1108 if (!od)
... ... @@ -1045,6 +1132,7 @@
1045 1132 INIT_LIST_HEAD(&od->ddev.channels);
1046 1133 INIT_LIST_HEAD(&od->pending);
1047 1134 spin_lock_init(&od->lock);
  1135 + spin_lock_init(&od->irq_lock);
1048 1136  
1049 1137 tasklet_init(&od->task, omap_dma_sched, (unsigned long)od);
1050 1138  
... ... @@ -1056,6 +1144,21 @@
1056 1144 }
1057 1145 }
1058 1146  
  1147 + irq = platform_get_irq(pdev, 1);
  1148 + if (irq <= 0) {
  1149 + dev_info(&pdev->dev, "failed to get L1 IRQ: %d\n", irq);
  1150 + od->legacy = true;
  1151 + } else {
  1152 + /* Disable all interrupts */
  1153 + od->irq_enable_mask = 0;
  1154 + omap_dma_glbl_write(od, IRQENABLE_L1, 0);
  1155 +
  1156 + rc = devm_request_irq(&pdev->dev, irq, omap_dma_irq,
  1157 + IRQF_SHARED, "omap-dma-engine", od);
  1158 + if (rc)
  1159 + return rc;
  1160 + }
  1161 +
1059 1162 rc = dma_async_device_register(&od->ddev);
1060 1163 if (rc) {
1061 1164 pr_warn("OMAP-DMA: failed to register slave DMA engine device: %d\n",
... ... @@ -1092,6 +1195,12 @@
1092 1195 of_dma_controller_free(pdev->dev.of_node);
1093 1196  
1094 1197 dma_async_device_unregister(&od->ddev);
  1198 +
  1199 + if (!od->legacy) {
  1200 + /* Disable all interrupts */
  1201 + omap_dma_glbl_write(od, IRQENABLE_L0, 0);
  1202 + }
  1203 +
1095 1204 omap_dma_free(od);
1096 1205  
1097 1206 return 0;