Commit 3a774ea91a5d05e7af58db6ae1ba298263c4a3d3

Authored by Russell King
Parent: 7c836bc7f9

dmaengine: omap: add support for cyclic DMA

Add support for cyclic DMA to the OMAP DMA engine driver.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
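
For a client driver, cyclic mode turns one descriptor into a self-repeating transfer over a ring buffer split into equal periods, with a completion callback per period rather than per descriptor (the usual arrangement for audio). A minimal client-side sketch, assuming a channel that has already been requested and given a slave config; start_cyclic_tx and its parameters are hypothetical, and the dmaengine_prep_dma_cyclic() wrapper has changed signature across kernel versions (older trees passed a context pointer instead of flags, as the driver hook below does):

	#include <linux/dmaengine.h>

	/* Hypothetical client: feed a device FIFO from a DMA-mapped ring
	 * buffer; period_cb is invoked once per completed period.
	 */
	static int start_cyclic_tx(struct dma_chan *chan, dma_addr_t buf,
				   size_t buf_len, size_t period_len,
				   dma_async_tx_callback period_cb, void *arg)
	{
		struct dma_async_tx_descriptor *desc;

		desc = dmaengine_prep_dma_cyclic(chan, buf, buf_len,
						 period_len, DMA_MEM_TO_DEV,
						 DMA_PREP_INTERRUPT);
		if (!desc)
			return -EINVAL;

		desc->callback = period_cb;
		desc->callback_param = arg;

		dmaengine_submit(desc);
		dma_async_issue_pending(chan);
		return 0;
	}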

1 file changed, 89 insertions(+), 4 deletions(-)

--- a/drivers/dma/omap-dma.c
+++ b/drivers/dma/omap-dma.c
@@ -33,6 +33,7 @@
 
 	struct dma_slave_config cfg;
 	unsigned dma_sig;
+	bool cyclic;
 
 	int dma_ch;
 	struct omap_desc *desc;
 
@@ -138,11 +139,15 @@
 	spin_lock_irqsave(&c->vc.lock, flags);
 	d = c->desc;
 	if (d) {
-		if (++c->sgidx < d->sglen) {
-			omap_dma_start_sg(c, d, c->sgidx);
+		if (!c->cyclic) {
+			if (++c->sgidx < d->sglen) {
+				omap_dma_start_sg(c, d, c->sgidx);
+			} else {
+				omap_dma_start_desc(c);
+				vchan_cookie_complete(&d->vd);
+			}
 		} else {
-			omap_dma_start_desc(c);
-			vchan_cookie_complete(&d->vd);
+			vchan_cyclic_callback(&d->vd);
 		}
 	}
 	spin_unlock_irqrestore(&c->vc.lock, flags);
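
Note the split above: for non-cyclic channels the handler still walks the scatterlist and completes the descriptor, while a cyclic channel never completes; each interrupt only signals one finished period. The virt-dma helper defers the client callback out of hard-IRQ context, roughly along these lines (a paraphrased sketch of drivers/dma/virt-dma.h, details may differ by kernel version):

	/* Record which descriptor ticked and let the channel tasklet call
	 * the client's callback outside the IRQ handler and vc->lock.
	 */
	static inline void vchan_cyclic_callback(struct virt_dma_desc *vd)
	{
		struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);

		vc->cyclic = vd;
		tasklet_schedule(&vc->task);
	}
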
@@ -358,6 +363,79 @@
 	return vchan_tx_prep(&c->vc, &d->vd, tx_flags);
 }
 
+static struct dma_async_tx_descriptor *omap_dma_prep_dma_cyclic(
+	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
+	size_t period_len, enum dma_transfer_direction dir, void *context)
+{
+	struct omap_chan *c = to_omap_dma_chan(chan);
+	enum dma_slave_buswidth dev_width;
+	struct omap_desc *d;
+	dma_addr_t dev_addr;
+	unsigned es, sync_type;
+	u32 burst;
+
+	if (dir == DMA_DEV_TO_MEM) {
+		dev_addr = c->cfg.src_addr;
+		dev_width = c->cfg.src_addr_width;
+		burst = c->cfg.src_maxburst;
+		sync_type = OMAP_DMA_SRC_SYNC;
+	} else if (dir == DMA_MEM_TO_DEV) {
+		dev_addr = c->cfg.dst_addr;
+		dev_width = c->cfg.dst_addr_width;
+		burst = c->cfg.dst_maxburst;
+		sync_type = OMAP_DMA_DST_SYNC;
+	} else {
+		dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
+		return NULL;
+	}
+
+	/* Bus width translates to the element size (ES) */
+	switch (dev_width) {
+	case DMA_SLAVE_BUSWIDTH_1_BYTE:
+		es = OMAP_DMA_DATA_TYPE_S8;
+		break;
+	case DMA_SLAVE_BUSWIDTH_2_BYTES:
+		es = OMAP_DMA_DATA_TYPE_S16;
+		break;
+	case DMA_SLAVE_BUSWIDTH_4_BYTES:
+		es = OMAP_DMA_DATA_TYPE_S32;
+		break;
+	default: /* not reached */
+		return NULL;
+	}
+
+	/* Now allocate and setup the descriptor. */
+	d = kzalloc(sizeof(*d) + sizeof(d->sg[0]), GFP_ATOMIC);
+	if (!d)
+		return NULL;
+
+	d->dir = dir;
+	d->dev_addr = dev_addr;
+	d->fi = burst;
+	d->es = es;
+	d->sync_mode = OMAP_DMA_SYNC_PACKET;
+	d->sync_type = sync_type;
+	d->periph_port = OMAP_DMA_PORT_MPUI;
+	d->sg[0].addr = buf_addr;
+	d->sg[0].en = period_len / es_bytes[es];
+	d->sg[0].fn = buf_len / period_len;
+	d->sglen = 1;
+
+	if (!c->cyclic) {
+		c->cyclic = true;
+		omap_dma_link_lch(c->dma_ch, c->dma_ch);
+		omap_enable_dma_irq(c->dma_ch, OMAP_DMA_FRAME_IRQ);
+		omap_disable_dma_irq(c->dma_ch, OMAP_DMA_BLOCK_IRQ);
+	}
+
+	if (!cpu_class_is_omap1()) {
+		omap_set_dma_src_burst_mode(c->dma_ch, OMAP_DMA_DATA_BURST_16);
+		omap_set_dma_dest_burst_mode(c->dma_ch, OMAP_DMA_DATA_BURST_16);
+	}
+
+	return vchan_tx_prep(&c->vc, &d->vd, DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
+}
+
 static int omap_dma_slave_config(struct omap_chan *c, struct dma_slave_config *cfg)
 {
 	if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
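
The prep routine maps the ring buffer onto the OMAP element/frame model: one period becomes one frame of en elements of es_bytes[es] each, the buffer holds fn frames, and the FRAME_IRQ enabled above then fires once per period. Linking the channel to itself with omap_dma_link_lch(c->dma_ch, c->dma_ch) makes the hardware restart from the top of the buffer without CPU intervention. A worked example of the en/fn arithmetic, with hypothetical sizes:

	/* Hypothetical: 32 KiB ring buffer, 4 KiB periods, 16-bit device. */
	size_t buf_len    = 32768;
	size_t period_len = 4096;
	size_t es_size    = 2;	/* OMAP_DMA_DATA_TYPE_S16: es_bytes[es] == 2 */

	unsigned en = period_len / es_size;	/* 2048 elements per frame */
	unsigned fn = buf_len / period_len;	/* 8 frames in the buffer  */
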
@@ -392,6 +470,11 @@
 		omap_stop_dma(c->dma_ch);
 	}
 
+	if (c->cyclic) {
+		c->cyclic = false;
+		omap_dma_unlink_lch(c->dma_ch, c->dma_ch);
+	}
+
 	vchan_get_all_descriptors(&c->vc, &head);
 	spin_unlock_irqrestore(&c->vc.lock, flags);
 	vchan_dma_desc_free_list(&c->vc, &head);
 
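
Terminating a cyclic channel drops the self-link as well, so the just-stopped channel cannot silently re-arm. From the client side this is the ordinary stop path, something like:

	/* Stop the stream; in this driver that also undoes the self-link. */
	dmaengine_terminate_all(chan);
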
@@ -484,11 +567,13 @@
 		return -ENOMEM;
 
 	dma_cap_set(DMA_SLAVE, od->ddev.cap_mask);
+	dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask);
 	od->ddev.device_alloc_chan_resources = omap_dma_alloc_chan_resources;
 	od->ddev.device_free_chan_resources = omap_dma_free_chan_resources;
 	od->ddev.device_tx_status = omap_dma_tx_status;
 	od->ddev.device_issue_pending = omap_dma_issue_pending;
 	od->ddev.device_prep_slave_sg = omap_dma_prep_slave_sg;
+	od->ddev.device_prep_dma_cyclic = omap_dma_prep_dma_cyclic;
 	od->ddev.device_control = omap_dma_control;
 	od->ddev.dev = &pdev->dev;
 	INIT_LIST_HEAD(&od->ddev.channels);
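
With DMA_CYCLIC set in the capability mask, generic clients can test for cyclic support before preparing a transfer; a short sketch of such a check (the surrounding error handling is hypothetical):

	/* Skip channels whose controller does not advertise cyclic DMA. */
	if (!dma_has_cap(DMA_CYCLIC, chan->device->cap_mask))
		return -ENODEV;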