Commit 58c7ccbf9109abcc6b7ed2f76c21ebee244d31a8
Committed by: Chris Ball
Parent: 9782aff8df
mmc: mmci: implement pre_req() and post_req()
pre_req() runs dma_map_sg() and prepares the DMA descriptor for the next
mmc data transfer. post_req() runs dma_unmap_sg(). If pre_req() is not
called before mmci_request(), mmci_request() will prepare the cache and
DMA just as it did before. Using pre_req() and post_req() is optional
for mmci.

Signed-off-by: Per Forlin <per.forlin@linaro.org>
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Chris Ball <cjb@laptop.org>
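For orientation, the call ordering these hooks plug into looks roughly like
this from the core's side. This is a simplified sketch, not the actual mmc
core code: issue_overlapped() and its surrounding flow are illustrative
only; the real logic lives in the core's non-blocking request path.

#include <linux/mmc/host.h>

/*
 * Illustrative only: a condensed view of how the core can overlap
 * preparation of request N+1 with the transfer of request N using
 * the pre_req()/post_req() hooks added by this patch.
 */
static void issue_overlapped(struct mmc_host *mmc,
			     struct mmc_request *cur,
			     struct mmc_request *next)
{
	/* Map sg lists and build the DMA descriptor for 'next' early,
	 * while 'cur' is (or is about to be) on the wire. */
	if (next && mmc->ops->pre_req)
		mmc->ops->pre_req(mmc, next, false);

	mmc->ops->request(mmc, cur);	/* start the current transfer */

	/* ... block until 'cur' completes ... */

	/* Unmap 'cur' after completion; 0 = no error. */
	if (mmc->ops->post_req)
		mmc->ops->post_req(mmc, cur, 0);
}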
Showing 2 changed files with 142 additions and 13 deletions
drivers/mmc/host/mmci.c
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -226,6 +226,9 @@
 		return;
 	}
 
+	/* initialize pre request cookie */
+	host->next_data.cookie = 1;
+
 	/* Try to acquire a generic DMA engine slave channel */
 	dma_cap_zero(mask);
 	dma_cap_set(DMA_SLAVE, mask);
@@ -335,7 +338,8 @@
 		dir = DMA_FROM_DEVICE;
 	}
 
-	dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);
+	if (!data->host_cookie)
+		dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);
 
 	/*
 	 * Use of DMA with scatter-gather is impossible.
@@ -353,7 +357,8 @@
 	dmaengine_terminate_all(host->dma_current);
 }
 
-static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
+static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
+			      struct mmci_host_next *next)
 {
 	struct variant_data *variant = host->variant;
 	struct dma_slave_config conf = {
@@ -364,14 +369,21 @@
 		.src_maxburst = variant->fifohalfsize >> 2, /* # of words */
 		.dst_maxburst = variant->fifohalfsize >> 2, /* # of words */
 	};
-	struct mmc_data *data = host->data;
 	struct dma_chan *chan;
 	struct dma_device *device;
 	struct dma_async_tx_descriptor *desc;
 	int nr_sg;
 
-	host->dma_current = NULL;
+	/* Check if next job is already prepared */
+	if (data->host_cookie && !next &&
+	    host->dma_current && host->dma_desc_current)
+		return 0;
 
+	if (!next) {
+		host->dma_current = NULL;
+		host->dma_desc_current = NULL;
+	}
+
 	if (data->flags & MMC_DATA_READ) {
 		conf.direction = DMA_FROM_DEVICE;
 		chan = host->dma_rx_channel;
@@ -385,7 +397,7 @@
 		return -EINVAL;
 
 	/* If less than or equal to the fifo size, don't bother with DMA */
-	if (host->size <= variant->fifosize)
+	if (data->blksz * data->blocks <= variant->fifosize)
 		return -EINVAL;
 
 	device = chan->device;
@@ -399,14 +411,38 @@
 	if (!desc)
 		goto unmap_exit;
 
-	/* Okay, go for it. */
-	host->dma_current = chan;
+	if (next) {
+		next->dma_chan = chan;
+		next->dma_desc = desc;
+	} else {
+		host->dma_current = chan;
+		host->dma_desc_current = desc;
+	}
 
+	return 0;
+
+ unmap_exit:
+	if (!next)
+		dmaengine_terminate_all(chan);
+	dma_unmap_sg(device->dev, data->sg, data->sg_len, conf.direction);
+	return -ENOMEM;
+}
+
+static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
+{
+	int ret;
+	struct mmc_data *data = host->data;
+
+	ret = mmci_dma_prep_data(host, host->data, NULL);
+	if (ret)
+		return ret;
+
+	/* Okay, go for it. */
 	dev_vdbg(mmc_dev(host->mmc),
 		 "Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n",
 		 data->sg_len, data->blksz, data->blocks, data->flags);
-	dmaengine_submit(desc);
-	dma_async_issue_pending(chan);
+	dmaengine_submit(host->dma_desc_current);
+	dma_async_issue_pending(host->dma_current);
 
 	datactrl |= MCI_DPSM_DMAENABLE;
 
@@ -421,14 +457,90 @@
 	writel(readl(host->base + MMCIMASK0) | MCI_DATAENDMASK,
 	       host->base + MMCIMASK0);
 	return 0;
+}
 
-unmap_exit:
-	dmaengine_terminate_all(chan);
-	dma_unmap_sg(device->dev, data->sg, data->sg_len, conf.direction);
-	return -ENOMEM;
+static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
+{
+	struct mmci_host_next *next = &host->next_data;
+
+	if (data->host_cookie && data->host_cookie != next->cookie) {
+		printk(KERN_WARNING "[%s] invalid cookie: data->host_cookie %d"
+		       " host->next_data.cookie %d\n",
+		       __func__, data->host_cookie, host->next_data.cookie);
+		data->host_cookie = 0;
+	}
+
+	if (!data->host_cookie)
+		return;
+
+	host->dma_desc_current = next->dma_desc;
+	host->dma_current = next->dma_chan;
+
+	next->dma_desc = NULL;
+	next->dma_chan = NULL;
 }
+
+static void mmci_pre_request(struct mmc_host *mmc, struct mmc_request *mrq,
+			     bool is_first_req)
+{
+	struct mmci_host *host = mmc_priv(mmc);
+	struct mmc_data *data = mrq->data;
+	struct mmci_host_next *nd = &host->next_data;
+
+	if (!data)
+		return;
+
+	if (data->host_cookie) {
+		data->host_cookie = 0;
+		return;
+	}
+
+	/* if config for dma */
+	if (((data->flags & MMC_DATA_WRITE) && host->dma_tx_channel) ||
+	    ((data->flags & MMC_DATA_READ) && host->dma_rx_channel)) {
+		if (mmci_dma_prep_data(host, data, nd))
+			data->host_cookie = 0;
+		else
+			data->host_cookie = ++nd->cookie < 0 ? 1 : nd->cookie;
+	}
+}
+
+static void mmci_post_request(struct mmc_host *mmc, struct mmc_request *mrq,
+			      int err)
+{
+	struct mmci_host *host = mmc_priv(mmc);
+	struct mmc_data *data = mrq->data;
+	struct dma_chan *chan;
+	enum dma_data_direction dir;
+
+	if (!data)
+		return;
+
+	if (data->flags & MMC_DATA_READ) {
+		dir = DMA_FROM_DEVICE;
+		chan = host->dma_rx_channel;
+	} else {
+		dir = DMA_TO_DEVICE;
+		chan = host->dma_tx_channel;
+	}
+
+
+	/* if config for dma */
+	if (chan) {
+		if (err)
+			dmaengine_terminate_all(chan);
+		if (err || data->host_cookie)
+			dma_unmap_sg(mmc_dev(host->mmc), data->sg,
+				     data->sg_len, dir);
+		mrq->data->host_cookie = 0;
+	}
+}
+
 #else
 /* Blank functions if the DMA engine is not available */
+static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
+{
+}
 static inline void mmci_dma_setup(struct mmci_host *host)
 {
 }
@@ -449,6 +561,10 @@
 {
 	return -ENOSYS;
 }
+
+#define mmci_pre_request NULL
+#define mmci_post_request NULL
+
 #endif
 
 static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
@@ -872,6 +988,9 @@
 
 	host->mrq = mrq;
 
+	if (mrq->data)
+		mmci_get_next_data(host, mrq->data);
+
 	if (mrq->data && mrq->data->flags & MMC_DATA_READ)
 		mmci_start_data(host, mrq->data);
 
@@ -986,6 +1105,8 @@
 
 static const struct mmc_host_ops mmci_ops = {
 	.request	= mmci_request,
+	.pre_req	= mmci_pre_request,
+	.post_req	= mmci_post_request,
 	.set_ios	= mmci_set_ios,
 	.get_ro		= mmci_get_ro,
 	.get_cd		= mmci_get_cd,
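A note on the cookie handshake above: mmci_pre_request() stamps
data->host_cookie with host->next_data.cookie, and mmci_get_next_data()
later checks that the two still match before adopting the prepared
descriptor; 0 is reserved to mean "nothing prepared", so the increment
guards against the signed counter wrapping non-positive. A minimal
userspace illustration of that guard (next_cookie() is a made-up name,
not part of the driver; in the kernel itself signed wraparound is
well defined because it is built with -fno-strict-overflow):

#include <assert.h>

/*
 * Mirrors "data->host_cookie = ++nd->cookie < 0 ? 1 : nd->cookie;"
 * from mmci_pre_request(): 0 means "no prepared DMA descriptor",
 * so a wrapped (negative) counter hands out 1 instead.
 */
static int next_cookie(int *cookie)
{
	return ++*cookie < 0 ? 1 : *cookie;
}

int main(void)
{
	int c = 1;
	assert(next_cookie(&c) == 2);	/* normal case: just increments */

	c = -2;				/* pretend the counter wrapped */
	assert(next_cookie(&c) == 1);	/* guarded case: never hands out <= 0 */
	return 0;
}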
drivers/mmc/host/mmci.h
--- a/drivers/mmc/host/mmci.h
+++ b/drivers/mmc/host/mmci.h
@@ -166,6 +166,12 @@
 struct variant_data;
 struct dma_chan;
 
+struct mmci_host_next {
+	struct dma_async_tx_descriptor	*dma_desc;
+	struct dma_chan			*dma_chan;
+	s32				cookie;
+};
+
 struct mmci_host {
 	phys_addr_t		phybase;
 	void __iomem		*base;
@@ -203,6 +209,8 @@
 	struct dma_chan		*dma_current;
 	struct dma_chan		*dma_rx_channel;
 	struct dma_chan		*dma_tx_channel;
+	struct dma_async_tx_descriptor	*dma_desc_current;
+	struct mmci_host_next	next_data;
 
 #define dma_inprogress(host)	((host)->dma_current)
 #else
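Taken together with the mmci.c changes, the two new fields carry a
prepared job through three stages; a comment-form summary of the flow
(descriptive only, restating what the diff above implements):

/*
 * Hand-off of a prepared transfer:
 *
 * 1. pre_req:  mmci_pre_request()
 *      mmci_dma_prep_data(host, data, &host->next_data) maps the sg
 *      list and builds the descriptor; next_data.dma_chan/.dma_desc
 *      store the result and data->host_cookie is set to
 *      next_data.cookie (nonzero).
 *
 * 2. request:  mmci_request() -> mmci_get_next_data()
 *      If the cookie matches, dma_desc_current/dma_current take over
 *      the prepared job and the next_data slots are cleared for the
 *      request after it.
 *
 * 3. post_req: mmci_post_request()
 *      dma_unmap_sg() runs (plus dmaengine_terminate_all() on error)
 *      and host_cookie is reset to 0.
 */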