Commit f8f9a8c38644d27dc8671009209922531b072110

Authored by Daniel Mack
Committed by Rajendra Nayak
Parent: dfc2e2a6ca

ARM: omap: edma: add suspend/resume hooks (v4-modified)

This patch makes the edma driver resume correctly after suspend. Tested
on an AM33xx platform with cyclic audio streams and omap_hsmmc.

All register state can be reconstructed at resume time from runtime
information the driver already holds: the cached platform data
(cc->info), the in-use channel bitmap, and the per-channel interrupt
callbacks. Nothing needs to be saved on suspend.

Some functions that were previously called only from __init context are
now also needed on resume, so their __init annotations had to be
dropped.
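
For context, a minimal illustration of the __init issue (a sketch, not
part of the patch): code marked __init lands in .init.text and is freed
once boot completes ("Freeing unused kernel memory"), so anything now
reachable from the resume path has to live in regular .text:

    /*
     * Placed in .init.text and discarded after boot; calling this
     * from edma_pm_resume() would jump into freed memory.
     */
    static void __init map_queue_tc(unsigned ctlr, int queue_no, int tc_no);

    /* Kept in regular .text; safe to call from the resume_early phase. */
    static void map_queue_tc(unsigned ctlr, int queue_no, int tc_no);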

[nm@ti.com: added error handling for runtime + suspend_late/resume_early]
Signed-off-by: Nishanth Menon <nm@ti.com>
Signed-off-by: Daniel Mack <zonque@gmail.com>
Tested-by: Joel Fernandes <joelf@ti.com>
Acked-by: Joel Fernandes <joelf@ti.com>

Showing 1 changed file with 90 additions and 3 deletions

arch/arm/common/edma.c
... ... @@ -239,6 +239,8 @@
239 239 /* list of channels with no event trigger; terminated by "-1" */
240 240 const s8 *noevent;
241 241  
  242 + struct edma_soc_info *info;
  243 +
242 244 /* The edma_inuse bit for each PaRAM slot is clear unless the
243 245 * channel is in use ... by ARM or DSP, for QDMA, or whatever.
244 246 */
245 247  
... ... @@ -285,13 +287,13 @@
285 287 ~(0x7 << bit), queue_no << bit);
286 288 }
287 289  
288   -static void __init map_queue_tc(unsigned ctlr, int queue_no, int tc_no)
  290 +static void map_queue_tc(unsigned ctlr, int queue_no, int tc_no)
289 291 {
290 292 int bit = queue_no * 4;
291 293 edma_modify(ctlr, EDMA_QUETCMAP, ~(0x7 << bit), ((tc_no & 0x7) << bit));
292 294 }
293 295  
294   -static void __init assign_priority_to_queue(unsigned ctlr, int queue_no,
  296 +static void assign_priority_to_queue(unsigned ctlr, int queue_no,
295 297 int priority)
296 298 {
297 299 int bit = queue_no * 4;
... ... @@ -310,7 +312,7 @@
310 312 * included in that particular EDMA variant (Eg : dm646x)
311 313 *
312 314 */
313   -static void __init map_dmach_param(unsigned ctlr)
  315 +static void map_dmach_param(unsigned ctlr)
314 316 {
315 317 int i;
316 318 for (i = 0; i < EDMA_MAX_DMACH; i++)
317 319  
... ... @@ -1772,12 +1774,96 @@
1772 1774 edma_write_array2(j, EDMA_DRAE, i, 1, 0x0);
1773 1775 edma_write_array(j, EDMA_QRAE, i, 0x0);
1774 1776 }
  1777 + edma_cc[j]->info = info[j];
1775 1778 arch_num_cc++;
1776 1779 }
1777 1780  
1778 1781 return 0;
1779 1782 }
1780 1783  
  1784 +static int edma_pm_suspend(struct device *dev)
  1785 +{
  1786 + int j, r;
  1787 +
  1788 + r = pm_runtime_get_sync(dev);
  1789 + if (IS_ERR_VALUE(r)) {
  1790 + dev_err(dev, "%s: get_sync returned %d\n", __func__, r);
  1791 + return r;
  1792 + }
  1793 +
  1794 + for (j = 0; j < arch_num_cc; j++) {
  1795 + struct edma *ecc = edma_cc[j];
  1796 +
  1797 + disable_irq(ecc->irq_res_start);
  1798 + disable_irq(ecc->irq_res_end);
  1799 + }
  1800 +
  1801 + pm_runtime_put_sync(dev);
  1802 +
  1803 + return 0;
  1804 +}
  1805 +
  1806 +static int edma_pm_resume(struct device *dev)
  1807 +{
  1808 + int i, j, r;
  1809 +
  1810 + r = pm_runtime_get_sync(dev);
  1811 + if (IS_ERR_VALUE(r)) {
  1812 + dev_err(dev, "%s: get_sync returned %d\n", __func__, r);
  1813 + return r;
  1814 + }
  1815 +
  1816 + for (j = 0; j < arch_num_cc; j++) {
  1817 + struct edma *cc = edma_cc[j];
  1818 +
  1819 + s8 (*queue_priority_mapping)[2];
  1820 + s8 (*queue_tc_mapping)[2];
  1821 +
  1822 + queue_tc_mapping = cc->info->queue_tc_mapping;
  1823 + queue_priority_mapping = cc->info->queue_priority_mapping;
  1824 +
  1825 + /* Event queue to TC mapping */
  1826 + for (i = 0; queue_tc_mapping[i][0] != -1; i++)
  1827 + map_queue_tc(j, queue_tc_mapping[i][0],
  1828 + queue_tc_mapping[i][1]);
  1829 +
  1830 + /* Event queue priority mapping */
  1831 + for (i = 0; queue_priority_mapping[i][0] != -1; i++)
  1832 + assign_priority_to_queue(j,
  1833 + queue_priority_mapping[i][0],
  1834 + queue_priority_mapping[i][1]);
  1835 +
  1836 + /* Map the channel to param entry if channel mapping logic
  1837 + * exists
  1838 + */
  1839 + if (edma_read(j, EDMA_CCCFG) & CHMAP_EXIST)
  1840 + map_dmach_param(j);
  1841 +
  1842 + for (i = 0; i < cc->num_channels; i++)
  1843 + if (test_bit(i, cc->edma_inuse)) {
  1844 + /* ensure access through shadow region 0 */
  1845 + edma_or_array2(j, EDMA_DRAE, 0, i >> 5,
  1846 + BIT(i & 0x1f));
  1847 +
  1848 + setup_dma_interrupt(i,
  1849 + cc->intr_data[i].callback,
  1850 + cc->intr_data[i].data);
  1851 + }
  1852 +
  1853 + enable_irq(cc->irq_res_start);
  1854 + enable_irq(cc->irq_res_end);
  1855 + }
  1856 +
  1857 + pm_runtime_put_sync(dev);
  1858 +
  1859 + return 0;
  1860 +}
  1861 +
  1862 +static const struct dev_pm_ops edma_pm_ops = {
  1863 + .suspend_late = edma_pm_suspend,
  1864 + .resume_early = edma_pm_resume,
  1865 +};
  1866 +
1781 1867 static const struct of_device_id edma_of_ids[] = {
1782 1868 { .compatible = "ti,edma3", },
1783 1869 {}
... ... @@ -1786,6 +1872,7 @@
1786 1872 static struct platform_driver edma_driver = {
1787 1873 .driver = {
1788 1874 .name = "edma",
  1875 + .pm = &edma_pm_ops,
1789 1876 .of_match_table = edma_of_ids,
1790 1877 },
1791 1878 .probe = edma_probe,
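
For reference, the late/early wiring above could also be expressed with
the kernel's SET_LATE_SYSTEM_SLEEP_PM_OPS() helper, which fills in the
same callbacks (plus their hibernation counterparts) and compiles them
out when CONFIG_PM_SLEEP is disabled; a sketch, not what this patch does:

    static const struct dev_pm_ops edma_pm_ops = {
        /*
         * Expands to .suspend_late/.resume_early assignments (and the
         * freeze/thaw/poweroff/restore equivalents) under CONFIG_PM_SLEEP.
         */
        SET_LATE_SYSTEM_SLEEP_PM_OPS(edma_pm_suspend, edma_pm_resume)
    };

The late/early phases are the right fit here: client drivers such as the
audio and MMC users suspend in the normal phase first, so their DMA
channels are already quiesced when the controller suspends, and the
controller state is restored before any client resumes.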