Commit 97991657be8d85c2883ca477964f271d8c1bb96d

Authored by Magnus Damm
Committed by Paul Mundt
1 parent c3dada1894

ARM: mach-shmobile: sh7372 Core Standby Suspend-to-RAM

Add sh7372 Core Standby sleep mode support and tie it
in with the shared SH-Mobile ARM suspend code.

The Core Standby mode is the lightest sh7372-specific
sleep mode, cutting power to the ARM core excluding the
L2 cache. Any interrupt source can be used for wakeups.

The low level portion of this code is based on the
TI OMAP sleep code in sleep34xx.S, thanks to them.

Signed-off-by: Magnus Damm <damm@opensource.se>
Signed-off-by: Paul Mundt <lethal@linux-sh.org>

Showing 6 changed files with 345 additions and 0 deletions Side-by-side Diff

arch/arm/mach-shmobile/Makefile
... ... @@ -32,6 +32,7 @@
32 32  
33 33 # PM objects
34 34 obj-$(CONFIG_SUSPEND) += suspend.o
  35 +obj-$(CONFIG_ARCH_SH7372) += pm-sh7372.o sleep-sh7372.o
35 36  
36 37 # Board objects
37 38 obj-$(CONFIG_MACH_G3EVM) += board-g3evm.o
arch/arm/mach-shmobile/board-ap4evb.c
... ... @@ -1354,6 +1354,7 @@
1354 1354  
1355 1355 hdmi_init_pm_clock();
1356 1356 fsi_init_pm_clock();
  1357 + sh7372_pm_init();
1357 1358 }
1358 1359  
1359 1360 static void __init ap4evb_timer_init(void)
arch/arm/mach-shmobile/board-mackerel.c
... ... @@ -1230,6 +1230,7 @@
1230 1230 platform_add_devices(mackerel_devices, ARRAY_SIZE(mackerel_devices));
1231 1231  
1232 1232 hdmi_init_pm_clock();
  1233 + sh7372_pm_init();
1233 1234 }
1234 1235  
1235 1236 static void __init mackerel_timer_init(void)
arch/arm/mach-shmobile/include/mach/common.h
... ... @@ -31,6 +31,9 @@
31 31 extern void sh7372_add_standard_devices(void);
32 32 extern void sh7372_clock_init(void);
33 33 extern void sh7372_pinmux_init(void);
  34 +extern void sh7372_pm_init(void);
  35 +extern void sh7372_cpu_suspend(void);
  36 +extern void sh7372_cpu_resume(void);
34 37 extern struct clk sh7372_extal1_clk;
35 38 extern struct clk sh7372_extal2_clk;
36 39  
arch/arm/mach-shmobile/pm-sh7372.c
  1 +/*
  2 + * sh7372 Power management support
  3 + *
  4 + * Copyright (C) 2011 Magnus Damm
  5 + *
  6 + * This file is subject to the terms and conditions of the GNU General Public
  7 + * License. See the file "COPYING" in the main directory of this archive
  8 + * for more details.
  9 + */
  10 +
  11 +#include <linux/pm.h>
  12 +#include <linux/suspend.h>
  13 +#include <linux/module.h>
  14 +#include <linux/list.h>
  15 +#include <linux/err.h>
  16 +#include <linux/slab.h>
  17 +#include <asm/system.h>
  18 +#include <asm/io.h>
  19 +#include <asm/tlbflush.h>
  20 +#include <mach/common.h>
  21 +
  22 +#define SMFRAM 0xe6a70000
  23 +#define SYSTBCR 0xe6150024
  24 +#define SBAR 0xe6180020
  25 +#define APARMBAREA 0xe6f10020
  26 +
  27 +#ifdef CONFIG_SUSPEND
   + /*
   +  * Enter the sh7372 "Core Standby" sleep state: the ARM core powers
   +  * down (L2 stays on) and restarts through the reset vector that is
   +  * programmed into SBAR below.
   +  *
   +  * SMFRAM offsets 0x38/0x3c/0x40 form a hand-shake with the low-level
   +  * code in sleep-sh7372.S: sh7372_cpu_suspend saves the control
   +  * register at +0x38, and sh7372_cpu_resume records the page table
   +  * entry it patched (+0x3c = address, +0x40 = original data) only
   +  * when the core actually lost power.
   +  */
  28 +static void sh7372_enter_core_standby(void)
  29 +{
  30 + void __iomem *smfram = (void __iomem *)SMFRAM;
  31 +
  32 + __raw_writel(0, APARMBAREA); /* translate 4k */
  33 + __raw_writel(__pa(sh7372_cpu_resume), SBAR); /* set reset vector */
  34 + __raw_writel(0x10, SYSTBCR); /* enable core standby */
  35 +
  36 + __raw_writel(0, smfram + 0x3c); /* clear page table address */
  37 +
  38 + sh7372_cpu_suspend();
  39 + cpu_init(); /* NOTE(review): presumably re-inits ARM mode stacks after wakeup — confirm */
  40 +
  41 + /* if page table address is non-NULL then we have been powered down */
  42 + if (__raw_readl(smfram + 0x3c)) {
   + /* undo the 1:1 section mapping sh7372_cpu_resume patched in */
  43 + __raw_writel(__raw_readl(smfram + 0x40),
  44 + __va(__raw_readl(smfram + 0x3c)));
  45 +
  46 + flush_tlb_all();
   + /* restore the control register saved by sh7372_cpu_suspend */
  47 + set_cr(__raw_readl(smfram + 0x38));
  48 + }
  49 +
  50 + __raw_writel(0, SYSTBCR); /* disable core standby */
  51 + __raw_writel(0, SBAR); /* disable reset vector translation */
  52 +}
  53 +
   + /* suspend_ops.enter callback: every suspend state maps to Core Standby */
  54 +static int sh7372_enter_suspend(suspend_state_t suspend_state)
  55 +{
  56 + sh7372_enter_core_standby();
  57 + return 0;
  58 +}
  59 +
   + /* hook Core Standby into the shared SH-Mobile ARM suspend_ops */
  60 +static void sh7372_suspend_init(void)
  61 +{
  62 + shmobile_suspend_ops.enter = sh7372_enter_suspend;
  63 +}
  64 +#else
   + /* no-op when CONFIG_SUSPEND is disabled */
  65 +static void sh7372_suspend_init(void) {}
  66 +#endif
  67 +
  68 +#define DBGREG1 0xe6100020
  69 +#define DBGREG9 0xe6100040
  70 +
   + /*
   +  * Board-level PM setup entry point (called from the ap4evb and
   +  * mackerel board init code): wake up the DBG hardware block so SYSC
   +  * power requests work, then register the Core Standby suspend hook.
   +  */
  71 +void __init sh7372_pm_init(void)
  72 +{
  73 + /* enable DBG hardware block to kick SYSC */
   + /* NOTE(review): the a500/a501 pair looks like an unlock key sequence — datasheet-specific, confirm */
  74 + __raw_writel(0x0000a500, DBGREG9);
  75 + __raw_writel(0x0000a501, DBGREG9);
  76 + __raw_writel(0x00000000, DBGREG1);
  77 +
  78 + sh7372_suspend_init();
  79 +}
arch/arm/mach-shmobile/sleep-sh7372.S
  1 +/*
  2 + * sh7372 lowlevel sleep code for "Core Standby Mode"
  3 + *
  4 + * Copyright (C) 2011 Magnus Damm
  5 + *
  6 + * In "Core Standby Mode" the ARM core is off, but L2 cache is still on
  7 + *
  8 + * Based on mach-omap2/sleep34xx.S
  9 + *
  10 + * (C) Copyright 2007 Texas Instruments
  11 + * Karthik Dasu <karthik-dp@ti.com>
  12 + *
  13 + * (C) Copyright 2004 Texas Instruments, <www.ti.com>
  14 + * Richard Woodruff <r-woodruff2@ti.com>
  15 + *
  16 + * This program is free software; you can redistribute it and/or
  17 + * modify it under the terms of the GNU General Public License as
  18 + * published by the Free Software Foundation; either version 2 of
  19 + * the License, or (at your option) any later version.
  20 + *
  21 + * This program is distributed in the hope that it will be useful,
  22 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
  23 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  24 + * GNU General Public License for more details.
  25 + *
  26 + * You should have received a copy of the GNU General Public License
  27 + * along with this program; if not, write to the Free Software
  28 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
  29 + * MA 02111-1307 USA
  30 + */
  31 +
  32 +#include <linux/linkage.h>
  33 +#include <asm/assembler.h>
  34 +
  35 +#define SMFRAM 0xe6a70000
  36 +
   +@ sh7372_cpu_suspend: save CPU state to on-chip SMFRAM, flush and
   +@ disable the D-cache, then WFI.  If the core was merely idled (no
   +@ power loss) execution falls through the WFI and returns via the
   +@ final ldmfd; a real power-down instead restarts execution at
   +@ sh7372_cpu_resume (via the SBAR reset vector set in pm-sh7372.c).
  37 + .align
  38 +kernel_flush:
   +@ literal pool slot holding the address of the kernel D-cache flush routine
  39 + .word v7_flush_dcache_all
  40 +
  41 + .align 3
ENTRY(sh7372_cpu_suspend)
  43 + stmfd sp!, {r0-r12, lr} @ save registers on stack
  44 +
  45 + ldr r8, =SMFRAM
  46 +
  47 + mov r4, sp @ Store sp
  48 + mrs r5, spsr @ Store spsr
  49 + mov r6, lr @ Store lr
  50 + stmia r8!, {r4-r6}
  51 +
  52 + mrc p15, 0, r4, c1, c0, 2 @ Coprocessor access control register
  53 + mrc p15, 0, r5, c2, c0, 0 @ TTBR0
  54 + mrc p15, 0, r6, c2, c0, 1 @ TTBR1
  55 + mrc p15, 0, r7, c2, c0, 2 @ TTBCR
  56 + stmia r8!, {r4-r7}
  57 +
  58 + mrc p15, 0, r4, c3, c0, 0 @ Domain access Control Register
  59 + mrc p15, 0, r5, c10, c2, 0 @ PRRR
  60 + mrc p15, 0, r6, c10, c2, 1 @ NMRR
  61 + stmia r8!,{r4-r6}
  62 +
  63 + mrc p15, 0, r4, c13, c0, 1 @ Context ID
  64 + mrc p15, 0, r5, c13, c0, 2 @ User r/w thread and process ID
  65 + mrc p15, 0, r6, c12, c0, 0 @ Secure or NS vector base address
  66 + mrs r7, cpsr @ Store current cpsr
  67 + stmia r8!, {r4-r7}
  68 +
  69 + mrc p15, 0, r4, c1, c0, 0 @ save control register
   +@ SMFRAM+0x38 now holds SCTLR; pm-sh7372.c reads it back after a power-down
  70 + stmia r8!, {r4}
  71 +
  72 + /*
  73 + * jump out to kernel flush routine
  74 + * - reuse that code is better
  75 + * - it executes in a cached space so is faster than refetch per-block
  76 + * - should be faster and will change with kernel
  77 + * - 'might' have to copy address, load and jump to it
  78 + * Flush all data from the L1 data cache before disabling
  79 + * SCTLR.C bit.
  80 + */
  81 + ldr r1, kernel_flush
  82 + mov lr, pc
  83 + bx r1
  84 +
  85 + /*
  86 + * Clear the SCTLR.C bit to prevent further data cache
  87 + * allocation. Clearing SCTLR.C would make all the data accesses
  88 + * strongly ordered and would not hit the cache.
  89 + */
  90 + mrc p15, 0, r0, c1, c0, 0
  91 + bic r0, r0, #(1 << 2) @ Disable the C bit
  92 + mcr p15, 0, r0, c1, c0, 0
  93 + isb
  94 +
  95 + /*
  96 + * Invalidate L1 data cache. Even though only invalidate is
  97 + * necessary exported flush API is used here. Doing clean
  98 + * on already clean cache would be almost NOP.
  99 + */
  100 + ldr r1, kernel_flush
  101 + blx r1
  102 + /*
  103 + * The kernel doesn't interwork: v7_flush_dcache_all in particular will
  104 + * always return in Thumb state when CONFIG_THUMB2_KERNEL is enabled.
  105 + * This sequence switches back to ARM. Note that .align may insert a
  106 + * nop: bx pc needs to be word-aligned in order to work.
  107 + */
  108 + THUMB( .thumb )
  109 + THUMB( .align )
  110 + THUMB( bx pc )
  111 + THUMB( nop )
  112 + .arm
  113 +
  114 + /* Data memory barrier and Data sync barrier */
  115 + dsb
  116 + dmb
  117 +
/*
  119 + * ===================================
  120 + * == WFI instruction => Enter idle ==
  121 + * ===================================
  122 + */
  123 + wfi @ wait for interrupt
  124 +
/*
  126 + * ===================================
  127 + * == Resume path for non-OFF modes ==
  128 + * ===================================
  129 + */
   +@ only reached when the core did NOT lose power: re-enable the C bit if we cleared it
  130 + mrc p15, 0, r0, c1, c0, 0
  131 + tst r0, #(1 << 2) @ Check C bit enabled?
  132 + orreq r0, r0, #(1 << 2) @ Enable the C bit if cleared
  133 + mcreq p15, 0, r0, c1, c0, 0
  134 + isb
  135 +
/*
  137 + * ===================================
  138 + * == Exit point from non-OFF modes ==
  139 + * ===================================
  140 + */
  141 + ldmfd sp!, {r0-r12, pc} @ restore regs and return
  142 +
  143 + .pool
  144 +
   +@ sh7372_cpu_resume: reset-vector entry after a Core Standby power-down.
   +@ Runs with the MMU off, reached through the physical alias set up via
   +@ SBAR/APARMBAREA in pm-sh7372.c.  Restores the CP15 state saved by
   +@ sh7372_cpu_suspend, patches a 1:1 section mapping covering this code
   +@ so the MMU can be turned on, and records that patch in SMFRAM
   +@ (+0x3c/+0x40/+0x44) so pm-sh7372.c can undo it afterwards.
  145 + .align 12
  146 + .text
  147 + .global sh7372_cpu_resume
sh7372_cpu_resume:
  149 +
  150 + mov r1, #0
  151 + /*
  152 + * Invalidate all instruction caches to PoU
  153 + * and flush branch target cache
  154 + */
  155 + mcr p15, 0, r1, c7, c5, 0
  156 +
  157 + ldr r3, =SMFRAM
  158 +
  159 + ldmia r3!, {r4-r6}
  160 + mov sp, r4 @ Restore sp
  161 + msr spsr_cxsf, r5 @ Restore spsr
  162 + mov lr, r6 @ Restore lr
  163 +
  164 + ldmia r3!, {r4-r7}
  165 + mcr p15, 0, r4, c1, c0, 2 @ Coprocessor access Control Register
  166 + mcr p15, 0, r5, c2, c0, 0 @ TTBR0
  167 + mcr p15, 0, r6, c2, c0, 1 @ TTBR1
  168 + mcr p15, 0, r7, c2, c0, 2 @ TTBCR
  169 +
  170 + ldmia r3!,{r4-r6}
  171 + mcr p15, 0, r4, c3, c0, 0 @ Domain access Control Register
  172 + mcr p15, 0, r5, c10, c2, 0 @ PRRR
  173 + mcr p15, 0, r6, c10, c2, 1 @ NMRR
  174 +
  175 + ldmia r3!,{r4-r7}
  176 + mcr p15, 0, r4, c13, c0, 1 @ Context ID
  177 + mcr p15, 0, r5, c13, c0, 2 @ User r/w thread and process ID
  178 + mrc p15, 0, r6, c12, c0, 0 @ Secure or NS vector base address
   +@ NOTE(review): the line above is mrc (read) while its neighbours are
   +@ mcr (write): it clobbers the saved VBAR in r6 instead of restoring
   +@ it.  Same construct exists in OMAP sleep34xx.S that this is based
   +@ on — confirm whether mcr was intended.
  179 + msr cpsr, r7 @ restore cpsr
  180 +
  181 + /* Starting to enable MMU here */
  182 + mrc p15, 0, r7, c2, c0, 2 @ Read TTBRControl
  183 + /* Extract N (0:2) bits and decide whether to use TTBR0 or TTBR1 */
  184 + and r7, #0x7
  185 + cmp r7, #0x0
  186 + beq usettbr0
ttbr_error:
  188 + /*
  189 + * More work needs to be done to support N[0:2] value other than 0
  190 + * So looping here so that the error can be detected
  191 + */
  192 + b ttbr_error
  193 +
  194 + .align
cache_pred_disable_mask:
   +@ SCTLR mask clearing I-cache, C bit and branch prediction enables
  196 + .word 0xFFFFE7FB
ttbrbit_mask:
  198 + .word 0xFFFFC000
table_index_mask:
  200 + .word 0xFFF00000
table_entry:
   +@ value for a 1MB section descriptor (AP=r/w, default memory attributes)
  202 + .word 0x00000C02
usettbr0:
  204 +
  205 + mrc p15, 0, r2, c2, c0, 0
  206 + ldr r5, ttbrbit_mask
  207 + and r2, r5
  208 + mov r4, pc
  209 + ldr r5, table_index_mask
  210 + and r4, r5 @ r4 = 31 to 20 bits of pc
  211 + /* Extract the value to be written to table entry */
  212 + ldr r6, table_entry
  213 + /* r6 has the value to be written to table entry */
  214 + add r6, r6, r4
  215 + /* Getting the address of table entry to modify */
  216 + lsr r4, #18
  217 + /* r2 has the location which needs to be modified */
  218 + add r2, r4
  219 + ldr r4, [r2]
  220 + str r6, [r2] /* modify the table entry */
  221 +
  222 + mov r7, r6
  223 + mov r5, r2
  224 + mov r6, r4
  225 + /* r5 = original page table address */
  226 + /* r6 = original page table data */
  227 +
  228 + mov r0, #0
  229 + mcr p15, 0, r0, c7, c5, 4 @ Flush prefetch buffer
  230 + mcr p15, 0, r0, c7, c5, 6 @ Invalidate branch predictor array
  231 + mcr p15, 0, r0, c8, c5, 0 @ Invalidate instruction TLB
  232 + mcr p15, 0, r0, c8, c6, 0 @ Invalidate data TLB
  233 +
  234 + /*
  235 + * Restore control register. This enables the MMU.
  236 + * The caches and prediction are not enabled here, they
  237 + * will be enabled after restoring the MMU table entry.
  238 + */
   +@ r3 now points at SMFRAM+0x38: load the saved SCTLR, then log the
   +@ page-table patch at +0x3c/+0x40/+0x44 for pm-sh7372.c to undo
  239 + ldmia r3!, {r4}
  240 + stmia r3!, {r5} /* save original page table address */
  241 + stmia r3!, {r6} /* save original page table data */
  242 + stmia r3!, {r7} /* save modified page table data */
  243 +
  244 + ldr r2, cache_pred_disable_mask
  245 + and r4, r2
  246 + mcr p15, 0, r4, c1, c0, 0
  247 + dsb
  248 + isb
  249 +
   +@ jump to the virtual address of restoremmu_on now that the MMU is on
  250 + ldr r0, =restoremmu_on
  251 + bx r0
  252 +
/*
  254 + * ==============================
  255 + * == Exit point from OFF mode ==
  256 + * ==============================
  257 + */
restoremmu_on:
  259 +
  260 + ldmfd sp!, {r0-r12, pc} @ restore regs and return