Commit 158e06653bf8de65087d7bbf56d0ebac303379d4
1 parent
3bd1e28683
Exists in
v3.2_SMARCT335xPSP_04.06.00.11
and in
3 other branches
ARM: OMAP: AM33XX: PM: Enable GPIO0 wakeup for standby
Keep the GPIO0 module enabled during standby so that the GPIO0 io-pads can wake the system from standby mode. Signed-off-by: Satyanarayana Sandhya <sandhya.satyanarayana@ti.com>
Showing 3 changed files with 57 additions and 3 deletions Inline Diff
arch/arm/mach-omap2/pm33xx.c
1 | /* | 1 | /* |
2 | * AM33XX Power Management Routines | 2 | * AM33XX Power Management Routines |
3 | * | 3 | * |
4 | * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/ | 4 | * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/ |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or | 6 | * This program is free software; you can redistribute it and/or |
7 | * modify it under the terms of the GNU General Public License as | 7 | * modify it under the terms of the GNU General Public License as |
8 | * published by the Free Software Foundation version 2. | 8 | * published by the Free Software Foundation version 2. |
9 | * | 9 | * |
10 | * This program is distributed "as is" WITHOUT ANY WARRANTY of any | 10 | * This program is distributed "as is" WITHOUT ANY WARRANTY of any |
11 | * kind, whether express or implied; without even the implied warranty | 11 | * kind, whether express or implied; without even the implied warranty |
12 | * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 12 | * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
13 | * GNU General Public License for more details. | 13 | * GNU General Public License for more details. |
14 | */ | 14 | */ |
15 | 15 | ||
16 | #include <linux/kernel.h> | 16 | #include <linux/kernel.h> |
17 | #include <linux/init.h> | 17 | #include <linux/init.h> |
18 | #include <linux/clk.h> | 18 | #include <linux/clk.h> |
19 | #include <linux/console.h> | 19 | #include <linux/console.h> |
20 | #include <linux/err.h> | 20 | #include <linux/err.h> |
21 | #include <linux/firmware.h> | 21 | #include <linux/firmware.h> |
22 | #include <linux/io.h> | 22 | #include <linux/io.h> |
23 | #include <linux/platform_device.h> | 23 | #include <linux/platform_device.h> |
24 | #include <linux/sched.h> | 24 | #include <linux/sched.h> |
25 | #include <linux/suspend.h> | 25 | #include <linux/suspend.h> |
26 | #include <linux/completion.h> | 26 | #include <linux/completion.h> |
27 | #include <linux/pm_runtime.h> | 27 | #include <linux/pm_runtime.h> |
28 | 28 | ||
29 | #include <mach/board-am335xevm.h> | 29 | #include <mach/board-am335xevm.h> |
30 | #include <plat/prcm.h> | 30 | #include <plat/prcm.h> |
31 | #include <plat/mailbox.h> | 31 | #include <plat/mailbox.h> |
32 | #include <plat/sram.h> | 32 | #include <plat/sram.h> |
33 | #include <plat/omap_hwmod.h> | 33 | #include <plat/omap_hwmod.h> |
34 | #include <plat/omap_device.h> | 34 | #include <plat/omap_device.h> |
35 | #include <plat/emif.h> | 35 | #include <plat/emif.h> |
36 | 36 | ||
37 | #include <asm/suspend.h> | 37 | #include <asm/suspend.h> |
38 | #include <asm/proc-fns.h> | 38 | #include <asm/proc-fns.h> |
39 | #include <asm/sizes.h> | 39 | #include <asm/sizes.h> |
40 | 40 | ||
41 | #include "pm.h" | 41 | #include "pm.h" |
42 | #include "cm33xx.h" | 42 | #include "cm33xx.h" |
43 | #include "pm33xx.h" | 43 | #include "pm33xx.h" |
44 | #include "control.h" | 44 | #include "control.h" |
45 | #include "clockdomain.h" | 45 | #include "clockdomain.h" |
46 | #include "powerdomain.h" | 46 | #include "powerdomain.h" |
47 | 47 | ||
48 | void (*am33xx_do_wfi_sram)(u32 *); | 48 | void (*am33xx_do_wfi_sram)(u32 *); |
49 | 49 | ||
50 | #define DS_MODE DS0_ID /* DS0/1_ID */ | 50 | #define DS_MODE DS0_ID /* DS0/1_ID */ |
51 | #define MODULE_DISABLE 0x0 | 51 | #define MODULE_DISABLE 0x0 |
52 | #define MODULE_ENABLE 0x2 | 52 | #define MODULE_ENABLE 0x2 |
53 | 53 | ||
54 | #ifdef CONFIG_SUSPEND | 54 | #ifdef CONFIG_SUSPEND |
55 | void __iomem *ipc_regs; | 55 | void __iomem *ipc_regs; |
56 | void __iomem *m3_eoi; | 56 | void __iomem *m3_eoi; |
57 | void __iomem *m3_code; | 57 | void __iomem *m3_code; |
58 | u32 suspend_cfg_param_list[SUSPEND_CFG_PARAMS_END]; | 58 | u32 suspend_cfg_param_list[SUSPEND_CFG_PARAMS_END]; |
59 | 59 | ||
60 | bool enable_deep_sleep = true; | 60 | bool enable_deep_sleep = true; |
61 | static suspend_state_t suspend_state = PM_SUSPEND_ON; | 61 | static suspend_state_t suspend_state = PM_SUSPEND_ON; |
62 | 62 | ||
63 | static struct device *mpu_dev; | 63 | static struct device *mpu_dev; |
64 | static struct omap_mbox *m3_mbox; | 64 | static struct omap_mbox *m3_mbox; |
65 | static struct powerdomain *cefuse_pwrdm, *gfx_pwrdm, *per_pwrdm; | 65 | static struct powerdomain *cefuse_pwrdm, *gfx_pwrdm, *per_pwrdm; |
66 | static struct clockdomain *gfx_l3_clkdm, *gfx_l4ls_clkdm; | 66 | static struct clockdomain *gfx_l3_clkdm, *gfx_l4ls_clkdm; |
67 | 67 | ||
68 | static int m3_state = M3_STATE_UNKNOWN; | 68 | static int m3_state = M3_STATE_UNKNOWN; |
69 | static int m3_version = M3_VERSION_UNKNOWN; | 69 | static int m3_version = M3_VERSION_UNKNOWN; |
70 | 70 | ||
71 | static int am33xx_ipc_cmd(struct a8_wkup_m3_ipc_data *); | 71 | static int am33xx_ipc_cmd(struct a8_wkup_m3_ipc_data *); |
72 | static int am33xx_verify_lp_state(int); | 72 | static int am33xx_verify_lp_state(int); |
73 | static void am33xx_m3_state_machine_reset(void); | 73 | static void am33xx_m3_state_machine_reset(void); |
74 | 74 | ||
75 | static DECLARE_COMPLETION(a8_m3_sync); | 75 | static DECLARE_COMPLETION(a8_m3_sync); |
76 | 76 | ||
/*
 * Late suspend preparation: save the current padconf state and switch
 * the pinmux over to its suspend configuration. Always succeeds.
 */
static int am33xx_pm_prepare_late(void)
{
	am335x_save_padconf();
	am33xx_setup_pinmux_on_suspend();

	return 0;
}
86 | 86 | ||
/* Resume counterpart of am33xx_pm_prepare_late(): restore the padconf */
static void am33xx_pm_finish(void)
{
	am335x_restore_padconf();
}
91 | 91 | ||
92 | static int am33xx_do_sram_idle(long unsigned int state) | 92 | static int am33xx_do_sram_idle(long unsigned int state) |
93 | { | 93 | { |
94 | am33xx_do_wfi_sram(&suspend_cfg_param_list[0]); | 94 | am33xx_do_wfi_sram(&suspend_cfg_param_list[0]); |
95 | 95 | ||
96 | return 0; | 96 | return 0; |
97 | } | 97 | } |
98 | 98 | ||
99 | static int am33xx_pm_suspend(void) | 99 | static int am33xx_pm_suspend(void) |
100 | { | 100 | { |
101 | int state, ret = 0; | 101 | int state, ret = 0; |
102 | 102 | ||
103 | struct omap_hwmod *gpmc_oh, *usb_oh, *gpio1_oh; | 103 | struct omap_hwmod *gpmc_oh, *usb_oh, *gpio1_oh; |
104 | 104 | ||
105 | usb_oh = omap_hwmod_lookup("usb_otg_hs"); | 105 | usb_oh = omap_hwmod_lookup("usb_otg_hs"); |
106 | gpmc_oh = omap_hwmod_lookup("gpmc"); | 106 | gpmc_oh = omap_hwmod_lookup("gpmc"); |
107 | gpio1_oh = omap_hwmod_lookup("gpio1"); /* WKUP domain GPIO */ | 107 | gpio1_oh = omap_hwmod_lookup("gpio1"); /* WKUP domain GPIO */ |
108 | 108 | ||
109 | omap_hwmod_enable(usb_oh); | 109 | omap_hwmod_enable(usb_oh); |
110 | omap_hwmod_enable(gpmc_oh); | 110 | omap_hwmod_enable(gpmc_oh); |
111 | 111 | ||
112 | /* | 112 | /* |
113 | * Keep USB module enabled during standby | 113 | * Keep USB module enabled during standby |
114 | * to enable USB remote wakeup | 114 | * to enable USB remote wakeup |
115 | * Note: This will result in hard-coding USB state | 115 | * Note: This will result in hard-coding USB state |
116 | * during standby | 116 | * during standby |
117 | */ | 117 | */ |
118 | if (suspend_state != PM_SUSPEND_STANDBY) | 118 | if (suspend_state != PM_SUSPEND_STANDBY) |
119 | omap_hwmod_idle(usb_oh); | 119 | omap_hwmod_idle(usb_oh); |
120 | 120 | ||
121 | omap_hwmod_idle(gpmc_oh); | 121 | omap_hwmod_idle(gpmc_oh); |
122 | 122 | ||
123 | /* | 123 | /* |
124 | * Disable the GPIO module. This ensure that | 124 | * Disable the GPIO module. This ensure that |
125 | * only sWAKEUP interrupts to Cortex-M3 get generated | 125 | * only sWAKEUP interrupts to Cortex-M3 get generated |
126 | * | 126 | * |
127 | * XXX: EVM_SK uses a GPIO0 pin for VTP control | 127 | * XXX: EVM_SK uses a GPIO0 pin for VTP control |
128 | * in suspend and hence we can't do this for EVM_SK | 128 | * in suspend and hence we can't do this for EVM_SK |
129 | * alone. The side-effect of this is that GPIO wakeup | 129 | * alone. The side-effect of this is that GPIO wakeup |
130 | * might have issues. Refer to commit 672639b for the | 130 | * might have issues. Refer to commit 672639b for the |
131 | * details | 131 | * details |
132 | */ | 132 | */ |
133 | if (suspend_cfg_param_list[EVM_ID] != EVM_SK) | 133 | /* |
134 | * Keep GPIO0 module enabled during standby to | ||
135 | * support wakeup via GPIO0 keys. | ||
136 | */ | ||
137 | if ((suspend_cfg_param_list[EVM_ID] != EVM_SK) && | ||
138 | (suspend_state != PM_SUSPEND_STANDBY)) | ||
134 | omap_hwmod_idle(gpio1_oh); | 139 | omap_hwmod_idle(gpio1_oh); |
140 | /* | ||
141 | * Update Suspend_State value to be used in sleep33xx.S to keep | ||
142 | * GPIO0 module enabled during standby for EVM-SK | ||
143 | */ | ||
144 | if (suspend_state == PM_SUSPEND_STANDBY) | ||
145 | suspend_cfg_param_list[SUSPEND_STATE] = PM_STANDBY; | ||
146 | else | ||
147 | suspend_cfg_param_list[SUSPEND_STATE] = PM_DS0; | ||
135 | 148 | ||
136 | /* | 149 | /* |
137 | * Keep Touchscreen module enabled during standby | 150 | * Keep Touchscreen module enabled during standby |
138 | * to enable wakeup from standby. | 151 | * to enable wakeup from standby. |
139 | */ | 152 | */ |
140 | if (suspend_state == PM_SUSPEND_STANDBY) | 153 | if (suspend_state == PM_SUSPEND_STANDBY) |
141 | writel(0x2, AM33XX_CM_WKUP_ADC_TSC_CLKCTRL); | 154 | writel(0x2, AM33XX_CM_WKUP_ADC_TSC_CLKCTRL); |
142 | 155 | ||
143 | if (gfx_l3_clkdm && gfx_l4ls_clkdm) { | 156 | if (gfx_l3_clkdm && gfx_l4ls_clkdm) { |
144 | clkdm_sleep(gfx_l3_clkdm); | 157 | clkdm_sleep(gfx_l3_clkdm); |
145 | clkdm_sleep(gfx_l4ls_clkdm); | 158 | clkdm_sleep(gfx_l4ls_clkdm); |
146 | } | 159 | } |
147 | 160 | ||
148 | /* Try to put GFX to sleep */ | 161 | /* Try to put GFX to sleep */ |
149 | if (gfx_pwrdm) | 162 | if (gfx_pwrdm) |
150 | pwrdm_set_next_pwrst(gfx_pwrdm, PWRDM_POWER_OFF); | 163 | pwrdm_set_next_pwrst(gfx_pwrdm, PWRDM_POWER_OFF); |
151 | else | 164 | else |
152 | pr_err("Could not program GFX to low power state\n"); | 165 | pr_err("Could not program GFX to low power state\n"); |
153 | 166 | ||
154 | omap3_intc_suspend(); | 167 | omap3_intc_suspend(); |
155 | 168 | ||
156 | writel(0x0, AM33XX_CM_MPU_MPU_CLKCTRL); | 169 | writel(0x0, AM33XX_CM_MPU_MPU_CLKCTRL); |
157 | 170 | ||
158 | ret = cpu_suspend(0, am33xx_do_sram_idle); | 171 | ret = cpu_suspend(0, am33xx_do_sram_idle); |
159 | 172 | ||
160 | writel(0x2, AM33XX_CM_MPU_MPU_CLKCTRL); | 173 | writel(0x2, AM33XX_CM_MPU_MPU_CLKCTRL); |
161 | 174 | ||
162 | if (gfx_pwrdm) { | 175 | if (gfx_pwrdm) { |
163 | state = pwrdm_read_pwrst(gfx_pwrdm); | 176 | state = pwrdm_read_pwrst(gfx_pwrdm); |
164 | if (state != PWRDM_POWER_OFF) | 177 | if (state != PWRDM_POWER_OFF) |
165 | pr_err("GFX domain did not transition to low power state\n"); | 178 | pr_err("GFX domain did not transition to low power state\n"); |
166 | else | 179 | else |
167 | pr_info("GFX domain entered low power state\n"); | 180 | pr_info("GFX domain entered low power state\n"); |
168 | } | 181 | } |
169 | 182 | ||
170 | /* XXX: Why do we need to wakeup the clockdomains? */ | 183 | /* XXX: Why do we need to wakeup the clockdomains? */ |
171 | if(gfx_l3_clkdm && gfx_l4ls_clkdm) { | 184 | if(gfx_l3_clkdm && gfx_l4ls_clkdm) { |
172 | clkdm_wakeup(gfx_l3_clkdm); | 185 | clkdm_wakeup(gfx_l3_clkdm); |
173 | clkdm_wakeup(gfx_l4ls_clkdm); | 186 | clkdm_wakeup(gfx_l4ls_clkdm); |
174 | } | 187 | } |
175 | 188 | ||
176 | /* | 189 | /* |
177 | * Touchscreen module was enabled during standby | 190 | * Touchscreen module was enabled during standby |
178 | * Disable it here. | 191 | * Disable it here. |
179 | */ | 192 | */ |
180 | if (suspend_state == PM_SUSPEND_STANDBY) | 193 | if (suspend_state == PM_SUSPEND_STANDBY) |
181 | writel(0x2, AM33XX_CM_WKUP_ADC_TSC_CLKCTRL); | 194 | writel(0x2, AM33XX_CM_WKUP_ADC_TSC_CLKCTRL); |
182 | 195 | ||
183 | /* | 196 | /* |
184 | * Put USB module to idle on resume from standby | 197 | * Put USB module to idle on resume from standby |
185 | */ | 198 | */ |
186 | if (suspend_state == PM_SUSPEND_STANDBY) | 199 | if (suspend_state == PM_SUSPEND_STANDBY) |
187 | omap_hwmod_idle(usb_oh); | 200 | omap_hwmod_idle(usb_oh); |
188 | 201 | ||
189 | ret = am33xx_verify_lp_state(ret); | 202 | ret = am33xx_verify_lp_state(ret); |
190 | 203 | ||
191 | /* | 204 | /* |
192 | * Enable the GPIO module. Once the driver is | 205 | * Enable the GPIO module. Once the driver is |
193 | * fully adapted to runtime PM this will go away | 206 | * fully adapted to runtime PM this will go away |
194 | */ | 207 | */ |
195 | if (suspend_cfg_param_list[EVM_ID] != EVM_SK) | 208 | /* |
209 | * During standby, GPIO was not disabled. Hence no | ||
210 | * need to enable it here. | ||
211 | */ | ||
212 | if ((suspend_cfg_param_list[EVM_ID] != EVM_SK) && | ||
213 | (suspend_state != PM_SUSPEND_STANDBY)) | ||
196 | omap_hwmod_enable(gpio1_oh); | 214 | omap_hwmod_enable(gpio1_oh); |
197 | 215 | ||
198 | return ret; | 216 | return ret; |
199 | } | 217 | } |
200 | 218 | ||
201 | static int am33xx_pm_enter(suspend_state_t unused) | 219 | static int am33xx_pm_enter(suspend_state_t unused) |
202 | { | 220 | { |
203 | int ret = 0; | 221 | int ret = 0; |
204 | 222 | ||
205 | switch (suspend_state) { | 223 | switch (suspend_state) { |
206 | case PM_SUSPEND_STANDBY: | 224 | case PM_SUSPEND_STANDBY: |
207 | case PM_SUSPEND_MEM: | 225 | case PM_SUSPEND_MEM: |
208 | ret = am33xx_pm_suspend(); | 226 | ret = am33xx_pm_suspend(); |
209 | break; | 227 | break; |
210 | default: | 228 | default: |
211 | ret = -EINVAL; | 229 | ret = -EINVAL; |
212 | } | 230 | } |
213 | 231 | ||
214 | return ret; | 232 | return ret; |
215 | } | 233 | } |
216 | 234 | ||
217 | static int am33xx_pm_begin(suspend_state_t state) | 235 | static int am33xx_pm_begin(suspend_state_t state) |
218 | { | 236 | { |
219 | int ret = 0; | 237 | int ret = 0; |
220 | int state_id = 0; | 238 | int state_id = 0; |
221 | 239 | ||
222 | disable_hlt(); | 240 | disable_hlt(); |
223 | 241 | ||
224 | switch (state) { | 242 | switch (state) { |
225 | case PM_SUSPEND_STANDBY: | 243 | case PM_SUSPEND_STANDBY: |
226 | state_id = 0xb; | 244 | state_id = 0xb; |
227 | break; | 245 | break; |
228 | case PM_SUSPEND_MEM: | 246 | case PM_SUSPEND_MEM: |
229 | state_id = 0x3; | 247 | state_id = 0x3; |
230 | break; | 248 | break; |
231 | } | 249 | } |
232 | 250 | ||
233 | /* | 251 | /* |
234 | * Populate the resume address as part of IPC data | 252 | * Populate the resume address as part of IPC data |
235 | * The offset to be added comes from sleep33xx.S | 253 | * The offset to be added comes from sleep33xx.S |
236 | * Add 4 bytes to ensure that resume happens from | 254 | * Add 4 bytes to ensure that resume happens from |
237 | * the word *after* the word which holds the resume offset | 255 | * the word *after* the word which holds the resume offset |
238 | */ | 256 | */ |
239 | am33xx_lp_ipc.resume_addr = (DS_RESUME_BASE + am33xx_resume_offset + 4); | 257 | am33xx_lp_ipc.resume_addr = (DS_RESUME_BASE + am33xx_resume_offset + 4); |
240 | am33xx_lp_ipc.sleep_mode = state_id; | 258 | am33xx_lp_ipc.sleep_mode = state_id; |
241 | am33xx_lp_ipc.ipc_data1 = DS_IPC_DEFAULT; | 259 | am33xx_lp_ipc.ipc_data1 = DS_IPC_DEFAULT; |
242 | am33xx_lp_ipc.ipc_data2 = DS_IPC_DEFAULT; | 260 | am33xx_lp_ipc.ipc_data2 = DS_IPC_DEFAULT; |
243 | 261 | ||
244 | am33xx_ipc_cmd(&am33xx_lp_ipc); | 262 | am33xx_ipc_cmd(&am33xx_lp_ipc); |
245 | 263 | ||
246 | m3_state = M3_STATE_MSG_FOR_LP; | 264 | m3_state = M3_STATE_MSG_FOR_LP; |
247 | 265 | ||
248 | omap_mbox_enable_irq(m3_mbox, IRQ_RX); | 266 | omap_mbox_enable_irq(m3_mbox, IRQ_RX); |
249 | 267 | ||
250 | ret = omap_mbox_msg_send(m3_mbox, 0xABCDABCD); | 268 | ret = omap_mbox_msg_send(m3_mbox, 0xABCDABCD); |
251 | if (ret) { | 269 | if (ret) { |
252 | pr_err("A8<->CM3 MSG for LP failed\n"); | 270 | pr_err("A8<->CM3 MSG for LP failed\n"); |
253 | am33xx_m3_state_machine_reset(); | 271 | am33xx_m3_state_machine_reset(); |
254 | ret = -1; | 272 | ret = -1; |
255 | } | 273 | } |
256 | 274 | ||
257 | if (!wait_for_completion_timeout(&a8_m3_sync, msecs_to_jiffies(5000))) { | 275 | if (!wait_for_completion_timeout(&a8_m3_sync, msecs_to_jiffies(5000))) { |
258 | pr_err("A8<->CM3 sync failure\n"); | 276 | pr_err("A8<->CM3 sync failure\n"); |
259 | am33xx_m3_state_machine_reset(); | 277 | am33xx_m3_state_machine_reset(); |
260 | ret = -1; | 278 | ret = -1; |
261 | } else { | 279 | } else { |
262 | pr_debug("Message sent for entering %s\n", | 280 | pr_debug("Message sent for entering %s\n", |
263 | (DS_MODE == DS0_ID ? "DS0" : "DS1")); | 281 | (DS_MODE == DS0_ID ? "DS0" : "DS1")); |
264 | omap_mbox_disable_irq(m3_mbox, IRQ_RX); | 282 | omap_mbox_disable_irq(m3_mbox, IRQ_RX); |
265 | } | 283 | } |
266 | 284 | ||
267 | suspend_state = state; | 285 | suspend_state = state; |
268 | return ret; | 286 | return ret; |
269 | } | 287 | } |
270 | 288 | ||
271 | static void am33xx_m3_state_machine_reset(void) | 289 | static void am33xx_m3_state_machine_reset(void) |
272 | { | 290 | { |
273 | int ret = 0; | 291 | int ret = 0; |
274 | 292 | ||
275 | am33xx_lp_ipc.resume_addr = 0x0; | 293 | am33xx_lp_ipc.resume_addr = 0x0; |
276 | am33xx_lp_ipc.sleep_mode = 0xe; | 294 | am33xx_lp_ipc.sleep_mode = 0xe; |
277 | am33xx_lp_ipc.ipc_data1 = DS_IPC_DEFAULT; | 295 | am33xx_lp_ipc.ipc_data1 = DS_IPC_DEFAULT; |
278 | am33xx_lp_ipc.ipc_data2 = DS_IPC_DEFAULT; | 296 | am33xx_lp_ipc.ipc_data2 = DS_IPC_DEFAULT; |
279 | 297 | ||
280 | am33xx_ipc_cmd(&am33xx_lp_ipc); | 298 | am33xx_ipc_cmd(&am33xx_lp_ipc); |
281 | 299 | ||
282 | m3_state = M3_STATE_MSG_FOR_RESET; | 300 | m3_state = M3_STATE_MSG_FOR_RESET; |
283 | 301 | ||
284 | ret = omap_mbox_msg_send(m3_mbox, 0xABCDABCD); | 302 | ret = omap_mbox_msg_send(m3_mbox, 0xABCDABCD); |
285 | if (!ret) { | 303 | if (!ret) { |
286 | pr_debug("Message sent for resetting M3 state machine\n"); | 304 | pr_debug("Message sent for resetting M3 state machine\n"); |
287 | if (!wait_for_completion_timeout(&a8_m3_sync, msecs_to_jiffies(5000))) | 305 | if (!wait_for_completion_timeout(&a8_m3_sync, msecs_to_jiffies(5000))) |
288 | pr_err("A8<->CM3 sync failure\n"); | 306 | pr_err("A8<->CM3 sync failure\n"); |
289 | } else { | 307 | } else { |
290 | pr_err("Could not reset M3 state machine!!!\n"); | 308 | pr_err("Could not reset M3 state machine!!!\n"); |
291 | m3_state = M3_STATE_UNKNOWN; | 309 | m3_state = M3_STATE_UNKNOWN; |
292 | } | 310 | } |
293 | } | 311 | } |
294 | 312 | ||
295 | static void am33xx_pm_end(void) | 313 | static void am33xx_pm_end(void) |
296 | { | 314 | { |
297 | suspend_state = PM_SUSPEND_ON; | 315 | suspend_state = PM_SUSPEND_ON; |
298 | 316 | ||
299 | omap_mbox_enable_irq(m3_mbox, IRQ_RX); | 317 | omap_mbox_enable_irq(m3_mbox, IRQ_RX); |
300 | 318 | ||
301 | am33xx_m3_state_machine_reset(); | 319 | am33xx_m3_state_machine_reset(); |
302 | 320 | ||
303 | enable_hlt(); | 321 | enable_hlt(); |
304 | 322 | ||
305 | return; | 323 | return; |
306 | } | 324 | } |
307 | 325 | ||
308 | static int am33xx_pm_valid(suspend_state_t state) | 326 | static int am33xx_pm_valid(suspend_state_t state) |
309 | { | 327 | { |
310 | switch (state) { | 328 | switch (state) { |
311 | case PM_SUSPEND_STANDBY: | 329 | case PM_SUSPEND_STANDBY: |
312 | case PM_SUSPEND_MEM: | 330 | case PM_SUSPEND_MEM: |
313 | return 1; | 331 | return 1; |
314 | default: | 332 | default: |
315 | return 0; | 333 | return 0; |
316 | } | 334 | } |
317 | } | 335 | } |
318 | 336 | ||
319 | static const struct platform_suspend_ops am33xx_pm_ops = { | 337 | static const struct platform_suspend_ops am33xx_pm_ops = { |
320 | .begin = am33xx_pm_begin, | 338 | .begin = am33xx_pm_begin, |
321 | .end = am33xx_pm_end, | 339 | .end = am33xx_pm_end, |
322 | .enter = am33xx_pm_enter, | 340 | .enter = am33xx_pm_enter, |
323 | .valid = am33xx_pm_valid, | 341 | .valid = am33xx_pm_valid, |
324 | .prepare = am33xx_pm_prepare_late, | 342 | .prepare = am33xx_pm_prepare_late, |
325 | .finish = am33xx_pm_finish, | 343 | .finish = am33xx_pm_finish, |
326 | }; | 344 | }; |
327 | 345 | ||
328 | int am33xx_ipc_cmd(struct a8_wkup_m3_ipc_data *data) | 346 | int am33xx_ipc_cmd(struct a8_wkup_m3_ipc_data *data) |
329 | { | 347 | { |
330 | writel(data->resume_addr, ipc_regs); | 348 | writel(data->resume_addr, ipc_regs); |
331 | writel(data->sleep_mode, ipc_regs + 0x4); | 349 | writel(data->sleep_mode, ipc_regs + 0x4); |
332 | writel(data->ipc_data1, ipc_regs + 0x8); | 350 | writel(data->ipc_data1, ipc_regs + 0x8); |
333 | writel(data->ipc_data2, ipc_regs + 0xc); | 351 | writel(data->ipc_data2, ipc_regs + 0xc); |
334 | 352 | ||
335 | return 0; | 353 | return 0; |
336 | } | 354 | } |
337 | 355 | ||
338 | /* return 0 if no reset M3 needed, 1 otherwise */ | 356 | /* return 0 if no reset M3 needed, 1 otherwise */ |
339 | static int am33xx_verify_lp_state(int core_suspend_stat) | 357 | static int am33xx_verify_lp_state(int core_suspend_stat) |
340 | { | 358 | { |
341 | int status, ret = 0; | 359 | int status, ret = 0; |
342 | 360 | ||
343 | if (core_suspend_stat) { | 361 | if (core_suspend_stat) { |
344 | pr_err("Kernel core reported suspend failure\n"); | 362 | pr_err("Kernel core reported suspend failure\n"); |
345 | ret = -1; | 363 | ret = -1; |
346 | goto clear_old_status; | 364 | goto clear_old_status; |
347 | } | 365 | } |
348 | 366 | ||
349 | status = readl(ipc_regs + 0x4); | 367 | status = readl(ipc_regs + 0x4); |
350 | status &= 0xffff0000; | 368 | status &= 0xffff0000; |
351 | 369 | ||
352 | if (status == 0x0) { | 370 | if (status == 0x0) { |
353 | pr_info("Successfully transitioned all domains to low power state\n"); | 371 | pr_info("Successfully transitioned all domains to low power state\n"); |
354 | if (am33xx_lp_ipc.sleep_mode == DS0_ID) | 372 | if (am33xx_lp_ipc.sleep_mode == DS0_ID) |
355 | per_pwrdm->ret_logic_off_counter++; | 373 | per_pwrdm->ret_logic_off_counter++; |
356 | goto clear_old_status; | 374 | goto clear_old_status; |
357 | } else if (status == 0x10000) { | 375 | } else if (status == 0x10000) { |
358 | pr_err("Could not enter low power state\n" | 376 | pr_err("Could not enter low power state\n" |
359 | "Please check for active clocks in PER domain\n"); | 377 | "Please check for active clocks in PER domain\n"); |
360 | ret = -1; | 378 | ret = -1; |
361 | goto clear_old_status; | 379 | goto clear_old_status; |
362 | } else { | 380 | } else { |
363 | pr_err("Something is terribly wrong :(\nStatus = %0x\n", | 381 | pr_err("Something is terribly wrong :(\nStatus = %0x\n", |
364 | status); | 382 | status); |
365 | ret = -1; | 383 | ret = -1; |
366 | } | 384 | } |
367 | 385 | ||
368 | clear_old_status: | 386 | clear_old_status: |
369 | /* After decoding write back the bad status */ | 387 | /* After decoding write back the bad status */ |
370 | status = readl(ipc_regs + 0x4); | 388 | status = readl(ipc_regs + 0x4); |
371 | status &= 0xffff0000; | 389 | status &= 0xffff0000; |
372 | status |= 0x10000; | 390 | status |= 0x10000; |
373 | writel(status, ipc_regs + 0x4); | 391 | writel(status, ipc_regs + 0x4); |
374 | 392 | ||
375 | return ret; | 393 | return ret; |
376 | } | 394 | } |
377 | 395 | ||
378 | /* | 396 | /* |
379 | * Dummy notifier for the mailbox | 397 | * Dummy notifier for the mailbox |
380 | * TODO: Can this be completely removed? | 398 | * TODO: Can this be completely removed? |
381 | */ | 399 | */ |
382 | int wkup_m3_mbox_msg(struct notifier_block *self, unsigned long len, void *msg) | 400 | int wkup_m3_mbox_msg(struct notifier_block *self, unsigned long len, void *msg) |
383 | { | 401 | { |
384 | return 0; | 402 | return 0; |
385 | } | 403 | } |
386 | 404 | ||
387 | static struct notifier_block wkup_m3_mbox_notifier = { | 405 | static struct notifier_block wkup_m3_mbox_notifier = { |
388 | .notifier_call = wkup_m3_mbox_msg, | 406 | .notifier_call = wkup_m3_mbox_msg, |
389 | }; | 407 | }; |
390 | 408 | ||
391 | static irqreturn_t wkup_m3_txev_handler(int irq, void *unused) | 409 | static irqreturn_t wkup_m3_txev_handler(int irq, void *unused) |
392 | { | 410 | { |
393 | writel(0x1, m3_eoi); | 411 | writel(0x1, m3_eoi); |
394 | 412 | ||
395 | if (m3_state == M3_STATE_RESET) { | 413 | if (m3_state == M3_STATE_RESET) { |
396 | m3_state = M3_STATE_INITED; | 414 | m3_state = M3_STATE_INITED; |
397 | m3_version = readl(ipc_regs + 0x8); | 415 | m3_version = readl(ipc_regs + 0x8); |
398 | m3_version &= 0x0000ffff; | 416 | m3_version &= 0x0000ffff; |
399 | if (m3_version == M3_VERSION_UNKNOWN) { | 417 | if (m3_version == M3_VERSION_UNKNOWN) { |
400 | pr_warning("Unable to read CM3 firmware version\n"); | 418 | pr_warning("Unable to read CM3 firmware version\n"); |
401 | } else { | 419 | } else { |
402 | pr_info("Cortex M3 Firmware Version = 0x%x\n", | 420 | pr_info("Cortex M3 Firmware Version = 0x%x\n", |
403 | m3_version); | 421 | m3_version); |
404 | } | 422 | } |
405 | } else if (m3_state == M3_STATE_MSG_FOR_RESET) { | 423 | } else if (m3_state == M3_STATE_MSG_FOR_RESET) { |
406 | m3_state = M3_STATE_INITED; | 424 | m3_state = M3_STATE_INITED; |
407 | omap_mbox_msg_rx_flush(m3_mbox); | 425 | omap_mbox_msg_rx_flush(m3_mbox); |
408 | if (m3_mbox->ops->ack_irq) | 426 | if (m3_mbox->ops->ack_irq) |
409 | m3_mbox->ops->ack_irq(m3_mbox, IRQ_RX); | 427 | m3_mbox->ops->ack_irq(m3_mbox, IRQ_RX); |
410 | complete(&a8_m3_sync); | 428 | complete(&a8_m3_sync); |
411 | } else if (m3_state == M3_STATE_MSG_FOR_LP) { | 429 | } else if (m3_state == M3_STATE_MSG_FOR_LP) { |
412 | omap_mbox_msg_rx_flush(m3_mbox); | 430 | omap_mbox_msg_rx_flush(m3_mbox); |
413 | if (m3_mbox->ops->ack_irq) | 431 | if (m3_mbox->ops->ack_irq) |
414 | m3_mbox->ops->ack_irq(m3_mbox, IRQ_RX); | 432 | m3_mbox->ops->ack_irq(m3_mbox, IRQ_RX); |
415 | complete(&a8_m3_sync); | 433 | complete(&a8_m3_sync); |
416 | } else if (m3_state == M3_STATE_UNKNOWN) { | 434 | } else if (m3_state == M3_STATE_UNKNOWN) { |
417 | pr_err("IRQ %d with CM3 in unknown state\n", irq); | 435 | pr_err("IRQ %d with CM3 in unknown state\n", irq); |
418 | omap_mbox_msg_rx_flush(m3_mbox); | 436 | omap_mbox_msg_rx_flush(m3_mbox); |
419 | if (m3_mbox->ops->ack_irq) | 437 | if (m3_mbox->ops->ack_irq) |
420 | m3_mbox->ops->ack_irq(m3_mbox, IRQ_RX); | 438 | m3_mbox->ops->ack_irq(m3_mbox, IRQ_RX); |
421 | return IRQ_NONE; | 439 | return IRQ_NONE; |
422 | } | 440 | } |
423 | 441 | ||
424 | writel(0x0, m3_eoi); | 442 | writel(0x0, m3_eoi); |
425 | 443 | ||
426 | return IRQ_HANDLED; | 444 | return IRQ_HANDLED; |
427 | } | 445 | } |
428 | 446 | ||
429 | /* Initiliaze WKUP_M3, load the binary blob and let it run */ | 447 | /* Initiliaze WKUP_M3, load the binary blob and let it run */ |
430 | static int wkup_m3_init(void) | 448 | static int wkup_m3_init(void) |
431 | { | 449 | { |
432 | struct clk *m3_clk; | 450 | struct clk *m3_clk; |
433 | struct omap_hwmod *wkup_m3_oh; | 451 | struct omap_hwmod *wkup_m3_oh; |
434 | const struct firmware *firmware; | 452 | const struct firmware *firmware; |
435 | int ret = 0; | 453 | int ret = 0; |
436 | int ipc_reg_r = 0; | 454 | int ipc_reg_r = 0; |
437 | 455 | ||
438 | wkup_m3_oh = omap_hwmod_lookup("wkup_m3"); | 456 | wkup_m3_oh = omap_hwmod_lookup("wkup_m3"); |
439 | 457 | ||
440 | if (!wkup_m3_oh) { | 458 | if (!wkup_m3_oh) { |
441 | pr_err("%s: could not find omap_hwmod\n", __func__); | 459 | pr_err("%s: could not find omap_hwmod\n", __func__); |
442 | ret = -ENODEV; | 460 | ret = -ENODEV; |
443 | goto exit; | 461 | goto exit; |
444 | } | 462 | } |
445 | 463 | ||
446 | ipc_regs = ioremap(A8_M3_IPC_REGS, 0x4*8); | 464 | ipc_regs = ioremap(A8_M3_IPC_REGS, 0x4*8); |
447 | if (!ipc_regs) { | 465 | if (!ipc_regs) { |
448 | pr_err("Could not ioremap the IPC area\b"); | 466 | pr_err("Could not ioremap the IPC area\b"); |
449 | ret = -ENOMEM; | 467 | ret = -ENOMEM; |
450 | goto exit; | 468 | goto exit; |
451 | } | 469 | } |
452 | 470 | ||
453 | m3_eoi = ioremap(M3_TXEV_EOI, 0x4); | 471 | m3_eoi = ioremap(M3_TXEV_EOI, 0x4); |
454 | if (!m3_eoi) { | 472 | if (!m3_eoi) { |
455 | pr_err("Could not ioremap the EOI register\n"); | 473 | pr_err("Could not ioremap the EOI register\n"); |
456 | ret = -ENOMEM; | 474 | ret = -ENOMEM; |
457 | goto err1; | 475 | goto err1; |
458 | } | 476 | } |
459 | 477 | ||
460 | /* Reserve the MBOX for sending messages to M3 */ | 478 | /* Reserve the MBOX for sending messages to M3 */ |
461 | m3_mbox = omap_mbox_get("wkup_m3", &wkup_m3_mbox_notifier); | 479 | m3_mbox = omap_mbox_get("wkup_m3", &wkup_m3_mbox_notifier); |
462 | if (IS_ERR(m3_mbox)) { | 480 | if (IS_ERR(m3_mbox)) { |
463 | pr_err("Could not reserve mailbox for A8->M3 IPC\n"); | 481 | pr_err("Could not reserve mailbox for A8->M3 IPC\n"); |
464 | ret = -ENODEV; | 482 | ret = -ENODEV; |
465 | goto err2; | 483 | goto err2; |
466 | } | 484 | } |
467 | 485 | ||
468 | /* Enable access to the M3 code and data area from A8 */ | 486 | /* Enable access to the M3 code and data area from A8 */ |
469 | m3_clk = clk_get(NULL, "wkup_m3_fck"); | 487 | m3_clk = clk_get(NULL, "wkup_m3_fck"); |
470 | if (IS_ERR(m3_clk)) { | 488 | if (IS_ERR(m3_clk)) { |
471 | pr_err("%s failed to enable WKUP_M3 clock\n", __func__); | 489 | pr_err("%s failed to enable WKUP_M3 clock\n", __func__); |
472 | goto err3; | 490 | goto err3; |
473 | } | 491 | } |
474 | 492 | ||
475 | if (clk_enable(m3_clk)) { | 493 | if (clk_enable(m3_clk)) { |
476 | pr_err("%s WKUP_M3: clock enable Failed\n", __func__); | 494 | pr_err("%s WKUP_M3: clock enable Failed\n", __func__); |
477 | goto err4; | 495 | goto err4; |
478 | } | 496 | } |
479 | 497 | ||
480 | m3_code = ioremap(M3_UMEM, SZ_16K); | 498 | m3_code = ioremap(M3_UMEM, SZ_16K); |
481 | if (!m3_code) { | 499 | if (!m3_code) { |
482 | pr_err("%s Could not ioremap M3 code space\n", __func__); | 500 | pr_err("%s Could not ioremap M3 code space\n", __func__); |
483 | ret = -ENOMEM; | 501 | ret = -ENOMEM; |
484 | goto err5; | 502 | goto err5; |
485 | } | 503 | } |
486 | 504 | ||
487 | pr_info("Trying to load am335x-pm-firmware.bin (60 secs timeout)\n"); | 505 | pr_info("Trying to load am335x-pm-firmware.bin (60 secs timeout)\n"); |
488 | 506 | ||
489 | ret = request_firmware(&firmware, "am335x-pm-firmware.bin", mpu_dev); | 507 | ret = request_firmware(&firmware, "am335x-pm-firmware.bin", mpu_dev); |
490 | if (ret < 0) { | 508 | if (ret < 0) { |
491 | dev_err(mpu_dev, "request_firmware failed\n"); | 509 | dev_err(mpu_dev, "request_firmware failed\n"); |
492 | goto err6; | 510 | goto err6; |
493 | } else { | 511 | } else { |
494 | memcpy(m3_code, firmware->data, firmware->size); | 512 | memcpy(m3_code, firmware->data, firmware->size); |
495 | pr_info("Copied the M3 firmware to UMEM\n"); | 513 | pr_info("Copied the M3 firmware to UMEM\n"); |
496 | } | 514 | } |
497 | 515 | ||
498 | ret = request_irq(AM33XX_IRQ_M3_M3SP_TXEV, wkup_m3_txev_handler, | 516 | ret = request_irq(AM33XX_IRQ_M3_M3SP_TXEV, wkup_m3_txev_handler, |
499 | IRQF_DISABLED, "wkup_m3_txev", NULL); | 517 | IRQF_DISABLED, "wkup_m3_txev", NULL); |
500 | if (ret) { | 518 | if (ret) { |
501 | pr_err("%s request_irq failed for 0x%x\n", __func__, | 519 | pr_err("%s request_irq failed for 0x%x\n", __func__, |
502 | AM33XX_IRQ_M3_M3SP_TXEV); | 520 | AM33XX_IRQ_M3_M3SP_TXEV); |
503 | goto err6; | 521 | goto err6; |
504 | } | 522 | } |
505 | 523 | ||
506 | m3_state = M3_STATE_RESET; | 524 | m3_state = M3_STATE_RESET; |
507 | 525 | ||
508 | /* | 526 | /* |
509 | * Invalidate M3 firmware version before hardreset. | 527 | * Invalidate M3 firmware version before hardreset. |
510 | * Write invalid version in lower 4 nibbles of parameter | 528 | * Write invalid version in lower 4 nibbles of parameter |
511 | * register (ipc_regs + 0x8). | 529 | * register (ipc_regs + 0x8). |
512 | */ | 530 | */ |
513 | ipc_reg_r = readl(ipc_regs + 0x8); | 531 | ipc_reg_r = readl(ipc_regs + 0x8); |
514 | ipc_reg_r &= 0xffff0000; | 532 | ipc_reg_r &= 0xffff0000; |
515 | m3_version |= ipc_reg_r; | 533 | m3_version |= ipc_reg_r; |
516 | writel(m3_version, ipc_regs + 0x8); | 534 | writel(m3_version, ipc_regs + 0x8); |
517 | 535 | ||
518 | ret = omap_hwmod_deassert_hardreset(wkup_m3_oh, "wkup_m3"); | 536 | ret = omap_hwmod_deassert_hardreset(wkup_m3_oh, "wkup_m3"); |
519 | if (ret) { | 537 | if (ret) { |
520 | pr_err("Could not deassert the reset for WKUP_M3\n"); | 538 | pr_err("Could not deassert the reset for WKUP_M3\n"); |
521 | goto err6; | 539 | goto err6; |
522 | } else { | 540 | } else { |
523 | return 0; | 541 | return 0; |
524 | } | 542 | } |
525 | 543 | ||
526 | err6: | 544 | err6: |
527 | release_firmware(firmware); | 545 | release_firmware(firmware); |
528 | iounmap(m3_code); | 546 | iounmap(m3_code); |
529 | err5: | 547 | err5: |
530 | clk_disable(m3_clk); | 548 | clk_disable(m3_clk); |
531 | err4: | 549 | err4: |
532 | clk_put(m3_clk); | 550 | clk_put(m3_clk); |
533 | err3: | 551 | err3: |
534 | omap_mbox_put(m3_mbox, &wkup_m3_mbox_notifier); | 552 | omap_mbox_put(m3_mbox, &wkup_m3_mbox_notifier); |
535 | err2: | 553 | err2: |
536 | iounmap(m3_eoi); | 554 | iounmap(m3_eoi); |
537 | err1: | 555 | err1: |
538 | iounmap(ipc_regs); | 556 | iounmap(ipc_regs); |
539 | exit: | 557 | exit: |
540 | return ret; | 558 | return ret; |
541 | } | 559 | } |
542 | 560 | ||
543 | /* | 561 | /* |
544 | * Initiate sleep transition for other clockdomains, if | 562 | * Initiate sleep transition for other clockdomains, if |
545 | * they are not used | 563 | * they are not used |
546 | */ | 564 | */ |
547 | static int __init clkdms_setup(struct clockdomain *clkdm, void *unused) | 565 | static int __init clkdms_setup(struct clockdomain *clkdm, void *unused) |
548 | { | 566 | { |
549 | if (clkdm->flags & CLKDM_CAN_FORCE_SLEEP && | 567 | if (clkdm->flags & CLKDM_CAN_FORCE_SLEEP && |
550 | atomic_read(&clkdm->usecount) == 0) | 568 | atomic_read(&clkdm->usecount) == 0) |
551 | clkdm_sleep(clkdm); | 569 | clkdm_sleep(clkdm); |
552 | return 0; | 570 | return 0; |
553 | } | 571 | } |
554 | #endif /* CONFIG_SUSPEND */ | 572 | #endif /* CONFIG_SUSPEND */ |
555 | 573 | ||
556 | /* | 574 | /* |
557 | * Push the minimal suspend-resume code to SRAM | 575 | * Push the minimal suspend-resume code to SRAM |
558 | */ | 576 | */ |
559 | void am33xx_push_sram_idle(void) | 577 | void am33xx_push_sram_idle(void) |
560 | { | 578 | { |
561 | am33xx_do_wfi_sram = (void *)omap_sram_push | 579 | am33xx_do_wfi_sram = (void *)omap_sram_push |
562 | (am33xx_do_wfi, am33xx_do_wfi_sz); | 580 | (am33xx_do_wfi, am33xx_do_wfi_sz); |
563 | } | 581 | } |
564 | 582 | ||
565 | static int __init am33xx_pm_init(void) | 583 | static int __init am33xx_pm_init(void) |
566 | { | 584 | { |
567 | int ret; | 585 | int ret; |
568 | #ifdef CONFIG_SUSPEND | 586 | #ifdef CONFIG_SUSPEND |
569 | void __iomem *base; | 587 | void __iomem *base; |
570 | u32 reg; | 588 | u32 reg; |
571 | u32 evm_id; | 589 | u32 evm_id; |
572 | 590 | ||
573 | #endif | 591 | #endif |
574 | if (!cpu_is_am33xx()) | 592 | if (!cpu_is_am33xx()) |
575 | return -ENODEV; | 593 | return -ENODEV; |
576 | 594 | ||
577 | pr_info("Power Management for AM33XX family\n"); | 595 | pr_info("Power Management for AM33XX family\n"); |
578 | 596 | ||
579 | #ifdef CONFIG_SUSPEND | 597 | #ifdef CONFIG_SUSPEND |
580 | /* Read SDRAM_CONFIG register to determine Memory Type */ | 598 | /* Read SDRAM_CONFIG register to determine Memory Type */ |
581 | base = am33xx_get_ram_base(); | 599 | base = am33xx_get_ram_base(); |
582 | reg = readl(base + EMIF4_0_SDRAM_CONFIG); | 600 | reg = readl(base + EMIF4_0_SDRAM_CONFIG); |
583 | reg = (reg & SDRAM_TYPE_MASK) >> SDRAM_TYPE_SHIFT; | 601 | reg = (reg & SDRAM_TYPE_MASK) >> SDRAM_TYPE_SHIFT; |
584 | suspend_cfg_param_list[MEMORY_TYPE] = reg; | 602 | suspend_cfg_param_list[MEMORY_TYPE] = reg; |
585 | 603 | ||
586 | /* | 604 | /* |
587 | * vtp_ctrl register value for DDR2 and DDR3 as suggested | 605 | * vtp_ctrl register value for DDR2 and DDR3 as suggested |
588 | * by h/w team | 606 | * by h/w team |
589 | */ | 607 | */ |
590 | if (reg == MEM_TYPE_DDR2) | 608 | if (reg == MEM_TYPE_DDR2) |
591 | suspend_cfg_param_list[SUSP_VTP_CTRL_VAL] = SUSP_VTP_CTRL_DDR2; | 609 | suspend_cfg_param_list[SUSP_VTP_CTRL_VAL] = SUSP_VTP_CTRL_DDR2; |
592 | else | 610 | else |
593 | suspend_cfg_param_list[SUSP_VTP_CTRL_VAL] = SUSP_VTP_CTRL_DDR3; | 611 | suspend_cfg_param_list[SUSP_VTP_CTRL_VAL] = SUSP_VTP_CTRL_DDR3; |
594 | 612 | ||
595 | 613 | ||
596 | /* Get Board Id */ | 614 | /* Get Board Id */ |
597 | evm_id = am335x_evm_get_id(); | 615 | evm_id = am335x_evm_get_id(); |
598 | if (evm_id != -EINVAL) | 616 | if (evm_id != -EINVAL) |
599 | suspend_cfg_param_list[EVM_ID] = evm_id; | 617 | suspend_cfg_param_list[EVM_ID] = evm_id; |
600 | else | 618 | else |
601 | suspend_cfg_param_list[EVM_ID] = 0xff; | 619 | suspend_cfg_param_list[EVM_ID] = 0xff; |
602 | 620 | ||
603 | /* CPU Revision */ | 621 | /* CPU Revision */ |
604 | reg = omap_rev(); | 622 | reg = omap_rev(); |
605 | if (reg == AM335X_REV_ES2_0) | 623 | if (reg == AM335X_REV_ES2_0) |
606 | suspend_cfg_param_list[CPU_REV] = CPU_REV_2; | 624 | suspend_cfg_param_list[CPU_REV] = CPU_REV_2; |
607 | else | 625 | else |
608 | suspend_cfg_param_list[CPU_REV] = CPU_REV_1; | 626 | suspend_cfg_param_list[CPU_REV] = CPU_REV_1; |
609 | 627 | ||
610 | (void) clkdm_for_each(clkdms_setup, NULL); | 628 | (void) clkdm_for_each(clkdms_setup, NULL); |
611 | 629 | ||
612 | /* CEFUSE domain should be turned off post bootup */ | 630 | /* CEFUSE domain should be turned off post bootup */ |
613 | cefuse_pwrdm = pwrdm_lookup("cefuse_pwrdm"); | 631 | cefuse_pwrdm = pwrdm_lookup("cefuse_pwrdm"); |
614 | if (cefuse_pwrdm == NULL) | 632 | if (cefuse_pwrdm == NULL) |
615 | pr_err("Failed to get cefuse_pwrdm\n"); | 633 | pr_err("Failed to get cefuse_pwrdm\n"); |
616 | else | 634 | else |
617 | pwrdm_set_next_pwrst(cefuse_pwrdm, PWRDM_POWER_OFF); | 635 | pwrdm_set_next_pwrst(cefuse_pwrdm, PWRDM_POWER_OFF); |
618 | 636 | ||
619 | gfx_pwrdm = pwrdm_lookup("gfx_pwrdm"); | 637 | gfx_pwrdm = pwrdm_lookup("gfx_pwrdm"); |
620 | if (gfx_pwrdm == NULL) | 638 | if (gfx_pwrdm == NULL) |
621 | pr_err("Failed to get gfx_pwrdm\n"); | 639 | pr_err("Failed to get gfx_pwrdm\n"); |
622 | 640 | ||
623 | per_pwrdm = pwrdm_lookup("per_pwrdm"); | 641 | per_pwrdm = pwrdm_lookup("per_pwrdm"); |
624 | if (per_pwrdm == NULL) | 642 | if (per_pwrdm == NULL) |
625 | pr_err("Failed to get per_pwrdm\n"); | 643 | pr_err("Failed to get per_pwrdm\n"); |
626 | 644 | ||
627 | gfx_l3_clkdm = clkdm_lookup("gfx_l3_clkdm"); | 645 | gfx_l3_clkdm = clkdm_lookup("gfx_l3_clkdm"); |
628 | if (gfx_l3_clkdm == NULL) | 646 | if (gfx_l3_clkdm == NULL) |
629 | pr_err("Failed to get gfx_l3_clkdm\n"); | 647 | pr_err("Failed to get gfx_l3_clkdm\n"); |
630 | 648 | ||
631 | gfx_l4ls_clkdm = clkdm_lookup("gfx_l4ls_gfx_clkdm"); | 649 | gfx_l4ls_clkdm = clkdm_lookup("gfx_l4ls_gfx_clkdm"); |
632 | if (gfx_l4ls_clkdm == NULL) | 650 | if (gfx_l4ls_clkdm == NULL) |
633 | pr_err("Failed to get gfx_l4ls_gfx_clkdm\n"); | 651 | pr_err("Failed to get gfx_l4ls_gfx_clkdm\n"); |
634 | 652 | ||
635 | mpu_dev = omap_device_get_by_hwmod_name("mpu"); | 653 | mpu_dev = omap_device_get_by_hwmod_name("mpu"); |
636 | 654 | ||
637 | if (!mpu_dev) { | 655 | if (!mpu_dev) { |
638 | pr_warning("%s: unable to get the mpu device\n", __func__); | 656 | pr_warning("%s: unable to get the mpu device\n", __func__); |
639 | return -EINVAL; | 657 | return -EINVAL; |
640 | } | 658 | } |
641 | 659 | ||
642 | ret = wkup_m3_init(); | 660 | ret = wkup_m3_init(); |
643 | 661 | ||
644 | if (ret) { | 662 | if (ret) { |
645 | pr_err("Could not initialise WKUP_M3. " | 663 | pr_err("Could not initialise WKUP_M3. " |
646 | "Power management will be compromised\n"); | 664 | "Power management will be compromised\n"); |
647 | enable_deep_sleep = false; | 665 | enable_deep_sleep = false; |
648 | } | 666 | } |
649 | 667 | ||
650 | if (enable_deep_sleep) | 668 | if (enable_deep_sleep) |
651 | suspend_set_ops(&am33xx_pm_ops); | 669 | suspend_set_ops(&am33xx_pm_ops); |
652 | #endif /* CONFIG_SUSPEND */ | 670 | #endif /* CONFIG_SUSPEND */ |
653 | 671 | ||
654 | return ret; | 672 | return ret; |
655 | } | 673 | } |
656 | late_initcall(am33xx_pm_init); | 674 | late_initcall(am33xx_pm_init); |
657 | 675 |
arch/arm/mach-omap2/pm33xx.h
1 | /* | 1 | /* |
2 | * AM33XX Power Management Routines | 2 | * AM33XX Power Management Routines |
3 | * | 3 | * |
4 | * Copyright (C) 2012 Texas Instruments Inc. | 4 | * Copyright (C) 2012 Texas Instruments Inc. |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify | 6 | * This program is free software; you can redistribute it and/or modify |
7 | * it under the terms of the GNU General Public License version 2 as | 7 | * it under the terms of the GNU General Public License version 2 as |
8 | * published by the Free Software Foundation. | 8 | * published by the Free Software Foundation. |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #ifndef __ARCH_ARM_MACH_OMAP2_PM33XX_H | 11 | #ifndef __ARCH_ARM_MACH_OMAP2_PM33XX_H |
12 | #define __ARCH_ARM_MACH_OMAP2_PM33XX_H | 12 | #define __ARCH_ARM_MACH_OMAP2_PM33XX_H |
13 | 13 | ||
14 | #include <mach/hardware.h> /* XXX Is this the right one to include? */ | 14 | #include <mach/hardware.h> /* XXX Is this the right one to include? */ |
15 | #include "control.h" | 15 | #include "control.h" |
16 | #include "mux33xx.h" | 16 | #include "mux33xx.h" |
17 | 17 | ||
18 | #ifndef __ASSEMBLER__ | 18 | #ifndef __ASSEMBLER__ |
19 | extern void __iomem *am33xx_get_ram_base(void); | 19 | extern void __iomem *am33xx_get_ram_base(void); |
20 | 20 | ||
21 | /* | 21 | /* |
22 | * This enum is used to index the array passed to suspend routine with | 22 | * This enum is used to index the array passed to suspend routine with |
23 | * parameters that vary across DDR2 and DDR3 sleep sequence. | 23 | * parameters that vary across DDR2 and DDR3 sleep sequence. |
24 | * | 24 | * |
25 | * Since these are used to load into registers by suspend code, | 25 | * Since these are used to load into registers by suspend code, |
26 | * entries here must always be in sync with the suspend code | 26 | * entries here must always be in sync with the suspend code |
27 | * in arm/mach-omap2/sleep33xx.S | 27 | * in arm/mach-omap2/sleep33xx.S |
28 | */ | 28 | */ |
29 | enum suspend_cfg_params { | 29 | enum suspend_cfg_params { |
30 | MEMORY_TYPE = 0, | 30 | MEMORY_TYPE = 0, |
31 | SUSP_VTP_CTRL_VAL, | 31 | SUSP_VTP_CTRL_VAL, |
32 | EVM_ID, | 32 | EVM_ID, |
33 | CPU_REV, | 33 | CPU_REV, |
34 | SUSPEND_STATE, | ||
34 | SUSPEND_CFG_PARAMS_END /* Must be the last entry */ | 35 | SUSPEND_CFG_PARAMS_END /* Must be the last entry */ |
35 | }; | 36 | }; |
36 | 37 | ||
37 | struct a8_wkup_m3_ipc_data { | 38 | struct a8_wkup_m3_ipc_data { |
38 | int resume_addr; | 39 | int resume_addr; |
39 | int sleep_mode; | 40 | int sleep_mode; |
40 | int ipc_data1; | 41 | int ipc_data1; |
41 | int ipc_data2; | 42 | int ipc_data2; |
42 | } am33xx_lp_ipc; | 43 | } am33xx_lp_ipc; |
43 | 44 | ||
44 | #endif /* ASSEMBLER */ | 45 | #endif /* ASSEMBLER */ |
45 | 46 | ||
46 | #define M3_TXEV_EOI (AM33XX_CTRL_BASE + 0x1324) | 47 | #define M3_TXEV_EOI (AM33XX_CTRL_BASE + 0x1324) |
47 | #define A8_M3_IPC_REGS (AM33XX_CTRL_BASE + 0x1328) | 48 | #define A8_M3_IPC_REGS (AM33XX_CTRL_BASE + 0x1328) |
48 | #define DS_RESUME_BASE 0x40300000 | 49 | #define DS_RESUME_BASE 0x40300000 |
49 | #define DS_IPC_DEFAULT 0xffffffff | 50 | #define DS_IPC_DEFAULT 0xffffffff |
50 | #define M3_UMEM 0x44D00000 | 51 | #define M3_UMEM 0x44D00000 |
51 | 52 | ||
52 | #define DS0_ID 0x3 | 53 | #define DS0_ID 0x3 |
53 | #define DS1_ID 0x5 | 54 | #define DS1_ID 0x5 |
54 | 55 | ||
55 | #define M3_STATE_UNKNOWN -1 | 56 | #define M3_STATE_UNKNOWN -1 |
56 | #define M3_STATE_RESET 0 | 57 | #define M3_STATE_RESET 0 |
57 | #define M3_STATE_INITED 1 | 58 | #define M3_STATE_INITED 1 |
58 | #define M3_STATE_MSG_FOR_LP 2 | 59 | #define M3_STATE_MSG_FOR_LP 2 |
59 | #define M3_STATE_MSG_FOR_RESET 3 | 60 | #define M3_STATE_MSG_FOR_RESET 3 |
60 | 61 | ||
61 | #define VTP_CTRL_READY (0x1 << 5) | 62 | #define VTP_CTRL_READY (0x1 << 5) |
62 | #define VTP_CTRL_ENABLE (0x1 << 6) | 63 | #define VTP_CTRL_ENABLE (0x1 << 6) |
63 | #define VTP_CTRL_LOCK_EN (0x1 << 4) | 64 | #define VTP_CTRL_LOCK_EN (0x1 << 4) |
64 | #define VTP_CTRL_START_EN (0x1) | 65 | #define VTP_CTRL_START_EN (0x1) |
65 | 66 | ||
66 | #define DDR_IO_CTRL (AM33XX_CTRL_BASE + 0x0E04) | 67 | #define DDR_IO_CTRL (AM33XX_CTRL_BASE + 0x0E04) |
67 | #define VTP0_CTRL_REG (AM33XX_CTRL_BASE + 0x0E0C) | 68 | #define VTP0_CTRL_REG (AM33XX_CTRL_BASE + 0x0E0C) |
68 | #define DDR_CMD0_IOCTRL (AM33XX_CTRL_BASE + 0x1404) | 69 | #define DDR_CMD0_IOCTRL (AM33XX_CTRL_BASE + 0x1404) |
69 | #define DDR_CMD1_IOCTRL (AM33XX_CTRL_BASE + 0x1408) | 70 | #define DDR_CMD1_IOCTRL (AM33XX_CTRL_BASE + 0x1408) |
70 | #define DDR_CMD2_IOCTRL (AM33XX_CTRL_BASE + 0x140C) | 71 | #define DDR_CMD2_IOCTRL (AM33XX_CTRL_BASE + 0x140C) |
71 | #define DDR_DATA0_IOCTRL (AM33XX_CTRL_BASE + 0x1440) | 72 | #define DDR_DATA0_IOCTRL (AM33XX_CTRL_BASE + 0x1440) |
72 | #define DDR_DATA1_IOCTRL (AM33XX_CTRL_BASE + 0x1444) | 73 | #define DDR_DATA1_IOCTRL (AM33XX_CTRL_BASE + 0x1444) |
73 | 74 | ||
74 | #define MEM_TYPE_DDR2 2 | 75 | #define MEM_TYPE_DDR2 2 |
75 | 76 | ||
76 | #define SUSP_VTP_CTRL_DDR2 0x10117 | 77 | #define SUSP_VTP_CTRL_DDR2 0x10117 |
77 | #define SUSP_VTP_CTRL_DDR3 0x0 | 78 | #define SUSP_VTP_CTRL_DDR3 0x0 |
78 | 79 | ||
79 | #define CPU_REV_1 1 | 80 | #define CPU_REV_1 1 |
80 | #define CPU_REV_2 2 | 81 | #define CPU_REV_2 2 |
81 | 82 | ||
82 | #define M3_VERSION_UNKNOWN 0x0000ffff | 83 | #define M3_VERSION_UNKNOWN 0x0000ffff |
84 | |||
85 | #define PM_DS0 0 | ||
86 | #define PM_STANDBY 1 | ||
83 | 87 | ||
84 | #endif | 88 | #endif |
85 | 89 |
arch/arm/mach-omap2/sleep33xx.S
1 | 1 | ||
2 | /* | 2 | /* |
3 | * Low level suspend code for AM33XX SoCs | 3 | * Low level suspend code for AM33XX SoCs |
4 | * | 4 | * |
5 | * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/ | 5 | * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/ |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or | 7 | * This program is free software; you can redistribute it and/or |
8 | * modify it under the terms of the GNU General Public License as | 8 | * modify it under the terms of the GNU General Public License as |
9 | * published by the Free Software Foundation version 2. | 9 | * published by the Free Software Foundation version 2. |
10 | * | 10 | * |
11 | * This program is distributed "as is" WITHOUT ANY WARRANTY of any | 11 | * This program is distributed "as is" WITHOUT ANY WARRANTY of any |
12 | * kind, whether express or implied; without even the implied warranty | 12 | * kind, whether express or implied; without even the implied warranty |
13 | * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 13 | * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
14 | * GNU General Public License for more details. | 14 | * GNU General Public License for more details. |
15 | */ | 15 | */ |
16 | 16 | ||
17 | #include <linux/linkage.h> | 17 | #include <linux/linkage.h> |
18 | #include <linux/init.h> | 18 | #include <linux/init.h> |
19 | #include <asm/memory.h> | 19 | #include <asm/memory.h> |
20 | #include <asm/assembler.h> | 20 | #include <asm/assembler.h> |
21 | #include <mach/io.h> | 21 | #include <mach/io.h> |
22 | #include <mach/board-am335xevm.h> | 22 | #include <mach/board-am335xevm.h> |
23 | 23 | ||
24 | #include <plat/emif.h> | 24 | #include <plat/emif.h> |
25 | #include <plat/sram.h> | 25 | #include <plat/sram.h> |
26 | #include <plat/gpio.h> | 26 | #include <plat/gpio.h> |
27 | 27 | ||
28 | #include "cm33xx.h" | 28 | #include "cm33xx.h" |
29 | #include "pm33xx.h" | 29 | #include "pm33xx.h" |
30 | #include "prm33xx.h" | 30 | #include "prm33xx.h" |
31 | #include "control.h" | 31 | #include "control.h" |
32 | 32 | ||
33 | /* | 33 | /* |
34 | * We should probably pass in the virtual address of PRCM, Control and EMIF | 34 | * We should probably pass in the virtual address of PRCM, Control and EMIF |
35 | * along with the physical addresses, load it into the registers | 35 | * along with the physical addresses, load it into the registers |
36 | * and then continue. | 36 | * and then continue. |
37 | * | 37 | * |
38 | * This routine is executed from internal RAM and expects the memory type and | 38 | * This routine is executed from internal RAM and expects the memory type and |
39 | * other DDR configuration values which are different across different memory | 39 | * other DDR configuration values which are different across different memory |
40 | * types to be passed in r0 _strictly_ in following order: | 40 | * types to be passed in r0 _strictly_ in following order: |
41 | * 1) memory_type [r0] | 41 | * 1) memory_type [r0] |
42 | * 2) susp_vtp_ctrl_val [r1] | 42 | * 2) susp_vtp_ctrl_val [r1] |
43 | * 3) evm_id [r2] | 43 | * 3) evm_id [r2] |
44 | * 4) cpu_rev [r3] | 44 | * 4) cpu_rev [r3] |
45 | * 5) suspend_state[r4] | ||
45 | * | 46 | * |
46 | * The code loads these values taking r0 value as reference to the array in | 47 | * The code loads these values taking r0 value as reference to the array in |
47 | * registers starting from r0, i,e memory_type goes to r0 and susp_vtp_ctrl_val | 48 | * registers starting from r0, i,e memory_type goes to r0 and susp_vtp_ctrl_val |
48 | * goes to r1 and so on. These are then saved into memory locations before | 49 | * goes to r1 and so on. These are then saved into memory locations before |
49 | * proceeding with the sleep sequence and hence registers r0,r1 can still be | 50 | * proceeding with the sleep sequence and hence registers r0,r1 can still be |
50 | * used in the rest of the sleep code. | 51 | * used in the rest of the sleep code. |
51 | * | 52 | * |
52 | * mem_type is used to decide different suspend-resume sequences for DDR2 | 53 | * mem_type is used to decide different suspend-resume sequences for DDR2 |
53 | * and DDR3. | 54 | * and DDR3. |
54 | * cpu_rev is used to have/skip certain steps selectively for PG2.0 suspend-resume | 55 | * cpu_rev is used to have/skip certain steps selectively for PG2.0 suspend-resume |
55 | * sequence like switching to LVCMOS mode is not required for PG2.0. | 56 | * sequence like switching to LVCMOS mode is not required for PG2.0. |
56 | */ | 57 | */ |
57 | .align 3 | 58 | .align 3 |
58 | ENTRY(am33xx_do_wfi) | 59 | ENTRY(am33xx_do_wfi) |
59 | stmfd sp!, {r4 - r11, lr} @ save registers on stack | 60 | stmfd sp!, {r4 - r11, lr} @ save registers on stack |
60 | 61 | ||
61 | .macro pll_bypass, name, clk_mode_addr, idlest_addr, pll_mode | 62 | .macro pll_bypass, name, clk_mode_addr, idlest_addr, pll_mode |
62 | pll_bypass_\name: | 63 | pll_bypass_\name: |
63 | ldr r0, \clk_mode_addr | 64 | ldr r0, \clk_mode_addr |
64 | ldr r1, [r0] | 65 | ldr r1, [r0] |
65 | str r1, clk_mode_\pll_mode | 66 | str r1, clk_mode_\pll_mode |
66 | bic r1, r1, #(7 << 0) | 67 | bic r1, r1, #(7 << 0) |
67 | orr r1, r1, #0x5 | 68 | orr r1, r1, #0x5 |
68 | str r1, [r0] | 69 | str r1, [r0] |
69 | ldr r0, \idlest_addr | 70 | ldr r0, \idlest_addr |
70 | wait_pll_bypass_\name: | 71 | wait_pll_bypass_\name: |
71 | ldr r1, [r0] | 72 | ldr r1, [r0] |
72 | tst r1, #0x0 | 73 | tst r1, #0x0 |
73 | bne wait_pll_bypass_\name | 74 | bne wait_pll_bypass_\name |
74 | .endm | 75 | .endm |
75 | 76 | ||
76 | .macro pll_lock, name, clk_mode_addr, idlest_addr, pll_mode | 77 | .macro pll_lock, name, clk_mode_addr, idlest_addr, pll_mode |
77 | pll_lock_\name: | 78 | pll_lock_\name: |
78 | ldr r0, \clk_mode_addr | 79 | ldr r0, \clk_mode_addr |
79 | ldr r1, clk_mode_\pll_mode | 80 | ldr r1, clk_mode_\pll_mode |
80 | str r1, [r0] | 81 | str r1, [r0] |
81 | and r1, r1, #0x7 | 82 | and r1, r1, #0x7 |
82 | cmp r1, #0x7 | 83 | cmp r1, #0x7 |
83 | bne pll_mode_restored_\name | 84 | bne pll_mode_restored_\name |
84 | ldr r0, \idlest_addr | 85 | ldr r0, \idlest_addr |
85 | wait_pll_lock_\name: | 86 | wait_pll_lock_\name: |
86 | ldr r1, [r0] | 87 | ldr r1, [r0] |
87 | ands r1, #0x1 | 88 | ands r1, #0x1 |
88 | beq wait_pll_lock_\name | 89 | beq wait_pll_lock_\name |
89 | pll_mode_restored_\name: | 90 | pll_mode_restored_\name: |
90 | nop | 91 | nop |
91 | .endm | 92 | .endm |
92 | 93 | ||
93 | .macro ddr_self_refresh, num | 94 | .macro ddr_self_refresh, num |
94 | ddr_self_refresh_\num: | 95 | ddr_self_refresh_\num: |
95 | add r1, r0, #EMIF4_0_SDRAM_MGMT_CTRL | 96 | add r1, r0, #EMIF4_0_SDRAM_MGMT_CTRL |
96 | ldr r2, [r1] | 97 | ldr r2, [r1] |
97 | orr r2, r2, #0xa0 @ a reasonable delay for entering SR | 98 | orr r2, r2, #0xa0 @ a reasonable delay for entering SR |
98 | str r2, [r1, #0] | 99 | str r2, [r1, #0] |
99 | str r2, [r1, #4] @ write to shadow register also | 100 | str r2, [r1, #4] @ write to shadow register also |
100 | 101 | ||
101 | ldr r2, ddr_start @ do a dummy access to DDR | 102 | ldr r2, ddr_start @ do a dummy access to DDR |
102 | ldr r3, [r2, #0] | 103 | ldr r3, [r2, #0] |
103 | ldr r3, [r1, #0] | 104 | ldr r3, [r1, #0] |
104 | orr r3, r3, #0x200 @ now set the LP MODE to Self-Refresh | 105 | orr r3, r3, #0x200 @ now set the LP MODE to Self-Refresh |
105 | str r3, [r1, #0] | 106 | str r3, [r1, #0] |
106 | 107 | ||
107 | mov r1, #0x1000 @ Give some time for system to enter SR | 108 | mov r1, #0x1000 @ Give some time for system to enter SR |
108 | wait_sr_\num: | 109 | wait_sr_\num: |
109 | subs r1, r1, #1 | 110 | subs r1, r1, #1 |
110 | bne wait_sr_\num | 111 | bne wait_sr_\num |
111 | .endm | 112 | .endm |
112 | 113 | ||
113 | .macro wait_sdram_config ,num | 114 | .macro wait_sdram_config ,num |
114 | wait_sdram_config_\num: | 115 | wait_sdram_config_\num: |
115 | mov r0, #0x100 | 116 | mov r0, #0x100 |
116 | wait_sc_\num: | 117 | wait_sc_\num: |
117 | subs r0, r0 ,#1 | 118 | subs r0, r0 ,#1 |
118 | bne wait_sc_\num | 119 | bne wait_sc_\num |
119 | .endm | 120 | .endm |
120 | 121 | ||
121 | mov r8, r0 @ same arg list passed to us | 122 | mov r8, r0 @ same arg list passed to us |
122 | 123 | ||
123 | /* EMIF config for low power mode */ | 124 | /* EMIF config for low power mode */ |
124 | ldr r0, emif_addr_func | 125 | ldr r0, emif_addr_func |
125 | blx r0 | 126 | blx r0 |
126 | 127 | ||
127 | str r0, emif_addr_virt | 128 | str r0, emif_addr_virt |
128 | 129 | ||
129 | ldr r0, gpio0_addr_func | 130 | ldr r0, gpio0_addr_func |
130 | blx r0 | 131 | blx r0 |
131 | 132 | ||
132 | str r0, gpio0_addr_virt | 133 | str r0, gpio0_addr_virt |
133 | 134 | ||
134 | /* This ensures isb */ | 135 | /* This ensures isb */ |
135 | ldr r0, dcache_flush | 136 | ldr r0, dcache_flush |
136 | blx r0 | 137 | blx r0 |
137 | 138 | ||
138 | /* Same as v7_flush_icache_all - saving a branch */ | 139 | /* Same as v7_flush_icache_all - saving a branch */ |
139 | mov r0, #0 | 140 | mov r0, #0 |
140 | mcr p15, 0, r0, c7, c5, 0 @ I+BTB cache invalidate | 141 | mcr p15, 0, r0, c7, c5, 0 @ I+BTB cache invalidate |
141 | 142 | ||
142 | ldm r8, {r0-r3} @ gather values passed | 143 | ldm r8, {r0-r4} @ gather values passed |
143 | 144 | ||
144 | /* Save the values passed */ | 145 | /* Save the values passed */ |
145 | str r0, mem_type | 146 | str r0, mem_type |
146 | str r1, susp_vtp_ctrl_val | 147 | str r1, susp_vtp_ctrl_val |
147 | str r2, evm_id | 148 | str r2, evm_id |
148 | str r3, cpu_rev | 149 | str r3, cpu_rev |
150 | str r4, suspend_state | ||
149 | 151 | ||
150 | ldr r0, emif_addr_virt | 152 | ldr r0, emif_addr_virt |
151 | 153 | ||
152 | /* Save EMIF configuration */ | 154 | /* Save EMIF configuration */ |
153 | ldr r1, [r0, #EMIF4_0_SDRAM_CONFIG] | 155 | ldr r1, [r0, #EMIF4_0_SDRAM_CONFIG] |
154 | str r1, emif_sdcfg_val | 156 | str r1, emif_sdcfg_val |
155 | ldr r1, [r0, #EMIF4_0_SDRAM_REF_CTRL] | 157 | ldr r1, [r0, #EMIF4_0_SDRAM_REF_CTRL] |
156 | str r1, emif_ref_ctrl_val | 158 | str r1, emif_ref_ctrl_val |
157 | ldr r1, [r0, #EMIF4_0_SDRAM_TIM_1] | 159 | ldr r1, [r0, #EMIF4_0_SDRAM_TIM_1] |
158 | str r1, emif_timing1_val | 160 | str r1, emif_timing1_val |
159 | ldr r1, [r0, #EMIF4_0_SDRAM_TIM_2] | 161 | ldr r1, [r0, #EMIF4_0_SDRAM_TIM_2] |
160 | str r1, emif_timing2_val | 162 | str r1, emif_timing2_val |
161 | ldr r1, [r0, #EMIF4_0_SDRAM_TIM_3] | 163 | ldr r1, [r0, #EMIF4_0_SDRAM_TIM_3] |
162 | str r1, emif_timing3_val | 164 | str r1, emif_timing3_val |
163 | ldr r1, [r0, #EMIF4_0_SDRAM_MGMT_CTRL] | 165 | ldr r1, [r0, #EMIF4_0_SDRAM_MGMT_CTRL] |
164 | str r1, emif_pmcr_val | 166 | str r1, emif_pmcr_val |
165 | ldr r1, [r0, #EMIF4_0_SDRAM_MGMT_CTRL_SHADOW] | 167 | ldr r1, [r0, #EMIF4_0_SDRAM_MGMT_CTRL_SHADOW] |
166 | str r1, emif_pmcr_shdw_val | 168 | str r1, emif_pmcr_shdw_val |
167 | ldr r1, [r0, #EMIF4_0_ZQ_CONFIG] | 169 | ldr r1, [r0, #EMIF4_0_ZQ_CONFIG] |
168 | str r1, emif_zqcfg_val | 170 | str r1, emif_zqcfg_val |
169 | ldr r1, [r0, #EMIF4_0_DDR_PHY_CTRL_1] | 171 | ldr r1, [r0, #EMIF4_0_DDR_PHY_CTRL_1] |
170 | str r1, emif_rd_lat_val | 172 | str r1, emif_rd_lat_val |
171 | 173 | ||
172 | /* Ensure that all the writes to DDR leave the A8 */ | 174 | /* Ensure that all the writes to DDR leave the A8 */ |
173 | dsb | 175 | dsb |
174 | dmb | 176 | dmb |
175 | isb | 177 | isb |
176 | 178 | ||
177 | /* Different sleep sequences for DDR2 and DDR3 */ | 179 | /* Different sleep sequences for DDR2 and DDR3 */ |
178 | ldr r6, mem_type | 180 | ldr r6, mem_type |
179 | cmp r6, #MEM_TYPE_DDR2 | 181 | cmp r6, #MEM_TYPE_DDR2 |
180 | beq ddr2_susp_seq | 182 | beq ddr2_susp_seq |
181 | 183 | ||
182 | /* DDR3 suspend sequence */ | 184 | /* DDR3 suspend sequence */ |
183 | 185 | ||
184 | /* For DDR3, hold DDR_RESET high via control module */ | 186 | /* For DDR3, hold DDR_RESET high via control module */ |
185 | ldr r2, virt_ddr_io_ctrl | 187 | ldr r2, virt_ddr_io_ctrl |
186 | ldr r1, [r2] | 188 | ldr r1, [r2] |
187 | mov r3,#1 | 189 | mov r3,#1 |
188 | mov r3,r3,lsl #31 | 190 | mov r3,r3,lsl #31 |
189 | orr r1,r1,r3 @ set ddr3_rst_def_val | 191 | orr r1,r1,r3 @ set ddr3_rst_def_val |
190 | str r1, [r2] | 192 | str r1, [r2] |
191 | 193 | ||
192 | ddr_self_refresh 1 | 194 | ddr_self_refresh 1 |
193 | 195 | ||
194 | /* Weak pull down for macro DATA0 */ | 196 | /* Weak pull down for macro DATA0 */ |
195 | ldr r1, virt_ddr_data0_ioctrl | 197 | ldr r1, virt_ddr_data0_ioctrl |
196 | ldr r2, susp_io_pull_data | 198 | ldr r2, susp_io_pull_data |
197 | str r2, [r1] | 199 | str r2, [r1] |
198 | 200 | ||
199 | /* Weak pull down for macro DATA1 */ | 201 | /* Weak pull down for macro DATA1 */ |
200 | ldr r1, virt_ddr_data1_ioctrl | 202 | ldr r1, virt_ddr_data1_ioctrl |
201 | ldr r2, susp_io_pull_data | 203 | ldr r2, susp_io_pull_data |
202 | str r2, [r1] | 204 | str r2, [r1] |
203 | 205 | ||
204 | /* Weak pull down for macro CMD0 */ | 206 | /* Weak pull down for macro CMD0 */ |
205 | ldr r1, virt_ddr_cmd0_ioctrl | 207 | ldr r1, virt_ddr_cmd0_ioctrl |
206 | ldr r2, susp_io_pull_cmd1 | 208 | ldr r2, susp_io_pull_cmd1 |
207 | str r2, [r1] | 209 | str r2, [r1] |
208 | 210 | ||
209 | /* Weak pull down for macro CMD1 */ | 211 | /* Weak pull down for macro CMD1 */ |
210 | ldr r1, virt_ddr_cmd1_ioctrl | 212 | ldr r1, virt_ddr_cmd1_ioctrl |
211 | ldr r2, susp_io_pull_cmd1 | 213 | ldr r2, susp_io_pull_cmd1 |
212 | str r2, [r1] | 214 | str r2, [r1] |
213 | 215 | ||
214 | /* | 216 | /* |
215 | * Weak pull down for macro CMD2 | 217 | * Weak pull down for macro CMD2 |
216 | * exception: keep DDR_RESET pullup | 218 | * exception: keep DDR_RESET pullup |
217 | */ | 219 | */ |
218 | ldr r1, virt_ddr_cmd2_ioctrl | 220 | ldr r1, virt_ddr_cmd2_ioctrl |
219 | ldr r2, susp_io_pull_cmd2 | 221 | ldr r2, susp_io_pull_cmd2 |
220 | str r2, [r1] | 222 | str r2, [r1] |
221 | 223 | ||
222 | /* | 224 | /* |
223 | * Disable VTT_Regulator on EVM-SK | 225 | * Disable VTT_Regulator on EVM-SK |
224 | * | 226 | * |
225 | * Explanation from Hardware team : | 227 | * Explanation from Hardware team : |
226 | * On EVM-SK, the CKE signal does not properly go low during DDR self refresh because of two issues: | 228 | * On EVM-SK, the CKE signal does not properly go low during DDR self refresh because of two issues: |
227 | * 1.Because of a bug in the silicon, we have to disable VTP in order to get to a low power state. | 229 | * 1.Because of a bug in the silicon, we have to disable VTP in order to get to a low power state. |
228 | * 2.As a consequence of #1, none of the ctrl/addr outputs are driven by the EMIF/PHY. | 230 | * 2.As a consequence of #1, none of the ctrl/addr outputs are driven by the EMIF/PHY. |
229 | * This is why we have to enable a pull up on DDR_RESET. | 231 | * This is why we have to enable a pull up on DDR_RESET. |
230 | * Similarly, we would need to be able to pull down CKE to keep the DDR in Self refresh. | 232 | * Similarly, we would need to be able to pull down CKE to keep the DDR in Self refresh. |
231 | * The pull down enabled on CKE is a weak pull down, which is not enough to overcome the | 233 | * The pull down enabled on CKE is a weak pull down, which is not enough to overcome the |
232 | * VTT pull up to 0.75V. Thus, the DDR never properly goes to self refresh. | 234 | * VTT pull up to 0.75V. Thus, the DDR never properly goes to self refresh. |
233 | * | 235 | * |
234 | * EVM-SK Rev 1.2, has an added external pull down on CKE and the ability to disable | 236 | * EVM-SK Rev 1.2, has an added external pull down on CKE and the ability to disable |
235 | * VTT regulator via gpio to address the above issues. | 237 | * VTT regulator via gpio to address the above issues. |
236 | */ | 238 | */ |
237 | 239 | ||
238 | ldr r6, evm_id | 240 | ldr r6, evm_id |
239 | cmp r6, #EVM_SK | 241 | cmp r6, #EVM_SK |
240 | bne no_gpio_toggle | 242 | bne no_gpio_toggle |
241 | 243 | ||
242 | /* Drive GPIO0_7 LOW */ | 244 | /* Drive GPIO0_7 LOW */ |
243 | ldr r0, gpio0_addr_virt | 245 | ldr r0, gpio0_addr_virt |
244 | ldr r1, [r0, #OMAP4_GPIO_CLEARDATAOUT] | 246 | ldr r1, [r0, #OMAP4_GPIO_CLEARDATAOUT] |
245 | mov r2, #(1 << 7) | 247 | mov r2, #(1 << 7) |
246 | str r2, [r0, #OMAP4_GPIO_CLEARDATAOUT] | 248 | str r2, [r0, #OMAP4_GPIO_CLEARDATAOUT] |
247 | 249 | ||
250 | /* | ||
251 | * Keep GPIO0 enabled during standby to support | ||
252 | * wakeup from Standby via GPIO0 keys. | ||
253 | * | ||
254 | * GPIO0 is disabled here for EVM_SK only in DS0. | ||
255 | */ | ||
256 | ldr r6, suspend_state | ||
257 | cmp r6, #PM_STANDBY | ||
258 | beq no_gpio_toggle | ||
259 | |||
248 | /* Disable GPIO0 for EVM_SK here */ | 260 | /* Disable GPIO0 for EVM_SK here */ |
249 | ldr r1, virt_gpio0_clkctrl | 261 | ldr r1, virt_gpio0_clkctrl |
250 | ldr r2, [r1] | 262 | ldr r2, [r1] |
251 | bic r2, r2, #(3 << 0) | 263 | bic r2, r2, #(3 << 0) |
252 | str r2, [r1] | 264 | str r2, [r1] |
253 | 265 | ||
254 | ldr r1, virt_gpio0_clkctrl | 266 | ldr r1, virt_gpio0_clkctrl |
255 | wait_gpio0_disable: | 267 | wait_gpio0_disable: |
256 | ldr r2, [r1] | 268 | ldr r2, [r1] |
257 | ldr r3, module_disabled_val | 269 | ldr r3, module_disabled_val |
258 | cmp r2, r3 | 270 | cmp r2, r3 |
259 | bne wait_gpio0_disable | 271 | bne wait_gpio0_disable |
260 | 272 | ||
261 | no_gpio_toggle: | 273 | no_gpio_toggle: |
262 | /* mddr mode selection not required for PG2.0 */ | 274 | /* mddr mode selection not required for PG2.0 */ |
263 | ldr r6, cpu_rev | 275 | ldr r6, cpu_rev |
264 | cmp r6, #CPU_REV_2 | 276 | cmp r6, #CPU_REV_2 |
265 | beq mddr_modesel_skip | 277 | beq mddr_modesel_skip |
266 | 278 | ||
267 | /* Put IO in mDDR (cmos) mode */ | 279 | /* Put IO in mDDR (cmos) mode */ |
268 | ldr r0, virt_ddr_io_ctrl | 280 | ldr r0, virt_ddr_io_ctrl |
269 | ldr r1, [r0] | 281 | ldr r1, [r0] |
270 | mov r2, #(0x1 << 28) | 282 | mov r2, #(0x1 << 28) |
271 | orr r3,r2,r1 | 283 | orr r3,r2,r1 |
272 | str r3, [r0] | 284 | str r3, [r0] |
273 | 285 | ||
274 | mddr_modesel_skip: | 286 | mddr_modesel_skip: |
275 | /* Disable EMIF at this point */ | 287 | /* Disable EMIF at this point */ |
276 | ldr r1, virt_emif_clkctrl | 288 | ldr r1, virt_emif_clkctrl |
277 | ldr r2, [r1] | 289 | ldr r2, [r1] |
278 | bic r2, r2, #(3 << 0) | 290 | bic r2, r2, #(3 << 0) |
279 | str r2, [r1] | 291 | str r2, [r1] |
280 | 292 | ||
281 | ldr r1, virt_emif_clkctrl | 293 | ldr r1, virt_emif_clkctrl |
282 | wait_emif_disable3: | 294 | wait_emif_disable3: |
283 | ldr r2, [r1] | 295 | ldr r2, [r1] |
284 | ldr r3, module_disabled_val | 296 | ldr r3, module_disabled_val |
285 | cmp r2, r3 | 297 | cmp r2, r3 |
286 | bne wait_emif_disable3 | 298 | bne wait_emif_disable3 |
287 | 299 | ||
288 | /* Disable VTP */ | 300 | /* Disable VTP */ |
289 | ldr r1, virt_ddr_vtp_ctrl | 301 | ldr r1, virt_ddr_vtp_ctrl |
290 | ldr r2, susp_vtp_ctrl_val | 302 | ldr r2, susp_vtp_ctrl_val |
291 | str r2, [r1] | 303 | str r2, [r1] |
292 | 304 | ||
293 | /* Enable SRAM LDO ret mode */ | 305 | /* Enable SRAM LDO ret mode */ |
294 | ldr r0, virt_sram_ldo_addr | 306 | ldr r0, virt_sram_ldo_addr |
295 | ldr r1, [r0] | 307 | ldr r1, [r0] |
296 | orr r1, #1 | 308 | orr r1, #1 |
297 | str r1, [r0] | 309 | str r1, [r0] |
298 | 310 | ||
299 | b put_pll_bypass | 311 | b put_pll_bypass |
300 | 312 | ||
301 | ddr2_susp_seq: | 313 | ddr2_susp_seq: |
302 | ddr_self_refresh 2 | 314 | ddr_self_refresh 2 |
303 | 315 | ||
304 | /* Disable EMIF at this point */ | 316 | /* Disable EMIF at this point */ |
305 | ldr r1, virt_emif_clkctrl | 317 | ldr r1, virt_emif_clkctrl |
306 | ldr r2, [r1] | 318 | ldr r2, [r1] |
307 | bic r2, r2, #(3 << 0) | 319 | bic r2, r2, #(3 << 0) |
308 | str r2, [r1] | 320 | str r2, [r1] |
309 | 321 | ||
310 | ldr r1, virt_emif_clkctrl | 322 | ldr r1, virt_emif_clkctrl |
311 | wait_emif_disable: | 323 | wait_emif_disable: |
312 | ldr r2, [r1] | 324 | ldr r2, [r1] |
313 | ldr r3, module_disabled_val | 325 | ldr r3, module_disabled_val |
314 | cmp r2, r3 | 326 | cmp r2, r3 |
315 | bne wait_emif_disable | 327 | bne wait_emif_disable |
316 | 328 | ||
317 | /* DDR3 reset override and mDDR mode selection */ | 329 | /* DDR3 reset override and mDDR mode selection */ |
318 | ldr r0, virt_ddr_io_ctrl | 330 | ldr r0, virt_ddr_io_ctrl |
319 | mov r1, #(0x9 << 28) | 331 | mov r1, #(0x9 << 28) |
320 | str r1, [r0] | 332 | str r1, [r0] |
321 | 333 | ||
322 | /* Weak pull down for DQ, DM */ | 334 | /* Weak pull down for DQ, DM */ |
323 | ldr r1, virt_ddr_io_pull1 | 335 | ldr r1, virt_ddr_io_pull1 |
324 | ldr r2, susp_io_pull_data | 336 | ldr r2, susp_io_pull_data |
325 | str r2, [r1] | 337 | str r2, [r1] |
326 | 338 | ||
327 | ldr r1, virt_ddr_io_pull2 | 339 | ldr r1, virt_ddr_io_pull2 |
328 | ldr r2, susp_io_pull_data | 340 | ldr r2, susp_io_pull_data |
329 | str r2, [r1] | 341 | str r2, [r1] |
330 | 342 | ||
331 | /* Disable VTP */ | 343 | /* Disable VTP */ |
332 | ldr r1, virt_ddr_vtp_ctrl | 344 | ldr r1, virt_ddr_vtp_ctrl |
333 | ldr r2, susp_vtp_ctrl_val | 345 | ldr r2, susp_vtp_ctrl_val |
334 | str r2, [r1] | 346 | str r2, [r1] |
335 | 347 | ||
336 | /* Enable SRAM LDO ret mode */ | 348 | /* Enable SRAM LDO ret mode */ |
337 | ldr r0, virt_sram_ldo_addr | 349 | ldr r0, virt_sram_ldo_addr |
338 | ldr r1, [r0] | 350 | ldr r1, [r0] |
339 | orr r1, #1 | 351 | orr r1, #1 |
340 | str r1, [r0] | 352 | str r1, [r0] |
341 | 353 | ||
342 | put_pll_bypass: | 354 | put_pll_bypass: |
343 | /* Put the PLLs in bypass mode */ | 355 | /* Put the PLLs in bypass mode */ |
344 | pll_bypass core, virt_core_clk_mode, virt_core_idlest, core_val | 356 | pll_bypass core, virt_core_clk_mode, virt_core_idlest, core_val |
345 | pll_bypass ddr, virt_ddr_clk_mode, virt_ddr_idlest, ddr_val | 357 | pll_bypass ddr, virt_ddr_clk_mode, virt_ddr_idlest, ddr_val |
346 | pll_bypass disp, virt_disp_clk_mode, virt_disp_idlest, disp_val | 358 | pll_bypass disp, virt_disp_clk_mode, virt_disp_idlest, disp_val |
347 | pll_bypass per, virt_per_clk_mode, virt_per_idlest, per_val | 359 | pll_bypass per, virt_per_clk_mode, virt_per_idlest, per_val |
348 | pll_bypass mpu, virt_mpu_clk_mode, virt_mpu_idlest, mpu_val | 360 | pll_bypass mpu, virt_mpu_clk_mode, virt_mpu_idlest, mpu_val |
349 | 361 | ||
350 | dsb | 362 | dsb |
351 | dmb | 363 | dmb |
352 | isb | 364 | isb |
353 | 365 | ||
354 | wfi | 366 | wfi |
355 | nop | 367 | nop |
356 | nop | 368 | nop |
357 | nop | 369 | nop |
358 | nop | 370 | nop |
359 | nop | 371 | nop |
360 | nop | 372 | nop |
361 | nop | 373 | nop |
362 | nop | 374 | nop |
363 | nop | 375 | nop |
364 | nop | 376 | nop |
365 | nop | 377 | nop |
366 | nop | 378 | nop |
367 | nop | 379 | nop |
368 | 380 | ||
369 | /* We come here in case of an abort */ | 381 | /* We come here in case of an abort */ |
370 | 382 | ||
371 | /* Relock the PLLs */ | 383 | /* Relock the PLLs */ |
372 | pll_lock mpu_abt, virt_mpu_clk_mode, virt_mpu_idlest, mpu_val | 384 | pll_lock mpu_abt, virt_mpu_clk_mode, virt_mpu_idlest, mpu_val |
373 | pll_lock per_abt, virt_per_clk_mode, virt_per_idlest, per_val | 385 | pll_lock per_abt, virt_per_clk_mode, virt_per_idlest, per_val |
374 | pll_lock disp_abt, virt_disp_clk_mode, virt_disp_idlest, disp_val | 386 | pll_lock disp_abt, virt_disp_clk_mode, virt_disp_idlest, disp_val |
375 | pll_lock ddr_abt, virt_ddr_clk_mode, virt_ddr_idlest, ddr_val | 387 | pll_lock ddr_abt, virt_ddr_clk_mode, virt_ddr_idlest, ddr_val |
376 | pll_lock core_abt, virt_core_clk_mode, virt_core_idlest, core_val | 388 | pll_lock core_abt, virt_core_clk_mode, virt_core_idlest, core_val |
377 | 389 | ||
378 | ldr r6, mem_type | 390 | ldr r6, mem_type |
379 | cmp r6, #MEM_TYPE_DDR2 | 391 | cmp r6, #MEM_TYPE_DDR2 |
380 | beq ddr2_resume_seq_abt | 392 | beq ddr2_resume_seq_abt |
381 | 393 | ||
382 | /* DDR3 resume path */ | 394 | /* DDR3 resume path */ |
383 | 395 | ||
384 | /* Disable SRAM LDO ret mode */ | 396 | /* Disable SRAM LDO ret mode */ |
385 | ldr r0, virt_sram_ldo_addr | 397 | ldr r0, virt_sram_ldo_addr |
386 | ldr r1, [r0] | 398 | ldr r1, [r0] |
387 | bic r1, #1 | 399 | bic r1, #1 |
388 | str r1, [r0] | 400 | str r1, [r0] |
389 | 401 | ||
390 | /* Enable EMIF */ | 402 | /* Enable EMIF */ |
391 | ldr r1, virt_emif_clkctrl | 403 | ldr r1, virt_emif_clkctrl |
392 | mov r2, #0x2 | 404 | mov r2, #0x2 |
393 | str r2, [r1] | 405 | str r2, [r1] |
394 | wait_emif_enable3: | 406 | wait_emif_enable3: |
395 | ldr r3, [r1] | 407 | ldr r3, [r1] |
396 | cmp r2, r3 | 408 | cmp r2, r3 |
397 | bne wait_emif_enable3 | 409 | bne wait_emif_enable3 |
398 | 410 | ||
399 | /* mddr mode selection not required for PG2.0 */ | 411 | /* mddr mode selection not required for PG2.0 */ |
400 | ldr r6, cpu_rev | 412 | ldr r6, cpu_rev |
401 | cmp r6, #CPU_REV_2 | 413 | cmp r6, #CPU_REV_2 |
402 | beq config_vtp_abt3 | 414 | beq config_vtp_abt3 |
403 | 415 | ||
404 | /* Take out IO of mDDR mode */ | 416 | /* Take out IO of mDDR mode */ |
405 | ldr r0, virt_ddr_io_ctrl | 417 | ldr r0, virt_ddr_io_ctrl |
406 | ldr r1, [r0] | 418 | ldr r1, [r0] |
407 | bic r1, r1, #(1 << 28) | 419 | bic r1, r1, #(1 << 28) |
408 | str r1, [r0] | 420 | str r1, [r0] |
409 | 421 | ||
410 | /* Enable VTP */ | 422 | /* Enable VTP */ |
411 | config_vtp_abt3: | 423 | config_vtp_abt3: |
412 | ldr r0, virt_ddr_vtp_ctrl | 424 | ldr r0, virt_ddr_vtp_ctrl |
413 | ldr r1, [r0] | 425 | ldr r1, [r0] |
414 | mov r2, #0x0 @ clear the register | 426 | mov r2, #0x0 @ clear the register |
415 | str r2, [r0] | 427 | str r2, [r0] |
416 | mov r2, #0x6 @ write the filter value | 428 | mov r2, #0x6 @ write the filter value |
417 | str r2, [r0] | 429 | str r2, [r0] |
418 | 430 | ||
419 | ldr r1, [r0] | 431 | ldr r1, [r0] |
420 | ldr r2, vtp_enable @ set the enable bit | 432 | ldr r2, vtp_enable @ set the enable bit |
421 | orr r2, r2, r1 | 433 | orr r2, r2, r1 |
422 | str r2, [r0] | 434 | str r2, [r0] |
423 | 435 | ||
424 | ldr r1, [r0] @ toggle the CLRZ bit | 436 | ldr r1, [r0] @ toggle the CLRZ bit |
425 | bic r1, #1 | 437 | bic r1, #1 |
426 | str r1, [r0] | 438 | str r1, [r0] |
427 | 439 | ||
428 | ldr r1, [r0] | 440 | ldr r1, [r0] |
429 | orr r1, #1 | 441 | orr r1, #1 |
430 | str r1, [r0] | 442 | str r1, [r0] |
431 | 443 | ||
432 | poll_vtp_ready_abt3: | 444 | poll_vtp_ready_abt3: |
433 | ldr r1, [r0] @ poll for VTP ready | 445 | ldr r1, [r0] @ poll for VTP ready |
434 | tst r1, #(1 << 5) | 446 | tst r1, #(1 << 5) |
435 | beq poll_vtp_ready_abt3 | 447 | beq poll_vtp_ready_abt3 |
436 | 448 | ||
437 | /* Disable the pull for CMD2 */ | 449 | /* Disable the pull for CMD2 */ |
438 | ldr r1, virt_ddr_cmd2_ioctrl | 450 | ldr r1, virt_ddr_cmd2_ioctrl |
439 | ldr r2, resume_io_pull_cmd | 451 | ldr r2, resume_io_pull_cmd |
440 | str r2, [r1] | 452 | str r2, [r1] |
441 | /* Disable the pull for CMD1 */ | 453 | /* Disable the pull for CMD1 */ |
442 | ldr r1, virt_ddr_cmd1_ioctrl | 454 | ldr r1, virt_ddr_cmd1_ioctrl |
443 | ldr r2, resume_io_pull_cmd | 455 | ldr r2, resume_io_pull_cmd |
444 | str r2, [r1] | 456 | str r2, [r1] |
445 | /* Disable the pull for CMD0 */ | 457 | /* Disable the pull for CMD0 */ |
446 | ldr r1, virt_ddr_cmd0_ioctrl | 458 | ldr r1, virt_ddr_cmd0_ioctrl |
447 | ldr r2, resume_io_pull_cmd | 459 | ldr r2, resume_io_pull_cmd |
448 | str r2, [r1] | 460 | str r2, [r1] |
449 | /* Disable the pull for DATA1 */ | 461 | /* Disable the pull for DATA1 */ |
450 | ldr r1, virt_ddr_data1_ioctrl | 462 | ldr r1, virt_ddr_data1_ioctrl |
451 | ldr r2, resume_io_pull_data | 463 | ldr r2, resume_io_pull_data |
452 | str r2, [r1] | 464 | str r2, [r1] |
453 | /* Disable the pull for DATA0 */ | 465 | /* Disable the pull for DATA0 */ |
454 | ldr r1, virt_ddr_data0_ioctrl | 466 | ldr r1, virt_ddr_data0_ioctrl |
455 | ldr r2, resume_io_pull_data | 467 | ldr r2, resume_io_pull_data |
456 | str r2, [r1] | 468 | str r2, [r1] |
457 | 469 | ||
458 | wait_sdram_config 1 | 470 | wait_sdram_config 1 |
459 | 471 | ||
460 | /* Enable VTT_Regulator on EVM-SK */ | 472 | /* Enable VTT_Regulator on EVM-SK */ |
461 | ldr r6, evm_id | 473 | ldr r6, evm_id |
462 | cmp r6, #EVM_SK | 474 | cmp r6, #EVM_SK |
463 | bne no_gpio_toggle2 | 475 | bne no_gpio_toggle2 |
464 | 476 | ||
477 | /* | ||
478 | * GPIO0 was not disabled during standby for EVM_SK | ||
479 | * Hence no need to enable it back here. | ||
480 | */ | ||
481 | ldr r6, suspend_state | ||
482 | cmp r6, #PM_STANDBY | ||
483 | beq skip_gpio_enable1 | ||
484 | |||
465 | /* Enable GPIO0 for EVM-SK here */ | 485 | /* Enable GPIO0 for EVM-SK here */ |
466 | ldr r1, virt_gpio0_clkctrl | 486 | ldr r1, virt_gpio0_clkctrl |
467 | mov r2, #0x2 | 487 | mov r2, #0x2 |
468 | str r2, [r1] | 488 | str r2, [r1] |
469 | wait_gpio0_enable: | 489 | wait_gpio0_enable: |
470 | ldr r3, [r1] | 490 | ldr r3, [r1] |
471 | cmp r2, r3 | 491 | cmp r2, r3 |
472 | bne wait_gpio0_enable | 492 | bne wait_gpio0_enable |
473 | 493 | ||
494 | skip_gpio_enable1: | ||
474 | /* Drive GPIO0_7 HIGH */ | 495 | /* Drive GPIO0_7 HIGH */ |
475 | ldr r0, gpio0_addr_virt | 496 | ldr r0, gpio0_addr_virt |
476 | ldr r1, [r0, #OMAP4_GPIO_SETDATAOUT] | 497 | ldr r1, [r0, #OMAP4_GPIO_SETDATAOUT] |
477 | mov r2, #(1 << 7) | 498 | mov r2, #(1 << 7) |
478 | str r2, [r0, #OMAP4_GPIO_SETDATAOUT] | 499 | str r2, [r0, #OMAP4_GPIO_SETDATAOUT] |
479 | 500 | ||
480 | no_gpio_toggle2: | 501 | no_gpio_toggle2: |
481 | /* Disable EMIF self-refresh */ | 502 | /* Disable EMIF self-refresh */ |
482 | ldr r0, emif_addr_virt | 503 | ldr r0, emif_addr_virt |
483 | add r0, r0, #EMIF4_0_SDRAM_MGMT_CTRL | 504 | add r0, r0, #EMIF4_0_SDRAM_MGMT_CTRL |
484 | ldr r1, [r0] | 505 | ldr r1, [r0] |
485 | bic r1, r1, #(0x7 << 8) | 506 | bic r1, r1, #(0x7 << 8) |
486 | str r1, [r0] | 507 | str r1, [r0] |
487 | str r1, [r0, #4] | 508 | str r1, [r0, #4] |
488 | 509 | ||
489 | /* For DDR3, make DDR_RESET low via control module */ | 510 | /* For DDR3, make DDR_RESET low via control module */ |
490 | ldr r0, virt_ddr_io_ctrl | 511 | ldr r0, virt_ddr_io_ctrl |
491 | ldr r1, [r0] | 512 | ldr r1, [r0] |
492 | bic r1, r1, #(1 << 31) | 513 | bic r1, r1, #(1 << 31) |
493 | str r1, [r0] | 514 | str r1, [r0] |
494 | 515 | ||
495 | b return_to_ddr_abt | 516 | b return_to_ddr_abt |
496 | 517 | ||
497 | /* DDR2 resume path */ | 518 | /* DDR2 resume path */ |
498 | ddr2_resume_seq_abt: | 519 | ddr2_resume_seq_abt: |
499 | /* Disable SRAM LDO ret mode */ | 520 | /* Disable SRAM LDO ret mode */ |
500 | ldr r0, virt_sram_ldo_addr | 521 | ldr r0, virt_sram_ldo_addr |
501 | ldr r1, [r0] | 522 | ldr r1, [r0] |
502 | bic r1, #1 | 523 | bic r1, #1 |
503 | str r1, [r0] | 524 | str r1, [r0] |
504 | 525 | ||
505 | /* Restore the pull for DQ, DM */ | 526 | /* Restore the pull for DQ, DM */ |
506 | ldr r1, virt_ddr_io_pull1 | 527 | ldr r1, virt_ddr_io_pull1 |
507 | ldr r2, resume_io_pull_data | 528 | ldr r2, resume_io_pull_data |
508 | str r2, [r1] | 529 | str r2, [r1] |
509 | 530 | ||
510 | ldr r1, virt_ddr_io_pull2 | 531 | ldr r1, virt_ddr_io_pull2 |
511 | ldr r2, resume_io_pull_data | 532 | ldr r2, resume_io_pull_data |
512 | str r2, [r1] | 533 | str r2, [r1] |
513 | 534 | ||
514 | /* Enable EMIF */ | 535 | /* Enable EMIF */ |
515 | ldr r1, virt_emif_clkctrl | 536 | ldr r1, virt_emif_clkctrl |
516 | mov r2, #0x2 | 537 | mov r2, #0x2 |
517 | str r2, [r1] | 538 | str r2, [r1] |
518 | wait_emif_enable: | 539 | wait_emif_enable: |
519 | ldr r3, [r1] | 540 | ldr r3, [r1] |
520 | cmp r2, r3 | 541 | cmp r2, r3 |
521 | bne wait_emif_enable | 542 | bne wait_emif_enable |
522 | 543 | ||
523 | /* Enable VTP */ | 544 | /* Enable VTP */ |
524 | config_vtp_abt: | 545 | config_vtp_abt: |
525 | ldr r0, virt_ddr_vtp_ctrl | 546 | ldr r0, virt_ddr_vtp_ctrl |
526 | ldr r1, [r0] | 547 | ldr r1, [r0] |
527 | mov r2, #0x0 @ clear the register | 548 | mov r2, #0x0 @ clear the register |
528 | str r2, [r0] | 549 | str r2, [r0] |
529 | mov r2, #0x6 @ write the filter value | 550 | mov r2, #0x6 @ write the filter value |
530 | str r2, [r0] | 551 | str r2, [r0] |
531 | 552 | ||
532 | ldr r1, [r0] | 553 | ldr r1, [r0] |
533 | ldr r2, vtp_enable @ set the enable bit | 554 | ldr r2, vtp_enable @ set the enable bit |
534 | orr r2, r2, r1 | 555 | orr r2, r2, r1 |
535 | str r2, [r0] | 556 | str r2, [r0] |
536 | 557 | ||
537 | ldr r1, [r0] @ toggle the CLRZ bit | 558 | ldr r1, [r0] @ toggle the CLRZ bit |
538 | bic r1, #1 | 559 | bic r1, #1 |
539 | str r1, [r0] | 560 | str r1, [r0] |
540 | 561 | ||
541 | ldr r1, [r0] | 562 | ldr r1, [r0] |
542 | orr r1, #1 | 563 | orr r1, #1 |
543 | str r1, [r0] | 564 | str r1, [r0] |
544 | 565 | ||
545 | poll_vtp_ready_abt: | 566 | poll_vtp_ready_abt: |
546 | ldr r1, [r0] @ poll for VTP ready | 567 | ldr r1, [r0] @ poll for VTP ready |
547 | tst r1, #(1 << 5) | 568 | tst r1, #(1 << 5) |
548 | beq poll_vtp_ready_abt | 569 | beq poll_vtp_ready_abt |
549 | 570 | ||
550 | /* DDR3 reset override and mDDR mode clear */ | 571 | /* DDR3 reset override and mDDR mode clear */ |
551 | ldr r0, virt_ddr_io_ctrl | 572 | ldr r0, virt_ddr_io_ctrl |
552 | mov r1, #0 | 573 | mov r1, #0 |
553 | str r1, [r0] | 574 | str r1, [r0] |
554 | 575 | ||
555 | emif_self_refresh_dis: | 576 | emif_self_refresh_dis: |
556 | /* Disable EMIF self-refresh */ | 577 | /* Disable EMIF self-refresh */ |
557 | ldr r0, emif_addr_virt | 578 | ldr r0, emif_addr_virt |
558 | add r0, r0, #EMIF4_0_SDRAM_MGMT_CTRL | 579 | add r0, r0, #EMIF4_0_SDRAM_MGMT_CTRL |
559 | ldr r1, [r0] | 580 | ldr r1, [r0] |
560 | bic r1, r1, #(0x7 << 8) | 581 | bic r1, r1, #(0x7 << 8) |
561 | str r1, [r0] | 582 | str r1, [r0] |
562 | str r1, [r0, #4] | 583 | str r1, [r0, #4] |
563 | 584 | ||
564 | /* | 585 | /* |
565 | * A write to SDRAM CONFIG register triggers | 586 | * A write to SDRAM CONFIG register triggers |
566 | * an init sequence and hence it must be done | 587 | * an init sequence and hence it must be done |
567 | * at the end | 588 | * at the end |
568 | */ | 589 | */ |
569 | ldr r0, emif_addr_virt | 590 | ldr r0, emif_addr_virt |
570 | add r0, r0, #EMIF4_0_SDRAM_CONFIG | 591 | add r0, r0, #EMIF4_0_SDRAM_CONFIG |
571 | ldr r4, emif_sdcfg_val | 592 | ldr r4, emif_sdcfg_val |
572 | str r4, [r0] | 593 | str r4, [r0] |
573 | 594 | ||
574 | mov r0, #0x2000 | 595 | mov r0, #0x2000 |
575 | wait_loop4: | 596 | wait_loop4: |
576 | subs r0, r0, #1 | 597 | subs r0, r0, #1 |
577 | bne wait_loop4 | 598 | bne wait_loop4 |
578 | 599 | ||
579 | return_to_ddr_abt: | 600 | return_to_ddr_abt: |
580 | mov r0, #7 | 601 | mov r0, #7 |
581 | ldmfd sp!, {r4 - r11, pc} @ restore regs and return | 602 | ldmfd sp!, {r4 - r11, pc} @ restore regs and return |
582 | 603 | ||
583 | ENTRY(am33xx_resume_offset) | 604 | ENTRY(am33xx_resume_offset) |
584 | .word . - am33xx_do_wfi | 605 | .word . - am33xx_do_wfi |
585 | 606 | ||
586 | ENTRY(am33xx_resume_from_deep_sleep) | 607 | ENTRY(am33xx_resume_from_deep_sleep) |
587 | /* Take the PLLs out of LP_BYPASS */ | 608 | /* Take the PLLs out of LP_BYPASS */ |
588 | pll_lock mpu, phys_mpu_clk_mode, phys_mpu_idlest, mpu_val | 609 | pll_lock mpu, phys_mpu_clk_mode, phys_mpu_idlest, mpu_val |
589 | pll_lock per, phys_per_clk_mode, phys_per_idlest, per_val | 610 | pll_lock per, phys_per_clk_mode, phys_per_idlest, per_val |
590 | pll_lock disp, phys_disp_clk_mode, phys_disp_idlest, disp_val | 611 | pll_lock disp, phys_disp_clk_mode, phys_disp_idlest, disp_val |
591 | pll_lock ddr, phys_ddr_clk_mode, phys_ddr_idlest, ddr_val | 612 | pll_lock ddr, phys_ddr_clk_mode, phys_ddr_idlest, ddr_val |
592 | pll_lock core, phys_core_clk_mode, phys_core_idlest, core_val | 613 | pll_lock core, phys_core_clk_mode, phys_core_idlest, core_val |
593 | 614 | ||
594 | ldr r6, mem_type | 615 | ldr r6, mem_type |
595 | cmp r6, #MEM_TYPE_DDR2 | 616 | cmp r6, #MEM_TYPE_DDR2 |
596 | beq ddr2_resume_seq | 617 | beq ddr2_resume_seq |
597 | 618 | ||
598 | /* DDR3 resume path */ | 619 | /* DDR3 resume path */ |
599 | 620 | ||
600 | /* Disable SRAM LDO ret mode */ | 621 | /* Disable SRAM LDO ret mode */ |
601 | ldr r0, phys_sram_ldo_addr | 622 | ldr r0, phys_sram_ldo_addr |
602 | ldr r1, [r0] | 623 | ldr r1, [r0] |
603 | bic r1, #1 | 624 | bic r1, #1 |
604 | str r1, [r0] | 625 | str r1, [r0] |
605 | 626 | ||
606 | /* TODO: Put EMIF enable here */ | 627 | /* TODO: Put EMIF enable here */ |
607 | 628 | ||
608 | /* mddr mode selection not required for PG2.0 */ | 629 | /* mddr mode selection not required for PG2.0 */ |
609 | ldr r6, cpu_rev | 630 | ldr r6, cpu_rev |
610 | cmp r6, #CPU_REV_2 | 631 | cmp r6, #CPU_REV_2 |
611 | beq config_vtp3 | 632 | beq config_vtp3 |
612 | 633 | ||
613 | /* Take out IO of mDDR mode */ | 634 | /* Take out IO of mDDR mode */ |
614 | ldr r0, phys_ddr_io_ctrl | 635 | ldr r0, phys_ddr_io_ctrl |
615 | ldr r1, [r0] | 636 | ldr r1, [r0] |
616 | bic r1, r1, #(1 << 28) | 637 | bic r1, r1, #(1 << 28) |
617 | str r1, [r0] | 638 | str r1, [r0] |
618 | 639 | ||
619 | config_vtp3: | 640 | config_vtp3: |
620 | ldr r0, phys_ddr_vtp_ctrl | 641 | ldr r0, phys_ddr_vtp_ctrl |
621 | ldr r1, [r0] | 642 | ldr r1, [r0] |
622 | mov r2, #0x0 @ clear the register | 643 | mov r2, #0x0 @ clear the register |
623 | str r2, [r0] | 644 | str r2, [r0] |
624 | mov r2, #0x6 @ write the filter value | 645 | mov r2, #0x6 @ write the filter value |
625 | str r2, [r0] | 646 | str r2, [r0] |
626 | 647 | ||
627 | ldr r1, [r0] | 648 | ldr r1, [r0] |
628 | ldr r2, vtp_enable @ set the enable bit | 649 | ldr r2, vtp_enable @ set the enable bit |
629 | orr r2, r2, r1 | 650 | orr r2, r2, r1 |
630 | str r2, [r0] | 651 | str r2, [r0] |
631 | 652 | ||
632 | ldr r1, [r0] @ toggle the CLRZ bit | 653 | ldr r1, [r0] @ toggle the CLRZ bit |
633 | bic r1, #1 | 654 | bic r1, #1 |
634 | str r1, [r0] | 655 | str r1, [r0] |
635 | 656 | ||
636 | ldr r1, [r0] | 657 | ldr r1, [r0] |
637 | orr r1, #1 | 658 | orr r1, #1 |
638 | str r1, [r0] | 659 | str r1, [r0] |
639 | poll_vtp_ready3: | 660 | poll_vtp_ready3: |
640 | ldr r1, [r0] @ poll for VTP ready | 661 | ldr r1, [r0] @ poll for VTP ready |
641 | tst r1, #(1 << 5) | 662 | tst r1, #(1 << 5) |
642 | beq poll_vtp_ready3 | 663 | beq poll_vtp_ready3 |
643 | 664 | ||
644 | /* Disable the pull for CMD2 */ | 665 | /* Disable the pull for CMD2 */ |
645 | ldr r1, phys_ddr_cmd2_ioctrl | 666 | ldr r1, phys_ddr_cmd2_ioctrl |
646 | ldr r2, resume_io_pull_cmd | 667 | ldr r2, resume_io_pull_cmd |
647 | str r2, [r1] | 668 | str r2, [r1] |
648 | /* Disable the pull for CMD1 */ | 669 | /* Disable the pull for CMD1 */ |
649 | ldr r1, phys_ddr_cmd1_ioctrl | 670 | ldr r1, phys_ddr_cmd1_ioctrl |
650 | ldr r2, resume_io_pull_cmd | 671 | ldr r2, resume_io_pull_cmd |
651 | str r2, [r1] | 672 | str r2, [r1] |
652 | /* Disable the pull for CMD0 */ | 673 | /* Disable the pull for CMD0 */ |
653 | ldr r1, phys_ddr_cmd0_ioctrl | 674 | ldr r1, phys_ddr_cmd0_ioctrl |
654 | ldr r2, resume_io_pull_cmd | 675 | ldr r2, resume_io_pull_cmd |
655 | str r2, [r1] | 676 | str r2, [r1] |
656 | /* Disable the pull for DATA1 */ | 677 | /* Disable the pull for DATA1 */ |
657 | ldr r1, phys_ddr_data1_ioctrl | 678 | ldr r1, phys_ddr_data1_ioctrl |
658 | ldr r2, resume_io_pull_data | 679 | ldr r2, resume_io_pull_data |
659 | str r2, [r1] | 680 | str r2, [r1] |
660 | /* Disable the pull for DATA0 */ | 681 | /* Disable the pull for DATA0 */ |
661 | ldr r1, phys_ddr_data0_ioctrl | 682 | ldr r1, phys_ddr_data0_ioctrl |
662 | ldr r2, resume_io_pull_data | 683 | ldr r2, resume_io_pull_data |
663 | str r2, [r1] | 684 | str r2, [r1] |
664 | 685 | ||
665 | wait_sdram_config 2 | 686 | wait_sdram_config 2 |
666 | 687 | ||
667 | /* Enable VTT_Regulator on EVM-SK */ | 688 | /* Enable VTT_Regulator on EVM-SK */ |
668 | ldr r6, evm_id | 689 | ldr r6, evm_id |
669 | cmp r6, #EVM_SK | 690 | cmp r6, #EVM_SK |
670 | bne no_gpio_toggle3 | 691 | bne no_gpio_toggle3 |
671 | 692 | ||
693 | /* | ||
694 | * GPIO0 was not disabled during standby for EVM_SK | ||
695 | * Hence no need to enable it back here. | ||
696 | */ | ||
697 | ldr r6, suspend_state | ||
698 | cmp r6, #PM_STANDBY | ||
699 | beq skip_gpio_enable2 | ||
700 | |||
672 | /* Enable GPIO0 for EVM-SK here */ | 701 | /* Enable GPIO0 for EVM-SK here */ |
673 | ldr r1, phys_gpio0_clkctrl | 702 | ldr r1, phys_gpio0_clkctrl |
674 | mov r2, #0x2 | 703 | mov r2, #0x2 |
675 | str r2, [r1] | 704 | str r2, [r1] |
676 | wait_gpio0_phys_enable: | 705 | wait_gpio0_phys_enable: |
677 | ldr r3, [r1] | 706 | ldr r3, [r1] |
678 | cmp r2, r3 | 707 | cmp r2, r3 |
679 | bne wait_gpio0_phys_enable | 708 | bne wait_gpio0_phys_enable |
680 | 709 | ||
710 | skip_gpio_enable2: | ||
681 | /* Drive GPIO0_7 HIGH */ | 711 | /* Drive GPIO0_7 HIGH */ |
682 | ldr r0, gpio0_phys_addr | 712 | ldr r0, gpio0_phys_addr |
683 | ldr r1, [r0, #OMAP4_GPIO_SETDATAOUT] | 713 | ldr r1, [r0, #OMAP4_GPIO_SETDATAOUT] |
684 | mov r2, #(1 << 7) | 714 | mov r2, #(1 << 7) |
685 | str r2, [r0, #OMAP4_GPIO_SETDATAOUT] | 715 | str r2, [r0, #OMAP4_GPIO_SETDATAOUT] |
686 | 716 | ||
687 | no_gpio_toggle3: | 717 | no_gpio_toggle3: |
688 | b config_emif_timings | 718 | b config_emif_timings |
689 | 719 | ||
690 | /* DDR2 resume path */ | 720 | /* DDR2 resume path */ |
691 | ddr2_resume_seq: | 721 | ddr2_resume_seq: |
692 | /* Disable SRAM LDO ret mode */ | 722 | /* Disable SRAM LDO ret mode */ |
693 | ldr r0, phys_sram_ldo_addr | 723 | ldr r0, phys_sram_ldo_addr |
694 | ldr r1, [r0] | 724 | ldr r1, [r0] |
695 | bic r1, #1 | 725 | bic r1, #1 |
696 | str r1, [r0] | 726 | str r1, [r0] |
697 | 727 | ||
698 | /* Restore the pull for DQ, DM */ | 728 | /* Restore the pull for DQ, DM */ |
699 | ldr r1, phys_ddr_io_pull1 | 729 | ldr r1, phys_ddr_io_pull1 |
700 | ldr r2, resume_io_pull_data | 730 | ldr r2, resume_io_pull_data |
701 | str r2, [r1] | 731 | str r2, [r1] |
702 | 732 | ||
703 | ldr r1, phys_ddr_io_pull2 | 733 | ldr r1, phys_ddr_io_pull2 |
704 | ldr r2, resume_io_pull_data | 734 | ldr r2, resume_io_pull_data |
705 | str r2, [r1] | 735 | str r2, [r1] |
706 | 736 | ||
707 | config_vtp: | 737 | config_vtp: |
708 | ldr r0, phys_ddr_vtp_ctrl | 738 | ldr r0, phys_ddr_vtp_ctrl |
709 | ldr r1, [r0] | 739 | ldr r1, [r0] |
710 | mov r2, #0x0 @ clear the register | 740 | mov r2, #0x0 @ clear the register |
711 | str r2, [r0] | 741 | str r2, [r0] |
712 | mov r2, #0x6 @ write the filter value | 742 | mov r2, #0x6 @ write the filter value |
713 | str r2, [r0] | 743 | str r2, [r0] |
714 | 744 | ||
715 | ldr r1, [r0] | 745 | ldr r1, [r0] |
716 | ldr r2, vtp_enable @ set the enable bit | 746 | ldr r2, vtp_enable @ set the enable bit |
717 | orr r2, r2, r1 | 747 | orr r2, r2, r1 |
718 | str r2, [r0] | 748 | str r2, [r0] |
719 | 749 | ||
720 | ldr r1, [r0] @ toggle the CLRZ bit | 750 | ldr r1, [r0] @ toggle the CLRZ bit |
721 | bic r1, #1 | 751 | bic r1, #1 |
722 | str r1, [r0] | 752 | str r1, [r0] |
723 | 753 | ||
724 | ldr r1, [r0] | 754 | ldr r1, [r0] |
725 | orr r1, #1 | 755 | orr r1, #1 |
726 | str r1, [r0] | 756 | str r1, [r0] |
727 | 757 | ||
728 | poll_vtp_ready: | 758 | poll_vtp_ready: |
729 | ldr r1, [r0] @ poll for VTP ready | 759 | ldr r1, [r0] @ poll for VTP ready |
730 | tst r1, #(1 << 5) | 760 | tst r1, #(1 << 5) |
731 | beq poll_vtp_ready | 761 | beq poll_vtp_ready |
732 | 762 | ||
733 | /* DDR3 reset override and mDDR mode clear */ | 763 | /* DDR3 reset override and mDDR mode clear */ |
734 | ldr r0, phys_ddr_io_ctrl | 764 | ldr r0, phys_ddr_io_ctrl |
735 | mov r1, #0 | 765 | mov r1, #0 |
736 | str r1, [r0] | 766 | str r1, [r0] |
737 | 767 | ||
738 | config_emif_timings: | 768 | config_emif_timings: |
739 | ldr r3, emif_phys_addr | 769 | ldr r3, emif_phys_addr |
740 | ldr r4, emif_rd_lat_val | 770 | ldr r4, emif_rd_lat_val |
741 | rd_lat: | 771 | rd_lat: |
742 | str r4, [r3, #EMIF4_0_DDR_PHY_CTRL_1] | 772 | str r4, [r3, #EMIF4_0_DDR_PHY_CTRL_1] |
743 | str r4, [r3, #EMIF4_0_DDR_PHY_CTRL_1_SHADOW] | 773 | str r4, [r3, #EMIF4_0_DDR_PHY_CTRL_1_SHADOW] |
744 | timing1: | 774 | timing1: |
745 | ldr r4, emif_timing1_val | 775 | ldr r4, emif_timing1_val |
746 | str r4, [r3, #EMIF4_0_SDRAM_TIM_1] | 776 | str r4, [r3, #EMIF4_0_SDRAM_TIM_1] |
747 | str r4, [r3, #EMIF4_0_SDRAM_TIM_1_SHADOW] | 777 | str r4, [r3, #EMIF4_0_SDRAM_TIM_1_SHADOW] |
748 | timing2: | 778 | timing2: |
749 | ldr r4, emif_timing2_val | 779 | ldr r4, emif_timing2_val |
750 | str r4, [r3, #EMIF4_0_SDRAM_TIM_2] | 780 | str r4, [r3, #EMIF4_0_SDRAM_TIM_2] |
751 | str r4, [r3, #EMIF4_0_SDRAM_TIM_2_SHADOW] | 781 | str r4, [r3, #EMIF4_0_SDRAM_TIM_2_SHADOW] |
752 | timing3: | 782 | timing3: |
753 | ldr r4, emif_timing3_val | 783 | ldr r4, emif_timing3_val |
754 | str r4, [r3, #EMIF4_0_SDRAM_TIM_3] | 784 | str r4, [r3, #EMIF4_0_SDRAM_TIM_3] |
755 | str r4, [r3, #EMIF4_0_SDRAM_TIM_3_SHADOW] | 785 | str r4, [r3, #EMIF4_0_SDRAM_TIM_3_SHADOW] |
756 | sdram_ref_ctrl: | 786 | sdram_ref_ctrl: |
757 | ldr r4, emif_ref_ctrl_val | 787 | ldr r4, emif_ref_ctrl_val |
758 | str r4, [r3, #EMIF4_0_SDRAM_REF_CTRL] | 788 | str r4, [r3, #EMIF4_0_SDRAM_REF_CTRL] |
759 | str r4, [r3, #EMIF4_0_SDRAM_REF_CTRL_SHADOW] | 789 | str r4, [r3, #EMIF4_0_SDRAM_REF_CTRL_SHADOW] |
760 | pmcr: | 790 | pmcr: |
761 | ldr r4, emif_pmcr_val | 791 | ldr r4, emif_pmcr_val |
762 | str r4, [r3, #EMIF4_0_SDRAM_MGMT_CTRL] | 792 | str r4, [r3, #EMIF4_0_SDRAM_MGMT_CTRL] |
763 | pmcr_shdw: | 793 | pmcr_shdw: |
764 | ldr r4, emif_pmcr_shdw_val | 794 | ldr r4, emif_pmcr_shdw_val |
765 | str r4, [r3, #EMIF4_0_SDRAM_MGMT_CTRL_SHADOW] | 795 | str r4, [r3, #EMIF4_0_SDRAM_MGMT_CTRL_SHADOW] |
766 | 796 | ||
767 | /* | 797 | /* |
768 | * Output impedence calib needed only for DDR3 | 798 | * Output impedence calib needed only for DDR3 |
769 | * but since the initial state of this will be | 799 | * but since the initial state of this will be |
770 | * disabled for DDR2 no harm in restoring the | 800 | * disabled for DDR2 no harm in restoring the |
771 | * old configuration | 801 | * old configuration |
772 | */ | 802 | */ |
773 | zqcfg: | 803 | zqcfg: |
774 | ldr r4, emif_zqcfg_val | 804 | ldr r4, emif_zqcfg_val |
775 | str r4, [r3, #EMIF4_0_ZQ_CONFIG] | 805 | str r4, [r3, #EMIF4_0_ZQ_CONFIG] |
776 | 806 | ||
777 | /* Write to SDRAM_CONFIG only for DDR2 */ | 807 | /* Write to SDRAM_CONFIG only for DDR2 */ |
778 | ldr r6, mem_type | 808 | ldr r6, mem_type |
779 | cmp r6, #MEM_TYPE_DDR2 | 809 | cmp r6, #MEM_TYPE_DDR2 |
780 | beq ddr2_sdcfg | 810 | beq ddr2_sdcfg |
781 | 811 | ||
782 | /* For DDR3, make DDR_RESET low via control module */ | 812 | /* For DDR3, make DDR_RESET low via control module */ |
783 | ldr r0, phys_ddr_io_ctrl | 813 | ldr r0, phys_ddr_io_ctrl |
784 | ldr r1, [r0] | 814 | ldr r1, [r0] |
785 | bic r1, r1, #(1 << 31) | 815 | bic r1, r1, #(1 << 31) |
786 | str r1, [r0] | 816 | str r1, [r0] |
787 | 817 | ||
788 | b return_to_ddr | 818 | b return_to_ddr |
789 | 819 | ||
790 | /* | 820 | /* |
791 | * A write to SDRAM CONFIG register triggers | 821 | * A write to SDRAM CONFIG register triggers |
792 | * an init sequence and hence it must be done | 822 | * an init sequence and hence it must be done |
793 | * at the end for DDR2 | 823 | * at the end for DDR2 |
794 | */ | 824 | */ |
795 | ddr2_sdcfg: | 825 | ddr2_sdcfg: |
796 | ldr r4, emif_sdcfg_val | 826 | ldr r4, emif_sdcfg_val |
797 | str r4, [r3, #EMIF4_0_SDRAM_CONFIG] | 827 | str r4, [r3, #EMIF4_0_SDRAM_CONFIG] |
798 | 828 | ||
799 | return_to_ddr: | 829 | return_to_ddr: |
800 | /* Back from la-la-land. Kill some time for sanity to settle in */ | 830 | /* Back from la-la-land. Kill some time for sanity to settle in */ |
801 | mov r0, #0x1000 | 831 | mov r0, #0x1000 |
802 | wait_loop2: | 832 | wait_loop2: |
803 | subs r0, r0, #1 | 833 | subs r0, r0, #1 |
804 | bne wait_loop2 | 834 | bne wait_loop2 |
805 | 835 | ||
806 | /* We are back. Branch to the common CPU resume routine */ | 836 | /* We are back. Branch to the common CPU resume routine */ |
807 | ENTRY(am33xx_resume_vector) | 837 | ENTRY(am33xx_resume_vector) |
808 | ldr pc, resume_addr | 838 | ldr pc, resume_addr |
809 | 839 | ||
810 | /* | 840 | /* |
811 | * Local variables | 841 | * Local variables |
812 | */ | 842 | */ |
813 | 843 | ||
814 | dcache_flush: | 844 | dcache_flush: |
815 | .word v7_flush_dcache_all | 845 | .word v7_flush_dcache_all |
816 | resume_addr: | 846 | resume_addr: |
817 | .word cpu_resume - PAGE_OFFSET + 0x80000000 | 847 | .word cpu_resume - PAGE_OFFSET + 0x80000000 |
818 | 848 | ||
819 | emif_addr_func: | 849 | emif_addr_func: |
820 | .word am33xx_get_ram_base | 850 | .word am33xx_get_ram_base |
821 | emif_phys_addr: | 851 | emif_phys_addr: |
822 | .word AM33XX_EMIF0_BASE | 852 | .word AM33XX_EMIF0_BASE |
823 | 853 | ||
824 | gpio0_addr_func: | 854 | gpio0_addr_func: |
825 | .word am33xx_get_gpio0_base | 855 | .word am33xx_get_gpio0_base |
826 | gpio0_phys_addr: | 856 | gpio0_phys_addr: |
827 | .word AM33XX_GPIO0_BASE | 857 | .word AM33XX_GPIO0_BASE |
828 | 858 | ||
829 | ddr_start: | 859 | ddr_start: |
830 | .word PAGE_OFFSET | 860 | .word PAGE_OFFSET |
831 | 861 | ||
832 | virt_mpu_idlest: | 862 | virt_mpu_idlest: |
833 | .word AM33XX_CM_IDLEST_DPLL_MPU | 863 | .word AM33XX_CM_IDLEST_DPLL_MPU |
834 | virt_mpu_clk_mode: | 864 | virt_mpu_clk_mode: |
835 | .word AM33XX_CM_CLKMODE_DPLL_MPU | 865 | .word AM33XX_CM_CLKMODE_DPLL_MPU |
836 | 866 | ||
837 | phys_pll_mod: | 867 | phys_pll_mod: |
838 | .word AM33XX_CM_BASE + AM33XX_CM_WKUP_MOD | 868 | .word AM33XX_CM_BASE + AM33XX_CM_WKUP_MOD |
839 | phys_mpu_clk_mode: | 869 | phys_mpu_clk_mode: |
840 | .word AM33XX_CM_BASE + AM33XX_CM_WKUP_MOD + AM33XX_CM_CLKMODE_DPLL_MPU_OFFSET | 870 | .word AM33XX_CM_BASE + AM33XX_CM_WKUP_MOD + AM33XX_CM_CLKMODE_DPLL_MPU_OFFSET |
841 | phys_mpu_idlest: | 871 | phys_mpu_idlest: |
842 | .word AM33XX_CM_BASE + AM33XX_CM_WKUP_MOD + AM33XX_CM_IDLEST_DPLL_MPU_OFFSET | 872 | .word AM33XX_CM_BASE + AM33XX_CM_WKUP_MOD + AM33XX_CM_IDLEST_DPLL_MPU_OFFSET |
843 | 873 | ||
844 | virt_core_idlest: | 874 | virt_core_idlest: |
845 | .word AM33XX_CM_IDLEST_DPLL_CORE | 875 | .word AM33XX_CM_IDLEST_DPLL_CORE |
846 | virt_core_clk_mode: | 876 | virt_core_clk_mode: |
847 | .word AM33XX_CM_CLKMODE_DPLL_CORE | 877 | .word AM33XX_CM_CLKMODE_DPLL_CORE |
848 | phys_core_clk_mode: | 878 | phys_core_clk_mode: |
849 | .word AM33XX_CM_BASE + AM33XX_CM_WKUP_MOD + AM33XX_CM_CLKMODE_DPLL_CORE_OFFSET | 879 | .word AM33XX_CM_BASE + AM33XX_CM_WKUP_MOD + AM33XX_CM_CLKMODE_DPLL_CORE_OFFSET |
850 | phys_core_idlest: | 880 | phys_core_idlest: |
851 | .word AM33XX_CM_BASE + AM33XX_CM_WKUP_MOD + AM33XX_CM_IDLEST_DPLL_CORE_OFFSET | 881 | .word AM33XX_CM_BASE + AM33XX_CM_WKUP_MOD + AM33XX_CM_IDLEST_DPLL_CORE_OFFSET |
852 | 882 | ||
853 | virt_per_idlest: | 883 | virt_per_idlest: |
854 | .word AM33XX_CM_IDLEST_DPLL_PER | 884 | .word AM33XX_CM_IDLEST_DPLL_PER |
855 | virt_per_clk_mode: | 885 | virt_per_clk_mode: |
856 | .word AM33XX_CM_CLKMODE_DPLL_PER | 886 | .word AM33XX_CM_CLKMODE_DPLL_PER |
857 | phys_per_clk_mode: | 887 | phys_per_clk_mode: |
858 | .word AM33XX_CM_BASE + AM33XX_CM_WKUP_MOD + AM33XX_CM_CLKMODE_DPLL_PER_OFFSET | 888 | .word AM33XX_CM_BASE + AM33XX_CM_WKUP_MOD + AM33XX_CM_CLKMODE_DPLL_PER_OFFSET |
859 | phys_per_idlest: | 889 | phys_per_idlest: |
860 | .word AM33XX_CM_BASE + AM33XX_CM_WKUP_MOD + AM33XX_CM_IDLEST_DPLL_PER_OFFSET | 890 | .word AM33XX_CM_BASE + AM33XX_CM_WKUP_MOD + AM33XX_CM_IDLEST_DPLL_PER_OFFSET |
861 | 891 | ||
862 | virt_disp_idlest: | 892 | virt_disp_idlest: |
863 | .word AM33XX_CM_IDLEST_DPLL_DISP | 893 | .word AM33XX_CM_IDLEST_DPLL_DISP |
864 | virt_disp_clk_mode: | 894 | virt_disp_clk_mode: |
865 | .word AM33XX_CM_CLKMODE_DPLL_DISP | 895 | .word AM33XX_CM_CLKMODE_DPLL_DISP |
866 | phys_disp_clk_mode: | 896 | phys_disp_clk_mode: |
867 | .word AM33XX_CM_BASE + AM33XX_CM_WKUP_MOD + AM33XX_CM_CLKMODE_DPLL_DISP_OFFSET | 897 | .word AM33XX_CM_BASE + AM33XX_CM_WKUP_MOD + AM33XX_CM_CLKMODE_DPLL_DISP_OFFSET |
868 | phys_disp_idlest: | 898 | phys_disp_idlest: |
869 | .word AM33XX_CM_BASE + AM33XX_CM_WKUP_MOD + AM33XX_CM_IDLEST_DPLL_DISP_OFFSET | 899 | .word AM33XX_CM_BASE + AM33XX_CM_WKUP_MOD + AM33XX_CM_IDLEST_DPLL_DISP_OFFSET |
870 | 900 | ||
871 | virt_ddr_idlest: | 901 | virt_ddr_idlest: |
872 | .word AM33XX_CM_IDLEST_DPLL_DDR | 902 | .word AM33XX_CM_IDLEST_DPLL_DDR |
873 | virt_ddr_clk_mode: | 903 | virt_ddr_clk_mode: |
874 | .word AM33XX_CM_CLKMODE_DPLL_DDR | 904 | .word AM33XX_CM_CLKMODE_DPLL_DDR |
875 | phys_ddr_clk_mode: | 905 | phys_ddr_clk_mode: |
876 | .word AM33XX_CM_BASE + AM33XX_CM_WKUP_MOD + AM33XX_CM_CLKMODE_DPLL_DDR_OFFSET | 906 | .word AM33XX_CM_BASE + AM33XX_CM_WKUP_MOD + AM33XX_CM_CLKMODE_DPLL_DDR_OFFSET |
877 | phys_ddr_idlest: | 907 | phys_ddr_idlest: |
878 | .word AM33XX_CM_BASE + AM33XX_CM_WKUP_MOD + AM33XX_CM_IDLEST_DPLL_DDR_OFFSET | 908 | .word AM33XX_CM_BASE + AM33XX_CM_WKUP_MOD + AM33XX_CM_IDLEST_DPLL_DDR_OFFSET |
879 | 909 | ||
880 | virt_sram_ldo_addr: | 910 | virt_sram_ldo_addr: |
881 | .word AM33XX_PRM_LDO_SRAM_MPU_CTRL | 911 | .word AM33XX_PRM_LDO_SRAM_MPU_CTRL |
882 | phys_sram_ldo_addr: | 912 | phys_sram_ldo_addr: |
883 | .word AM33XX_PRM_BASE + AM33XX_PRM_DEVICE_MOD + AM33XX_PRM_LDO_SRAM_MPU_CTRL_OFFSET | 913 | .word AM33XX_PRM_BASE + AM33XX_PRM_DEVICE_MOD + AM33XX_PRM_LDO_SRAM_MPU_CTRL_OFFSET |
884 | 914 | ||
885 | virt_gpio0_clkctrl: | 915 | virt_gpio0_clkctrl: |
886 | .word AM33XX_CM_WKUP_GPIO0_CLKCTRL | 916 | .word AM33XX_CM_WKUP_GPIO0_CLKCTRL |
887 | phys_gpio0_clkctrl: | 917 | phys_gpio0_clkctrl: |
888 | .word AM33XX_CM_BASE + AM33XX_CM_WKUP_MOD + AM33XX_CM_WKUP_GPIO0_CLKCTRL_OFFSET | 918 | .word AM33XX_CM_BASE + AM33XX_CM_WKUP_MOD + AM33XX_CM_WKUP_GPIO0_CLKCTRL_OFFSET |
889 | 919 | ||
890 | virt_emif_clkctrl: | 920 | virt_emif_clkctrl: |
891 | .word AM33XX_CM_PER_EMIF_CLKCTRL | 921 | .word AM33XX_CM_PER_EMIF_CLKCTRL |
892 | phys_emif_clkctrl: | 922 | phys_emif_clkctrl: |
893 | .word AM33XX_CM_BASE + AM33XX_CM_PER_MOD + AM33XX_CM_PER_EMIF_CLKCTRL_OFFSET | 923 | .word AM33XX_CM_BASE + AM33XX_CM_PER_MOD + AM33XX_CM_PER_EMIF_CLKCTRL_OFFSET |
894 | module_disabled_val: | 924 | module_disabled_val: |
895 | .word 0x30000 | 925 | .word 0x30000 |
896 | 926 | ||
897 | /* DDR related stuff */ | 927 | /* DDR related stuff */ |
898 | virt_ddr_io_ctrl: | 928 | virt_ddr_io_ctrl: |
899 | .word AM33XX_CTRL_REGADDR(0x0E04) | 929 | .word AM33XX_CTRL_REGADDR(0x0E04) |
900 | phys_ddr_io_ctrl: | 930 | phys_ddr_io_ctrl: |
901 | .word DDR_IO_CTRL | 931 | .word DDR_IO_CTRL |
902 | virt_ddr_vtp_ctrl: | 932 | virt_ddr_vtp_ctrl: |
903 | .word AM33XX_CTRL_REGADDR(0x0E0C) | 933 | .word AM33XX_CTRL_REGADDR(0x0E0C) |
904 | phys_ddr_vtp_ctrl: | 934 | phys_ddr_vtp_ctrl: |
905 | .word VTP0_CTRL_REG | 935 | .word VTP0_CTRL_REG |
906 | virt_ddr_cmd0_ioctrl: | 936 | virt_ddr_cmd0_ioctrl: |
907 | .word AM33XX_CTRL_REGADDR(0x1404) | 937 | .word AM33XX_CTRL_REGADDR(0x1404) |
908 | phys_ddr_cmd0_ioctrl: | 938 | phys_ddr_cmd0_ioctrl: |
909 | .word DDR_CMD0_IOCTRL | 939 | .word DDR_CMD0_IOCTRL |
910 | virt_ddr_cmd1_ioctrl: | 940 | virt_ddr_cmd1_ioctrl: |
911 | .word AM33XX_CTRL_REGADDR(0x1408) | 941 | .word AM33XX_CTRL_REGADDR(0x1408) |
912 | phys_ddr_cmd1_ioctrl: | 942 | phys_ddr_cmd1_ioctrl: |
913 | .word DDR_CMD1_IOCTRL | 943 | .word DDR_CMD1_IOCTRL |
914 | virt_ddr_cmd2_ioctrl: | 944 | virt_ddr_cmd2_ioctrl: |
915 | .word AM33XX_CTRL_REGADDR(0x140C) | 945 | .word AM33XX_CTRL_REGADDR(0x140C) |
916 | phys_ddr_cmd2_ioctrl: | 946 | phys_ddr_cmd2_ioctrl: |
917 | .word DDR_CMD2_IOCTRL | 947 | .word DDR_CMD2_IOCTRL |
918 | virt_ddr_data0_ioctrl: | 948 | virt_ddr_data0_ioctrl: |
919 | .word AM33XX_CTRL_REGADDR(0x1440) | 949 | .word AM33XX_CTRL_REGADDR(0x1440) |
920 | phys_ddr_data0_ioctrl: | 950 | phys_ddr_data0_ioctrl: |
921 | .word DDR_DATA0_IOCTRL | 951 | .word DDR_DATA0_IOCTRL |
922 | virt_ddr_data1_ioctrl: | 952 | virt_ddr_data1_ioctrl: |
923 | .word AM33XX_CTRL_REGADDR(0x1444) | 953 | .word AM33XX_CTRL_REGADDR(0x1444) |
924 | phys_ddr_data1_ioctrl: | 954 | phys_ddr_data1_ioctrl: |
925 | .word DDR_DATA1_IOCTRL | 955 | .word DDR_DATA1_IOCTRL |
926 | vtp_enable: | 956 | vtp_enable: |
927 | .word VTP_CTRL_ENABLE | 957 | .word VTP_CTRL_ENABLE |
928 | 958 | ||
929 | virt_ddr_io_pull1: | 959 | virt_ddr_io_pull1: |
930 | .word AM33XX_CTRL_REGADDR(0x1440) | 960 | .word AM33XX_CTRL_REGADDR(0x1440) |
931 | phys_ddr_io_pull1: | 961 | phys_ddr_io_pull1: |
932 | .word AM33XX_CTRL_BASE + (0x1440) | 962 | .word AM33XX_CTRL_BASE + (0x1440) |
933 | virt_ddr_io_pull2: | 963 | virt_ddr_io_pull2: |
934 | .word AM33XX_CTRL_REGADDR(0x1444) | 964 | .word AM33XX_CTRL_REGADDR(0x1444) |
935 | phys_ddr_io_pull2: | 965 | phys_ddr_io_pull2: |
936 | .word AM33XX_CTRL_BASE + (0x1444) | 966 | .word AM33XX_CTRL_BASE + (0x1444) |
937 | virt_ddr_io_pull3: | 967 | virt_ddr_io_pull3: |
938 | .word AM33XX_CTRL_REGADDR(0x1448) | 968 | .word AM33XX_CTRL_REGADDR(0x1448) |
939 | phys_ddr_io_pull3: | 969 | phys_ddr_io_pull3: |
940 | .word AM33XX_CTRL_BASE + (0x1448) | 970 | .word AM33XX_CTRL_BASE + (0x1448) |
941 | 971 | ||
942 | susp_io_pull_data: | 972 | susp_io_pull_data: |
943 | .word 0x3FF00003 | 973 | .word 0x3FF00003 |
944 | susp_io_pull_cmd1: | 974 | susp_io_pull_cmd1: |
945 | .word 0xFFE0018B | 975 | .word 0xFFE0018B |
946 | susp_io_pull_cmd2: | 976 | susp_io_pull_cmd2: |
947 | .word 0xFFA0098B | 977 | .word 0xFFA0098B |
948 | 978 | ||
949 | resume_io_pull_data: | 979 | resume_io_pull_data: |
950 | .word 0x18B | 980 | .word 0x18B |
951 | resume_io_pull_cmd: | 981 | resume_io_pull_cmd: |
952 | .word 0x18B | 982 | .word 0x18B |
953 | 983 | ||
954 | susp_vtp_ctrl_val: | 984 | susp_vtp_ctrl_val: |
955 | .word 0xDEADBEEF | 985 | .word 0xDEADBEEF |
956 | mem_type: | 986 | mem_type: |
957 | .word 0xDEADBEEF | 987 | .word 0xDEADBEEF |
958 | evm_id: | 988 | evm_id: |
959 | .word 0xDEADBEEF | 989 | .word 0xDEADBEEF |
960 | cpu_rev: | 990 | cpu_rev: |
991 | .word 0xDEADBEEF | ||
992 | suspend_state: | ||
961 | .word 0xDEADBEEF | 993 | .word 0xDEADBEEF |
962 | emif_addr_virt: | 994 | emif_addr_virt: |
963 | .word 0xDEADBEEF | 995 | .word 0xDEADBEEF |
964 | gpio0_addr_virt: | 996 | gpio0_addr_virt: |
965 | .word 0xDEADBEEF | 997 | .word 0xDEADBEEF |
966 | emif_rd_lat_val: | 998 | emif_rd_lat_val: |
967 | .word 0xDEADBEEF | 999 | .word 0xDEADBEEF |
968 | emif_timing1_val: | 1000 | emif_timing1_val: |
969 | .word 0xDEADBEEF | 1001 | .word 0xDEADBEEF |
970 | emif_timing2_val: | 1002 | emif_timing2_val: |
971 | .word 0xDEADBEEF | 1003 | .word 0xDEADBEEF |
972 | emif_timing3_val: | 1004 | emif_timing3_val: |
973 | .word 0xDEADBEEF | 1005 | .word 0xDEADBEEF |
974 | emif_sdcfg_val: | 1006 | emif_sdcfg_val: |
975 | .word 0xDEADBEEF | 1007 | .word 0xDEADBEEF |
976 | emif_ref_ctrl_val: | 1008 | emif_ref_ctrl_val: |
977 | .word 0xDEADBEEF | 1009 | .word 0xDEADBEEF |
978 | emif_zqcfg_val: | 1010 | emif_zqcfg_val: |
979 | .word 0xDEADBEEF | 1011 | .word 0xDEADBEEF |
980 | emif_pmcr_val: | 1012 | emif_pmcr_val: |
981 | .word 0xDEADBEEF | 1013 | .word 0xDEADBEEF |
982 | emif_pmcr_shdw_val: | 1014 | emif_pmcr_shdw_val: |
983 | .word 0xDEADBEEF | 1015 | .word 0xDEADBEEF |
984 | 1016 | ||
985 | /* PLL CLKMODE before suspend */ | 1017 | /* PLL CLKMODE before suspend */ |
986 | clk_mode_mpu_val: | 1018 | clk_mode_mpu_val: |
987 | .word 0xDEADBEEF | 1019 | .word 0xDEADBEEF |
988 | clk_mode_per_val: | 1020 | clk_mode_per_val: |
989 | .word 0xDEADBEEF | 1021 | .word 0xDEADBEEF |
990 | clk_mode_disp_val: | 1022 | clk_mode_disp_val: |
991 | .word 0xDEADBEEF | 1023 | .word 0xDEADBEEF |
992 | clk_mode_ddr_val: | 1024 | clk_mode_ddr_val: |
993 | .word 0xDEADBEEF | 1025 | .word 0xDEADBEEF |
994 | clk_mode_core_val: | 1026 | clk_mode_core_val: |
995 | .word 0xDEADBEEF | 1027 | .word 0xDEADBEEF |
996 | 1028 | ||
997 | ENTRY(am33xx_do_wfi_sz) | 1029 | ENTRY(am33xx_do_wfi_sz) |
998 | .word . - am33xx_do_wfi | 1030 | .word . - am33xx_do_wfi |
999 | 1031 |