/* include/linux/suspend.h (reconstructed from blame view, base commit 95d9ffbe0) */
#ifndef _LINUX_SUSPEND_H
#define _LINUX_SUSPEND_H

#include <linux/swap.h>
#include <linux/notifier.h>
#include <linux/init.h>
#include <linux/pm.h>
#include <linux/mm.h>
#include <asm/errno.h>

/* VT console switching across suspend/resume; stubbed out when the
 * kernel has no VT console or no sleep support. */
#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_VT) && defined(CONFIG_VT_CONSOLE)
extern void pm_set_vt_switch(int);
extern int pm_prepare_console(void);
extern void pm_restore_console(void);
#else
static inline void pm_set_vt_switch(int do_switch)
{
}

static inline int pm_prepare_console(void)
{
	return 0;
}

static inline void pm_restore_console(void)
{
}
#endif

/* System sleep states.  __bitwise makes sparse flag mixing these with
 * plain integers; note 2 is intentionally unused. */
typedef int __bitwise suspend_state_t;

#define PM_SUSPEND_ON		((__force suspend_state_t) 0)
#define PM_SUSPEND_STANDBY	((__force suspend_state_t) 1)
#define PM_SUSPEND_MEM		((__force suspend_state_t) 3)
#define PM_SUSPEND_MAX		((__force suspend_state_t) 4)

/**
 * struct platform_suspend_ops - Callbacks for managing platform dependent
 *	system sleep states.
 *
 * @valid: Callback to determine if given system sleep state is supported by
 *	the platform.
 *	Valid (ie. supported) states are advertised in /sys/power/state.  Note
 *	that it still may be impossible to enter given system sleep state if the
 *	conditions aren't right.
 *	There is the %suspend_valid_only_mem function available that can be
 *	assigned to this if the platform only supports mem sleep.
 *
 * @begin: Initialise a transition to given system sleep state.
 *	@begin() is executed right prior to suspending devices.  The information
 *	conveyed to the platform code by @begin() should be disregarded by it as
 *	soon as @end() is executed.  If @begin() fails (ie. returns nonzero),
 *	@prepare(), @enter() and @finish() will not be called by the PM core.
 *	This callback is optional.  However, if it is implemented, the argument
 *	passed to @enter() is redundant and should be ignored.
 *
 * @prepare: Prepare the platform for entering the system sleep state indicated
 *	by @begin().
 *	@prepare() is called right after devices have been suspended (ie. the
 *	appropriate .suspend() method has been executed for each device) and
 *	before device drivers' late suspend callbacks are executed.  It returns
 *	0 on success or a negative error code otherwise, in which case the
 *	system cannot enter the desired sleep state (@prepare_late(), @enter(),
 *	and @wake() will not be called in that case).
 *
 * @prepare_late: Finish preparing the platform for entering the system sleep
 *	state indicated by @begin().
 *	@prepare_late is called before disabling nonboot CPUs and after
 *	device drivers' late suspend callbacks have been executed.  It returns
 *	0 on success or a negative error code otherwise, in which case the
 *	system cannot enter the desired sleep state (@enter() will not be
 *	executed).
 *
 * @enter: Enter the system sleep state indicated by @begin() or represented by
 *	the argument if @begin() is not implemented.
 *	This callback is mandatory.  It returns 0 on success or a negative
 *	error code otherwise, in which case the system cannot enter the desired
 *	sleep state.
 *
 * @wake: Called when the system has just left a sleep state, right after
 *	the nonboot CPUs have been enabled and before device drivers' early
 *	resume callbacks are executed.
 *	This callback is optional, but should be implemented by the platforms
 *	that implement @prepare_late().  If implemented, it is always called
 *	after @prepare_late and @enter(), even if one of them fails.
 *
 * @finish: Finish wake-up of the platform.
 *	@finish is called right prior to calling device drivers' regular suspend
 *	callbacks.
 *	This callback is optional, but should be implemented by the platforms
 *	that implement @prepare().  If implemented, it is always called after
 *	@enter() and @wake(), even if any of them fails.  It is executed after
 *	a failing @prepare.
 *
 * @end: Called by the PM core right after resuming devices, to indicate to
 *	the platform that the system has returned to the working state or
 *	the transition to the sleep state has been aborted.
 *	This callback is optional, but should be implemented by the platforms
 *	that implement @begin().  Accordingly, platforms implementing @begin()
 *	should also provide a @end() which cleans up transitions aborted before
 *	@enter().
 *
 * @recover: Recover the platform from a suspend failure.
 *	Called by the PM core if the suspending of devices fails.
 *	This callback is optional and should only be implemented by platforms
 *	which require special recovery actions in that situation.
 */
struct platform_suspend_ops {
	int (*valid)(suspend_state_t state);
	int (*begin)(suspend_state_t state);
	int (*prepare)(void);
	int (*prepare_late)(void);
	int (*enter)(suspend_state_t state);
	void (*wake)(void);
	void (*finish)(void);
	void (*end)(void);
	void (*recover)(void);
};

#ifdef CONFIG_SUSPEND
/**
 * suspend_set_ops - set platform dependent suspend operations
 * @ops: The new suspend operations to set.
 */
extern void suspend_set_ops(struct platform_suspend_ops *ops);
extern int suspend_valid_only_mem(suspend_state_t state);

/**
 * arch_suspend_disable_irqs - disable IRQs for suspend
 *
 * Disables IRQs (in the default case). This is a weak symbol in the common
 * code and thus allows architectures to override it if more needs to be
 * done. Not called for suspend to disk.
 */
extern void arch_suspend_disable_irqs(void);

/**
 * arch_suspend_enable_irqs - enable IRQs after suspend
 *
 * Enables IRQs (in the default case). This is a weak symbol in the common
 * code and thus allows architectures to override it if more needs to be
 * done. Not called for suspend to disk.
 */
extern void arch_suspend_enable_irqs(void);

extern int pm_suspend(suspend_state_t state);
#else /* !CONFIG_SUSPEND */
#define suspend_valid_only_mem	NULL

static inline void suspend_set_ops(struct platform_suspend_ops *ops) {}
static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; }
#endif /* !CONFIG_SUSPEND */

/* struct pbe is used for creating lists of pages that should be restored
 * atomically during the resume from disk, because the page frames they have
 * occupied before the suspend are in use. */
struct pbe {
	void *address;		/* address of the copy */
	void *orig_address;	/* original address of a page */
	struct pbe *next;
};

/* mm/page_alloc.c */
extern void mark_free_pages(struct zone *zone);
/**
 * struct platform_hibernation_ops - hibernation platform support
 *
 * The methods in this structure allow a platform to carry out special
 * operations required by it during a hibernation transition.
 *
 * All the methods below, except for @recover(), must be implemented.
 *
 * @begin: Tell the platform driver that we're starting hibernation.
 *	Called right after shrinking memory and before freezing devices.
 *
 * @end: Called by the PM core right after resuming devices, to indicate to
 *	the platform that the system has returned to the working state.
 *
 * @pre_snapshot: Prepare the platform for creating the hibernation image.
 *	Called right after devices have been frozen and before the nonboot
 *	CPUs are disabled (runs with IRQs on).
 *
 * @finish: Restore the previous state of the platform after the hibernation
 *	image has been created *or* put the platform into the normal operation
 *	mode after the hibernation (the same method is executed in both cases).
 *	Called right after the nonboot CPUs have been enabled and before
 *	thawing devices (runs with IRQs on).
 *
 * @prepare: Prepare the platform for entering the low power state.
 *	Called right after the hibernation image has been saved and before
 *	devices are prepared for entering the low power state.
 *
 * @enter: Put the system into the low power state after the hibernation image
 *	has been saved to disk.
 *	Called after the nonboot CPUs have been disabled and all of the low
 *	level devices have been shut down (runs with IRQs off).
 *
 * @leave: Perform the first stage of the cleanup after the system sleep state
 *	indicated by @set_target() has been left.
 *	Called right after the control has been passed from the boot kernel to
 *	the image kernel, before the nonboot CPUs are enabled and before devices
 *	are resumed.  Executed with interrupts disabled.
 *
 * @pre_restore: Prepare system for the restoration from a hibernation image.
 *	Called right after devices have been frozen and before the nonboot
 *	CPUs are disabled (runs with IRQs on).
 *
 * @restore_cleanup: Clean up after a failing image restoration.
 *	Called right after the nonboot CPUs have been enabled and before
 *	thawing devices (runs with IRQs on).
 *
 * @recover: Recover the platform from a failure to suspend devices.
 *	Called by the PM core if the suspending of devices during hibernation
 *	fails. This callback is optional and should only be implemented by
 *	platforms which require special recovery actions in that situation.
 */
struct platform_hibernation_ops {
	int (*begin)(void);
	void (*end)(void);
	int (*pre_snapshot)(void);
	void (*finish)(void);
	int (*prepare)(void);
	int (*enter)(void);
	void (*leave)(void);
	int (*pre_restore)(void);
	void (*restore_cleanup)(void);
	void (*recover)(void);
};
#ifdef CONFIG_HIBERNATION
/* kernel/power/snapshot.c */
extern void __register_nosave_region(unsigned long b, unsigned long e, int km);
static inline void __init register_nosave_region(unsigned long b, unsigned long e)
{
	__register_nosave_region(b, e, 0);
}
static inline void __init register_nosave_region_late(unsigned long b, unsigned long e)
{
	/* km == 1: region registered after memory-map init */
	__register_nosave_region(b, e, 1);
}
extern int swsusp_page_is_forbidden(struct page *);
extern void swsusp_set_page_free(struct page *);
extern void swsusp_unset_page_free(struct page *);
extern unsigned long get_safe_page(gfp_t gfp_mask);

extern void hibernation_set_ops(struct platform_hibernation_ops *ops);
extern int hibernate(void);
extern bool system_entering_hibernation(void);
#else /* CONFIG_HIBERNATION */
/* No-op stubs so callers need not be conditional on CONFIG_HIBERNATION. */
static inline int swsusp_page_is_forbidden(struct page *p) { return 0; }
static inline void swsusp_set_page_free(struct page *p) {}
static inline void swsusp_unset_page_free(struct page *p) {}

static inline void hibernation_set_ops(struct platform_hibernation_ops *ops) {}
static inline int hibernate(void) { return -ENOSYS; }
static inline bool system_entering_hibernation(void) { return false; }
#endif /* CONFIG_HIBERNATION */
#ifdef CONFIG_SUSPEND_NVS
extern int suspend_nvs_register(unsigned long start, unsigned long size);
extern int suspend_nvs_alloc(void);
extern void suspend_nvs_free(void);
extern void suspend_nvs_save(void);
extern void suspend_nvs_restore(void);
#else /* CONFIG_SUSPEND_NVS */
static inline int suspend_nvs_register(unsigned long a, unsigned long b)
{
	return 0;
}
static inline int suspend_nvs_alloc(void) { return 0; }
static inline void suspend_nvs_free(void) {}
static inline void suspend_nvs_save(void) {}
static inline void suspend_nvs_restore(void) {}
#endif /* CONFIG_SUSPEND_NVS */

#ifdef CONFIG_PM_SLEEP
void save_processor_state(void);
void restore_processor_state(void);

/* kernel/power/main.c */
extern int register_pm_notifier(struct notifier_block *nb);
extern int unregister_pm_notifier(struct notifier_block *nb);

/* Define and register a PM notifier in one statement; fn##_nb gives the
 * notifier_block a name derived from the callback. */
#define pm_notifier(fn, pri) {				\
	static struct notifier_block fn##_nb =			\
		{ .notifier_call = fn, .priority = pri };	\
	register_pm_notifier(&fn##_nb);			\
}

/* drivers/base/power/wakeup.c */
extern bool events_check_enabled;

extern bool pm_check_wakeup_events(void);
extern bool pm_get_wakeup_count(unsigned long *count);
extern bool pm_save_wakeup_count(unsigned long count);
#else /* !CONFIG_PM_SLEEP */

static inline int register_pm_notifier(struct notifier_block *nb)
{
	return 0;
}

static inline int unregister_pm_notifier(struct notifier_block *nb)
{
	return 0;
}

#define pm_notifier(fn, pri)	do { (void)(fn); } while (0)
#endif /* !CONFIG_PM_SLEEP */

extern struct mutex pm_mutex;

#ifndef CONFIG_HIBERNATION
static inline void register_nosave_region(unsigned long b, unsigned long e)
{
}
static inline void register_nosave_region_late(unsigned long b, unsigned long e)
{
}

static inline void lock_system_sleep(void) {}
static inline void unlock_system_sleep(void) {}
#else

/* Let some subsystems like memory hotadd exclude hibernation */

static inline void lock_system_sleep(void)
{
	mutex_lock(&pm_mutex);
}

static inline void unlock_system_sleep(void)
{
	mutex_unlock(&pm_mutex);
}
#endif

#endif /* _LINUX_SUSPEND_H */