Commit 31a52a9e9d71afc65afb9c54479052b1663a5532

Authored by Rafael J. Wysocki
Committed by Greg Kroah-Hartman
1 parent 9321e2ecf4

PM / sleep: Add state field to pm_states[] entries

commit 27ddcc6596e50cb8f03d2e83248897667811d8f6 upstream.

To allow sleep states corresponding to the "mem", "standby" and
"freeze" lables to be different from the pm_states[] indexes of
those strings, introduce struct pm_sleep_state, consisting of
a string label and a state number, and turn pm_states[] into an
array of objects of that type.

This modification should not lead to any functional changes.

Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Cc: Brian Norris <computersforpeace@gmail.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
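
In short, pm_states[] goes from an array of strings to an array of label/state
pairs. A minimal sketch of the new layout, mirroring the power.h and suspend.c
hunks below (it assumes only suspend_state_t and the PM_SUSPEND_* constants
from <linux/suspend.h>):

    struct pm_sleep_state {
        const char *label;      /* string shown in /sys/power/state */
        suspend_state_t state;  /* sleep state the label maps to */
    };

    struct pm_sleep_state pm_states[PM_SUSPEND_MAX] = {
        [PM_SUSPEND_FREEZE]  = { "freeze",  PM_SUSPEND_FREEZE },
        [PM_SUSPEND_STANDBY] = { "standby", PM_SUSPEND_STANDBY },
        [PM_SUSPEND_MEM]     = { "mem",     PM_SUSPEND_MEM },
    };

Callers that used to read pm_states[i] now read pm_states[i].label, and the
separate state field leaves room for the label-to-state mapping to diverge
from the array index later, as the changelog above describes.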

Showing 4 changed files with 30 additions and 27 deletions

kernel/power/main.c
1 /* 1 /*
2 * kernel/power/main.c - PM subsystem core functionality. 2 * kernel/power/main.c - PM subsystem core functionality.
3 * 3 *
4 * Copyright (c) 2003 Patrick Mochel 4 * Copyright (c) 2003 Patrick Mochel
5 * Copyright (c) 2003 Open Source Development Lab 5 * Copyright (c) 2003 Open Source Development Lab
6 * 6 *
7 * This file is released under the GPLv2 7 * This file is released under the GPLv2
8 * 8 *
9 */ 9 */
10 10
11 #include <linux/export.h> 11 #include <linux/export.h>
12 #include <linux/kobject.h> 12 #include <linux/kobject.h>
13 #include <linux/string.h> 13 #include <linux/string.h>
14 #include <linux/resume-trace.h> 14 #include <linux/resume-trace.h>
15 #include <linux/workqueue.h> 15 #include <linux/workqueue.h>
16 #include <linux/debugfs.h> 16 #include <linux/debugfs.h>
17 #include <linux/seq_file.h> 17 #include <linux/seq_file.h>
18 18
19 #include "power.h" 19 #include "power.h"
20 20
21 DEFINE_MUTEX(pm_mutex); 21 DEFINE_MUTEX(pm_mutex);
22 22
23 #ifdef CONFIG_PM_SLEEP 23 #ifdef CONFIG_PM_SLEEP
24 24
25 /* Routines for PM-transition notifications */ 25 /* Routines for PM-transition notifications */
26 26
27 static BLOCKING_NOTIFIER_HEAD(pm_chain_head); 27 static BLOCKING_NOTIFIER_HEAD(pm_chain_head);
28 28
29 int register_pm_notifier(struct notifier_block *nb) 29 int register_pm_notifier(struct notifier_block *nb)
30 { 30 {
31 return blocking_notifier_chain_register(&pm_chain_head, nb); 31 return blocking_notifier_chain_register(&pm_chain_head, nb);
32 } 32 }
33 EXPORT_SYMBOL_GPL(register_pm_notifier); 33 EXPORT_SYMBOL_GPL(register_pm_notifier);
34 34
35 int unregister_pm_notifier(struct notifier_block *nb) 35 int unregister_pm_notifier(struct notifier_block *nb)
36 { 36 {
37 return blocking_notifier_chain_unregister(&pm_chain_head, nb); 37 return blocking_notifier_chain_unregister(&pm_chain_head, nb);
38 } 38 }
39 EXPORT_SYMBOL_GPL(unregister_pm_notifier); 39 EXPORT_SYMBOL_GPL(unregister_pm_notifier);
40 40
41 int pm_notifier_call_chain(unsigned long val) 41 int pm_notifier_call_chain(unsigned long val)
42 { 42 {
43 int ret = blocking_notifier_call_chain(&pm_chain_head, val, NULL); 43 int ret = blocking_notifier_call_chain(&pm_chain_head, val, NULL);
44 44
45 return notifier_to_errno(ret); 45 return notifier_to_errno(ret);
46 } 46 }
47 47
48 /* If set, devices may be suspended and resumed asynchronously. */ 48 /* If set, devices may be suspended and resumed asynchronously. */
49 int pm_async_enabled = 1; 49 int pm_async_enabled = 1;
50 50
51 static ssize_t pm_async_show(struct kobject *kobj, struct kobj_attribute *attr, 51 static ssize_t pm_async_show(struct kobject *kobj, struct kobj_attribute *attr,
52 char *buf) 52 char *buf)
53 { 53 {
54 return sprintf(buf, "%d\n", pm_async_enabled); 54 return sprintf(buf, "%d\n", pm_async_enabled);
55 } 55 }
56 56
57 static ssize_t pm_async_store(struct kobject *kobj, struct kobj_attribute *attr, 57 static ssize_t pm_async_store(struct kobject *kobj, struct kobj_attribute *attr,
58 const char *buf, size_t n) 58 const char *buf, size_t n)
59 { 59 {
60 unsigned long val; 60 unsigned long val;
61 61
62 if (kstrtoul(buf, 10, &val)) 62 if (kstrtoul(buf, 10, &val))
63 return -EINVAL; 63 return -EINVAL;
64 64
65 if (val > 1) 65 if (val > 1)
66 return -EINVAL; 66 return -EINVAL;
67 67
68 pm_async_enabled = val; 68 pm_async_enabled = val;
69 return n; 69 return n;
70 } 70 }
71 71
72 power_attr(pm_async); 72 power_attr(pm_async);
73 73
74 #ifdef CONFIG_PM_DEBUG 74 #ifdef CONFIG_PM_DEBUG
75 int pm_test_level = TEST_NONE; 75 int pm_test_level = TEST_NONE;
76 76
77 static const char * const pm_tests[__TEST_AFTER_LAST] = { 77 static const char * const pm_tests[__TEST_AFTER_LAST] = {
78 [TEST_NONE] = "none", 78 [TEST_NONE] = "none",
79 [TEST_CORE] = "core", 79 [TEST_CORE] = "core",
80 [TEST_CPUS] = "processors", 80 [TEST_CPUS] = "processors",
81 [TEST_PLATFORM] = "platform", 81 [TEST_PLATFORM] = "platform",
82 [TEST_DEVICES] = "devices", 82 [TEST_DEVICES] = "devices",
83 [TEST_FREEZER] = "freezer", 83 [TEST_FREEZER] = "freezer",
84 }; 84 };
85 85
86 static ssize_t pm_test_show(struct kobject *kobj, struct kobj_attribute *attr, 86 static ssize_t pm_test_show(struct kobject *kobj, struct kobj_attribute *attr,
87 char *buf) 87 char *buf)
88 { 88 {
89 char *s = buf; 89 char *s = buf;
90 int level; 90 int level;
91 91
92 for (level = TEST_FIRST; level <= TEST_MAX; level++) 92 for (level = TEST_FIRST; level <= TEST_MAX; level++)
93 if (pm_tests[level]) { 93 if (pm_tests[level]) {
94 if (level == pm_test_level) 94 if (level == pm_test_level)
95 s += sprintf(s, "[%s] ", pm_tests[level]); 95 s += sprintf(s, "[%s] ", pm_tests[level]);
96 else 96 else
97 s += sprintf(s, "%s ", pm_tests[level]); 97 s += sprintf(s, "%s ", pm_tests[level]);
98 } 98 }
99 99
100 if (s != buf) 100 if (s != buf)
101 /* convert the last space to a newline */ 101 /* convert the last space to a newline */
102 *(s-1) = '\n'; 102 *(s-1) = '\n';
103 103
104 return (s - buf); 104 return (s - buf);
105 } 105 }
106 106
107 static ssize_t pm_test_store(struct kobject *kobj, struct kobj_attribute *attr, 107 static ssize_t pm_test_store(struct kobject *kobj, struct kobj_attribute *attr,
108 const char *buf, size_t n) 108 const char *buf, size_t n)
109 { 109 {
110 const char * const *s; 110 const char * const *s;
111 int level; 111 int level;
112 char *p; 112 char *p;
113 int len; 113 int len;
114 int error = -EINVAL; 114 int error = -EINVAL;
115 115
116 p = memchr(buf, '\n', n); 116 p = memchr(buf, '\n', n);
117 len = p ? p - buf : n; 117 len = p ? p - buf : n;
118 118
119 lock_system_sleep(); 119 lock_system_sleep();
120 120
121 level = TEST_FIRST; 121 level = TEST_FIRST;
122 for (s = &pm_tests[level]; level <= TEST_MAX; s++, level++) 122 for (s = &pm_tests[level]; level <= TEST_MAX; s++, level++)
123 if (*s && len == strlen(*s) && !strncmp(buf, *s, len)) { 123 if (*s && len == strlen(*s) && !strncmp(buf, *s, len)) {
124 pm_test_level = level; 124 pm_test_level = level;
125 error = 0; 125 error = 0;
126 break; 126 break;
127 } 127 }
128 128
129 unlock_system_sleep(); 129 unlock_system_sleep();
130 130
131 return error ? error : n; 131 return error ? error : n;
132 } 132 }
133 133
134 power_attr(pm_test); 134 power_attr(pm_test);
135 #endif /* CONFIG_PM_DEBUG */ 135 #endif /* CONFIG_PM_DEBUG */
136 136
137 #ifdef CONFIG_DEBUG_FS 137 #ifdef CONFIG_DEBUG_FS
138 static char *suspend_step_name(enum suspend_stat_step step) 138 static char *suspend_step_name(enum suspend_stat_step step)
139 { 139 {
140 switch (step) { 140 switch (step) {
141 case SUSPEND_FREEZE: 141 case SUSPEND_FREEZE:
142 return "freeze"; 142 return "freeze";
143 case SUSPEND_PREPARE: 143 case SUSPEND_PREPARE:
144 return "prepare"; 144 return "prepare";
145 case SUSPEND_SUSPEND: 145 case SUSPEND_SUSPEND:
146 return "suspend"; 146 return "suspend";
147 case SUSPEND_SUSPEND_NOIRQ: 147 case SUSPEND_SUSPEND_NOIRQ:
148 return "suspend_noirq"; 148 return "suspend_noirq";
149 case SUSPEND_RESUME_NOIRQ: 149 case SUSPEND_RESUME_NOIRQ:
150 return "resume_noirq"; 150 return "resume_noirq";
151 case SUSPEND_RESUME: 151 case SUSPEND_RESUME:
152 return "resume"; 152 return "resume";
153 default: 153 default:
154 return ""; 154 return "";
155 } 155 }
156 } 156 }
157 157
158 static int suspend_stats_show(struct seq_file *s, void *unused) 158 static int suspend_stats_show(struct seq_file *s, void *unused)
159 { 159 {
160 int i, index, last_dev, last_errno, last_step; 160 int i, index, last_dev, last_errno, last_step;
161 161
162 last_dev = suspend_stats.last_failed_dev + REC_FAILED_NUM - 1; 162 last_dev = suspend_stats.last_failed_dev + REC_FAILED_NUM - 1;
163 last_dev %= REC_FAILED_NUM; 163 last_dev %= REC_FAILED_NUM;
164 last_errno = suspend_stats.last_failed_errno + REC_FAILED_NUM - 1; 164 last_errno = suspend_stats.last_failed_errno + REC_FAILED_NUM - 1;
165 last_errno %= REC_FAILED_NUM; 165 last_errno %= REC_FAILED_NUM;
166 last_step = suspend_stats.last_failed_step + REC_FAILED_NUM - 1; 166 last_step = suspend_stats.last_failed_step + REC_FAILED_NUM - 1;
167 last_step %= REC_FAILED_NUM; 167 last_step %= REC_FAILED_NUM;
168 seq_printf(s, "%s: %d\n%s: %d\n%s: %d\n%s: %d\n%s: %d\n" 168 seq_printf(s, "%s: %d\n%s: %d\n%s: %d\n%s: %d\n%s: %d\n"
169 "%s: %d\n%s: %d\n%s: %d\n%s: %d\n%s: %d\n", 169 "%s: %d\n%s: %d\n%s: %d\n%s: %d\n%s: %d\n",
170 "success", suspend_stats.success, 170 "success", suspend_stats.success,
171 "fail", suspend_stats.fail, 171 "fail", suspend_stats.fail,
172 "failed_freeze", suspend_stats.failed_freeze, 172 "failed_freeze", suspend_stats.failed_freeze,
173 "failed_prepare", suspend_stats.failed_prepare, 173 "failed_prepare", suspend_stats.failed_prepare,
174 "failed_suspend", suspend_stats.failed_suspend, 174 "failed_suspend", suspend_stats.failed_suspend,
175 "failed_suspend_late", 175 "failed_suspend_late",
176 suspend_stats.failed_suspend_late, 176 suspend_stats.failed_suspend_late,
177 "failed_suspend_noirq", 177 "failed_suspend_noirq",
178 suspend_stats.failed_suspend_noirq, 178 suspend_stats.failed_suspend_noirq,
179 "failed_resume", suspend_stats.failed_resume, 179 "failed_resume", suspend_stats.failed_resume,
180 "failed_resume_early", 180 "failed_resume_early",
181 suspend_stats.failed_resume_early, 181 suspend_stats.failed_resume_early,
182 "failed_resume_noirq", 182 "failed_resume_noirq",
183 suspend_stats.failed_resume_noirq); 183 suspend_stats.failed_resume_noirq);
184 seq_printf(s, "failures:\n last_failed_dev:\t%-s\n", 184 seq_printf(s, "failures:\n last_failed_dev:\t%-s\n",
185 suspend_stats.failed_devs[last_dev]); 185 suspend_stats.failed_devs[last_dev]);
186 for (i = 1; i < REC_FAILED_NUM; i++) { 186 for (i = 1; i < REC_FAILED_NUM; i++) {
187 index = last_dev + REC_FAILED_NUM - i; 187 index = last_dev + REC_FAILED_NUM - i;
188 index %= REC_FAILED_NUM; 188 index %= REC_FAILED_NUM;
189 seq_printf(s, "\t\t\t%-s\n", 189 seq_printf(s, "\t\t\t%-s\n",
190 suspend_stats.failed_devs[index]); 190 suspend_stats.failed_devs[index]);
191 } 191 }
192 seq_printf(s, " last_failed_errno:\t%-d\n", 192 seq_printf(s, " last_failed_errno:\t%-d\n",
193 suspend_stats.errno[last_errno]); 193 suspend_stats.errno[last_errno]);
194 for (i = 1; i < REC_FAILED_NUM; i++) { 194 for (i = 1; i < REC_FAILED_NUM; i++) {
195 index = last_errno + REC_FAILED_NUM - i; 195 index = last_errno + REC_FAILED_NUM - i;
196 index %= REC_FAILED_NUM; 196 index %= REC_FAILED_NUM;
197 seq_printf(s, "\t\t\t%-d\n", 197 seq_printf(s, "\t\t\t%-d\n",
198 suspend_stats.errno[index]); 198 suspend_stats.errno[index]);
199 } 199 }
200 seq_printf(s, " last_failed_step:\t%-s\n", 200 seq_printf(s, " last_failed_step:\t%-s\n",
201 suspend_step_name( 201 suspend_step_name(
202 suspend_stats.failed_steps[last_step])); 202 suspend_stats.failed_steps[last_step]));
203 for (i = 1; i < REC_FAILED_NUM; i++) { 203 for (i = 1; i < REC_FAILED_NUM; i++) {
204 index = last_step + REC_FAILED_NUM - i; 204 index = last_step + REC_FAILED_NUM - i;
205 index %= REC_FAILED_NUM; 205 index %= REC_FAILED_NUM;
206 seq_printf(s, "\t\t\t%-s\n", 206 seq_printf(s, "\t\t\t%-s\n",
207 suspend_step_name( 207 suspend_step_name(
208 suspend_stats.failed_steps[index])); 208 suspend_stats.failed_steps[index]));
209 } 209 }
210 210
211 return 0; 211 return 0;
212 } 212 }
213 213
214 static int suspend_stats_open(struct inode *inode, struct file *file) 214 static int suspend_stats_open(struct inode *inode, struct file *file)
215 { 215 {
216 return single_open(file, suspend_stats_show, NULL); 216 return single_open(file, suspend_stats_show, NULL);
217 } 217 }
218 218
219 static const struct file_operations suspend_stats_operations = { 219 static const struct file_operations suspend_stats_operations = {
220 .open = suspend_stats_open, 220 .open = suspend_stats_open,
221 .read = seq_read, 221 .read = seq_read,
222 .llseek = seq_lseek, 222 .llseek = seq_lseek,
223 .release = single_release, 223 .release = single_release,
224 }; 224 };
225 225
226 static int __init pm_debugfs_init(void) 226 static int __init pm_debugfs_init(void)
227 { 227 {
228 debugfs_create_file("suspend_stats", S_IFREG | S_IRUGO, 228 debugfs_create_file("suspend_stats", S_IFREG | S_IRUGO,
229 NULL, NULL, &suspend_stats_operations); 229 NULL, NULL, &suspend_stats_operations);
230 return 0; 230 return 0;
231 } 231 }
232 232
233 late_initcall(pm_debugfs_init); 233 late_initcall(pm_debugfs_init);
234 #endif /* CONFIG_DEBUG_FS */ 234 #endif /* CONFIG_DEBUG_FS */
235 235
236 #endif /* CONFIG_PM_SLEEP */ 236 #endif /* CONFIG_PM_SLEEP */
237 237
238 #ifdef CONFIG_PM_SLEEP_DEBUG 238 #ifdef CONFIG_PM_SLEEP_DEBUG
239 /* 239 /*
240 * pm_print_times: print time taken by devices to suspend and resume. 240 * pm_print_times: print time taken by devices to suspend and resume.
241 * 241 *
242 * show() returns whether printing of suspend and resume times is enabled. 242 * show() returns whether printing of suspend and resume times is enabled.
243 * store() accepts 0 or 1. 0 disables printing and 1 enables it. 243 * store() accepts 0 or 1. 0 disables printing and 1 enables it.
244 */ 244 */
245 bool pm_print_times_enabled; 245 bool pm_print_times_enabled;
246 246
247 static ssize_t pm_print_times_show(struct kobject *kobj, 247 static ssize_t pm_print_times_show(struct kobject *kobj,
248 struct kobj_attribute *attr, char *buf) 248 struct kobj_attribute *attr, char *buf)
249 { 249 {
250 return sprintf(buf, "%d\n", pm_print_times_enabled); 250 return sprintf(buf, "%d\n", pm_print_times_enabled);
251 } 251 }
252 252
253 static ssize_t pm_print_times_store(struct kobject *kobj, 253 static ssize_t pm_print_times_store(struct kobject *kobj,
254 struct kobj_attribute *attr, 254 struct kobj_attribute *attr,
255 const char *buf, size_t n) 255 const char *buf, size_t n)
256 { 256 {
257 unsigned long val; 257 unsigned long val;
258 258
259 if (kstrtoul(buf, 10, &val)) 259 if (kstrtoul(buf, 10, &val))
260 return -EINVAL; 260 return -EINVAL;
261 261
262 if (val > 1) 262 if (val > 1)
263 return -EINVAL; 263 return -EINVAL;
264 264
265 pm_print_times_enabled = !!val; 265 pm_print_times_enabled = !!val;
266 return n; 266 return n;
267 } 267 }
268 268
269 power_attr(pm_print_times); 269 power_attr(pm_print_times);
270 270
271 static inline void pm_print_times_init(void) 271 static inline void pm_print_times_init(void)
272 { 272 {
273 pm_print_times_enabled = !!initcall_debug; 273 pm_print_times_enabled = !!initcall_debug;
274 } 274 }
275 #else /* !CONFIG_PP_SLEEP_DEBUG */ 275 #else /* !CONFIG_PP_SLEEP_DEBUG */
276 static inline void pm_print_times_init(void) {} 276 static inline void pm_print_times_init(void) {}
277 #endif /* CONFIG_PM_SLEEP_DEBUG */ 277 #endif /* CONFIG_PM_SLEEP_DEBUG */
278 278
279 struct kobject *power_kobj; 279 struct kobject *power_kobj;
280 280
281 /** 281 /**
282 * state - control system power state. 282 * state - control system power state.
283 * 283 *
284 * show() returns what states are supported, which is hard-coded to 284 * show() returns what states are supported, which is hard-coded to
285 * 'standby' (Power-On Suspend), 'mem' (Suspend-to-RAM), and 285 * 'standby' (Power-On Suspend), 'mem' (Suspend-to-RAM), and
286 * 'disk' (Suspend-to-Disk). 286 * 'disk' (Suspend-to-Disk).
287 * 287 *
288 * store() accepts one of those strings, translates it into the 288 * store() accepts one of those strings, translates it into the
289 * proper enumerated value, and initiates a suspend transition. 289 * proper enumerated value, and initiates a suspend transition.
290 */ 290 */
291 static ssize_t state_show(struct kobject *kobj, struct kobj_attribute *attr, 291 static ssize_t state_show(struct kobject *kobj, struct kobj_attribute *attr,
292 char *buf) 292 char *buf)
293 { 293 {
294 char *s = buf; 294 char *s = buf;
295 #ifdef CONFIG_SUSPEND 295 #ifdef CONFIG_SUSPEND
296 int i; 296 suspend_state_t i;
297 297
298 for (i = 0; i < PM_SUSPEND_MAX; i++) { 298 for (i = PM_SUSPEND_MIN; i < PM_SUSPEND_MAX; i++)
299 if (pm_states[i] && valid_state(i)) 299 if (valid_state(i))
300 s += sprintf(s,"%s ", pm_states[i]); 300 s += sprintf(s,"%s ", pm_states[i].label);
301 } 301
302 #endif 302 #endif
303 #ifdef CONFIG_HIBERNATION 303 #ifdef CONFIG_HIBERNATION
304 s += sprintf(s, "%s\n", "disk"); 304 s += sprintf(s, "%s\n", "disk");
305 #else 305 #else
306 if (s != buf) 306 if (s != buf)
307 /* convert the last space to a newline */ 307 /* convert the last space to a newline */
308 *(s-1) = '\n'; 308 *(s-1) = '\n';
309 #endif 309 #endif
310 return (s - buf); 310 return (s - buf);
311 } 311 }
312 312
313 static suspend_state_t decode_state(const char *buf, size_t n) 313 static suspend_state_t decode_state(const char *buf, size_t n)
314 { 314 {
315 #ifdef CONFIG_SUSPEND 315 #ifdef CONFIG_SUSPEND
316 suspend_state_t state = PM_SUSPEND_MIN; 316 suspend_state_t state = PM_SUSPEND_MIN;
317 const char * const *s; 317 struct pm_sleep_state *s;
318 #endif 318 #endif
319 char *p; 319 char *p;
320 int len; 320 int len;
321 321
322 p = memchr(buf, '\n', n); 322 p = memchr(buf, '\n', n);
323 len = p ? p - buf : n; 323 len = p ? p - buf : n;
324 324
325 /* Check hibernation first. */ 325 /* Check hibernation first. */
326 if (len == 4 && !strncmp(buf, "disk", len)) 326 if (len == 4 && !strncmp(buf, "disk", len))
327 return PM_SUSPEND_MAX; 327 return PM_SUSPEND_MAX;
328 328
329 #ifdef CONFIG_SUSPEND 329 #ifdef CONFIG_SUSPEND
330 for (s = &pm_states[state]; state < PM_SUSPEND_MAX; s++, state++) 330 for (s = &pm_states[state]; state < PM_SUSPEND_MAX; s++, state++)
331 if (*s && len == strlen(*s) && !strncmp(buf, *s, len)) 331 if (len == strlen(s->label) && !strncmp(buf, s->label, len))
332 return state; 332 return state;
333 #endif 333 #endif
334 334
335 return PM_SUSPEND_ON; 335 return PM_SUSPEND_ON;
336 } 336 }
337 337
338 static ssize_t state_store(struct kobject *kobj, struct kobj_attribute *attr, 338 static ssize_t state_store(struct kobject *kobj, struct kobj_attribute *attr,
339 const char *buf, size_t n) 339 const char *buf, size_t n)
340 { 340 {
341 suspend_state_t state; 341 suspend_state_t state;
342 int error; 342 int error;
343 343
344 error = pm_autosleep_lock(); 344 error = pm_autosleep_lock();
345 if (error) 345 if (error)
346 return error; 346 return error;
347 347
348 if (pm_autosleep_state() > PM_SUSPEND_ON) { 348 if (pm_autosleep_state() > PM_SUSPEND_ON) {
349 error = -EBUSY; 349 error = -EBUSY;
350 goto out; 350 goto out;
351 } 351 }
352 352
353 state = decode_state(buf, n); 353 state = decode_state(buf, n);
354 if (state < PM_SUSPEND_MAX) 354 if (state < PM_SUSPEND_MAX)
355 error = pm_suspend(state); 355 error = pm_suspend(state);
356 else if (state == PM_SUSPEND_MAX) 356 else if (state == PM_SUSPEND_MAX)
357 error = hibernate(); 357 error = hibernate();
358 else 358 else
359 error = -EINVAL; 359 error = -EINVAL;
360 360
361 out: 361 out:
362 pm_autosleep_unlock(); 362 pm_autosleep_unlock();
363 return error ? error : n; 363 return error ? error : n;
364 } 364 }
365 365
366 power_attr(state); 366 power_attr(state);
367 367
368 #ifdef CONFIG_PM_SLEEP 368 #ifdef CONFIG_PM_SLEEP
369 /* 369 /*
370 * The 'wakeup_count' attribute, along with the functions defined in 370 * The 'wakeup_count' attribute, along with the functions defined in
371 * drivers/base/power/wakeup.c, provides a means by which wakeup events can be 371 * drivers/base/power/wakeup.c, provides a means by which wakeup events can be
372 * handled in a non-racy way. 372 * handled in a non-racy way.
373 * 373 *
374 * If a wakeup event occurs when the system is in a sleep state, it simply is 374 * If a wakeup event occurs when the system is in a sleep state, it simply is
375 * woken up. In turn, if an event that would wake the system up from a sleep 375 * woken up. In turn, if an event that would wake the system up from a sleep
376 * state occurs when it is undergoing a transition to that sleep state, the 376 * state occurs when it is undergoing a transition to that sleep state, the
377 * transition should be aborted. Moreover, if such an event occurs when the 377 * transition should be aborted. Moreover, if such an event occurs when the
378 * system is in the working state, an attempt to start a transition to the 378 * system is in the working state, an attempt to start a transition to the
379 * given sleep state should fail during certain period after the detection of 379 * given sleep state should fail during certain period after the detection of
380 * the event. Using the 'state' attribute alone is not sufficient to satisfy 380 * the event. Using the 'state' attribute alone is not sufficient to satisfy
381 * these requirements, because a wakeup event may occur exactly when 'state' 381 * these requirements, because a wakeup event may occur exactly when 'state'
382 * is being written to and may be delivered to user space right before it is 382 * is being written to and may be delivered to user space right before it is
383 * frozen, so the event will remain only partially processed until the system is 383 * frozen, so the event will remain only partially processed until the system is
384 * woken up by another event. In particular, it won't cause the transition to 384 * woken up by another event. In particular, it won't cause the transition to
385 * a sleep state to be aborted. 385 * a sleep state to be aborted.
386 * 386 *
387 * This difficulty may be overcome if user space uses 'wakeup_count' before 387 * This difficulty may be overcome if user space uses 'wakeup_count' before
388 * writing to 'state'. It first should read from 'wakeup_count' and store 388 * writing to 'state'. It first should read from 'wakeup_count' and store
389 * the read value. Then, after carrying out its own preparations for the system 389 * the read value. Then, after carrying out its own preparations for the system
390 * transition to a sleep state, it should write the stored value to 390 * transition to a sleep state, it should write the stored value to
391 * 'wakeup_count'. If that fails, at least one wakeup event has occurred since 391 * 'wakeup_count'. If that fails, at least one wakeup event has occurred since
392 * 'wakeup_count' was read and 'state' should not be written to. Otherwise, it 392 * 'wakeup_count' was read and 'state' should not be written to. Otherwise, it
393 * is allowed to write to 'state', but the transition will be aborted if there 393 * is allowed to write to 'state', but the transition will be aborted if there
394 * are any wakeup events detected after 'wakeup_count' was written to. 394 * are any wakeup events detected after 'wakeup_count' was written to.
395 */ 395 */
396 396
397 static ssize_t wakeup_count_show(struct kobject *kobj, 397 static ssize_t wakeup_count_show(struct kobject *kobj,
398 struct kobj_attribute *attr, 398 struct kobj_attribute *attr,
399 char *buf) 399 char *buf)
400 { 400 {
401 unsigned int val; 401 unsigned int val;
402 402
403 return pm_get_wakeup_count(&val, true) ? 403 return pm_get_wakeup_count(&val, true) ?
404 sprintf(buf, "%u\n", val) : -EINTR; 404 sprintf(buf, "%u\n", val) : -EINTR;
405 } 405 }
406 406
407 static ssize_t wakeup_count_store(struct kobject *kobj, 407 static ssize_t wakeup_count_store(struct kobject *kobj,
408 struct kobj_attribute *attr, 408 struct kobj_attribute *attr,
409 const char *buf, size_t n) 409 const char *buf, size_t n)
410 { 410 {
411 unsigned int val; 411 unsigned int val;
412 int error; 412 int error;
413 413
414 error = pm_autosleep_lock(); 414 error = pm_autosleep_lock();
415 if (error) 415 if (error)
416 return error; 416 return error;
417 417
418 if (pm_autosleep_state() > PM_SUSPEND_ON) { 418 if (pm_autosleep_state() > PM_SUSPEND_ON) {
419 error = -EBUSY; 419 error = -EBUSY;
420 goto out; 420 goto out;
421 } 421 }
422 422
423 error = -EINVAL; 423 error = -EINVAL;
424 if (sscanf(buf, "%u", &val) == 1) { 424 if (sscanf(buf, "%u", &val) == 1) {
425 if (pm_save_wakeup_count(val)) 425 if (pm_save_wakeup_count(val))
426 error = n; 426 error = n;
427 else 427 else
428 pm_print_active_wakeup_sources(); 428 pm_print_active_wakeup_sources();
429 } 429 }
430 430
431 out: 431 out:
432 pm_autosleep_unlock(); 432 pm_autosleep_unlock();
433 return error; 433 return error;
434 } 434 }
435 435
436 power_attr(wakeup_count); 436 power_attr(wakeup_count);
437 437
438 #ifdef CONFIG_PM_AUTOSLEEP 438 #ifdef CONFIG_PM_AUTOSLEEP
439 static ssize_t autosleep_show(struct kobject *kobj, 439 static ssize_t autosleep_show(struct kobject *kobj,
440 struct kobj_attribute *attr, 440 struct kobj_attribute *attr,
441 char *buf) 441 char *buf)
442 { 442 {
443 suspend_state_t state = pm_autosleep_state(); 443 suspend_state_t state = pm_autosleep_state();
444 444
445 if (state == PM_SUSPEND_ON) 445 if (state == PM_SUSPEND_ON)
446 return sprintf(buf, "off\n"); 446 return sprintf(buf, "off\n");
447 447
448 #ifdef CONFIG_SUSPEND 448 #ifdef CONFIG_SUSPEND
449 if (state < PM_SUSPEND_MAX) 449 if (state < PM_SUSPEND_MAX)
450 return sprintf(buf, "%s\n", valid_state(state) ? 450 return sprintf(buf, "%s\n", valid_state(state) ?
451 pm_states[state] : "error"); 451 pm_states[state].label : "error");
452 #endif 452 #endif
453 #ifdef CONFIG_HIBERNATION 453 #ifdef CONFIG_HIBERNATION
454 return sprintf(buf, "disk\n"); 454 return sprintf(buf, "disk\n");
455 #else 455 #else
456 return sprintf(buf, "error"); 456 return sprintf(buf, "error");
457 #endif 457 #endif
458 } 458 }
459 459
460 static ssize_t autosleep_store(struct kobject *kobj, 460 static ssize_t autosleep_store(struct kobject *kobj,
461 struct kobj_attribute *attr, 461 struct kobj_attribute *attr,
462 const char *buf, size_t n) 462 const char *buf, size_t n)
463 { 463 {
464 suspend_state_t state = decode_state(buf, n); 464 suspend_state_t state = decode_state(buf, n);
465 int error; 465 int error;
466 466
467 if (state == PM_SUSPEND_ON 467 if (state == PM_SUSPEND_ON
468 && strcmp(buf, "off") && strcmp(buf, "off\n")) 468 && strcmp(buf, "off") && strcmp(buf, "off\n"))
469 return -EINVAL; 469 return -EINVAL;
470 470
471 error = pm_autosleep_set_state(state); 471 error = pm_autosleep_set_state(state);
472 return error ? error : n; 472 return error ? error : n;
473 } 473 }
474 474
475 power_attr(autosleep); 475 power_attr(autosleep);
476 #endif /* CONFIG_PM_AUTOSLEEP */ 476 #endif /* CONFIG_PM_AUTOSLEEP */
477 477
478 #ifdef CONFIG_PM_WAKELOCKS 478 #ifdef CONFIG_PM_WAKELOCKS
479 static ssize_t wake_lock_show(struct kobject *kobj, 479 static ssize_t wake_lock_show(struct kobject *kobj,
480 struct kobj_attribute *attr, 480 struct kobj_attribute *attr,
481 char *buf) 481 char *buf)
482 { 482 {
483 return pm_show_wakelocks(buf, true); 483 return pm_show_wakelocks(buf, true);
484 } 484 }
485 485
486 static ssize_t wake_lock_store(struct kobject *kobj, 486 static ssize_t wake_lock_store(struct kobject *kobj,
487 struct kobj_attribute *attr, 487 struct kobj_attribute *attr,
488 const char *buf, size_t n) 488 const char *buf, size_t n)
489 { 489 {
490 int error = pm_wake_lock(buf); 490 int error = pm_wake_lock(buf);
491 return error ? error : n; 491 return error ? error : n;
492 } 492 }
493 493
494 power_attr(wake_lock); 494 power_attr(wake_lock);
495 495
496 static ssize_t wake_unlock_show(struct kobject *kobj, 496 static ssize_t wake_unlock_show(struct kobject *kobj,
497 struct kobj_attribute *attr, 497 struct kobj_attribute *attr,
498 char *buf) 498 char *buf)
499 { 499 {
500 return pm_show_wakelocks(buf, false); 500 return pm_show_wakelocks(buf, false);
501 } 501 }
502 502
503 static ssize_t wake_unlock_store(struct kobject *kobj, 503 static ssize_t wake_unlock_store(struct kobject *kobj,
504 struct kobj_attribute *attr, 504 struct kobj_attribute *attr,
505 const char *buf, size_t n) 505 const char *buf, size_t n)
506 { 506 {
507 int error = pm_wake_unlock(buf); 507 int error = pm_wake_unlock(buf);
508 return error ? error : n; 508 return error ? error : n;
509 } 509 }
510 510
511 power_attr(wake_unlock); 511 power_attr(wake_unlock);
512 512
513 #endif /* CONFIG_PM_WAKELOCKS */ 513 #endif /* CONFIG_PM_WAKELOCKS */
514 #endif /* CONFIG_PM_SLEEP */ 514 #endif /* CONFIG_PM_SLEEP */
515 515
516 #ifdef CONFIG_PM_TRACE 516 #ifdef CONFIG_PM_TRACE
517 int pm_trace_enabled; 517 int pm_trace_enabled;
518 518
519 static ssize_t pm_trace_show(struct kobject *kobj, struct kobj_attribute *attr, 519 static ssize_t pm_trace_show(struct kobject *kobj, struct kobj_attribute *attr,
520 char *buf) 520 char *buf)
521 { 521 {
522 return sprintf(buf, "%d\n", pm_trace_enabled); 522 return sprintf(buf, "%d\n", pm_trace_enabled);
523 } 523 }
524 524
525 static ssize_t 525 static ssize_t
526 pm_trace_store(struct kobject *kobj, struct kobj_attribute *attr, 526 pm_trace_store(struct kobject *kobj, struct kobj_attribute *attr,
527 const char *buf, size_t n) 527 const char *buf, size_t n)
528 { 528 {
529 int val; 529 int val;
530 530
531 if (sscanf(buf, "%d", &val) == 1) { 531 if (sscanf(buf, "%d", &val) == 1) {
532 pm_trace_enabled = !!val; 532 pm_trace_enabled = !!val;
533 if (pm_trace_enabled) { 533 if (pm_trace_enabled) {
534 pr_warn("PM: Enabling pm_trace changes system date and time during resume.\n" 534 pr_warn("PM: Enabling pm_trace changes system date and time during resume.\n"
535 "PM: Correct system time has to be restored manually after resume.\n"); 535 "PM: Correct system time has to be restored manually after resume.\n");
536 } 536 }
537 return n; 537 return n;
538 } 538 }
539 return -EINVAL; 539 return -EINVAL;
540 } 540 }
541 541
542 power_attr(pm_trace); 542 power_attr(pm_trace);
543 543
544 static ssize_t pm_trace_dev_match_show(struct kobject *kobj, 544 static ssize_t pm_trace_dev_match_show(struct kobject *kobj,
545 struct kobj_attribute *attr, 545 struct kobj_attribute *attr,
546 char *buf) 546 char *buf)
547 { 547 {
548 return show_trace_dev_match(buf, PAGE_SIZE); 548 return show_trace_dev_match(buf, PAGE_SIZE);
549 } 549 }
550 550
551 static ssize_t 551 static ssize_t
552 pm_trace_dev_match_store(struct kobject *kobj, struct kobj_attribute *attr, 552 pm_trace_dev_match_store(struct kobject *kobj, struct kobj_attribute *attr,
553 const char *buf, size_t n) 553 const char *buf, size_t n)
554 { 554 {
555 return -EINVAL; 555 return -EINVAL;
556 } 556 }
557 557
558 power_attr(pm_trace_dev_match); 558 power_attr(pm_trace_dev_match);
559 559
560 #endif /* CONFIG_PM_TRACE */ 560 #endif /* CONFIG_PM_TRACE */
561 561
562 #ifdef CONFIG_FREEZER 562 #ifdef CONFIG_FREEZER
563 static ssize_t pm_freeze_timeout_show(struct kobject *kobj, 563 static ssize_t pm_freeze_timeout_show(struct kobject *kobj,
564 struct kobj_attribute *attr, char *buf) 564 struct kobj_attribute *attr, char *buf)
565 { 565 {
566 return sprintf(buf, "%u\n", freeze_timeout_msecs); 566 return sprintf(buf, "%u\n", freeze_timeout_msecs);
567 } 567 }
568 568
569 static ssize_t pm_freeze_timeout_store(struct kobject *kobj, 569 static ssize_t pm_freeze_timeout_store(struct kobject *kobj,
570 struct kobj_attribute *attr, 570 struct kobj_attribute *attr,
571 const char *buf, size_t n) 571 const char *buf, size_t n)
572 { 572 {
573 unsigned long val; 573 unsigned long val;
574 574
575 if (kstrtoul(buf, 10, &val)) 575 if (kstrtoul(buf, 10, &val))
576 return -EINVAL; 576 return -EINVAL;
577 577
578 freeze_timeout_msecs = val; 578 freeze_timeout_msecs = val;
579 return n; 579 return n;
580 } 580 }
581 581
582 power_attr(pm_freeze_timeout); 582 power_attr(pm_freeze_timeout);
583 583
584 #endif /* CONFIG_FREEZER*/ 584 #endif /* CONFIG_FREEZER*/
585 585
586 static struct attribute * g[] = { 586 static struct attribute * g[] = {
587 &state_attr.attr, 587 &state_attr.attr,
588 #ifdef CONFIG_PM_TRACE 588 #ifdef CONFIG_PM_TRACE
589 &pm_trace_attr.attr, 589 &pm_trace_attr.attr,
590 &pm_trace_dev_match_attr.attr, 590 &pm_trace_dev_match_attr.attr,
591 #endif 591 #endif
592 #ifdef CONFIG_PM_SLEEP 592 #ifdef CONFIG_PM_SLEEP
593 &pm_async_attr.attr, 593 &pm_async_attr.attr,
594 &wakeup_count_attr.attr, 594 &wakeup_count_attr.attr,
595 #ifdef CONFIG_PM_AUTOSLEEP 595 #ifdef CONFIG_PM_AUTOSLEEP
596 &autosleep_attr.attr, 596 &autosleep_attr.attr,
597 #endif 597 #endif
598 #ifdef CONFIG_PM_WAKELOCKS 598 #ifdef CONFIG_PM_WAKELOCKS
599 &wake_lock_attr.attr, 599 &wake_lock_attr.attr,
600 &wake_unlock_attr.attr, 600 &wake_unlock_attr.attr,
601 #endif 601 #endif
602 #ifdef CONFIG_PM_DEBUG 602 #ifdef CONFIG_PM_DEBUG
603 &pm_test_attr.attr, 603 &pm_test_attr.attr,
604 #endif 604 #endif
605 #ifdef CONFIG_PM_SLEEP_DEBUG 605 #ifdef CONFIG_PM_SLEEP_DEBUG
606 &pm_print_times_attr.attr, 606 &pm_print_times_attr.attr,
607 #endif 607 #endif
608 #endif 608 #endif
609 #ifdef CONFIG_FREEZER 609 #ifdef CONFIG_FREEZER
610 &pm_freeze_timeout_attr.attr, 610 &pm_freeze_timeout_attr.attr,
611 #endif 611 #endif
612 NULL, 612 NULL,
613 }; 613 };
614 614
615 static struct attribute_group attr_group = { 615 static struct attribute_group attr_group = {
616 .attrs = g, 616 .attrs = g,
617 }; 617 };
618 618
619 #ifdef CONFIG_PM_RUNTIME 619 #ifdef CONFIG_PM_RUNTIME
620 struct workqueue_struct *pm_wq; 620 struct workqueue_struct *pm_wq;
621 EXPORT_SYMBOL_GPL(pm_wq); 621 EXPORT_SYMBOL_GPL(pm_wq);
622 622
623 static int __init pm_start_workqueue(void) 623 static int __init pm_start_workqueue(void)
624 { 624 {
625 pm_wq = alloc_workqueue("pm", WQ_FREEZABLE, 0); 625 pm_wq = alloc_workqueue("pm", WQ_FREEZABLE, 0);
626 626
627 return pm_wq ? 0 : -ENOMEM; 627 return pm_wq ? 0 : -ENOMEM;
628 } 628 }
629 #else 629 #else
630 static inline int pm_start_workqueue(void) { return 0; } 630 static inline int pm_start_workqueue(void) { return 0; }
631 #endif 631 #endif
632 632
633 static int __init pm_init(void) 633 static int __init pm_init(void)
634 { 634 {
635 int error = pm_start_workqueue(); 635 int error = pm_start_workqueue();
636 if (error) 636 if (error)
637 return error; 637 return error;
638 hibernate_image_size_init(); 638 hibernate_image_size_init();
639 hibernate_reserved_size_init(); 639 hibernate_reserved_size_init();
640 power_kobj = kobject_create_and_add("power", NULL); 640 power_kobj = kobject_create_and_add("power", NULL);
641 if (!power_kobj) 641 if (!power_kobj)
642 return -ENOMEM; 642 return -ENOMEM;
643 error = sysfs_create_group(power_kobj, &attr_group); 643 error = sysfs_create_group(power_kobj, &attr_group);
644 if (error) 644 if (error)
645 return error; 645 return error;
646 pm_print_times_init(); 646 pm_print_times_init();
647 return pm_autosleep_init(); 647 return pm_autosleep_init();
648 } 648 }
649 649
650 core_initcall(pm_init); 650 core_initcall(pm_init);
651 651
kernel/power/power.h
1 #include <linux/suspend.h> 1 #include <linux/suspend.h>
2 #include <linux/suspend_ioctls.h> 2 #include <linux/suspend_ioctls.h>
3 #include <linux/utsname.h> 3 #include <linux/utsname.h>
4 #include <linux/freezer.h> 4 #include <linux/freezer.h>
5 5
6 struct swsusp_info { 6 struct swsusp_info {
7 struct new_utsname uts; 7 struct new_utsname uts;
8 u32 version_code; 8 u32 version_code;
9 unsigned long num_physpages; 9 unsigned long num_physpages;
10 int cpus; 10 int cpus;
11 unsigned long image_pages; 11 unsigned long image_pages;
12 unsigned long pages; 12 unsigned long pages;
13 unsigned long size; 13 unsigned long size;
14 } __attribute__((aligned(PAGE_SIZE))); 14 } __attribute__((aligned(PAGE_SIZE)));
15 15
16 #ifdef CONFIG_HIBERNATION 16 #ifdef CONFIG_HIBERNATION
17 /* kernel/power/snapshot.c */ 17 /* kernel/power/snapshot.c */
18 extern void __init hibernate_reserved_size_init(void); 18 extern void __init hibernate_reserved_size_init(void);
19 extern void __init hibernate_image_size_init(void); 19 extern void __init hibernate_image_size_init(void);
20 20
21 #ifdef CONFIG_ARCH_HIBERNATION_HEADER 21 #ifdef CONFIG_ARCH_HIBERNATION_HEADER
22 /* Maximum size of architecture specific data in a hibernation header */ 22 /* Maximum size of architecture specific data in a hibernation header */
23 #define MAX_ARCH_HEADER_SIZE (sizeof(struct new_utsname) + 4) 23 #define MAX_ARCH_HEADER_SIZE (sizeof(struct new_utsname) + 4)
24 24
25 extern int arch_hibernation_header_save(void *addr, unsigned int max_size); 25 extern int arch_hibernation_header_save(void *addr, unsigned int max_size);
26 extern int arch_hibernation_header_restore(void *addr); 26 extern int arch_hibernation_header_restore(void *addr);
27 27
28 static inline int init_header_complete(struct swsusp_info *info) 28 static inline int init_header_complete(struct swsusp_info *info)
29 { 29 {
30 return arch_hibernation_header_save(info, MAX_ARCH_HEADER_SIZE); 30 return arch_hibernation_header_save(info, MAX_ARCH_HEADER_SIZE);
31 } 31 }
32 32
33 static inline char *check_image_kernel(struct swsusp_info *info) 33 static inline char *check_image_kernel(struct swsusp_info *info)
34 { 34 {
35 return arch_hibernation_header_restore(info) ? 35 return arch_hibernation_header_restore(info) ?
36 "architecture specific data" : NULL; 36 "architecture specific data" : NULL;
37 } 37 }
38 #endif /* CONFIG_ARCH_HIBERNATION_HEADER */ 38 #endif /* CONFIG_ARCH_HIBERNATION_HEADER */
39 39
40 /* 40 /*
41 * Keep some memory free so that I/O operations can succeed without paging 41 * Keep some memory free so that I/O operations can succeed without paging
42 * [Might this be more than 4 MB?] 42 * [Might this be more than 4 MB?]
43 */ 43 */
44 #define PAGES_FOR_IO ((4096 * 1024) >> PAGE_SHIFT) 44 #define PAGES_FOR_IO ((4096 * 1024) >> PAGE_SHIFT)
45 45
46 /* 46 /*
47 * Keep 1 MB of memory free so that device drivers can allocate some pages in 47 * Keep 1 MB of memory free so that device drivers can allocate some pages in
48 * their .suspend() routines without breaking the suspend to disk. 48 * their .suspend() routines without breaking the suspend to disk.
49 */ 49 */
50 #define SPARE_PAGES ((1024 * 1024) >> PAGE_SHIFT) 50 #define SPARE_PAGES ((1024 * 1024) >> PAGE_SHIFT)
51 51
52 /* kernel/power/hibernate.c */ 52 /* kernel/power/hibernate.c */
53 extern bool freezer_test_done; 53 extern bool freezer_test_done;
54 54
55 extern int hibernation_snapshot(int platform_mode); 55 extern int hibernation_snapshot(int platform_mode);
56 extern int hibernation_restore(int platform_mode); 56 extern int hibernation_restore(int platform_mode);
57 extern int hibernation_platform_enter(void); 57 extern int hibernation_platform_enter(void);
58 58
59 #else /* !CONFIG_HIBERNATION */ 59 #else /* !CONFIG_HIBERNATION */
60 60
61 static inline void hibernate_reserved_size_init(void) {} 61 static inline void hibernate_reserved_size_init(void) {}
62 static inline void hibernate_image_size_init(void) {} 62 static inline void hibernate_image_size_init(void) {}
63 #endif /* !CONFIG_HIBERNATION */ 63 #endif /* !CONFIG_HIBERNATION */
64 64
65 extern int pfn_is_nosave(unsigned long); 65 extern int pfn_is_nosave(unsigned long);
66 66
67 #define power_attr(_name) \ 67 #define power_attr(_name) \
68 static struct kobj_attribute _name##_attr = { \ 68 static struct kobj_attribute _name##_attr = { \
69 .attr = { \ 69 .attr = { \
70 .name = __stringify(_name), \ 70 .name = __stringify(_name), \
71 .mode = 0644, \ 71 .mode = 0644, \
72 }, \ 72 }, \
73 .show = _name##_show, \ 73 .show = _name##_show, \
74 .store = _name##_store, \ 74 .store = _name##_store, \
75 } 75 }
76 76
77 /* Preferred image size in bytes (default 500 MB) */ 77 /* Preferred image size in bytes (default 500 MB) */
78 extern unsigned long image_size; 78 extern unsigned long image_size;
79 /* Size of memory reserved for drivers (default SPARE_PAGES x PAGE_SIZE) */ 79 /* Size of memory reserved for drivers (default SPARE_PAGES x PAGE_SIZE) */
80 extern unsigned long reserved_size; 80 extern unsigned long reserved_size;
81 extern int in_suspend; 81 extern int in_suspend;
82 extern dev_t swsusp_resume_device; 82 extern dev_t swsusp_resume_device;
83 extern sector_t swsusp_resume_block; 83 extern sector_t swsusp_resume_block;
84 84
85 extern asmlinkage int swsusp_arch_suspend(void); 85 extern asmlinkage int swsusp_arch_suspend(void);
86 extern asmlinkage int swsusp_arch_resume(void); 86 extern asmlinkage int swsusp_arch_resume(void);
87 87
88 extern int create_basic_memory_bitmaps(void); 88 extern int create_basic_memory_bitmaps(void);
89 extern void free_basic_memory_bitmaps(void); 89 extern void free_basic_memory_bitmaps(void);
90 extern int hibernate_preallocate_memory(void); 90 extern int hibernate_preallocate_memory(void);
91 91
92 /** 92 /**
93 * Auxiliary structure used for reading the snapshot image data and 93 * Auxiliary structure used for reading the snapshot image data and
94 * metadata from and writing them to the list of page backup entries 94 * metadata from and writing them to the list of page backup entries
95 * (PBEs) which is the main data structure of swsusp. 95 * (PBEs) which is the main data structure of swsusp.
96 * 96 *
97 * Using struct snapshot_handle we can transfer the image, including its 97 * Using struct snapshot_handle we can transfer the image, including its
98 * metadata, as a continuous sequence of bytes with the help of 98 * metadata, as a continuous sequence of bytes with the help of
99 * snapshot_read_next() and snapshot_write_next(). 99 * snapshot_read_next() and snapshot_write_next().
100 * 100 *
101 * The code that writes the image to a storage or transfers it to 101 * The code that writes the image to a storage or transfers it to
102 * the user land is required to use snapshot_read_next() for this 102 * the user land is required to use snapshot_read_next() for this
103 * purpose and it should not make any assumptions regarding the internal 103 * purpose and it should not make any assumptions regarding the internal
104 * structure of the image. Similarly, the code that reads the image from 104 * structure of the image. Similarly, the code that reads the image from
105 * a storage or transfers it from the user land is required to use 105 * a storage or transfers it from the user land is required to use
106 * snapshot_write_next(). 106 * snapshot_write_next().
107 * 107 *
108 * This may allow us to change the internal structure of the image 108 * This may allow us to change the internal structure of the image
109 * in the future with considerably less effort. 109 * in the future with considerably less effort.
110 */ 110 */
111 111
112 struct snapshot_handle { 112 struct snapshot_handle {
113 unsigned int cur; /* number of the block of PAGE_SIZE bytes the 113 unsigned int cur; /* number of the block of PAGE_SIZE bytes the
114 * next operation will refer to (ie. current) 114 * next operation will refer to (ie. current)
115 */ 115 */
116 void *buffer; /* address of the block to read from 116 void *buffer; /* address of the block to read from
117 * or write to 117 * or write to
118 */ 118 */
119 int sync_read; /* Set to one to notify the caller of 119 int sync_read; /* Set to one to notify the caller of
120 * snapshot_write_next() that it may 120 * snapshot_write_next() that it may
121 * need to call wait_on_bio_chain() 121 * need to call wait_on_bio_chain()
122 */ 122 */
123 }; 123 };
124 124
125 /* This macro returns the address from/to which the caller of 125 /* This macro returns the address from/to which the caller of
126 * snapshot_read_next()/snapshot_write_next() is allowed to 126 * snapshot_read_next()/snapshot_write_next() is allowed to
127 * read/write data after the function returns 127 * read/write data after the function returns
128 */ 128 */
129 #define data_of(handle) ((handle).buffer) 129 #define data_of(handle) ((handle).buffer)
130 130
131 extern unsigned int snapshot_additional_pages(struct zone *zone); 131 extern unsigned int snapshot_additional_pages(struct zone *zone);
132 extern unsigned long snapshot_get_image_size(void); 132 extern unsigned long snapshot_get_image_size(void);
133 extern int snapshot_read_next(struct snapshot_handle *handle); 133 extern int snapshot_read_next(struct snapshot_handle *handle);
134 extern int snapshot_write_next(struct snapshot_handle *handle); 134 extern int snapshot_write_next(struct snapshot_handle *handle);
135 extern void snapshot_write_finalize(struct snapshot_handle *handle); 135 extern void snapshot_write_finalize(struct snapshot_handle *handle);
136 extern int snapshot_image_loaded(struct snapshot_handle *handle); 136 extern int snapshot_image_loaded(struct snapshot_handle *handle);
137 137
138 /* If unset, the snapshot device cannot be open. */ 138 /* If unset, the snapshot device cannot be open. */
139 extern atomic_t snapshot_device_available; 139 extern atomic_t snapshot_device_available;
140 140
141 extern sector_t alloc_swapdev_block(int swap); 141 extern sector_t alloc_swapdev_block(int swap);
142 extern void free_all_swap_pages(int swap); 142 extern void free_all_swap_pages(int swap);
143 extern int swsusp_swap_in_use(void); 143 extern int swsusp_swap_in_use(void);
144 144
145 /* 145 /*
146 * Flags that can be passed from the hibernatig hernel to the "boot" kernel in 146 * Flags that can be passed from the hibernatig hernel to the "boot" kernel in
147 * the image header. 147 * the image header.
148 */ 148 */
149 #define SF_PLATFORM_MODE 1 149 #define SF_PLATFORM_MODE 1
150 #define SF_NOCOMPRESS_MODE 2 150 #define SF_NOCOMPRESS_MODE 2
151 #define SF_CRC32_MODE 4 151 #define SF_CRC32_MODE 4
152 152
153 /* kernel/power/hibernate.c */ 153 /* kernel/power/hibernate.c */
154 extern int swsusp_check(void); 154 extern int swsusp_check(void);
155 extern void swsusp_free(void); 155 extern void swsusp_free(void);
156 extern int swsusp_read(unsigned int *flags_p); 156 extern int swsusp_read(unsigned int *flags_p);
157 extern int swsusp_write(unsigned int flags); 157 extern int swsusp_write(unsigned int flags);
158 extern void swsusp_close(fmode_t); 158 extern void swsusp_close(fmode_t);
159 #ifdef CONFIG_SUSPEND 159 #ifdef CONFIG_SUSPEND
160 extern int swsusp_unmark(void); 160 extern int swsusp_unmark(void);
161 #endif 161 #endif
162 162
163 /* kernel/power/block_io.c */ 163 /* kernel/power/block_io.c */
164 extern struct block_device *hib_resume_bdev; 164 extern struct block_device *hib_resume_bdev;
165 165
166 extern int hib_bio_read_page(pgoff_t page_off, void *addr, 166 extern int hib_bio_read_page(pgoff_t page_off, void *addr,
167 struct bio **bio_chain); 167 struct bio **bio_chain);
168 extern int hib_bio_write_page(pgoff_t page_off, void *addr, 168 extern int hib_bio_write_page(pgoff_t page_off, void *addr,
169 struct bio **bio_chain); 169 struct bio **bio_chain);
170 extern int hib_wait_on_bio_chain(struct bio **bio_chain); 170 extern int hib_wait_on_bio_chain(struct bio **bio_chain);
171 171
172 struct timeval; 172 struct timeval;
173 /* kernel/power/swsusp.c */ 173 /* kernel/power/swsusp.c */
174 extern void swsusp_show_speed(struct timeval *, struct timeval *, 174 extern void swsusp_show_speed(struct timeval *, struct timeval *,
175 unsigned int, char *); 175 unsigned int, char *);
176 176
177 #ifdef CONFIG_SUSPEND 177 #ifdef CONFIG_SUSPEND
178 struct pm_sleep_state {
179 const char *label;
180 suspend_state_t state;
181 };
182
178 /* kernel/power/suspend.c */ 183 /* kernel/power/suspend.c */
179 extern const char *const pm_states[]; 184 extern struct pm_sleep_state pm_states[];
180 185
181 extern bool valid_state(suspend_state_t state); 186 extern bool valid_state(suspend_state_t state);
182 extern int suspend_devices_and_enter(suspend_state_t state); 187 extern int suspend_devices_and_enter(suspend_state_t state);
183 #else /* !CONFIG_SUSPEND */ 188 #else /* !CONFIG_SUSPEND */
184 static inline int suspend_devices_and_enter(suspend_state_t state) 189 static inline int suspend_devices_and_enter(suspend_state_t state)
185 { 190 {
186 return -ENOSYS; 191 return -ENOSYS;
187 } 192 }
188 static inline bool valid_state(suspend_state_t state) { return false; } 193 static inline bool valid_state(suspend_state_t state) { return false; }
189 #endif /* !CONFIG_SUSPEND */ 194 #endif /* !CONFIG_SUSPEND */
190 195
191 #ifdef CONFIG_PM_TEST_SUSPEND 196 #ifdef CONFIG_PM_TEST_SUSPEND
192 /* kernel/power/suspend_test.c */ 197 /* kernel/power/suspend_test.c */
193 extern void suspend_test_start(void); 198 extern void suspend_test_start(void);
194 extern void suspend_test_finish(const char *label); 199 extern void suspend_test_finish(const char *label);
195 #else /* !CONFIG_PM_TEST_SUSPEND */ 200 #else /* !CONFIG_PM_TEST_SUSPEND */
196 static inline void suspend_test_start(void) {} 201 static inline void suspend_test_start(void) {}
197 static inline void suspend_test_finish(const char *label) {} 202 static inline void suspend_test_finish(const char *label) {}
198 #endif /* !CONFIG_PM_TEST_SUSPEND */ 203 #endif /* !CONFIG_PM_TEST_SUSPEND */
199 204
200 #ifdef CONFIG_PM_SLEEP 205 #ifdef CONFIG_PM_SLEEP
201 /* kernel/power/main.c */ 206 /* kernel/power/main.c */
202 extern int pm_notifier_call_chain(unsigned long val); 207 extern int pm_notifier_call_chain(unsigned long val);
203 #endif 208 #endif
204 209
205 #ifdef CONFIG_HIGHMEM 210 #ifdef CONFIG_HIGHMEM
206 int restore_highmem(void); 211 int restore_highmem(void);
207 #else 212 #else
208 static inline unsigned int count_highmem_pages(void) { return 0; } 213 static inline unsigned int count_highmem_pages(void) { return 0; }
209 static inline int restore_highmem(void) { return 0; } 214 static inline int restore_highmem(void) { return 0; }
210 #endif 215 #endif
211 216
212 /* 217 /*
213 * Suspend test levels 218 * Suspend test levels
214 */ 219 */
215 enum { 220 enum {
216 /* keep first */ 221 /* keep first */
217 TEST_NONE, 222 TEST_NONE,
218 TEST_CORE, 223 TEST_CORE,
219 TEST_CPUS, 224 TEST_CPUS,
220 TEST_PLATFORM, 225 TEST_PLATFORM,
221 TEST_DEVICES, 226 TEST_DEVICES,
222 TEST_FREEZER, 227 TEST_FREEZER,
223 /* keep last */ 228 /* keep last */
224 __TEST_AFTER_LAST 229 __TEST_AFTER_LAST
225 }; 230 };
226 231
227 #define TEST_FIRST TEST_NONE 232 #define TEST_FIRST TEST_NONE
228 #define TEST_MAX (__TEST_AFTER_LAST - 1) 233 #define TEST_MAX (__TEST_AFTER_LAST - 1)
229 234
230 extern int pm_test_level; 235 extern int pm_test_level;
231 236
232 #ifdef CONFIG_SUSPEND_FREEZER 237 #ifdef CONFIG_SUSPEND_FREEZER
233 static inline int suspend_freeze_processes(void) 238 static inline int suspend_freeze_processes(void)
234 { 239 {
235 int error; 240 int error;
236 241
237 error = freeze_processes(); 242 error = freeze_processes();
238 /* 243 /*
239 * freeze_processes() automatically thaws every task if freezing 244 * freeze_processes() automatically thaws every task if freezing
240 * fails. So we need not do anything extra upon error. 245 * fails. So we need not do anything extra upon error.
241 */ 246 */
242 if (error) 247 if (error)
243 return error; 248 return error;
244 249
245 error = freeze_kernel_threads(); 250 error = freeze_kernel_threads();
246 /* 251 /*
247 * freeze_kernel_threads() thaws only kernel threads upon freezing 252 * freeze_kernel_threads() thaws only kernel threads upon freezing
248 * failure. So we have to thaw the userspace tasks ourselves. 253 * failure. So we have to thaw the userspace tasks ourselves.
249 */ 254 */
250 if (error) 255 if (error)
251 thaw_processes(); 256 thaw_processes();
252 257
253 return error; 258 return error;
254 } 259 }
255 260
256 static inline void suspend_thaw_processes(void) 261 static inline void suspend_thaw_processes(void)
257 { 262 {
258 thaw_processes(); 263 thaw_processes();
259 } 264 }
260 #else 265 #else
261 static inline int suspend_freeze_processes(void) 266 static inline int suspend_freeze_processes(void)
262 { 267 {
263 return 0; 268 return 0;
264 } 269 }
265 270
266 static inline void suspend_thaw_processes(void) 271 static inline void suspend_thaw_processes(void)
267 { 272 {
268 } 273 }
269 #endif 274 #endif
270 275
271 #ifdef CONFIG_PM_AUTOSLEEP 276 #ifdef CONFIG_PM_AUTOSLEEP
272 277
273 /* kernel/power/autosleep.c */ 278 /* kernel/power/autosleep.c */
274 extern int pm_autosleep_init(void); 279 extern int pm_autosleep_init(void);
275 extern int pm_autosleep_lock(void); 280 extern int pm_autosleep_lock(void);
276 extern void pm_autosleep_unlock(void); 281 extern void pm_autosleep_unlock(void);
277 extern suspend_state_t pm_autosleep_state(void); 282 extern suspend_state_t pm_autosleep_state(void);
278 extern int pm_autosleep_set_state(suspend_state_t state); 283 extern int pm_autosleep_set_state(suspend_state_t state);
279 284
280 #else /* !CONFIG_PM_AUTOSLEEP */ 285 #else /* !CONFIG_PM_AUTOSLEEP */
281 286
282 static inline int pm_autosleep_init(void) { return 0; } 287 static inline int pm_autosleep_init(void) { return 0; }
283 static inline int pm_autosleep_lock(void) { return 0; } 288 static inline int pm_autosleep_lock(void) { return 0; }
284 static inline void pm_autosleep_unlock(void) {} 289 static inline void pm_autosleep_unlock(void) {}
285 static inline suspend_state_t pm_autosleep_state(void) { return PM_SUSPEND_ON; } 290 static inline suspend_state_t pm_autosleep_state(void) { return PM_SUSPEND_ON; }
286 291
287 #endif /* !CONFIG_PM_AUTOSLEEP */ 292 #endif /* !CONFIG_PM_AUTOSLEEP */
288 293
289 #ifdef CONFIG_PM_WAKELOCKS 294 #ifdef CONFIG_PM_WAKELOCKS
290 295
291 /* kernel/power/wakelock.c */ 296 /* kernel/power/wakelock.c */
292 extern ssize_t pm_show_wakelocks(char *buf, bool show_active); 297 extern ssize_t pm_show_wakelocks(char *buf, bool show_active);
293 extern int pm_wake_lock(const char *buf); 298 extern int pm_wake_lock(const char *buf);
294 extern int pm_wake_unlock(const char *buf); 299 extern int pm_wake_unlock(const char *buf);
295 300
296 #endif /* !CONFIG_PM_WAKELOCKS */ 301 #endif /* !CONFIG_PM_WAKELOCKS */
297 302
kernel/power/suspend.c
1 /* 1 /*
2 * kernel/power/suspend.c - Suspend to RAM and standby functionality. 2 * kernel/power/suspend.c - Suspend to RAM and standby functionality.
3 * 3 *
4 * Copyright (c) 2003 Patrick Mochel 4 * Copyright (c) 2003 Patrick Mochel
5 * Copyright (c) 2003 Open Source Development Lab 5 * Copyright (c) 2003 Open Source Development Lab
6 * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc. 6 * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
7 * 7 *
8 * This file is released under the GPLv2. 8 * This file is released under the GPLv2.
9 */ 9 */
10 10
11 #include <linux/string.h> 11 #include <linux/string.h>
12 #include <linux/delay.h> 12 #include <linux/delay.h>
13 #include <linux/errno.h> 13 #include <linux/errno.h>
14 #include <linux/init.h> 14 #include <linux/init.h>
15 #include <linux/console.h> 15 #include <linux/console.h>
16 #include <linux/cpu.h> 16 #include <linux/cpu.h>
17 #include <linux/syscalls.h> 17 #include <linux/syscalls.h>
18 #include <linux/gfp.h> 18 #include <linux/gfp.h>
19 #include <linux/io.h> 19 #include <linux/io.h>
20 #include <linux/kernel.h> 20 #include <linux/kernel.h>
21 #include <linux/list.h> 21 #include <linux/list.h>
22 #include <linux/mm.h> 22 #include <linux/mm.h>
23 #include <linux/slab.h> 23 #include <linux/slab.h>
24 #include <linux/export.h> 24 #include <linux/export.h>
25 #include <linux/suspend.h> 25 #include <linux/suspend.h>
26 #include <linux/syscore_ops.h> 26 #include <linux/syscore_ops.h>
27 #include <linux/ftrace.h> 27 #include <linux/ftrace.h>
28 #include <trace/events/power.h> 28 #include <trace/events/power.h>
29 29
30 #include "power.h" 30 #include "power.h"
31 31
32 const char *const pm_states[PM_SUSPEND_MAX] = { 32 struct pm_sleep_state pm_states[PM_SUSPEND_MAX] = {
33 [PM_SUSPEND_FREEZE] = "freeze", 33 [PM_SUSPEND_FREEZE] = { "freeze", PM_SUSPEND_FREEZE },
34 [PM_SUSPEND_STANDBY] = "standby", 34 [PM_SUSPEND_STANDBY] = { "standby", PM_SUSPEND_STANDBY },
35 [PM_SUSPEND_MEM] = "mem", 35 [PM_SUSPEND_MEM] = { "mem", PM_SUSPEND_MEM },
36 }; 36 };
37 37
38 static const struct platform_suspend_ops *suspend_ops; 38 static const struct platform_suspend_ops *suspend_ops;
39 39
40 static bool need_suspend_ops(suspend_state_t state) 40 static bool need_suspend_ops(suspend_state_t state)
41 { 41 {
42 return !!(state > PM_SUSPEND_FREEZE); 42 return !!(state > PM_SUSPEND_FREEZE);
43 } 43 }
44 44
45 static DECLARE_WAIT_QUEUE_HEAD(suspend_freeze_wait_head); 45 static DECLARE_WAIT_QUEUE_HEAD(suspend_freeze_wait_head);
46 static bool suspend_freeze_wake; 46 static bool suspend_freeze_wake;
47 47
48 static void freeze_begin(void) 48 static void freeze_begin(void)
49 { 49 {
50 suspend_freeze_wake = false; 50 suspend_freeze_wake = false;
51 } 51 }
52 52
53 static void freeze_enter(void) 53 static void freeze_enter(void)
54 { 54 {
55 wait_event(suspend_freeze_wait_head, suspend_freeze_wake); 55 wait_event(suspend_freeze_wait_head, suspend_freeze_wake);
56 } 56 }
57 57
58 void freeze_wake(void) 58 void freeze_wake(void)
59 { 59 {
60 suspend_freeze_wake = true; 60 suspend_freeze_wake = true;
61 wake_up(&suspend_freeze_wait_head); 61 wake_up(&suspend_freeze_wait_head);
62 } 62 }
63 EXPORT_SYMBOL_GPL(freeze_wake); 63 EXPORT_SYMBOL_GPL(freeze_wake);
64 64
65 /** 65 /**
66 * suspend_set_ops - Set the global suspend method table. 66 * suspend_set_ops - Set the global suspend method table.
67 * @ops: Suspend operations to use. 67 * @ops: Suspend operations to use.
68 */ 68 */
69 void suspend_set_ops(const struct platform_suspend_ops *ops) 69 void suspend_set_ops(const struct platform_suspend_ops *ops)
70 { 70 {
71 lock_system_sleep(); 71 lock_system_sleep();
72 suspend_ops = ops; 72 suspend_ops = ops;
73 unlock_system_sleep(); 73 unlock_system_sleep();
74 } 74 }
75 EXPORT_SYMBOL_GPL(suspend_set_ops); 75 EXPORT_SYMBOL_GPL(suspend_set_ops);
76 76
77 bool valid_state(suspend_state_t state) 77 bool valid_state(suspend_state_t state)
78 { 78 {
79 if (state == PM_SUSPEND_FREEZE) { 79 if (state == PM_SUSPEND_FREEZE) {
80 #ifdef CONFIG_PM_DEBUG 80 #ifdef CONFIG_PM_DEBUG
81 if (pm_test_level != TEST_NONE && 81 if (pm_test_level != TEST_NONE &&
82 pm_test_level != TEST_FREEZER && 82 pm_test_level != TEST_FREEZER &&
83 pm_test_level != TEST_DEVICES && 83 pm_test_level != TEST_DEVICES &&
84 pm_test_level != TEST_PLATFORM) { 84 pm_test_level != TEST_PLATFORM) {
85 printk(KERN_WARNING "Unsupported pm_test mode for " 85 printk(KERN_WARNING "Unsupported pm_test mode for "
86 "freeze state, please choose " 86 "freeze state, please choose "
87 "none/freezer/devices/platform.\n"); 87 "none/freezer/devices/platform.\n");
88 return false; 88 return false;
89 } 89 }
90 #endif 90 #endif
91 return true; 91 return true;
92 } 92 }
93 /* 93 /*
 94 * PM_SUSPEND_STANDBY and PM_SUSPEND_MEM states need low-level 94 * PM_SUSPEND_STANDBY and PM_SUSPEND_MEM states need low-level
 95 * platform support and must be valid to the low-level 95 * platform support and must be valid to the low-level
 96 * implementation; no ->valid() callback implies that none are valid. 96 * implementation; no ->valid() callback implies that none are valid.
97 */ 97 */
98 return suspend_ops && suspend_ops->valid && suspend_ops->valid(state); 98 return suspend_ops && suspend_ops->valid && suspend_ops->valid(state);
99 } 99 }
100 100
101 /** 101 /**
102 * suspend_valid_only_mem - Generic memory-only valid callback. 102 * suspend_valid_only_mem - Generic memory-only valid callback.
103 * 103 *
 104 * Platform drivers that implement mem suspend only, and only need to check for 104 * Platform drivers that implement mem suspend only, and only need to check for
 105 * that in their .valid() callback, can use this instead of rolling their own 105 * that in their .valid() callback, can use this instead of rolling their own
 106 * .valid() callback. 106 * .valid() callback.
107 */ 107 */
108 int suspend_valid_only_mem(suspend_state_t state) 108 int suspend_valid_only_mem(suspend_state_t state)
109 { 109 {
110 return state == PM_SUSPEND_MEM; 110 return state == PM_SUSPEND_MEM;
111 } 111 }
112 EXPORT_SYMBOL_GPL(suspend_valid_only_mem); 112 EXPORT_SYMBOL_GPL(suspend_valid_only_mem);
113 113
114 static int suspend_test(int level) 114 static int suspend_test(int level)
115 { 115 {
116 #ifdef CONFIG_PM_DEBUG 116 #ifdef CONFIG_PM_DEBUG
117 if (pm_test_level == level) { 117 if (pm_test_level == level) {
118 printk(KERN_INFO "suspend debug: Waiting for 5 seconds.\n"); 118 printk(KERN_INFO "suspend debug: Waiting for 5 seconds.\n");
119 mdelay(5000); 119 mdelay(5000);
120 return 1; 120 return 1;
121 } 121 }
122 #endif /* !CONFIG_PM_DEBUG */ 122 #endif /* !CONFIG_PM_DEBUG */
123 return 0; 123 return 0;
124 } 124 }
125 125
126 /** 126 /**
127 * suspend_prepare - Prepare for entering system sleep state. 127 * suspend_prepare - Prepare for entering system sleep state.
128 * 128 *
129 * Common code run for every system sleep state that can be entered (except for 129 * Common code run for every system sleep state that can be entered (except for
130 * hibernation). Run suspend notifiers, allocate the "suspend" console and 130 * hibernation). Run suspend notifiers, allocate the "suspend" console and
131 * freeze processes. 131 * freeze processes.
132 */ 132 */
133 static int suspend_prepare(suspend_state_t state) 133 static int suspend_prepare(suspend_state_t state)
134 { 134 {
135 int error; 135 int error;
136 136
137 if (need_suspend_ops(state) && (!suspend_ops || !suspend_ops->enter)) 137 if (need_suspend_ops(state) && (!suspend_ops || !suspend_ops->enter))
138 return -EPERM; 138 return -EPERM;
139 139
140 pm_prepare_console(); 140 pm_prepare_console();
141 141
142 error = pm_notifier_call_chain(PM_SUSPEND_PREPARE); 142 error = pm_notifier_call_chain(PM_SUSPEND_PREPARE);
143 if (error) 143 if (error)
144 goto Finish; 144 goto Finish;
145 145
146 error = suspend_freeze_processes(); 146 error = suspend_freeze_processes();
147 if (!error) 147 if (!error)
148 return 0; 148 return 0;
149 149
150 suspend_stats.failed_freeze++; 150 suspend_stats.failed_freeze++;
151 dpm_save_failed_step(SUSPEND_FREEZE); 151 dpm_save_failed_step(SUSPEND_FREEZE);
152 Finish: 152 Finish:
153 pm_notifier_call_chain(PM_POST_SUSPEND); 153 pm_notifier_call_chain(PM_POST_SUSPEND);
154 pm_restore_console(); 154 pm_restore_console();
155 return error; 155 return error;
156 } 156 }
157 157
158 /* default implementation */ 158 /* default implementation */
159 void __attribute__ ((weak)) arch_suspend_disable_irqs(void) 159 void __attribute__ ((weak)) arch_suspend_disable_irqs(void)
160 { 160 {
161 local_irq_disable(); 161 local_irq_disable();
162 } 162 }
163 163
164 /* default implementation */ 164 /* default implementation */
165 void __attribute__ ((weak)) arch_suspend_enable_irqs(void) 165 void __attribute__ ((weak)) arch_suspend_enable_irqs(void)
166 { 166 {
167 local_irq_enable(); 167 local_irq_enable();
168 } 168 }
169 169
170 /** 170 /**
171 * suspend_enter - Make the system enter the given sleep state. 171 * suspend_enter - Make the system enter the given sleep state.
172 * @state: System sleep state to enter. 172 * @state: System sleep state to enter.
 173 * @wakeup: Set if the sleep state should not be re-entered (wakeup pending). 173 * @wakeup: Set if the sleep state should not be re-entered (wakeup pending).
174 * 174 *
175 * This function should be called after devices have been suspended. 175 * This function should be called after devices have been suspended.
176 */ 176 */
177 static int suspend_enter(suspend_state_t state, bool *wakeup) 177 static int suspend_enter(suspend_state_t state, bool *wakeup)
178 { 178 {
179 int error; 179 int error;
180 180
181 if (need_suspend_ops(state) && suspend_ops->prepare) { 181 if (need_suspend_ops(state) && suspend_ops->prepare) {
182 error = suspend_ops->prepare(); 182 error = suspend_ops->prepare();
183 if (error) 183 if (error)
184 goto Platform_finish; 184 goto Platform_finish;
185 } 185 }
186 186
187 error = dpm_suspend_end(PMSG_SUSPEND); 187 error = dpm_suspend_end(PMSG_SUSPEND);
188 if (error) { 188 if (error) {
189 printk(KERN_ERR "PM: Some devices failed to power down\n"); 189 printk(KERN_ERR "PM: Some devices failed to power down\n");
190 goto Platform_finish; 190 goto Platform_finish;
191 } 191 }
192 192
193 if (need_suspend_ops(state) && suspend_ops->prepare_late) { 193 if (need_suspend_ops(state) && suspend_ops->prepare_late) {
194 error = suspend_ops->prepare_late(); 194 error = suspend_ops->prepare_late();
195 if (error) 195 if (error)
196 goto Platform_wake; 196 goto Platform_wake;
197 } 197 }
198 198
199 if (suspend_test(TEST_PLATFORM)) 199 if (suspend_test(TEST_PLATFORM))
200 goto Platform_wake; 200 goto Platform_wake;
201 201
202 /* 202 /*
203 * PM_SUSPEND_FREEZE equals 203 * PM_SUSPEND_FREEZE equals
204 * frozen processes + suspended devices + idle processors. 204 * frozen processes + suspended devices + idle processors.
205 * Thus we should invoke freeze_enter() soon after 205 * Thus we should invoke freeze_enter() soon after
206 * all the devices are suspended. 206 * all the devices are suspended.
207 */ 207 */
208 if (state == PM_SUSPEND_FREEZE) { 208 if (state == PM_SUSPEND_FREEZE) {
209 freeze_enter(); 209 freeze_enter();
210 goto Platform_wake; 210 goto Platform_wake;
211 } 211 }
212 212
213 ftrace_stop(); 213 ftrace_stop();
214 error = disable_nonboot_cpus(); 214 error = disable_nonboot_cpus();
215 if (error || suspend_test(TEST_CPUS)) 215 if (error || suspend_test(TEST_CPUS))
216 goto Enable_cpus; 216 goto Enable_cpus;
217 217
218 arch_suspend_disable_irqs(); 218 arch_suspend_disable_irqs();
219 BUG_ON(!irqs_disabled()); 219 BUG_ON(!irqs_disabled());
220 220
221 error = syscore_suspend(); 221 error = syscore_suspend();
222 if (!error) { 222 if (!error) {
223 *wakeup = pm_wakeup_pending(); 223 *wakeup = pm_wakeup_pending();
224 if (!(suspend_test(TEST_CORE) || *wakeup)) { 224 if (!(suspend_test(TEST_CORE) || *wakeup)) {
225 error = suspend_ops->enter(state); 225 error = suspend_ops->enter(state);
226 events_check_enabled = false; 226 events_check_enabled = false;
227 } 227 }
228 syscore_resume(); 228 syscore_resume();
229 } 229 }
230 230
231 arch_suspend_enable_irqs(); 231 arch_suspend_enable_irqs();
232 BUG_ON(irqs_disabled()); 232 BUG_ON(irqs_disabled());
233 233
234 Enable_cpus: 234 Enable_cpus:
235 enable_nonboot_cpus(); 235 enable_nonboot_cpus();
236 ftrace_start(); 236 ftrace_start();
237 237
238 Platform_wake: 238 Platform_wake:
239 if (need_suspend_ops(state) && suspend_ops->wake) 239 if (need_suspend_ops(state) && suspend_ops->wake)
240 suspend_ops->wake(); 240 suspend_ops->wake();
241 241
242 dpm_resume_start(PMSG_RESUME); 242 dpm_resume_start(PMSG_RESUME);
243 243
244 Platform_finish: 244 Platform_finish:
245 if (need_suspend_ops(state) && suspend_ops->finish) 245 if (need_suspend_ops(state) && suspend_ops->finish)
246 suspend_ops->finish(); 246 suspend_ops->finish();
247 247
248 return error; 248 return error;
249 } 249 }
250 250
251 /** 251 /**
252 * suspend_devices_and_enter - Suspend devices and enter system sleep state. 252 * suspend_devices_and_enter - Suspend devices and enter system sleep state.
253 * @state: System sleep state to enter. 253 * @state: System sleep state to enter.
254 */ 254 */
255 int suspend_devices_and_enter(suspend_state_t state) 255 int suspend_devices_and_enter(suspend_state_t state)
256 { 256 {
257 int error; 257 int error;
258 bool wakeup = false; 258 bool wakeup = false;
259 259
260 if (need_suspend_ops(state) && !suspend_ops) 260 if (need_suspend_ops(state) && !suspend_ops)
261 return -ENOSYS; 261 return -ENOSYS;
262 262
263 trace_machine_suspend(state); 263 trace_machine_suspend(state);
264 if (need_suspend_ops(state) && suspend_ops->begin) { 264 if (need_suspend_ops(state) && suspend_ops->begin) {
265 error = suspend_ops->begin(state); 265 error = suspend_ops->begin(state);
266 if (error) 266 if (error)
267 goto Close; 267 goto Close;
268 } 268 }
269 suspend_console(); 269 suspend_console();
270 suspend_test_start(); 270 suspend_test_start();
271 error = dpm_suspend_start(PMSG_SUSPEND); 271 error = dpm_suspend_start(PMSG_SUSPEND);
272 if (error) { 272 if (error) {
273 pr_err("PM: Some devices failed to suspend, or early wake event detected\n"); 273 pr_err("PM: Some devices failed to suspend, or early wake event detected\n");
274 goto Recover_platform; 274 goto Recover_platform;
275 } 275 }
276 suspend_test_finish("suspend devices"); 276 suspend_test_finish("suspend devices");
277 if (suspend_test(TEST_DEVICES)) 277 if (suspend_test(TEST_DEVICES))
278 goto Recover_platform; 278 goto Recover_platform;
279 279
280 do { 280 do {
281 error = suspend_enter(state, &wakeup); 281 error = suspend_enter(state, &wakeup);
282 } while (!error && !wakeup && need_suspend_ops(state) 282 } while (!error && !wakeup && need_suspend_ops(state)
283 && suspend_ops->suspend_again && suspend_ops->suspend_again()); 283 && suspend_ops->suspend_again && suspend_ops->suspend_again());
284 284
285 Resume_devices: 285 Resume_devices:
286 suspend_test_start(); 286 suspend_test_start();
287 dpm_resume_end(PMSG_RESUME); 287 dpm_resume_end(PMSG_RESUME);
288 suspend_test_finish("resume devices"); 288 suspend_test_finish("resume devices");
289 resume_console(); 289 resume_console();
290 Close: 290 Close:
291 if (need_suspend_ops(state) && suspend_ops->end) 291 if (need_suspend_ops(state) && suspend_ops->end)
292 suspend_ops->end(); 292 suspend_ops->end();
293 trace_machine_suspend(PWR_EVENT_EXIT); 293 trace_machine_suspend(PWR_EVENT_EXIT);
294 return error; 294 return error;
295 295
296 Recover_platform: 296 Recover_platform:
297 if (need_suspend_ops(state) && suspend_ops->recover) 297 if (need_suspend_ops(state) && suspend_ops->recover)
298 suspend_ops->recover(); 298 suspend_ops->recover();
299 goto Resume_devices; 299 goto Resume_devices;
300 } 300 }
301 301
302 /** 302 /**
303 * suspend_finish - Clean up before finishing the suspend sequence. 303 * suspend_finish - Clean up before finishing the suspend sequence.
304 * 304 *
305 * Call platform code to clean up, restart processes, and free the console that 305 * Call platform code to clean up, restart processes, and free the console that
306 * we've allocated. This routine is not called for hibernation. 306 * we've allocated. This routine is not called for hibernation.
307 */ 307 */
308 static void suspend_finish(void) 308 static void suspend_finish(void)
309 { 309 {
310 suspend_thaw_processes(); 310 suspend_thaw_processes();
311 pm_notifier_call_chain(PM_POST_SUSPEND); 311 pm_notifier_call_chain(PM_POST_SUSPEND);
312 pm_restore_console(); 312 pm_restore_console();
313 } 313 }
314 314
315 /** 315 /**
316 * enter_state - Do common work needed to enter system sleep state. 316 * enter_state - Do common work needed to enter system sleep state.
317 * @state: System sleep state to enter. 317 * @state: System sleep state to enter.
318 * 318 *
319 * Make sure that no one else is trying to put the system into a sleep state. 319 * Make sure that no one else is trying to put the system into a sleep state.
320 * Fail if that's not the case. Otherwise, prepare for system suspend, make the 320 * Fail if that's not the case. Otherwise, prepare for system suspend, make the
321 * system enter the given sleep state and clean up after wakeup. 321 * system enter the given sleep state and clean up after wakeup.
322 */ 322 */
323 static int enter_state(suspend_state_t state) 323 static int enter_state(suspend_state_t state)
324 { 324 {
325 int error; 325 int error;
326 326
327 if (!valid_state(state)) 327 if (!valid_state(state))
328 return -ENODEV; 328 return -ENODEV;
329 329
330 if (!mutex_trylock(&pm_mutex)) 330 if (!mutex_trylock(&pm_mutex))
331 return -EBUSY; 331 return -EBUSY;
332 332
333 if (state == PM_SUSPEND_FREEZE) 333 if (state == PM_SUSPEND_FREEZE)
334 freeze_begin(); 334 freeze_begin();
335 335
336 printk(KERN_INFO "PM: Syncing filesystems ... "); 336 printk(KERN_INFO "PM: Syncing filesystems ... ");
337 sys_sync(); 337 sys_sync();
338 printk("done.\n"); 338 printk("done.\n");
339 339
340 pr_debug("PM: Preparing system for %s sleep\n", pm_states[state]); 340 pr_debug("PM: Preparing system for %s sleep\n", pm_states[state].label);
341 error = suspend_prepare(state); 341 error = suspend_prepare(state);
342 if (error) 342 if (error)
343 goto Unlock; 343 goto Unlock;
344 344
345 if (suspend_test(TEST_FREEZER)) 345 if (suspend_test(TEST_FREEZER))
346 goto Finish; 346 goto Finish;
347 347
348 pr_debug("PM: Entering %s sleep\n", pm_states[state]); 348 pr_debug("PM: Entering %s sleep\n", pm_states[state].label);
349 pm_restrict_gfp_mask(); 349 pm_restrict_gfp_mask();
350 error = suspend_devices_and_enter(state); 350 error = suspend_devices_and_enter(state);
351 pm_restore_gfp_mask(); 351 pm_restore_gfp_mask();
352 352
353 Finish: 353 Finish:
354 pr_debug("PM: Finishing wakeup.\n"); 354 pr_debug("PM: Finishing wakeup.\n");
355 suspend_finish(); 355 suspend_finish();
356 Unlock: 356 Unlock:
357 mutex_unlock(&pm_mutex); 357 mutex_unlock(&pm_mutex);
358 return error; 358 return error;
359 } 359 }
360 360
361 /** 361 /**
362 * pm_suspend - Externally visible function for suspending the system. 362 * pm_suspend - Externally visible function for suspending the system.
363 * @state: System sleep state to enter. 363 * @state: System sleep state to enter.
364 * 364 *
365 * Check if the value of @state represents one of the supported states, 365 * Check if the value of @state represents one of the supported states,
366 * execute enter_state() and update system suspend statistics. 366 * execute enter_state() and update system suspend statistics.
367 */ 367 */
368 int pm_suspend(suspend_state_t state) 368 int pm_suspend(suspend_state_t state)
369 { 369 {
370 int error; 370 int error;
371 371
372 if (state <= PM_SUSPEND_ON || state >= PM_SUSPEND_MAX) 372 if (state <= PM_SUSPEND_ON || state >= PM_SUSPEND_MAX)
373 return -EINVAL; 373 return -EINVAL;
374 374
375 error = enter_state(state); 375 error = enter_state(state);
376 if (error) { 376 if (error) {
377 suspend_stats.fail++; 377 suspend_stats.fail++;
378 dpm_save_failed_errno(error); 378 dpm_save_failed_errno(error);
379 } else { 379 } else {
380 suspend_stats.success++; 380 suspend_stats.success++;
381 } 381 }
382 return error; 382 return error;
383 } 383 }
384 EXPORT_SYMBOL(pm_suspend); 384 EXPORT_SYMBOL(pm_suspend);
385 385
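As context for the freeze_begin()/freeze_enter()/freeze_wake() trio above: PM_SUSPEND_FREEZE simply parks the boot CPU on a wait queue after devices are suspended, until a wakeup source flips a flag and wakes the queue. Below is a minimal, hedged sketch of that same wait_event()/wake_up() handshake reduced to a standalone kernel module; it is not part of this commit, and the demo_* names are invented for illustration.

	#include <linux/module.h>
	#include <linux/kthread.h>
	#include <linux/wait.h>
	#include <linux/delay.h>
	#include <linux/err.h>

	static DECLARE_WAIT_QUEUE_HEAD(demo_wait_head);
	static bool demo_wake;

	/* Plays the role of freeze_enter(): block until the flag is set. */
	static int demo_waiter(void *unused)
	{
		pr_info("demo: waiting, like freeze_enter()\n");
		wait_event(demo_wait_head, demo_wake);
		pr_info("demo: woken, like after freeze_wake()\n");
		return 0;
	}

	static int __init demo_init(void)
	{
		struct task_struct *tsk;

		tsk = kthread_run(demo_waiter, NULL, "freeze-demo");
		if (IS_ERR(tsk))
			return PTR_ERR(tsk);

		msleep(100);		/* let the waiter block first */
		demo_wake = true;	/* the "wakeup event" */
		wake_up(&demo_wait_head);
		return 0;
	}

	static void __exit demo_exit(void) { }

	module_init(demo_init);
	module_exit(demo_exit);
	MODULE_LICENSE("GPL");

The suspend.c code follows the same pattern: freeze_begin() clears suspend_freeze_wake, freeze_enter() sleeps on suspend_freeze_wait_head, and freeze_wake() sets the flag and wakes the queue.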
kernel/power/suspend_test.c
1 /* 1 /*
2 * kernel/power/suspend_test.c - Suspend to RAM and standby test facility. 2 * kernel/power/suspend_test.c - Suspend to RAM and standby test facility.
3 * 3 *
4 * Copyright (c) 2009 Pavel Machek <pavel@ucw.cz> 4 * Copyright (c) 2009 Pavel Machek <pavel@ucw.cz>
5 * 5 *
6 * This file is released under the GPLv2. 6 * This file is released under the GPLv2.
7 */ 7 */
8 8
9 #include <linux/init.h> 9 #include <linux/init.h>
10 #include <linux/rtc.h> 10 #include <linux/rtc.h>
11 11
12 #include "power.h" 12 #include "power.h"
13 13
14 /* 14 /*
15 * We test the system suspend code by setting an RTC wakealarm a short 15 * We test the system suspend code by setting an RTC wakealarm a short
16 * time in the future, then suspending. Suspending the devices won't 16 * time in the future, then suspending. Suspending the devices won't
17 * normally take long ... some systems only need a few milliseconds. 17 * normally take long ... some systems only need a few milliseconds.
18 * 18 *
19 * The time it takes is system-specific though, so when we test this 19 * The time it takes is system-specific though, so when we test this
20 * during system bootup we allow a LOT of time. 20 * during system bootup we allow a LOT of time.
21 */ 21 */
22 #define TEST_SUSPEND_SECONDS 10 22 #define TEST_SUSPEND_SECONDS 10
23 23
24 static unsigned long suspend_test_start_time; 24 static unsigned long suspend_test_start_time;
25 25
26 void suspend_test_start(void) 26 void suspend_test_start(void)
27 { 27 {
28 /* FIXME Use better timebase than "jiffies", ideally a clocksource. 28 /* FIXME Use better timebase than "jiffies", ideally a clocksource.
29 * What we want is a hardware counter that will work correctly even 29 * What we want is a hardware counter that will work correctly even
30 * during the irqs-are-off stages of the suspend/resume cycle... 30 * during the irqs-are-off stages of the suspend/resume cycle...
31 */ 31 */
32 suspend_test_start_time = jiffies; 32 suspend_test_start_time = jiffies;
33 } 33 }
34 34
35 void suspend_test_finish(const char *label) 35 void suspend_test_finish(const char *label)
36 { 36 {
37 long nj = jiffies - suspend_test_start_time; 37 long nj = jiffies - suspend_test_start_time;
38 unsigned msec; 38 unsigned msec;
39 39
40 msec = jiffies_to_msecs(abs(nj)); 40 msec = jiffies_to_msecs(abs(nj));
41 pr_info("PM: %s took %d.%03d seconds\n", label, 41 pr_info("PM: %s took %d.%03d seconds\n", label,
42 msec / 1000, msec % 1000); 42 msec / 1000, msec % 1000);
43 43
44 /* Warning on suspend means the RTC alarm period needs to be 44 /* Warning on suspend means the RTC alarm period needs to be
45 * larger -- the system was sooo slooowwww to suspend that the 45 * larger -- the system was sooo slooowwww to suspend that the
46 * alarm (should have) fired before the system went to sleep! 46 * alarm (should have) fired before the system went to sleep!
47 * 47 *
48 * Warning on either suspend or resume also means the system 48 * Warning on either suspend or resume also means the system
49 * has some performance issues. The stack dump of a WARN_ON 49 * has some performance issues. The stack dump of a WARN_ON
50 * is more likely to get the right attention than a printk... 50 * is more likely to get the right attention than a printk...
51 */ 51 */
52 WARN(msec > (TEST_SUSPEND_SECONDS * 1000), 52 WARN(msec > (TEST_SUSPEND_SECONDS * 1000),
53 "Component: %s, time: %u\n", label, msec); 53 "Component: %s, time: %u\n", label, msec);
54 } 54 }
55 55
56 /* 56 /*
57 * To test system suspend, we need a hands-off mechanism to resume the 57 * To test system suspend, we need a hands-off mechanism to resume the
58 * system. RTCs wake alarms are a common self-contained mechanism. 58 * system. RTCs wake alarms are a common self-contained mechanism.
59 */ 59 */
60 60
61 static void __init test_wakealarm(struct rtc_device *rtc, suspend_state_t state) 61 static void __init test_wakealarm(struct rtc_device *rtc, suspend_state_t state)
62 { 62 {
63 static char err_readtime[] __initdata = 63 static char err_readtime[] __initdata =
64 KERN_ERR "PM: can't read %s time, err %d\n"; 64 KERN_ERR "PM: can't read %s time, err %d\n";
65 static char err_wakealarm [] __initdata = 65 static char err_wakealarm [] __initdata =
66 KERN_ERR "PM: can't set %s wakealarm, err %d\n"; 66 KERN_ERR "PM: can't set %s wakealarm, err %d\n";
67 static char err_suspend[] __initdata = 67 static char err_suspend[] __initdata =
68 KERN_ERR "PM: suspend test failed, error %d\n"; 68 KERN_ERR "PM: suspend test failed, error %d\n";
69 static char info_test[] __initdata = 69 static char info_test[] __initdata =
70 KERN_INFO "PM: test RTC wakeup from '%s' suspend\n"; 70 KERN_INFO "PM: test RTC wakeup from '%s' suspend\n";
71 71
72 unsigned long now; 72 unsigned long now;
73 struct rtc_wkalrm alm; 73 struct rtc_wkalrm alm;
74 int status; 74 int status;
75 75
76 /* this may fail if the RTC hasn't been initialized */ 76 /* this may fail if the RTC hasn't been initialized */
77 status = rtc_read_time(rtc, &alm.time); 77 status = rtc_read_time(rtc, &alm.time);
78 if (status < 0) { 78 if (status < 0) {
79 printk(err_readtime, dev_name(&rtc->dev), status); 79 printk(err_readtime, dev_name(&rtc->dev), status);
80 return; 80 return;
81 } 81 }
82 rtc_tm_to_time(&alm.time, &now); 82 rtc_tm_to_time(&alm.time, &now);
83 83
84 memset(&alm, 0, sizeof alm); 84 memset(&alm, 0, sizeof alm);
85 rtc_time_to_tm(now + TEST_SUSPEND_SECONDS, &alm.time); 85 rtc_time_to_tm(now + TEST_SUSPEND_SECONDS, &alm.time);
86 alm.enabled = true; 86 alm.enabled = true;
87 87
88 status = rtc_set_alarm(rtc, &alm); 88 status = rtc_set_alarm(rtc, &alm);
89 if (status < 0) { 89 if (status < 0) {
90 printk(err_wakealarm, dev_name(&rtc->dev), status); 90 printk(err_wakealarm, dev_name(&rtc->dev), status);
91 return; 91 return;
92 } 92 }
93 93
94 if (state == PM_SUSPEND_MEM) { 94 if (state == PM_SUSPEND_MEM) {
95 printk(info_test, pm_states[state]); 95 printk(info_test, pm_states[state].label);
96 status = pm_suspend(state); 96 status = pm_suspend(state);
97 if (status == -ENODEV) 97 if (status == -ENODEV)
98 state = PM_SUSPEND_STANDBY; 98 state = PM_SUSPEND_STANDBY;
99 } 99 }
100 if (state == PM_SUSPEND_STANDBY) { 100 if (state == PM_SUSPEND_STANDBY) {
101 printk(info_test, pm_states[state]); 101 printk(info_test, pm_states[state].label);
102 status = pm_suspend(state); 102 status = pm_suspend(state);
103 } 103 }
104 if (status < 0) 104 if (status < 0)
105 printk(err_suspend, status); 105 printk(err_suspend, status);
106 106
107 /* Some platforms can't detect that the alarm triggered the 107 /* Some platforms can't detect that the alarm triggered the
 108 * wakeup, or (accordingly) disable it afterwards. 108 * wakeup, or (accordingly) disable it afterwards.
109 * It's supposed to give oneshot behavior; cope. 109 * It's supposed to give oneshot behavior; cope.
110 */ 110 */
111 alm.enabled = false; 111 alm.enabled = false;
112 rtc_set_alarm(rtc, &alm); 112 rtc_set_alarm(rtc, &alm);
113 } 113 }
114 114
115 static int __init has_wakealarm(struct device *dev, const void *data) 115 static int __init has_wakealarm(struct device *dev, const void *data)
116 { 116 {
117 struct rtc_device *candidate = to_rtc_device(dev); 117 struct rtc_device *candidate = to_rtc_device(dev);
118 118
119 if (!candidate->ops->set_alarm) 119 if (!candidate->ops->set_alarm)
120 return 0; 120 return 0;
121 if (!device_may_wakeup(candidate->dev.parent)) 121 if (!device_may_wakeup(candidate->dev.parent))
122 return 0; 122 return 0;
123 123
124 return 1; 124 return 1;
125 } 125 }
126 126
127 /* 127 /*
128 * Kernel options like "test_suspend=mem" force suspend/resume sanity tests 128 * Kernel options like "test_suspend=mem" force suspend/resume sanity tests
129 * at startup time. They're normally disabled, for faster boot and because 129 * at startup time. They're normally disabled, for faster boot and because
130 * we can't know which states really work on this particular system. 130 * we can't know which states really work on this particular system.
131 */ 131 */
132 static suspend_state_t test_state __initdata = PM_SUSPEND_ON; 132 static suspend_state_t test_state __initdata = PM_SUSPEND_ON;
133 133
134 static char warn_bad_state[] __initdata = 134 static char warn_bad_state[] __initdata =
135 KERN_WARNING "PM: can't test '%s' suspend state\n"; 135 KERN_WARNING "PM: can't test '%s' suspend state\n";
136 136
137 static int __init setup_test_suspend(char *value) 137 static int __init setup_test_suspend(char *value)
138 { 138 {
139 unsigned i; 139 suspend_state_t i;
140 140
141 /* "=mem" ==> "mem" */ 141 /* "=mem" ==> "mem" */
142 value++; 142 value++;
143 for (i = 0; i < PM_SUSPEND_MAX; i++) { 143 for (i = PM_SUSPEND_MIN; i < PM_SUSPEND_MAX; i++)
144 if (!pm_states[i]) 144 if (!strcmp(pm_states[i].label, value)) {
145 continue; 145 test_state = pm_states[i].state;
146 if (strcmp(pm_states[i], value) != 0) 146 return 0;
147 continue; 147 }
148 test_state = (__force suspend_state_t) i; 148
149 return 0;
150 }
151 printk(warn_bad_state, value); 149 printk(warn_bad_state, value);
152 return 0; 150 return 0;
153 } 151 }
154 __setup("test_suspend", setup_test_suspend); 152 __setup("test_suspend", setup_test_suspend);
155 153
156 static int __init test_suspend(void) 154 static int __init test_suspend(void)
157 { 155 {
158 static char warn_no_rtc[] __initdata = 156 static char warn_no_rtc[] __initdata =
159 KERN_WARNING "PM: no wakealarm-capable RTC driver is ready\n"; 157 KERN_WARNING "PM: no wakealarm-capable RTC driver is ready\n";
160 158
161 struct rtc_device *rtc = NULL; 159 struct rtc_device *rtc = NULL;
162 struct device *dev; 160 struct device *dev;
163 161
164 /* PM is initialized by now; is that state testable? */ 162 /* PM is initialized by now; is that state testable? */
165 if (test_state == PM_SUSPEND_ON) 163 if (test_state == PM_SUSPEND_ON)
166 goto done; 164 goto done;
167 if (!valid_state(test_state)) { 165 if (!valid_state(test_state)) {
168 printk(warn_bad_state, pm_states[test_state]); 166 printk(warn_bad_state, pm_states[test_state].label);
169 goto done; 167 goto done;
170 } 168 }
171 169
172 /* RTCs have initialized by now too ... can we use one? */ 170 /* RTCs have initialized by now too ... can we use one? */
173 dev = class_find_device(rtc_class, NULL, NULL, has_wakealarm); 171 dev = class_find_device(rtc_class, NULL, NULL, has_wakealarm);
174 if (dev) 172 if (dev)
175 rtc = rtc_class_open(dev_name(dev)); 173 rtc = rtc_class_open(dev_name(dev));
176 if (!rtc) { 174 if (!rtc) {
177 printk(warn_no_rtc); 175 printk(warn_no_rtc);
178 goto done; 176 goto done;
179 } 177 }
180 178
181 /* go for it */ 179 /* go for it */
182 test_wakealarm(rtc, test_state); 180 test_wakealarm(rtc, test_state);
183 rtc_class_close(rtc); 181 rtc_class_close(rtc);
184 done: 182 done:
185 return 0; 183 return 0;
186 } 184 }
187 late_initcall(test_suspend); 185 late_initcall(test_suspend);
188 186
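To close, here is a self-contained user-space sketch (not kernel code; the names mirror the kernel's for readability, and the stand-in types are assumptions of this illustration) of the two lookups this commit sets up: state number to label, as enter_state() and test_wakealarm() now do via .label, and label to state number, as the rewritten setup_test_suspend() does. Because each entry carries its own state value, later patches can let pm_states[] indexes and suspend_state_t values diverge without touching these call sites.

	#include <stdio.h>
	#include <string.h>

	/* Stand-ins for the kernel types/constants (illustration only). */
	typedef int suspend_state_t;
	enum {
		PM_SUSPEND_ON,
		PM_SUSPEND_FREEZE,
		PM_SUSPEND_STANDBY,
		PM_SUSPEND_MEM,
		PM_SUSPEND_MAX,
	};
	#define PM_SUSPEND_MIN PM_SUSPEND_FREEZE

	struct pm_sleep_state {
		const char *label;
		suspend_state_t state;
	};

	static const struct pm_sleep_state pm_states[PM_SUSPEND_MAX] = {
		[PM_SUSPEND_FREEZE]  = { "freeze",  PM_SUSPEND_FREEZE },
		[PM_SUSPEND_STANDBY] = { "standby", PM_SUSPEND_STANDBY },
		[PM_SUSPEND_MEM]     = { "mem",     PM_SUSPEND_MEM },
	};

	/* Label -> state, mirroring the new setup_test_suspend() loop. */
	static suspend_state_t state_from_label(const char *value)
	{
		suspend_state_t i;

		for (i = PM_SUSPEND_MIN; i < PM_SUSPEND_MAX; i++)
			if (!strcmp(pm_states[i].label, value))
				return pm_states[i].state;
		return PM_SUSPEND_ON;	/* unknown label: nothing to test */
	}

	int main(void)
	{
		/* State -> label, as enter_state() now prints pm_states[state].label. */
		printf("entering '%s' sleep\n", pm_states[PM_SUSPEND_MEM].label);
		printf("'standby' maps to state %d\n", state_from_label("standby"));
		return 0;
	}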