Commit 97740400bc76b64781d01f8cdfbcf750582006ef
Exists in master and in 6 other branches
Merge branch 'pm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
* 'pm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm:
  PM / Hibernate: Drop the check of swap space size for compressed image
  PM / shmobile: fix A3SP suspend method
  PM / Domains: Skip governor functions for CONFIG_PM_RUNTIME unset
  PM / Domains: Fix build for CONFIG_PM_SLEEP unset
  PM: Make sysrq-o be available for CONFIG_PM unset
Showing 5 changed files (inline diff)
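Of the five commits merged here, the "PM / shmobile: fix A3SP suspend method" change is the one visible in the portion of the diff reproduced below: the console_suspend_enabled test in sh7372_a3sp_suspend() was inverted, so the A3SP domain (which hosts the SCIF serial console) vetoed power-off exactly when the console was in fact allowed to suspend. A minimal standalone sketch of the corrected check, assuming only the standard meaning of the "no_console_suspend" command-line option (it clears console_suspend_enabled):

#include <linux/console.h>	/* console_suspend_enabled */
#include <linux/errno.h>

/* Sketch of the fixed check only, not the upstream function itself. */
static int a3sp_may_power_down(void)
{
	/*
	 * When the console is not suspended it keeps using the SCIF
	 * hardware in A3SP, so the domain must stay on: veto the
	 * power-off with -EBUSY.
	 */
	return console_suspend_enabled ? 0 : -EBUSY;
}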
arch/arm/mach-shmobile/pm-sh7372.c
/*
 * sh7372 Power management support
 *
 * Copyright (C) 2011 Magnus Damm
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/pm.h>
#include <linux/suspend.h>
#include <linux/cpuidle.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/pm_clock.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/bitrev.h>
#include <linux/console.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/tlbflush.h>
#include <asm/suspend.h>
#include <mach/common.h>
#include <mach/sh7372.h>

/* DBG */
#define DBGREG1 0xe6100020
#define DBGREG9 0xe6100040

/* CPGA */
#define SYSTBCR 0xe6150024
#define MSTPSR0 0xe6150030
#define MSTPSR1 0xe6150038
#define MSTPSR2 0xe6150040
#define MSTPSR3 0xe6150048
#define MSTPSR4 0xe615004c
#define PLLC01STPCR 0xe61500c8

/* SYSC */
#define SPDCR 0xe6180008
#define SWUCR 0xe6180014
#define SBAR 0xe6180020
#define WUPRMSK 0xe6180028
#define WUPSMSK 0xe618002c
#define WUPSMSK2 0xe6180048
#define PSTR 0xe6180080
#define WUPSFAC 0xe6180098
#define IRQCR 0xe618022c
#define IRQCR2 0xe6180238
#define IRQCR3 0xe6180244
#define IRQCR4 0xe6180248
#define PDNSEL 0xe6180254

/* INTC */
#define ICR1A 0xe6900000
#define ICR2A 0xe6900004
#define ICR3A 0xe6900008
#define ICR4A 0xe690000c
#define INTMSK00A 0xe6900040
#define INTMSK10A 0xe6900044
#define INTMSK20A 0xe6900048
#define INTMSK30A 0xe690004c

/* MFIS */
#define SMFRAM 0xe6a70000

/* AP-System Core */
#define APARMBAREA 0xe6f10020

#define PSTR_RETRIES 100
#define PSTR_DELAY_US 10

#ifdef CONFIG_PM

static int pd_power_down(struct generic_pm_domain *genpd)
{
	struct sh7372_pm_domain *sh7372_pd = to_sh7372_pd(genpd);
	unsigned int mask = 1 << sh7372_pd->bit_shift;

	if (sh7372_pd->suspend) {
		int ret = sh7372_pd->suspend();

		if (ret)
			return ret;
	}

	if (__raw_readl(PSTR) & mask) {
		unsigned int retry_count;

		__raw_writel(mask, SPDCR);

		for (retry_count = PSTR_RETRIES; retry_count; retry_count--) {
			if (!(__raw_readl(SPDCR) & mask))
				break;
			cpu_relax();
		}
	}

	if (!sh7372_pd->no_debug)
		pr_debug("%s: Power off, 0x%08x -> PSTR = 0x%08x\n",
			 genpd->name, mask, __raw_readl(PSTR));

	return 0;
}

static int __pd_power_up(struct sh7372_pm_domain *sh7372_pd, bool do_resume)
{
	unsigned int mask = 1 << sh7372_pd->bit_shift;
	unsigned int retry_count;
	int ret = 0;

	if (__raw_readl(PSTR) & mask)
		goto out;

	__raw_writel(mask, SWUCR);

	for (retry_count = 2 * PSTR_RETRIES; retry_count; retry_count--) {
		if (!(__raw_readl(SWUCR) & mask))
			break;
		if (retry_count > PSTR_RETRIES)
			udelay(PSTR_DELAY_US);
		else
			cpu_relax();
	}
	if (!retry_count)
		ret = -EIO;

	if (!sh7372_pd->no_debug)
		pr_debug("%s: Power on, 0x%08x -> PSTR = 0x%08x\n",
			 sh7372_pd->genpd.name, mask, __raw_readl(PSTR));

 out:
	if (ret == 0 && sh7372_pd->resume && do_resume)
		sh7372_pd->resume();

	return ret;
}

static int pd_power_up(struct generic_pm_domain *genpd)
{
	return __pd_power_up(to_sh7372_pd(genpd), true);
}

static int sh7372_a4r_suspend(void)
{
	sh7372_intcs_suspend();
	__raw_writel(0x300fffff, WUPRMSK); /* avoid wakeup */
	return 0;
}

static bool pd_active_wakeup(struct device *dev)
{
	bool (*active_wakeup)(struct device *dev);

	active_wakeup = dev_gpd_data(dev)->ops.active_wakeup;
	return active_wakeup ? active_wakeup(dev) : true;
}

static int sh7372_stop_dev(struct device *dev)
{
	int (*stop)(struct device *dev);

	stop = dev_gpd_data(dev)->ops.stop;
	if (stop) {
		int ret = stop(dev);
		if (ret)
			return ret;
	}
	return pm_clk_suspend(dev);
}

static int sh7372_start_dev(struct device *dev)
{
	int (*start)(struct device *dev);
	int ret;

	ret = pm_clk_resume(dev);
	if (ret)
		return ret;

	start = dev_gpd_data(dev)->ops.start;
	if (start)
		ret = start(dev);

	return ret;
}

void sh7372_init_pm_domain(struct sh7372_pm_domain *sh7372_pd)
{
	struct generic_pm_domain *genpd = &sh7372_pd->genpd;
	struct dev_power_governor *gov = sh7372_pd->gov;

	pm_genpd_init(genpd, gov ? : &simple_qos_governor, false);
	genpd->dev_ops.stop = sh7372_stop_dev;
	genpd->dev_ops.start = sh7372_start_dev;
	genpd->dev_ops.active_wakeup = pd_active_wakeup;
	genpd->dev_irq_safe = true;
	genpd->power_off = pd_power_down;
	genpd->power_on = pd_power_up;
	__pd_power_up(sh7372_pd, false);
}

void sh7372_add_device_to_domain(struct sh7372_pm_domain *sh7372_pd,
				 struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;

	pm_genpd_add_device(&sh7372_pd->genpd, dev);
	if (pm_clk_no_clocks(dev))
		pm_clk_add(dev, NULL);
}

void sh7372_pm_add_subdomain(struct sh7372_pm_domain *sh7372_pd,
			     struct sh7372_pm_domain *sh7372_sd)
{
	pm_genpd_add_subdomain(&sh7372_pd->genpd, &sh7372_sd->genpd);
}

struct sh7372_pm_domain sh7372_a4lc = {
	.genpd.name = "A4LC",
	.bit_shift = 1,
};

struct sh7372_pm_domain sh7372_a4mp = {
	.genpd.name = "A4MP",
	.bit_shift = 2,
};

struct sh7372_pm_domain sh7372_d4 = {
	.genpd.name = "D4",
	.bit_shift = 3,
};

struct sh7372_pm_domain sh7372_a4r = {
	.genpd.name = "A4R",
	.bit_shift = 5,
	.suspend = sh7372_a4r_suspend,
	.resume = sh7372_intcs_resume,
};

struct sh7372_pm_domain sh7372_a3rv = {
	.genpd.name = "A3RV",
	.bit_shift = 6,
};

struct sh7372_pm_domain sh7372_a3ri = {
	.genpd.name = "A3RI",
	.bit_shift = 8,
};

static int sh7372_a4s_suspend(void)
{
	/*
	 * The A4S domain contains the CPU core and therefore it should
	 * only be turned off if the CPU is not in use.
	 */
	return -EBUSY;
}

struct sh7372_pm_domain sh7372_a4s = {
	.genpd.name = "A4S",
	.bit_shift = 10,
	.gov = &pm_domain_always_on_gov,
	.no_debug = true,
	.suspend = sh7372_a4s_suspend,
};

static int sh7372_a3sp_suspend(void)
{
	/*
	 * Serial consoles make use of SCIF hardware located in A3SP,
	 * keep such power domain on if "no_console_suspend" is set.
	 */
-	return console_suspend_enabled ? -EBUSY : 0;
+	return console_suspend_enabled ? 0 : -EBUSY;
}

struct sh7372_pm_domain sh7372_a3sp = {
	.genpd.name = "A3SP",
	.bit_shift = 11,
	.gov = &pm_domain_always_on_gov,
	.no_debug = true,
	.suspend = sh7372_a3sp_suspend,
};

struct sh7372_pm_domain sh7372_a3sg = {
	.genpd.name = "A3SG",
	.bit_shift = 13,
};

#else /* !CONFIG_PM */

static inline void sh7372_a3sp_init(void) {}

#endif /* !CONFIG_PM */

#if defined(CONFIG_SUSPEND) || defined(CONFIG_CPU_IDLE)
static int sh7372_do_idle_core_standby(unsigned long unused)
{
	cpu_do_idle(); /* WFI when SYSTBCR == 0x10 -> Core Standby */
	return 0;
}

static void sh7372_set_reset_vector(unsigned long address)
{
	/* set reset vector, translate 4k */
	__raw_writel(address, SBAR);
	__raw_writel(0, APARMBAREA);
}

static void sh7372_enter_core_standby(void)
{
	sh7372_set_reset_vector(__pa(sh7372_resume_core_standby_sysc));

	/* enter sleep mode with SYSTBCR to 0x10 */
	__raw_writel(0x10, SYSTBCR);
	cpu_suspend(0, sh7372_do_idle_core_standby);
	__raw_writel(0, SYSTBCR);

	/* disable reset vector translation */
	__raw_writel(0, SBAR);
}
#endif

#ifdef CONFIG_SUSPEND
static void sh7372_enter_sysc(int pllc0_on, unsigned long sleep_mode)
{
	if (pllc0_on)
		__raw_writel(0, PLLC01STPCR);
	else
		__raw_writel(1 << 28, PLLC01STPCR);

	__raw_readl(WUPSFAC); /* read wakeup int. factor before sleep */
	cpu_suspend(sleep_mode, sh7372_do_idle_sysc);
	__raw_readl(WUPSFAC); /* read wakeup int. factor after wakeup */

	/* disable reset vector translation */
	__raw_writel(0, SBAR);
}

static int sh7372_sysc_valid(unsigned long *mskp, unsigned long *msk2p)
{
	unsigned long mstpsr0, mstpsr1, mstpsr2, mstpsr3, mstpsr4;
	unsigned long msk, msk2;

	/* check active clocks to determine potential wakeup sources */

	mstpsr0 = __raw_readl(MSTPSR0);
	if ((mstpsr0 & 0x00000003) != 0x00000003) {
		pr_debug("sh7372 mstpsr0 0x%08lx\n", mstpsr0);
		return 0;
	}

	mstpsr1 = __raw_readl(MSTPSR1);
	if ((mstpsr1 & 0xff079b7f) != 0xff079b7f) {
		pr_debug("sh7372 mstpsr1 0x%08lx\n", mstpsr1);
		return 0;
	}

	mstpsr2 = __raw_readl(MSTPSR2);
	if ((mstpsr2 & 0x000741ff) != 0x000741ff) {
		pr_debug("sh7372 mstpsr2 0x%08lx\n", mstpsr2);
		return 0;
	}

	mstpsr3 = __raw_readl(MSTPSR3);
	if ((mstpsr3 & 0x1a60f010) != 0x1a60f010) {
		pr_debug("sh7372 mstpsr3 0x%08lx\n", mstpsr3);
		return 0;
	}

	mstpsr4 = __raw_readl(MSTPSR4);
	if ((mstpsr4 & 0x00008cf0) != 0x00008cf0) {
		pr_debug("sh7372 mstpsr4 0x%08lx\n", mstpsr4);
		return 0;
	}

	msk = 0;
	msk2 = 0;

	/* make bitmaps of limited number of wakeup sources */

	if ((mstpsr2 & (1 << 23)) == 0) /* SPU2 */
		msk |= 1 << 31;

	if ((mstpsr2 & (1 << 12)) == 0) /* MFI_MFIM */
		msk |= 1 << 21;

	if ((mstpsr4 & (1 << 3)) == 0) /* KEYSC */
		msk |= 1 << 2;

	if ((mstpsr1 & (1 << 24)) == 0) /* CMT0 */
		msk |= 1 << 1;

	if ((mstpsr3 & (1 << 29)) == 0) /* CMT1 */
		msk |= 1 << 1;

	if ((mstpsr4 & (1 << 0)) == 0) /* CMT2 */
		msk |= 1 << 1;

	if ((mstpsr2 & (1 << 13)) == 0) /* MFI_MFIS */
		msk2 |= 1 << 17;

	*mskp = msk;
	*msk2p = msk2;

	return 1;
}

static void sh7372_icr_to_irqcr(unsigned long icr, u16 *irqcr1p, u16 *irqcr2p)
{
	u16 tmp, irqcr1, irqcr2;
	int k;

	irqcr1 = 0;
	irqcr2 = 0;

	/* convert INTCA ICR register layout to SYSC IRQCR+IRQCR2 */
	for (k = 0; k <= 7; k++) {
		tmp = (icr >> ((7 - k) * 4)) & 0xf;
		irqcr1 |= (tmp & 0x03) << (k * 2);
		irqcr2 |= (tmp >> 2) << (k * 2);
	}

	*irqcr1p = irqcr1;
	*irqcr2p = irqcr2;
}

static void sh7372_setup_sysc(unsigned long msk, unsigned long msk2)
{
	u16 irqcrx_low, irqcrx_high, irqcry_low, irqcry_high;
	unsigned long tmp;

	/* read IRQ0A -> IRQ15A mask */
	tmp = bitrev8(__raw_readb(INTMSK00A));
	tmp |= bitrev8(__raw_readb(INTMSK10A)) << 8;

	/* setup WUPSMSK from clocks and external IRQ mask */
	msk = (~msk & 0xc030000f) | (tmp << 4);
	__raw_writel(msk, WUPSMSK);

	/* propagate level/edge trigger for external IRQ 0->15 */
	sh7372_icr_to_irqcr(__raw_readl(ICR1A), &irqcrx_low, &irqcry_low);
	sh7372_icr_to_irqcr(__raw_readl(ICR2A), &irqcrx_high, &irqcry_high);
	__raw_writel((irqcrx_high << 16) | irqcrx_low, IRQCR);
	__raw_writel((irqcry_high << 16) | irqcry_low, IRQCR2);

	/* read IRQ16A -> IRQ31A mask */
	tmp = bitrev8(__raw_readb(INTMSK20A));
	tmp |= bitrev8(__raw_readb(INTMSK30A)) << 8;

	/* setup WUPSMSK2 from clocks and external IRQ mask */
	msk2 = (~msk2 & 0x00030000) | tmp;
	__raw_writel(msk2, WUPSMSK2);

	/* propagate level/edge trigger for external IRQ 16->31 */
	sh7372_icr_to_irqcr(__raw_readl(ICR3A), &irqcrx_low, &irqcry_low);
	sh7372_icr_to_irqcr(__raw_readl(ICR4A), &irqcrx_high, &irqcry_high);
	__raw_writel((irqcrx_high << 16) | irqcrx_low, IRQCR3);
	__raw_writel((irqcry_high << 16) | irqcry_low, IRQCR4);
}

static void sh7372_enter_a3sm_common(int pllc0_on)
{
	sh7372_set_reset_vector(__pa(sh7372_resume_core_standby_sysc));
	sh7372_enter_sysc(pllc0_on, 1 << 12);
}

static void sh7372_enter_a4s_common(int pllc0_on)
{
	sh7372_intca_suspend();
	memcpy((void *)SMFRAM, sh7372_resume_core_standby_sysc, 0x100);
	sh7372_set_reset_vector(SMFRAM);
	sh7372_enter_sysc(pllc0_on, 1 << 10);
	sh7372_intca_resume();
}

#endif

#ifdef CONFIG_CPU_IDLE

static void sh7372_cpuidle_setup(struct cpuidle_driver *drv)
{
	struct cpuidle_state *state = &drv->states[drv->state_count];

	snprintf(state->name, CPUIDLE_NAME_LEN, "C2");
	strncpy(state->desc, "Core Standby Mode", CPUIDLE_DESC_LEN);
	state->exit_latency = 10;
	state->target_residency = 20 + 10;
	state->flags = CPUIDLE_FLAG_TIME_VALID;
	shmobile_cpuidle_modes[drv->state_count] = sh7372_enter_core_standby;

	drv->state_count++;
}

static void sh7372_cpuidle_init(void)
{
	shmobile_cpuidle_setup = sh7372_cpuidle_setup;
}
#else
static void sh7372_cpuidle_init(void) {}
#endif

#ifdef CONFIG_SUSPEND

static int sh7372_enter_suspend(suspend_state_t suspend_state)
{
	unsigned long msk, msk2;

	/* check active clocks to determine potential wakeup sources */
	if (sh7372_sysc_valid(&msk, &msk2)) {
		/* convert INTC mask and sense to SYSC mask and sense */
		sh7372_setup_sysc(msk, msk2);

		if (!console_suspend_enabled &&
		    sh7372_a4s.genpd.status == GPD_STATE_POWER_OFF) {
			/* enter A4S sleep with PLLC0 off */
			pr_debug("entering A4S\n");
			sh7372_enter_a4s_common(0);
		} else {
			/* enter A3SM sleep with PLLC0 off */
			pr_debug("entering A3SM\n");
			sh7372_enter_a3sm_common(0);
		}
	} else {
		/* default to Core Standby that supports all wakeup sources */
		pr_debug("entering Core Standby\n");
		sh7372_enter_core_standby();
	}
	return 0;
}

/**
 * sh7372_pm_notifier_fn - SH7372 PM notifier routine.
 * @notifier: Unused.
 * @pm_event: Event being handled.
 * @unused: Unused.
 */
static int sh7372_pm_notifier_fn(struct notifier_block *notifier,
				 unsigned long pm_event, void *unused)
{
	switch (pm_event) {
	case PM_SUSPEND_PREPARE:
		/*
		 * This is necessary, because the A4R domain has to be "on"
		 * when suspend_device_irqs() and resume_device_irqs() are
		 * executed during system suspend and resume, respectively, so
		 * that those functions don't crash while accessing the INTCS.
		 */
		pm_genpd_poweron(&sh7372_a4r.genpd);
		break;
	case PM_POST_SUSPEND:
		pm_genpd_poweroff_unused();
		break;
	}

	return NOTIFY_DONE;
}

static void sh7372_suspend_init(void)
{
	shmobile_suspend_ops.enter = sh7372_enter_suspend;
	pm_notifier(sh7372_pm_notifier_fn, 0);
}
#else
static void sh7372_suspend_init(void) {}
#endif

void __init sh7372_pm_init(void)
{
	/* enable DBG hardware block to kick SYSC */
	__raw_writel(0x0000a500, DBGREG9);
	__raw_writel(0x0000a501, DBGREG9);
	__raw_writel(0x00000000, DBGREG1);

	/* do not convert A3SM, A3SP, A3SG, A4R power down into A4S */
	__raw_writel(0, PDNSEL);

	sh7372_suspend_init();
	sh7372_cpuidle_init();
}
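The next file, drivers/base/power/domain.c, dispatches per-device PM callbacks through the GENPD_DEV_CALLBACK() macro: a domain-wide dev_ops entry takes precedence, with the callback registered for the individual device as a fallback, and (type)0 as the default result when neither exists. A standalone sketch of the same dispatch order, using hypothetical sk_* types in place of the genpd structures:

struct sk_device;			/* stand-in for struct device */
struct sk_ops {
	int (*stop)(struct sk_device *dev);
};

/* Mirrors GENPD_DEV_CALLBACK(genpd, int, stop, dev) from the file below:
 * the domain-wide callback wins, then the per-device one is tried, and
 * with neither registered the result defaults to 0.
 */
static int sk_stop_dev(const struct sk_ops *domain_ops,
		       const struct sk_ops *dev_ops,
		       struct sk_device *dev)
{
	if (domain_ops->stop)
		return domain_ops->stop(dev);
	if (dev_ops->stop)
		return dev_ops->stop(dev);
	return 0;
}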
drivers/base/power/domain.c
/*
 * drivers/base/power/domain.c - Common code related to device power domains.
 *
 * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
 *
 * This file is released under the GPLv2.
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/suspend.h>
#include <linux/export.h>

#define GENPD_DEV_CALLBACK(genpd, type, callback, dev)		\
({								\
	type (*__routine)(struct device *__d);			\
	type __ret = (type)0;					\
								\
	__routine = genpd->dev_ops.callback;			\
	if (__routine) {					\
		__ret = __routine(dev);				\
	} else {						\
		__routine = dev_gpd_data(dev)->ops.callback;	\
		if (__routine)					\
			__ret = __routine(dev);			\
	}							\
	__ret;							\
})

#define GENPD_DEV_TIMED_CALLBACK(genpd, type, callback, dev, field, name)	\
({										\
	ktime_t __start = ktime_get();						\
	type __retval = GENPD_DEV_CALLBACK(genpd, type, callback, dev);		\
	s64 __elapsed = ktime_to_ns(ktime_sub(ktime_get(), __start));		\
	struct generic_pm_domain_data *__gpd_data = dev_gpd_data(dev);		\
	if (__elapsed > __gpd_data->td.field) {					\
		__gpd_data->td.field = __elapsed;				\
		dev_warn(dev, name " latency exceeded, new value %lld ns\n",	\
			__elapsed);						\
	}									\
	__retval;								\
})

static LIST_HEAD(gpd_list);
static DEFINE_MUTEX(gpd_list_lock);

#ifdef CONFIG_PM

struct generic_pm_domain *dev_to_genpd(struct device *dev)
{
	if (IS_ERR_OR_NULL(dev->pm_domain))
		return ERR_PTR(-EINVAL);

	return pd_to_genpd(dev->pm_domain);
}

static int genpd_stop_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_TIMED_CALLBACK(genpd, int, stop, dev,
					stop_latency_ns, "stop");
}

static int genpd_start_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_TIMED_CALLBACK(genpd, int, start, dev,
					start_latency_ns, "start");
}

static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_TIMED_CALLBACK(genpd, int, save_state, dev,
					save_state_latency_ns, "state save");
}

static int genpd_restore_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_TIMED_CALLBACK(genpd, int, restore_state, dev,
					restore_state_latency_ns,
					"state restore");
}

static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
{
	bool ret = false;

	if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
		ret = !!atomic_dec_and_test(&genpd->sd_count);

	return ret;
}

static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
{
	atomic_inc(&genpd->sd_count);
	smp_mb__after_atomic_inc();
}

static void genpd_acquire_lock(struct generic_pm_domain *genpd)
{
	DEFINE_WAIT(wait);

	mutex_lock(&genpd->lock);
	/*
	 * Wait for the domain to transition into either the active,
	 * or the power off state.
	 */
	for (;;) {
		prepare_to_wait(&genpd->status_wait_queue, &wait,
				TASK_UNINTERRUPTIBLE);
		if (genpd->status == GPD_STATE_ACTIVE
		    || genpd->status == GPD_STATE_POWER_OFF)
			break;
		mutex_unlock(&genpd->lock);

		schedule();

		mutex_lock(&genpd->lock);
	}
	finish_wait(&genpd->status_wait_queue, &wait);
}

static void genpd_release_lock(struct generic_pm_domain *genpd)
{
	mutex_unlock(&genpd->lock);
}

static void genpd_set_active(struct generic_pm_domain *genpd)
{
	if (genpd->resume_count == 0)
		genpd->status = GPD_STATE_ACTIVE;
}

/**
 * __pm_genpd_poweron - Restore power to a given PM domain and its masters.
 * @genpd: PM domain to power up.
 *
 * Restore power to @genpd and all of its masters so that it is possible to
 * resume a device belonging to it.
 */
int __pm_genpd_poweron(struct generic_pm_domain *genpd)
	__releases(&genpd->lock) __acquires(&genpd->lock)
{
	struct gpd_link *link;
	DEFINE_WAIT(wait);
	int ret = 0;

	/* If the domain's master is being waited for, we have to wait too. */
	for (;;) {
		prepare_to_wait(&genpd->status_wait_queue, &wait,
				TASK_UNINTERRUPTIBLE);
		if (genpd->status != GPD_STATE_WAIT_MASTER)
			break;
		mutex_unlock(&genpd->lock);

		schedule();

		mutex_lock(&genpd->lock);
	}
	finish_wait(&genpd->status_wait_queue, &wait);

	if (genpd->status == GPD_STATE_ACTIVE
	    || (genpd->prepared_count > 0 && genpd->suspend_power_off))
		return 0;

	if (genpd->status != GPD_STATE_POWER_OFF) {
		genpd_set_active(genpd);
		return 0;
	}

	/*
	 * The list is guaranteed not to change while the loop below is being
	 * executed, unless one of the masters' .power_on() callbacks fiddles
	 * with it.
	 */
	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		genpd_sd_counter_inc(link->master);
		genpd->status = GPD_STATE_WAIT_MASTER;

		mutex_unlock(&genpd->lock);

		ret = pm_genpd_poweron(link->master);

		mutex_lock(&genpd->lock);

		/*
		 * The "wait for parent" status is guaranteed not to change
		 * while the master is powering on.
		 */
		genpd->status = GPD_STATE_POWER_OFF;
		wake_up_all(&genpd->status_wait_queue);
		if (ret) {
			genpd_sd_counter_dec(link->master);
			goto err;
		}
	}

	if (genpd->power_on) {
		ktime_t time_start = ktime_get();
		s64 elapsed_ns;

		ret = genpd->power_on(genpd);
		if (ret)
			goto err;

		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
		if (elapsed_ns > genpd->power_on_latency_ns) {
			genpd->power_on_latency_ns = elapsed_ns;
			if (genpd->name)
				pr_warning("%s: Power-on latency exceeded, "
					"new value %lld ns\n", genpd->name,
					elapsed_ns);
		}
	}

	genpd_set_active(genpd);

	return 0;

 err:
	list_for_each_entry_continue_reverse(link, &genpd->slave_links, slave_node)
		genpd_sd_counter_dec(link->master);

	return ret;
}

/**
 * pm_genpd_poweron - Restore power to a given PM domain and its masters.
 * @genpd: PM domain to power up.
 */
int pm_genpd_poweron(struct generic_pm_domain *genpd)
{
	int ret;

	mutex_lock(&genpd->lock);
	ret = __pm_genpd_poweron(genpd);
	mutex_unlock(&genpd->lock);
	return ret;
}

#endif /* CONFIG_PM */

#ifdef CONFIG_PM_RUNTIME

/**
 * __pm_genpd_save_device - Save the pre-suspend state of a device.
 * @pdd: Domain data of the device to save the state of.
 * @genpd: PM domain the device belongs to.
 */
static int __pm_genpd_save_device(struct pm_domain_data *pdd,
				  struct generic_pm_domain *genpd)
	__releases(&genpd->lock) __acquires(&genpd->lock)
{
	struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
	struct device *dev = pdd->dev;
	int ret = 0;

	if (gpd_data->need_restore)
		return 0;

	mutex_unlock(&genpd->lock);

	genpd_start_dev(genpd, dev);
	ret = genpd_save_dev(genpd, dev);
	genpd_stop_dev(genpd, dev);

	mutex_lock(&genpd->lock);

	if (!ret)
		gpd_data->need_restore = true;

	return ret;
}

/**
 * __pm_genpd_restore_device - Restore the pre-suspend state of a device.
 * @pdd: Domain data of the device to restore the state of.
 * @genpd: PM domain the device belongs to.
 */
static void __pm_genpd_restore_device(struct pm_domain_data *pdd,
				      struct generic_pm_domain *genpd)
	__releases(&genpd->lock) __acquires(&genpd->lock)
{
	struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
	struct device *dev = pdd->dev;

	if (!gpd_data->need_restore)
		return;

	mutex_unlock(&genpd->lock);

	genpd_start_dev(genpd, dev);
	genpd_restore_dev(genpd, dev);
	genpd_stop_dev(genpd, dev);

	mutex_lock(&genpd->lock);

	gpd_data->need_restore = false;
}

/**
 * genpd_abort_poweroff - Check if a PM domain power off should be aborted.
 * @genpd: PM domain to check.
 *
 * Return true if a PM domain's status changed to GPD_STATE_ACTIVE during
 * a "power off" operation, which means that a "power on" has occurred in the
 * meantime, or if its resume_count field is different from zero, which means
 * that one of its devices has been resumed in the meantime.
 */
static bool genpd_abort_poweroff(struct generic_pm_domain *genpd)
{
	return genpd->status == GPD_STATE_WAIT_MASTER
	    || genpd->status == GPD_STATE_ACTIVE || genpd->resume_count > 0;
}

/**
 * genpd_queue_power_off_work - Queue up the execution of pm_genpd_poweroff().
 * @genpd: PM domain to power off.
 *
 * Queue up the execution of pm_genpd_poweroff() unless it's already been done
 * before.
 */
void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
{
	if (!work_pending(&genpd->power_off_work))
		queue_work(pm_wq, &genpd->power_off_work);
}

/**
 * pm_genpd_poweroff - Remove power from a given PM domain.
 * @genpd: PM domain to power down.
 *
 * If all of the @genpd's devices have been suspended and all of its subdomains
 * have been powered down, run the runtime suspend callbacks provided by all of
 * the @genpd's devices' drivers and remove power from @genpd.
 */
static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
	__releases(&genpd->lock) __acquires(&genpd->lock)
{
	struct pm_domain_data *pdd;
	struct gpd_link *link;
	unsigned int not_suspended;
	int ret = 0;

 start:
	/*
	 * Do not try to power off the domain in the following situations:
	 * (1) The domain is already in the "power off" state.
	 * (2) The domain is waiting for its master to power up.
	 * (3) One of the domain's devices is being resumed right now.
	 * (4) System suspend is in progress.
	 */
	if (genpd->status == GPD_STATE_POWER_OFF
	    || genpd->status == GPD_STATE_WAIT_MASTER
	    || genpd->resume_count > 0 || genpd->prepared_count > 0)
		return 0;

	if (atomic_read(&genpd->sd_count) > 0)
		return -EBUSY;

	not_suspended = 0;
	list_for_each_entry(pdd, &genpd->dev_list, list_node)
		if (pdd->dev->driver && (!pm_runtime_suspended(pdd->dev)
		    || pdd->dev->power.irq_safe))
			not_suspended++;

	if (not_suspended > genpd->in_progress)
		return -EBUSY;

	if (genpd->poweroff_task) {
		/*
		 * Another instance of pm_genpd_poweroff() is executing
		 * callbacks, so tell it to start over and return.
		 */
		genpd->status = GPD_STATE_REPEAT;
		return 0;
	}

	if (genpd->gov && genpd->gov->power_down_ok) {
385 | if (!genpd->gov->power_down_ok(&genpd->domain)) | 385 | if (!genpd->gov->power_down_ok(&genpd->domain)) |
386 | return -EAGAIN; | 386 | return -EAGAIN; |
387 | } | 387 | } |
388 | 388 | ||
389 | genpd->status = GPD_STATE_BUSY; | 389 | genpd->status = GPD_STATE_BUSY; |
390 | genpd->poweroff_task = current; | 390 | genpd->poweroff_task = current; |
391 | 391 | ||
392 | list_for_each_entry_reverse(pdd, &genpd->dev_list, list_node) { | 392 | list_for_each_entry_reverse(pdd, &genpd->dev_list, list_node) { |
393 | ret = atomic_read(&genpd->sd_count) == 0 ? | 393 | ret = atomic_read(&genpd->sd_count) == 0 ? |
394 | __pm_genpd_save_device(pdd, genpd) : -EBUSY; | 394 | __pm_genpd_save_device(pdd, genpd) : -EBUSY; |
395 | 395 | ||
396 | if (genpd_abort_poweroff(genpd)) | 396 | if (genpd_abort_poweroff(genpd)) |
397 | goto out; | 397 | goto out; |
398 | 398 | ||
399 | if (ret) { | 399 | if (ret) { |
400 | genpd_set_active(genpd); | 400 | genpd_set_active(genpd); |
401 | goto out; | 401 | goto out; |
402 | } | 402 | } |
403 | 403 | ||
404 | if (genpd->status == GPD_STATE_REPEAT) { | 404 | if (genpd->status == GPD_STATE_REPEAT) { |
405 | genpd->poweroff_task = NULL; | 405 | genpd->poweroff_task = NULL; |
406 | goto start; | 406 | goto start; |
407 | } | 407 | } |
408 | } | 408 | } |
409 | 409 | ||
410 | if (genpd->power_off) { | 410 | if (genpd->power_off) { |
411 | ktime_t time_start; | 411 | ktime_t time_start; |
412 | s64 elapsed_ns; | 412 | s64 elapsed_ns; |
413 | 413 | ||
414 | if (atomic_read(&genpd->sd_count) > 0) { | 414 | if (atomic_read(&genpd->sd_count) > 0) { |
415 | ret = -EBUSY; | 415 | ret = -EBUSY; |
416 | goto out; | 416 | goto out; |
417 | } | 417 | } |
418 | 418 | ||
419 | time_start = ktime_get(); | 419 | time_start = ktime_get(); |
420 | 420 | ||
421 | /* | 421 | /* |
422 | * If sd_count > 0 at this point, one of the subdomains hasn't | 422 | * If sd_count > 0 at this point, one of the subdomains hasn't |
423 | * managed to call pm_genpd_poweron() for the master yet after | 423 | * managed to call pm_genpd_poweron() for the master yet after |
424 | * incrementing it. In that case pm_genpd_poweron() will wait | 424 | * incrementing it. In that case pm_genpd_poweron() will wait |
425 | * for us to drop the lock, so we can call .power_off() and let | 425 | * for us to drop the lock, so we can call .power_off() and let |
426 | * pm_genpd_poweron() restore power for us (this shouldn't | 426 | * pm_genpd_poweron() restore power for us (this shouldn't |
427 | * happen very often). | 427 | * happen very often). |
428 | */ | 428 | */ |
429 | ret = genpd->power_off(genpd); | 429 | ret = genpd->power_off(genpd); |
430 | if (ret == -EBUSY) { | 430 | if (ret == -EBUSY) { |
431 | genpd_set_active(genpd); | 431 | genpd_set_active(genpd); |
432 | goto out; | 432 | goto out; |
433 | } | 433 | } |
434 | 434 | ||
435 | elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start)); | 435 | elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start)); |
436 | if (elapsed_ns > genpd->power_off_latency_ns) { | 436 | if (elapsed_ns > genpd->power_off_latency_ns) { |
437 | genpd->power_off_latency_ns = elapsed_ns; | 437 | genpd->power_off_latency_ns = elapsed_ns; |
438 | if (genpd->name) | 438 | if (genpd->name) |
439 | pr_warning("%s: Power-off latency exceeded, " | 439 | pr_warning("%s: Power-off latency exceeded, " |
440 | "new value %lld ns\n", genpd->name, | 440 | "new value %lld ns\n", genpd->name, |
441 | elapsed_ns); | 441 | elapsed_ns); |
442 | } | 442 | } |
443 | } | 443 | } |
444 | 444 | ||
445 | genpd->status = GPD_STATE_POWER_OFF; | 445 | genpd->status = GPD_STATE_POWER_OFF; |
446 | genpd->power_off_time = ktime_get(); | 446 | genpd->power_off_time = ktime_get(); |
447 | 447 | ||
448 | /* Update PM QoS information for devices in the domain. */ | 448 | /* Update PM QoS information for devices in the domain. */ |
449 | list_for_each_entry_reverse(pdd, &genpd->dev_list, list_node) { | 449 | list_for_each_entry_reverse(pdd, &genpd->dev_list, list_node) { |
450 | struct gpd_timing_data *td = &to_gpd_data(pdd)->td; | 450 | struct gpd_timing_data *td = &to_gpd_data(pdd)->td; |
451 | 451 | ||
452 | pm_runtime_update_max_time_suspended(pdd->dev, | 452 | pm_runtime_update_max_time_suspended(pdd->dev, |
453 | td->start_latency_ns + | 453 | td->start_latency_ns + |
454 | td->restore_state_latency_ns + | 454 | td->restore_state_latency_ns + |
455 | genpd->power_on_latency_ns); | 455 | genpd->power_on_latency_ns); |
456 | } | 456 | } |
457 | 457 | ||
458 | list_for_each_entry(link, &genpd->slave_links, slave_node) { | 458 | list_for_each_entry(link, &genpd->slave_links, slave_node) { |
459 | genpd_sd_counter_dec(link->master); | 459 | genpd_sd_counter_dec(link->master); |
460 | genpd_queue_power_off_work(link->master); | 460 | genpd_queue_power_off_work(link->master); |
461 | } | 461 | } |
462 | 462 | ||
463 | out: | 463 | out: |
464 | genpd->poweroff_task = NULL; | 464 | genpd->poweroff_task = NULL; |
465 | wake_up_all(&genpd->status_wait_queue); | 465 | wake_up_all(&genpd->status_wait_queue); |
466 | return ret; | 466 | return ret; |
467 | } | 467 | } |
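The -EBUSY check after .power_off() above lets a platform callback veto the power-off at the last moment, in which case the domain is simply marked active again. A minimal sketch of such a callback; the wakeup condition is a hypothetical stand-in for real platform state:

	static bool foo_wakeup_armed;	/* hypothetical platform state */

	static int foo_pd_power_off(struct generic_pm_domain *genpd)
	{
		if (foo_wakeup_armed)
			return -EBUSY;	/* pm_genpd_poweroff() re-marks the domain active */

		/* A real callback would write the power controller here. */
		return 0;
	}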
468 | 468 | ||
469 | /** | 469 | /** |
470 | * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0. | 470 | * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0. |
471 | * @work: Work structure used for scheduling the execution of this function. | 471 | * @work: Work structure used for scheduling the execution of this function. |
472 | */ | 472 | */ |
473 | static void genpd_power_off_work_fn(struct work_struct *work) | 473 | static void genpd_power_off_work_fn(struct work_struct *work) |
474 | { | 474 | { |
475 | struct generic_pm_domain *genpd; | 475 | struct generic_pm_domain *genpd; |
476 | 476 | ||
477 | genpd = container_of(work, struct generic_pm_domain, power_off_work); | 477 | genpd = container_of(work, struct generic_pm_domain, power_off_work); |
478 | 478 | ||
479 | genpd_acquire_lock(genpd); | 479 | genpd_acquire_lock(genpd); |
480 | pm_genpd_poweroff(genpd); | 480 | pm_genpd_poweroff(genpd); |
481 | genpd_release_lock(genpd); | 481 | genpd_release_lock(genpd); |
482 | } | 482 | } |
483 | 483 | ||
484 | /** | 484 | /** |
485 | * pm_genpd_runtime_suspend - Suspend a device belonging to I/O PM domain. | 485 | * pm_genpd_runtime_suspend - Suspend a device belonging to I/O PM domain. |
486 | * @dev: Device to suspend. | 486 | * @dev: Device to suspend. |
487 | * | 487 | * |
488 | * Carry out a runtime suspend of a device under the assumption that its | 488 | * Carry out a runtime suspend of a device under the assumption that its |
489 | * pm_domain field points to the domain member of an object of type | 489 | * pm_domain field points to the domain member of an object of type |
490 | * struct generic_pm_domain representing a PM domain consisting of I/O devices. | 490 | * struct generic_pm_domain representing a PM domain consisting of I/O devices. |
491 | */ | 491 | */ |
492 | static int pm_genpd_runtime_suspend(struct device *dev) | 492 | static int pm_genpd_runtime_suspend(struct device *dev) |
493 | { | 493 | { |
494 | struct generic_pm_domain *genpd; | 494 | struct generic_pm_domain *genpd; |
495 | bool (*stop_ok)(struct device *__dev); | 495 | bool (*stop_ok)(struct device *__dev); |
496 | int ret; | 496 | int ret; |
497 | 497 | ||
498 | dev_dbg(dev, "%s()\n", __func__); | 498 | dev_dbg(dev, "%s()\n", __func__); |
499 | 499 | ||
500 | genpd = dev_to_genpd(dev); | 500 | genpd = dev_to_genpd(dev); |
501 | if (IS_ERR(genpd)) | 501 | if (IS_ERR(genpd)) |
502 | return -EINVAL; | 502 | return -EINVAL; |
503 | 503 | ||
504 | might_sleep_if(!genpd->dev_irq_safe); | 504 | might_sleep_if(!genpd->dev_irq_safe); |
505 | 505 | ||
506 | stop_ok = genpd->gov ? genpd->gov->stop_ok : NULL; | 506 | stop_ok = genpd->gov ? genpd->gov->stop_ok : NULL; |
507 | if (stop_ok && !stop_ok(dev)) | 507 | if (stop_ok && !stop_ok(dev)) |
508 | return -EBUSY; | 508 | return -EBUSY; |
509 | 509 | ||
510 | ret = genpd_stop_dev(genpd, dev); | 510 | ret = genpd_stop_dev(genpd, dev); |
511 | if (ret) | 511 | if (ret) |
512 | return ret; | 512 | return ret; |
513 | 513 | ||
514 | pm_runtime_update_max_time_suspended(dev, | 514 | pm_runtime_update_max_time_suspended(dev, |
515 | dev_gpd_data(dev)->td.start_latency_ns); | 515 | dev_gpd_data(dev)->td.start_latency_ns); |
516 | 516 | ||
517 | /* | 517 | /* |
518 | * If power.irq_safe is set, this routine will be run with interrupts | 518 | * If power.irq_safe is set, this routine will be run with interrupts |
519 | * off, so it can't use mutexes. | 519 | * off, so it can't use mutexes. |
520 | */ | 520 | */ |
521 | if (dev->power.irq_safe) | 521 | if (dev->power.irq_safe) |
522 | return 0; | 522 | return 0; |
523 | 523 | ||
524 | mutex_lock(&genpd->lock); | 524 | mutex_lock(&genpd->lock); |
525 | genpd->in_progress++; | 525 | genpd->in_progress++; |
526 | pm_genpd_poweroff(genpd); | 526 | pm_genpd_poweroff(genpd); |
527 | genpd->in_progress--; | 527 | genpd->in_progress--; |
528 | mutex_unlock(&genpd->lock); | 528 | mutex_unlock(&genpd->lock); |
529 | 529 | ||
530 | return 0; | 530 | return 0; |
531 | } | 531 | } |
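The stop_ok hook consulted above is supplied by the domain's governor (struct dev_power_governor). A hedged sketch of a trivial governor that always permits stopping; a real one, like the QoS-based governor elsewhere in drivers/base/power, would weigh the device's stop/start latencies against its PM QoS constraints:

	static bool foo_stop_ok(struct device *dev)
	{
		/* Returning false makes pm_genpd_runtime_suspend() bail out
		 * with -EBUSY and leave the device running. */
		return true;
	}

	static struct dev_power_governor foo_gov = {
		.stop_ok = foo_stop_ok,
	};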
532 | 532 | ||
533 | /** | 533 | /** |
534 | * pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain. | 534 | * pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain. |
535 | * @dev: Device to resume. | 535 | * @dev: Device to resume. |
536 | * | 536 | * |
537 | * Carry out a runtime resume of a device under the assumption that its | 537 | * Carry out a runtime resume of a device under the assumption that its |
538 | * pm_domain field points to the domain member of an object of type | 538 | * pm_domain field points to the domain member of an object of type |
539 | * struct generic_pm_domain representing a PM domain consisting of I/O devices. | 539 | * struct generic_pm_domain representing a PM domain consisting of I/O devices. |
540 | */ | 540 | */ |
541 | static int pm_genpd_runtime_resume(struct device *dev) | 541 | static int pm_genpd_runtime_resume(struct device *dev) |
542 | { | 542 | { |
543 | struct generic_pm_domain *genpd; | 543 | struct generic_pm_domain *genpd; |
544 | DEFINE_WAIT(wait); | 544 | DEFINE_WAIT(wait); |
545 | int ret; | 545 | int ret; |
546 | 546 | ||
547 | dev_dbg(dev, "%s()\n", __func__); | 547 | dev_dbg(dev, "%s()\n", __func__); |
548 | 548 | ||
549 | genpd = dev_to_genpd(dev); | 549 | genpd = dev_to_genpd(dev); |
550 | if (IS_ERR(genpd)) | 550 | if (IS_ERR(genpd)) |
551 | return -EINVAL; | 551 | return -EINVAL; |
552 | 552 | ||
553 | might_sleep_if(!genpd->dev_irq_safe); | 553 | might_sleep_if(!genpd->dev_irq_safe); |
554 | 554 | ||
555 | /* If power.irq_safe, the PM domain is never powered off. */ | 555 | /* If power.irq_safe, the PM domain is never powered off. */ |
556 | if (dev->power.irq_safe) | 556 | if (dev->power.irq_safe) |
557 | goto out; | 557 | goto out; |
558 | 558 | ||
559 | mutex_lock(&genpd->lock); | 559 | mutex_lock(&genpd->lock); |
560 | ret = __pm_genpd_poweron(genpd); | 560 | ret = __pm_genpd_poweron(genpd); |
561 | if (ret) { | 561 | if (ret) { |
562 | mutex_unlock(&genpd->lock); | 562 | mutex_unlock(&genpd->lock); |
563 | return ret; | 563 | return ret; |
564 | } | 564 | } |
565 | genpd->status = GPD_STATE_BUSY; | 565 | genpd->status = GPD_STATE_BUSY; |
566 | genpd->resume_count++; | 566 | genpd->resume_count++; |
567 | for (;;) { | 567 | for (;;) { |
568 | prepare_to_wait(&genpd->status_wait_queue, &wait, | 568 | prepare_to_wait(&genpd->status_wait_queue, &wait, |
569 | TASK_UNINTERRUPTIBLE); | 569 | TASK_UNINTERRUPTIBLE); |
570 | /* | 570 | /* |
571 | * If current is the powering off task, we have been called | 571 | * If current is the powering off task, we have been called |
572 | * reentrantly from one of the device callbacks, so we should | 572 | * reentrantly from one of the device callbacks, so we should |
573 | * not wait. | 573 | * not wait. |
574 | */ | 574 | */ |
575 | if (!genpd->poweroff_task || genpd->poweroff_task == current) | 575 | if (!genpd->poweroff_task || genpd->poweroff_task == current) |
576 | break; | 576 | break; |
577 | mutex_unlock(&genpd->lock); | 577 | mutex_unlock(&genpd->lock); |
578 | 578 | ||
579 | schedule(); | 579 | schedule(); |
580 | 580 | ||
581 | mutex_lock(&genpd->lock); | 581 | mutex_lock(&genpd->lock); |
582 | } | 582 | } |
583 | finish_wait(&genpd->status_wait_queue, &wait); | 583 | finish_wait(&genpd->status_wait_queue, &wait); |
584 | __pm_genpd_restore_device(dev->power.subsys_data->domain_data, genpd); | 584 | __pm_genpd_restore_device(dev->power.subsys_data->domain_data, genpd); |
585 | genpd->resume_count--; | 585 | genpd->resume_count--; |
586 | genpd_set_active(genpd); | 586 | genpd_set_active(genpd); |
587 | wake_up_all(&genpd->status_wait_queue); | 587 | wake_up_all(&genpd->status_wait_queue); |
588 | mutex_unlock(&genpd->lock); | 588 | mutex_unlock(&genpd->lock); |
589 | 589 | ||
590 | out: | 590 | out: |
591 | genpd_start_dev(genpd, dev); | 591 | genpd_start_dev(genpd, dev); |
592 | 592 | ||
593 | return 0; | 593 | return 0; |
594 | } | 594 | } |
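Note how both runtime callbacks special-case power.irq_safe devices: such devices are only stopped and started, the mutex is never taken for them, and their domain is never powered off at run time. A driver opts in with the standard runtime PM helper; a hedged probe-time sketch:

	static int foo_probe(struct platform_device *pdev)
	{
		/* Runtime PM callbacks for this device may run with interrupts
		 * off, so genpd skips its mutex (and domain power-off) for it. */
		pm_runtime_irq_safe(&pdev->dev);
		pm_runtime_enable(&pdev->dev);
		return 0;
	}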
595 | 595 | ||
596 | /** | 596 | /** |
597 | * pm_genpd_poweroff_unused - Power off all PM domains with no devices in use. | 597 | * pm_genpd_poweroff_unused - Power off all PM domains with no devices in use. |
598 | */ | 598 | */ |
599 | void pm_genpd_poweroff_unused(void) | 599 | void pm_genpd_poweroff_unused(void) |
600 | { | 600 | { |
601 | struct generic_pm_domain *genpd; | 601 | struct generic_pm_domain *genpd; |
602 | 602 | ||
603 | mutex_lock(&gpd_list_lock); | 603 | mutex_lock(&gpd_list_lock); |
604 | 604 | ||
605 | list_for_each_entry(genpd, &gpd_list, gpd_list_node) | 605 | list_for_each_entry(genpd, &gpd_list, gpd_list_node) |
606 | genpd_queue_power_off_work(genpd); | 606 | genpd_queue_power_off_work(genpd); |
607 | 607 | ||
608 | mutex_unlock(&gpd_list_lock); | 608 | mutex_unlock(&gpd_list_lock); |
609 | } | 609 | } |
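A platform normally calls this once, late in boot, after all devices have had a chance to probe; a hedged sketch of the conventional wiring:

	static int __init foo_pd_poweroff_unused(void)
	{
		pm_genpd_poweroff_unused();
		return 0;
	}
	late_initcall(foo_pd_poweroff_unused);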
610 | 610 | ||
611 | #else | 611 | #else |
612 | 612 | ||
613 | static inline void genpd_power_off_work_fn(struct work_struct *work) {} | 613 | static inline void genpd_power_off_work_fn(struct work_struct *work) {} |
614 | 614 | ||
615 | #define pm_genpd_runtime_suspend NULL | 615 | #define pm_genpd_runtime_suspend NULL |
616 | #define pm_genpd_runtime_resume NULL | 616 | #define pm_genpd_runtime_resume NULL |
617 | 617 | ||
618 | #endif /* CONFIG_PM_RUNTIME */ | 618 | #endif /* CONFIG_PM_RUNTIME */ |
619 | 619 | ||
620 | #ifdef CONFIG_PM_SLEEP | 620 | #ifdef CONFIG_PM_SLEEP |
621 | 621 | ||
622 | static bool genpd_dev_active_wakeup(struct generic_pm_domain *genpd, | 622 | static bool genpd_dev_active_wakeup(struct generic_pm_domain *genpd, |
623 | struct device *dev) | 623 | struct device *dev) |
624 | { | 624 | { |
625 | return GENPD_DEV_CALLBACK(genpd, bool, active_wakeup, dev); | 625 | return GENPD_DEV_CALLBACK(genpd, bool, active_wakeup, dev); |
626 | } | 626 | } |
627 | 627 | ||
628 | static int genpd_suspend_dev(struct generic_pm_domain *genpd, struct device *dev) | 628 | static int genpd_suspend_dev(struct generic_pm_domain *genpd, struct device *dev) |
629 | { | 629 | { |
630 | return GENPD_DEV_CALLBACK(genpd, int, suspend, dev); | 630 | return GENPD_DEV_CALLBACK(genpd, int, suspend, dev); |
631 | } | 631 | } |
632 | 632 | ||
633 | static int genpd_suspend_late(struct generic_pm_domain *genpd, struct device *dev) | 633 | static int genpd_suspend_late(struct generic_pm_domain *genpd, struct device *dev) |
634 | { | 634 | { |
635 | return GENPD_DEV_CALLBACK(genpd, int, suspend_late, dev); | 635 | return GENPD_DEV_CALLBACK(genpd, int, suspend_late, dev); |
636 | } | 636 | } |
637 | 637 | ||
638 | static int genpd_resume_early(struct generic_pm_domain *genpd, struct device *dev) | 638 | static int genpd_resume_early(struct generic_pm_domain *genpd, struct device *dev) |
639 | { | 639 | { |
640 | return GENPD_DEV_CALLBACK(genpd, int, resume_early, dev); | 640 | return GENPD_DEV_CALLBACK(genpd, int, resume_early, dev); |
641 | } | 641 | } |
642 | 642 | ||
643 | static int genpd_resume_dev(struct generic_pm_domain *genpd, struct device *dev) | 643 | static int genpd_resume_dev(struct generic_pm_domain *genpd, struct device *dev) |
644 | { | 644 | { |
645 | return GENPD_DEV_CALLBACK(genpd, int, resume, dev); | 645 | return GENPD_DEV_CALLBACK(genpd, int, resume, dev); |
646 | } | 646 | } |
647 | 647 | ||
648 | static int genpd_freeze_dev(struct generic_pm_domain *genpd, struct device *dev) | 648 | static int genpd_freeze_dev(struct generic_pm_domain *genpd, struct device *dev) |
649 | { | 649 | { |
650 | return GENPD_DEV_CALLBACK(genpd, int, freeze, dev); | 650 | return GENPD_DEV_CALLBACK(genpd, int, freeze, dev); |
651 | } | 651 | } |
652 | 652 | ||
653 | static int genpd_freeze_late(struct generic_pm_domain *genpd, struct device *dev) | 653 | static int genpd_freeze_late(struct generic_pm_domain *genpd, struct device *dev) |
654 | { | 654 | { |
655 | return GENPD_DEV_CALLBACK(genpd, int, freeze_late, dev); | 655 | return GENPD_DEV_CALLBACK(genpd, int, freeze_late, dev); |
656 | } | 656 | } |
657 | 657 | ||
658 | static int genpd_thaw_early(struct generic_pm_domain *genpd, struct device *dev) | 658 | static int genpd_thaw_early(struct generic_pm_domain *genpd, struct device *dev) |
659 | { | 659 | { |
660 | return GENPD_DEV_CALLBACK(genpd, int, thaw_early, dev); | 660 | return GENPD_DEV_CALLBACK(genpd, int, thaw_early, dev); |
661 | } | 661 | } |
662 | 662 | ||
663 | static int genpd_thaw_dev(struct generic_pm_domain *genpd, struct device *dev) | 663 | static int genpd_thaw_dev(struct generic_pm_domain *genpd, struct device *dev) |
664 | { | 664 | { |
665 | return GENPD_DEV_CALLBACK(genpd, int, thaw, dev); | 665 | return GENPD_DEV_CALLBACK(genpd, int, thaw, dev); |
666 | } | 666 | } |
667 | 667 | ||
668 | /** | 668 | /** |
669 | * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its masters. | 669 | * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its masters. |
670 | * @genpd: PM domain to power off, if possible. | 670 | * @genpd: PM domain to power off, if possible. |
671 | * | 671 | * |
672 | * Check if the given PM domain can be powered off (during system suspend or | 672 | * Check if the given PM domain can be powered off (during system suspend or |
673 | * hibernation) and do that if so. Also, in that case propagate to its masters. | 673 | * hibernation) and do that if so. Also, in that case propagate to its masters. |
674 | * | 674 | * |
675 | * This function is only called in "noirq" stages of system power transitions, | 675 | * This function is only called in "noirq" stages of system power transitions, |
676 | * so it need not acquire locks (all of the "noirq" callbacks are executed | 676 | * so it need not acquire locks (all of the "noirq" callbacks are executed |
677 | * sequentially, so it is guaranteed that it will never run twice in parallel). | 677 | * sequentially, so it is guaranteed that it will never run twice in parallel). |
678 | */ | 678 | */ |
679 | static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd) | 679 | static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd) |
680 | { | 680 | { |
681 | struct gpd_link *link; | 681 | struct gpd_link *link; |
682 | 682 | ||
683 | if (genpd->status == GPD_STATE_POWER_OFF) | 683 | if (genpd->status == GPD_STATE_POWER_OFF) |
684 | return; | 684 | return; |
685 | 685 | ||
686 | if (genpd->suspended_count != genpd->device_count | 686 | if (genpd->suspended_count != genpd->device_count |
687 | || atomic_read(&genpd->sd_count) > 0) | 687 | || atomic_read(&genpd->sd_count) > 0) |
688 | return; | 688 | return; |
689 | 689 | ||
690 | if (genpd->power_off) | 690 | if (genpd->power_off) |
691 | genpd->power_off(genpd); | 691 | genpd->power_off(genpd); |
692 | 692 | ||
693 | genpd->status = GPD_STATE_POWER_OFF; | 693 | genpd->status = GPD_STATE_POWER_OFF; |
694 | 694 | ||
695 | list_for_each_entry(link, &genpd->slave_links, slave_node) { | 695 | list_for_each_entry(link, &genpd->slave_links, slave_node) { |
696 | genpd_sd_counter_dec(link->master); | 696 | genpd_sd_counter_dec(link->master); |
697 | pm_genpd_sync_poweroff(link->master); | 697 | pm_genpd_sync_poweroff(link->master); |
698 | } | 698 | } |
699 | } | 699 | } |
700 | 700 | ||
701 | /** | 701 | /** |
702 | * resume_needed - Check whether to resume a device before system suspend. | 702 | * resume_needed - Check whether to resume a device before system suspend. |
703 | * @dev: Device to check. | 703 | * @dev: Device to check. |
704 | * @genpd: PM domain the device belongs to. | 704 | * @genpd: PM domain the device belongs to. |
705 | * | 705 | * |
706 | * There are two cases in which a device that can wake up the system from sleep | 706 | * There are two cases in which a device that can wake up the system from sleep |
707 | * states should be resumed by pm_genpd_prepare(): (1) if the device is enabled | 707 | * states should be resumed by pm_genpd_prepare(): (1) if the device is enabled |
708 | * to wake up the system and it has to remain active for this purpose while the | 708 | * to wake up the system and it has to remain active for this purpose while the |
709 | * system is in the sleep state and (2) if the device is not enabled to wake up | 709 | * system is in the sleep state and (2) if the device is not enabled to wake up |
710 | * the system from sleep states and it generally doesn't generate wakeup signals | 710 | * the system from sleep states and it generally doesn't generate wakeup signals |
711 | * by itself (those signals are generated on its behalf by other parts of the | 711 | * by itself (those signals are generated on its behalf by other parts of the |
712 | * system). In the latter case it may be necessary to reconfigure the device's | 712 | * system). In the latter case it may be necessary to reconfigure the device's |
713 | * wakeup settings during system suspend, because it may have been set up to | 713 | * wakeup settings during system suspend, because it may have been set up to |
714 | * signal remote wakeup from the system's working state as needed by runtime PM. | 714 | * signal remote wakeup from the system's working state as needed by runtime PM. |
715 | * Return 'true' in either of the above cases. | 715 | * Return 'true' in either of the above cases. |
716 | */ | 716 | */ |
717 | static bool resume_needed(struct device *dev, struct generic_pm_domain *genpd) | 717 | static bool resume_needed(struct device *dev, struct generic_pm_domain *genpd) |
718 | { | 718 | { |
719 | bool active_wakeup; | 719 | bool active_wakeup; |
720 | 720 | ||
721 | if (!device_can_wakeup(dev)) | 721 | if (!device_can_wakeup(dev)) |
722 | return false; | 722 | return false; |
723 | 723 | ||
724 | active_wakeup = genpd_dev_active_wakeup(genpd, dev); | 724 | active_wakeup = genpd_dev_active_wakeup(genpd, dev); |
725 | return device_may_wakeup(dev) ? active_wakeup : !active_wakeup; | 725 | return device_may_wakeup(dev) ? active_wakeup : !active_wakeup; |
726 | } | 726 | } |
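Spelling the two documented cases out against the return expression above (derived directly from resume_needed()):

	device_can_wakeup  device_may_wakeup  active_wakeup  resume?
	false              -                  -              no
	true               true               true           yes (case 1)
	true               true               false          no
	true               false              true           no
	true               false              false          yes (case 2)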
727 | 727 | ||
728 | /** | 728 | /** |
729 | * pm_genpd_prepare - Start power transition of a device in a PM domain. | 729 | * pm_genpd_prepare - Start power transition of a device in a PM domain. |
730 | * @dev: Device to start the transition of. | 730 | * @dev: Device to start the transition of. |
731 | * | 731 | * |
732 | * Start a power transition of a device (during a system-wide power transition) | 732 | * Start a power transition of a device (during a system-wide power transition) |
733 | * under the assumption that its pm_domain field points to the domain member of | 733 | * under the assumption that its pm_domain field points to the domain member of |
734 | * an object of type struct generic_pm_domain representing a PM domain | 734 | * an object of type struct generic_pm_domain representing a PM domain |
735 | * consisting of I/O devices. | 735 | * consisting of I/O devices. |
736 | */ | 736 | */ |
737 | static int pm_genpd_prepare(struct device *dev) | 737 | static int pm_genpd_prepare(struct device *dev) |
738 | { | 738 | { |
739 | struct generic_pm_domain *genpd; | 739 | struct generic_pm_domain *genpd; |
740 | int ret; | 740 | int ret; |
741 | 741 | ||
742 | dev_dbg(dev, "%s()\n", __func__); | 742 | dev_dbg(dev, "%s()\n", __func__); |
743 | 743 | ||
744 | genpd = dev_to_genpd(dev); | 744 | genpd = dev_to_genpd(dev); |
745 | if (IS_ERR(genpd)) | 745 | if (IS_ERR(genpd)) |
746 | return -EINVAL; | 746 | return -EINVAL; |
747 | 747 | ||
748 | /* | 748 | /* |
749 | * If a wakeup request is pending for the device, it should be woken up | 749 | * If a wakeup request is pending for the device, it should be woken up |
750 | * at this point and a system wakeup event should be reported if it's | 750 | * at this point and a system wakeup event should be reported if it's |
751 | * set up to wake up the system from sleep states. | 751 | * set up to wake up the system from sleep states. |
752 | */ | 752 | */ |
753 | pm_runtime_get_noresume(dev); | 753 | pm_runtime_get_noresume(dev); |
754 | if (pm_runtime_barrier(dev) && device_may_wakeup(dev)) | 754 | if (pm_runtime_barrier(dev) && device_may_wakeup(dev)) |
755 | pm_wakeup_event(dev, 0); | 755 | pm_wakeup_event(dev, 0); |
756 | 756 | ||
757 | if (pm_wakeup_pending()) { | 757 | if (pm_wakeup_pending()) { |
758 | pm_runtime_put_sync(dev); | 758 | pm_runtime_put_sync(dev); |
759 | return -EBUSY; | 759 | return -EBUSY; |
760 | } | 760 | } |
761 | 761 | ||
762 | if (resume_needed(dev, genpd)) | 762 | if (resume_needed(dev, genpd)) |
763 | pm_runtime_resume(dev); | 763 | pm_runtime_resume(dev); |
764 | 764 | ||
765 | genpd_acquire_lock(genpd); | 765 | genpd_acquire_lock(genpd); |
766 | 766 | ||
767 | if (genpd->prepared_count++ == 0) | 767 | if (genpd->prepared_count++ == 0) |
768 | genpd->suspend_power_off = genpd->status == GPD_STATE_POWER_OFF; | 768 | genpd->suspend_power_off = genpd->status == GPD_STATE_POWER_OFF; |
769 | 769 | ||
770 | genpd_release_lock(genpd); | 770 | genpd_release_lock(genpd); |
771 | 771 | ||
772 | if (genpd->suspend_power_off) { | 772 | if (genpd->suspend_power_off) { |
773 | pm_runtime_put_noidle(dev); | 773 | pm_runtime_put_noidle(dev); |
774 | return 0; | 774 | return 0; |
775 | } | 775 | } |
776 | 776 | ||
777 | /* | 777 | /* |
778 | * The PM domain must be in the GPD_STATE_ACTIVE state at this point, | 778 | * The PM domain must be in the GPD_STATE_ACTIVE state at this point, |
779 | * so pm_genpd_poweron() will return immediately, but if the device | 779 | * so pm_genpd_poweron() will return immediately, but if the device |
780 | * is suspended (e.g. it's been stopped by genpd_stop_dev()), we need | 780 | * is suspended (e.g. it's been stopped by genpd_stop_dev()), we need |
781 | * to make it operational. | 781 | * to make it operational. |
782 | */ | 782 | */ |
783 | pm_runtime_resume(dev); | 783 | pm_runtime_resume(dev); |
784 | __pm_runtime_disable(dev, false); | 784 | __pm_runtime_disable(dev, false); |
785 | 785 | ||
786 | ret = pm_generic_prepare(dev); | 786 | ret = pm_generic_prepare(dev); |
787 | if (ret) { | 787 | if (ret) { |
788 | mutex_lock(&genpd->lock); | 788 | mutex_lock(&genpd->lock); |
789 | 789 | ||
790 | if (--genpd->prepared_count == 0) | 790 | if (--genpd->prepared_count == 0) |
791 | genpd->suspend_power_off = false; | 791 | genpd->suspend_power_off = false; |
792 | 792 | ||
793 | mutex_unlock(&genpd->lock); | 793 | mutex_unlock(&genpd->lock); |
794 | pm_runtime_enable(dev); | 794 | pm_runtime_enable(dev); |
795 | } | 795 | } |
796 | 796 | ||
797 | pm_runtime_put_sync(dev); | 797 | pm_runtime_put_sync(dev); |
798 | return ret; | 798 | return ret; |
799 | } | 799 | } |
800 | 800 | ||
801 | /** | 801 | /** |
802 | * pm_genpd_suspend - Suspend a device belonging to an I/O PM domain. | 802 | * pm_genpd_suspend - Suspend a device belonging to an I/O PM domain. |
803 | * @dev: Device to suspend. | 803 | * @dev: Device to suspend. |
804 | * | 804 | * |
805 | * Suspend a device under the assumption that its pm_domain field points to the | 805 | * Suspend a device under the assumption that its pm_domain field points to the |
806 | * domain member of an object of type struct generic_pm_domain representing | 806 | * domain member of an object of type struct generic_pm_domain representing |
807 | * a PM domain consisting of I/O devices. | 807 | * a PM domain consisting of I/O devices. |
808 | */ | 808 | */ |
809 | static int pm_genpd_suspend(struct device *dev) | 809 | static int pm_genpd_suspend(struct device *dev) |
810 | { | 810 | { |
811 | struct generic_pm_domain *genpd; | 811 | struct generic_pm_domain *genpd; |
812 | 812 | ||
813 | dev_dbg(dev, "%s()\n", __func__); | 813 | dev_dbg(dev, "%s()\n", __func__); |
814 | 814 | ||
815 | genpd = dev_to_genpd(dev); | 815 | genpd = dev_to_genpd(dev); |
816 | if (IS_ERR(genpd)) | 816 | if (IS_ERR(genpd)) |
817 | return -EINVAL; | 817 | return -EINVAL; |
818 | 818 | ||
819 | return genpd->suspend_power_off ? 0 : genpd_suspend_dev(genpd, dev); | 819 | return genpd->suspend_power_off ? 0 : genpd_suspend_dev(genpd, dev); |
820 | } | 820 | } |
821 | 821 | ||
822 | /** | 822 | /** |
823 | * pm_genpd_suspend_noirq - Late suspend of a device from an I/O PM domain. | 823 | * pm_genpd_suspend_noirq - Late suspend of a device from an I/O PM domain. |
824 | * @dev: Device to suspend. | 824 | * @dev: Device to suspend. |
825 | * | 825 | * |
826 | * Carry out a late suspend of a device under the assumption that its | 826 | * Carry out a late suspend of a device under the assumption that its |
827 | * pm_domain field points to the domain member of an object of type | 827 | * pm_domain field points to the domain member of an object of type |
828 | * struct generic_pm_domain representing a PM domain consisting of I/O devices. | 828 | * struct generic_pm_domain representing a PM domain consisting of I/O devices. |
829 | */ | 829 | */ |
830 | static int pm_genpd_suspend_noirq(struct device *dev) | 830 | static int pm_genpd_suspend_noirq(struct device *dev) |
831 | { | 831 | { |
832 | struct generic_pm_domain *genpd; | 832 | struct generic_pm_domain *genpd; |
833 | int ret; | 833 | int ret; |
834 | 834 | ||
835 | dev_dbg(dev, "%s()\n", __func__); | 835 | dev_dbg(dev, "%s()\n", __func__); |
836 | 836 | ||
837 | genpd = dev_to_genpd(dev); | 837 | genpd = dev_to_genpd(dev); |
838 | if (IS_ERR(genpd)) | 838 | if (IS_ERR(genpd)) |
839 | return -EINVAL; | 839 | return -EINVAL; |
840 | 840 | ||
841 | if (genpd->suspend_power_off) | 841 | if (genpd->suspend_power_off) |
842 | return 0; | 842 | return 0; |
843 | 843 | ||
844 | ret = genpd_suspend_late(genpd, dev); | 844 | ret = genpd_suspend_late(genpd, dev); |
845 | if (ret) | 845 | if (ret) |
846 | return ret; | 846 | return ret; |
847 | 847 | ||
848 | if (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)) | 848 | if (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)) |
849 | return 0; | 849 | return 0; |
850 | 850 | ||
851 | genpd_stop_dev(genpd, dev); | 851 | genpd_stop_dev(genpd, dev); |
852 | 852 | ||
853 | /* | 853 | /* |
854 | * Since all of the "noirq" callbacks are executed sequentially, it is | 854 | * Since all of the "noirq" callbacks are executed sequentially, it is |
855 | * guaranteed that this function will never run twice in parallel for | 855 | * guaranteed that this function will never run twice in parallel for |
856 | * the same PM domain, so it is not necessary to use locking here. | 856 | * the same PM domain, so it is not necessary to use locking here. |
857 | */ | 857 | */ |
858 | genpd->suspended_count++; | 858 | genpd->suspended_count++; |
859 | pm_genpd_sync_poweroff(genpd); | 859 | pm_genpd_sync_poweroff(genpd); |
860 | 860 | ||
861 | return 0; | 861 | return 0; |
862 | } | 862 | } |
863 | 863 | ||
864 | /** | 864 | /** |
865 | * pm_genpd_resume_noirq - Early resume of a device from an I/O power domain. | 865 | * pm_genpd_resume_noirq - Early resume of a device from an I/O power domain. |
866 | * @dev: Device to resume. | 866 | * @dev: Device to resume. |
867 | * | 867 | * |
868 | * Carry out an early resume of a device under the assumption that its | 868 | * Carry out an early resume of a device under the assumption that its |
869 | * pm_domain field points to the domain member of an object of type | 869 | * pm_domain field points to the domain member of an object of type |
870 | * struct generic_pm_domain representing a power domain consisting of I/O | 870 | * struct generic_pm_domain representing a power domain consisting of I/O |
871 | * devices. | 871 | * devices. |
872 | */ | 872 | */ |
873 | static int pm_genpd_resume_noirq(struct device *dev) | 873 | static int pm_genpd_resume_noirq(struct device *dev) |
874 | { | 874 | { |
875 | struct generic_pm_domain *genpd; | 875 | struct generic_pm_domain *genpd; |
876 | 876 | ||
877 | dev_dbg(dev, "%s()\n", __func__); | 877 | dev_dbg(dev, "%s()\n", __func__); |
878 | 878 | ||
879 | genpd = dev_to_genpd(dev); | 879 | genpd = dev_to_genpd(dev); |
880 | if (IS_ERR(genpd)) | 880 | if (IS_ERR(genpd)) |
881 | return -EINVAL; | 881 | return -EINVAL; |
882 | 882 | ||
883 | if (genpd->suspend_power_off) | 883 | if (genpd->suspend_power_off) |
884 | return 0; | 884 | return 0; |
885 | 885 | ||
886 | /* | 886 | /* |
887 | * Since all of the "noirq" callbacks are executed sequentially, it is | 887 | * Since all of the "noirq" callbacks are executed sequentially, it is |
888 | * guaranteed that this function will never run twice in parallel for | 888 | * guaranteed that this function will never run twice in parallel for |
889 | * the same PM domain, so it is not necessary to use locking here. | 889 | * the same PM domain, so it is not necessary to use locking here. |
890 | */ | 890 | */ |
891 | pm_genpd_poweron(genpd); | 891 | pm_genpd_poweron(genpd); |
892 | genpd->suspended_count--; | 892 | genpd->suspended_count--; |
893 | genpd_start_dev(genpd, dev); | 893 | genpd_start_dev(genpd, dev); |
894 | 894 | ||
895 | return genpd_resume_early(genpd, dev); | 895 | return genpd_resume_early(genpd, dev); |
896 | } | 896 | } |
897 | 897 | ||
898 | /** | 898 | /** |
899 | * pm_genpd_resume - Resume a device belonging to an I/O power domain. | 899 | * pm_genpd_resume - Resume a device belonging to an I/O power domain. |
900 | * @dev: Device to resume. | 900 | * @dev: Device to resume. |
901 | * | 901 | * |
902 | * Resume a device under the assumption that its pm_domain field points to the | 902 | * Resume a device under the assumption that its pm_domain field points to the |
903 | * domain member of an object of type struct generic_pm_domain representing | 903 | * domain member of an object of type struct generic_pm_domain representing |
904 | * a power domain consisting of I/O devices. | 904 | * a power domain consisting of I/O devices. |
905 | */ | 905 | */ |
906 | static int pm_genpd_resume(struct device *dev) | 906 | static int pm_genpd_resume(struct device *dev) |
907 | { | 907 | { |
908 | struct generic_pm_domain *genpd; | 908 | struct generic_pm_domain *genpd; |
909 | 909 | ||
910 | dev_dbg(dev, "%s()\n", __func__); | 910 | dev_dbg(dev, "%s()\n", __func__); |
911 | 911 | ||
912 | genpd = dev_to_genpd(dev); | 912 | genpd = dev_to_genpd(dev); |
913 | if (IS_ERR(genpd)) | 913 | if (IS_ERR(genpd)) |
914 | return -EINVAL; | 914 | return -EINVAL; |
915 | 915 | ||
916 | return genpd->suspend_power_off ? 0 : genpd_resume_dev(genpd, dev); | 916 | return genpd->suspend_power_off ? 0 : genpd_resume_dev(genpd, dev); |
917 | } | 917 | } |
918 | 918 | ||
919 | /** | 919 | /** |
920 | * pm_genpd_freeze - Freeze a device belonging to an I/O power domain. | 920 | * pm_genpd_freeze - Freeze a device belonging to an I/O power domain. |
921 | * @dev: Device to freeze. | 921 | * @dev: Device to freeze. |
922 | * | 922 | * |
923 | * Freeze a device under the assumption that its pm_domain field points to the | 923 | * Freeze a device under the assumption that its pm_domain field points to the |
924 | * domain member of an object of type struct generic_pm_domain representing | 924 | * domain member of an object of type struct generic_pm_domain representing |
925 | * a power domain consisting of I/O devices. | 925 | * a power domain consisting of I/O devices. |
926 | */ | 926 | */ |
927 | static int pm_genpd_freeze(struct device *dev) | 927 | static int pm_genpd_freeze(struct device *dev) |
928 | { | 928 | { |
929 | struct generic_pm_domain *genpd; | 929 | struct generic_pm_domain *genpd; |
930 | 930 | ||
931 | dev_dbg(dev, "%s()\n", __func__); | 931 | dev_dbg(dev, "%s()\n", __func__); |
932 | 932 | ||
933 | genpd = dev_to_genpd(dev); | 933 | genpd = dev_to_genpd(dev); |
934 | if (IS_ERR(genpd)) | 934 | if (IS_ERR(genpd)) |
935 | return -EINVAL; | 935 | return -EINVAL; |
936 | 936 | ||
937 | return genpd->suspend_power_off ? 0 : genpd_freeze_dev(genpd, dev); | 937 | return genpd->suspend_power_off ? 0 : genpd_freeze_dev(genpd, dev); |
938 | } | 938 | } |
939 | 939 | ||
940 | /** | 940 | /** |
941 | * pm_genpd_freeze_noirq - Late freeze of a device from an I/O power domain. | 941 | * pm_genpd_freeze_noirq - Late freeze of a device from an I/O power domain. |
942 | * @dev: Device to freeze. | 942 | * @dev: Device to freeze. |
943 | * | 943 | * |
944 | * Carry out a late freeze of a device under the assumption that its | 944 | * Carry out a late freeze of a device under the assumption that its |
945 | * pm_domain field points to the domain member of an object of type | 945 | * pm_domain field points to the domain member of an object of type |
946 | * struct generic_pm_domain representing a power domain consisting of I/O | 946 | * struct generic_pm_domain representing a power domain consisting of I/O |
947 | * devices. | 947 | * devices. |
948 | */ | 948 | */ |
949 | static int pm_genpd_freeze_noirq(struct device *dev) | 949 | static int pm_genpd_freeze_noirq(struct device *dev) |
950 | { | 950 | { |
951 | struct generic_pm_domain *genpd; | 951 | struct generic_pm_domain *genpd; |
952 | int ret; | 952 | int ret; |
953 | 953 | ||
954 | dev_dbg(dev, "%s()\n", __func__); | 954 | dev_dbg(dev, "%s()\n", __func__); |
955 | 955 | ||
956 | genpd = dev_to_genpd(dev); | 956 | genpd = dev_to_genpd(dev); |
957 | if (IS_ERR(genpd)) | 957 | if (IS_ERR(genpd)) |
958 | return -EINVAL; | 958 | return -EINVAL; |
959 | 959 | ||
960 | if (genpd->suspend_power_off) | 960 | if (genpd->suspend_power_off) |
961 | return 0; | 961 | return 0; |
962 | 962 | ||
963 | ret = genpd_freeze_late(genpd, dev); | 963 | ret = genpd_freeze_late(genpd, dev); |
964 | if (ret) | 964 | if (ret) |
965 | return ret; | 965 | return ret; |
966 | 966 | ||
967 | genpd_stop_dev(genpd, dev); | 967 | genpd_stop_dev(genpd, dev); |
968 | 968 | ||
969 | return 0; | 969 | return 0; |
970 | } | 970 | } |
971 | 971 | ||
972 | /** | 972 | /** |
973 | * pm_genpd_thaw_noirq - Early thaw of a device from an I/O power domain. | 973 | * pm_genpd_thaw_noirq - Early thaw of a device from an I/O power domain. |
974 | * @dev: Device to thaw. | 974 | * @dev: Device to thaw. |
975 | * | 975 | * |
976 | * Carry out an early thaw of a device under the assumption that its | 976 | * Carry out an early thaw of a device under the assumption that its |
977 | * pm_domain field points to the domain member of an object of type | 977 | * pm_domain field points to the domain member of an object of type |
978 | * struct generic_pm_domain representing a power domain consisting of I/O | 978 | * struct generic_pm_domain representing a power domain consisting of I/O |
979 | * devices. | 979 | * devices. |
980 | */ | 980 | */ |
981 | static int pm_genpd_thaw_noirq(struct device *dev) | 981 | static int pm_genpd_thaw_noirq(struct device *dev) |
982 | { | 982 | { |
983 | struct generic_pm_domain *genpd; | 983 | struct generic_pm_domain *genpd; |
984 | 984 | ||
985 | dev_dbg(dev, "%s()\n", __func__); | 985 | dev_dbg(dev, "%s()\n", __func__); |
986 | 986 | ||
987 | genpd = dev_to_genpd(dev); | 987 | genpd = dev_to_genpd(dev); |
988 | if (IS_ERR(genpd)) | 988 | if (IS_ERR(genpd)) |
989 | return -EINVAL; | 989 | return -EINVAL; |
990 | 990 | ||
991 | if (genpd->suspend_power_off) | 991 | if (genpd->suspend_power_off) |
992 | return 0; | 992 | return 0; |
993 | 993 | ||
994 | genpd_start_dev(genpd, dev); | 994 | genpd_start_dev(genpd, dev); |
995 | 995 | ||
996 | return genpd_thaw_early(genpd, dev); | 996 | return genpd_thaw_early(genpd, dev); |
997 | } | 997 | } |
998 | 998 | ||
999 | /** | 999 | /** |
1000 | * pm_genpd_thaw - Thaw a device belonging to an I/O power domain. | 1000 | * pm_genpd_thaw - Thaw a device belonging to an I/O power domain. |
1001 | * @dev: Device to thaw. | 1001 | * @dev: Device to thaw. |
1002 | * | 1002 | * |
1003 | * Thaw a device under the assumption that its pm_domain field points to the | 1003 | * Thaw a device under the assumption that its pm_domain field points to the |
1004 | * domain member of an object of type struct generic_pm_domain representing | 1004 | * domain member of an object of type struct generic_pm_domain representing |
1005 | * a power domain consisting of I/O devices. | 1005 | * a power domain consisting of I/O devices. |
1006 | */ | 1006 | */ |
1007 | static int pm_genpd_thaw(struct device *dev) | 1007 | static int pm_genpd_thaw(struct device *dev) |
1008 | { | 1008 | { |
1009 | struct generic_pm_domain *genpd; | 1009 | struct generic_pm_domain *genpd; |
1010 | 1010 | ||
1011 | dev_dbg(dev, "%s()\n", __func__); | 1011 | dev_dbg(dev, "%s()\n", __func__); |
1012 | 1012 | ||
1013 | genpd = dev_to_genpd(dev); | 1013 | genpd = dev_to_genpd(dev); |
1014 | if (IS_ERR(genpd)) | 1014 | if (IS_ERR(genpd)) |
1015 | return -EINVAL; | 1015 | return -EINVAL; |
1016 | 1016 | ||
1017 | return genpd->suspend_power_off ? 0 : genpd_thaw_dev(genpd, dev); | 1017 | return genpd->suspend_power_off ? 0 : genpd_thaw_dev(genpd, dev); |
1018 | } | 1018 | } |
1019 | 1019 | ||
1020 | /** | 1020 | /** |
1021 | * pm_genpd_restore_noirq - Early restore of a device from an I/O power domain. | 1021 | * pm_genpd_restore_noirq - Early restore of a device from an I/O power domain. |
1022 | * @dev: Device to resume. | 1022 | * @dev: Device to resume. |
1023 | * | 1023 | * |
1024 | * Carry out an early restore of a device under the assumption that its | 1024 | * Carry out an early restore of a device under the assumption that its |
1025 | * pm_domain field points to the domain member of an object of type | 1025 | * pm_domain field points to the domain member of an object of type |
1026 | * struct generic_pm_domain representing a power domain consisting of I/O | 1026 | * struct generic_pm_domain representing a power domain consisting of I/O |
1027 | * devices. | 1027 | * devices. |
1028 | */ | 1028 | */ |
1029 | static int pm_genpd_restore_noirq(struct device *dev) | 1029 | static int pm_genpd_restore_noirq(struct device *dev) |
1030 | { | 1030 | { |
1031 | struct generic_pm_domain *genpd; | 1031 | struct generic_pm_domain *genpd; |
1032 | 1032 | ||
1033 | dev_dbg(dev, "%s()\n", __func__); | 1033 | dev_dbg(dev, "%s()\n", __func__); |
1034 | 1034 | ||
1035 | genpd = dev_to_genpd(dev); | 1035 | genpd = dev_to_genpd(dev); |
1036 | if (IS_ERR(genpd)) | 1036 | if (IS_ERR(genpd)) |
1037 | return -EINVAL; | 1037 | return -EINVAL; |
1038 | 1038 | ||
1039 | /* | 1039 | /* |
1040 | * Since all of the "noirq" callbacks are executed sequentially, it is | 1040 | * Since all of the "noirq" callbacks are executed sequentially, it is |
1041 | * guaranteed that this function will never run twice in parallel for | 1041 | * guaranteed that this function will never run twice in parallel for |
1042 | * the same PM domain, so it is not necessary to use locking here. | 1042 | * the same PM domain, so it is not necessary to use locking here. |
1043 | */ | 1043 | */ |
1044 | genpd->status = GPD_STATE_POWER_OFF; | 1044 | genpd->status = GPD_STATE_POWER_OFF; |
1045 | if (genpd->suspend_power_off) { | 1045 | if (genpd->suspend_power_off) { |
1046 | /* | 1046 | /* |
1047 | * The boot kernel might put the domain into the power on state, | 1047 | * The boot kernel might put the domain into the power on state, |
1048 | * so make sure it really is powered off. | 1048 | * so make sure it really is powered off. |
1049 | */ | 1049 | */ |
1050 | if (genpd->power_off) | 1050 | if (genpd->power_off) |
1051 | genpd->power_off(genpd); | 1051 | genpd->power_off(genpd); |
1052 | return 0; | 1052 | return 0; |
1053 | } | 1053 | } |
1054 | 1054 | ||
1055 | pm_genpd_poweron(genpd); | 1055 | pm_genpd_poweron(genpd); |
1056 | genpd->suspended_count--; | 1056 | genpd->suspended_count--; |
1057 | genpd_start_dev(genpd, dev); | 1057 | genpd_start_dev(genpd, dev); |
1058 | 1058 | ||
1059 | return genpd_resume_early(genpd, dev); | 1059 | return genpd_resume_early(genpd, dev); |
1060 | } | 1060 | } |
1061 | 1061 | ||
1062 | /** | 1062 | /** |
1063 | * pm_genpd_complete - Complete power transition of a device in a power domain. | 1063 | * pm_genpd_complete - Complete power transition of a device in a power domain. |
1064 | * @dev: Device to complete the transition of. | 1064 | * @dev: Device to complete the transition of. |
1065 | * | 1065 | * |
1066 | * Complete a power transition of a device (during a system-wide power | 1066 | * Complete a power transition of a device (during a system-wide power |
1067 | * transition) under the assumption that its pm_domain field points to the | 1067 | * transition) under the assumption that its pm_domain field points to the |
1068 | * domain member of an object of type struct generic_pm_domain representing | 1068 | * domain member of an object of type struct generic_pm_domain representing |
1069 | * a power domain consisting of I/O devices. | 1069 | * a power domain consisting of I/O devices. |
1070 | */ | 1070 | */ |
1071 | static void pm_genpd_complete(struct device *dev) | 1071 | static void pm_genpd_complete(struct device *dev) |
1072 | { | 1072 | { |
1073 | struct generic_pm_domain *genpd; | 1073 | struct generic_pm_domain *genpd; |
1074 | bool run_complete; | 1074 | bool run_complete; |
1075 | 1075 | ||
1076 | dev_dbg(dev, "%s()\n", __func__); | 1076 | dev_dbg(dev, "%s()\n", __func__); |
1077 | 1077 | ||
1078 | genpd = dev_to_genpd(dev); | 1078 | genpd = dev_to_genpd(dev); |
1079 | if (IS_ERR(genpd)) | 1079 | if (IS_ERR(genpd)) |
1080 | return; | 1080 | return; |
1081 | 1081 | ||
1082 | mutex_lock(&genpd->lock); | 1082 | mutex_lock(&genpd->lock); |
1083 | 1083 | ||
1084 | run_complete = !genpd->suspend_power_off; | 1084 | run_complete = !genpd->suspend_power_off; |
1085 | if (--genpd->prepared_count == 0) | 1085 | if (--genpd->prepared_count == 0) |
1086 | genpd->suspend_power_off = false; | 1086 | genpd->suspend_power_off = false; |
1087 | 1087 | ||
1088 | mutex_unlock(&genpd->lock); | 1088 | mutex_unlock(&genpd->lock); |
1089 | 1089 | ||
1090 | if (run_complete) { | 1090 | if (run_complete) { |
1091 | pm_generic_complete(dev); | 1091 | pm_generic_complete(dev); |
1092 | pm_runtime_set_active(dev); | 1092 | pm_runtime_set_active(dev); |
1093 | pm_runtime_enable(dev); | 1093 | pm_runtime_enable(dev); |
1094 | pm_runtime_idle(dev); | 1094 | pm_runtime_idle(dev); |
1095 | } | 1095 | } |
1096 | } | 1096 | } |
1097 | 1097 | ||
1098 | #else | 1098 | #else |
1099 | 1099 | ||
1100 | #define pm_genpd_prepare NULL | 1100 | #define pm_genpd_prepare NULL |
1101 | #define pm_genpd_suspend NULL | 1101 | #define pm_genpd_suspend NULL |
1102 | #define pm_genpd_suspend_noirq NULL | 1102 | #define pm_genpd_suspend_noirq NULL |
1103 | #define pm_genpd_resume_noirq NULL | 1103 | #define pm_genpd_resume_noirq NULL |
1104 | #define pm_genpd_resume NULL | 1104 | #define pm_genpd_resume NULL |
1105 | #define pm_genpd_freeze NULL | 1105 | #define pm_genpd_freeze NULL |
1106 | #define pm_genpd_freeze_noirq NULL | 1106 | #define pm_genpd_freeze_noirq NULL |
1107 | #define pm_genpd_thaw_noirq NULL | 1107 | #define pm_genpd_thaw_noirq NULL |
1108 | #define pm_genpd_thaw NULL | 1108 | #define pm_genpd_thaw NULL |
1109 | #define pm_genpd_restore_noirq NULL | 1109 | #define pm_genpd_restore_noirq NULL |
1110 | #define pm_genpd_complete NULL | 1110 | #define pm_genpd_complete NULL |
1111 | 1111 | ||
1112 | #endif /* CONFIG_PM_SLEEP */ | 1112 | #endif /* CONFIG_PM_SLEEP */ |
1113 | 1113 | ||
1114 | /** | 1114 | /** |
1115 | * __pm_genpd_add_device - Add a device to an I/O PM domain. | 1115 | * __pm_genpd_add_device - Add a device to an I/O PM domain. |
1116 | * @genpd: PM domain to add the device to. | 1116 | * @genpd: PM domain to add the device to. |
1117 | * @dev: Device to be added. | 1117 | * @dev: Device to be added. |
1118 | * @td: Set of PM QoS timing parameters to attach to the device. | 1118 | * @td: Set of PM QoS timing parameters to attach to the device. |
1119 | */ | 1119 | */ |
1120 | int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev, | 1120 | int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev, |
1121 | struct gpd_timing_data *td) | 1121 | struct gpd_timing_data *td) |
1122 | { | 1122 | { |
1123 | struct generic_pm_domain_data *gpd_data; | 1123 | struct generic_pm_domain_data *gpd_data; |
1124 | struct pm_domain_data *pdd; | 1124 | struct pm_domain_data *pdd; |
1125 | int ret = 0; | 1125 | int ret = 0; |
1126 | 1126 | ||
1127 | dev_dbg(dev, "%s()\n", __func__); | 1127 | dev_dbg(dev, "%s()\n", __func__); |
1128 | 1128 | ||
1129 | if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev)) | 1129 | if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev)) |
1130 | return -EINVAL; | 1130 | return -EINVAL; |
1131 | 1131 | ||
1132 | genpd_acquire_lock(genpd); | 1132 | genpd_acquire_lock(genpd); |
1133 | 1133 | ||
1134 | if (genpd->status == GPD_STATE_POWER_OFF) { | 1134 | if (genpd->status == GPD_STATE_POWER_OFF) { |
1135 | ret = -EINVAL; | 1135 | ret = -EINVAL; |
1136 | goto out; | 1136 | goto out; |
1137 | } | 1137 | } |
1138 | 1138 | ||
1139 | if (genpd->prepared_count > 0) { | 1139 | if (genpd->prepared_count > 0) { |
1140 | ret = -EAGAIN; | 1140 | ret = -EAGAIN; |
1141 | goto out; | 1141 | goto out; |
1142 | } | 1142 | } |
1143 | 1143 | ||
1144 | list_for_each_entry(pdd, &genpd->dev_list, list_node) | 1144 | list_for_each_entry(pdd, &genpd->dev_list, list_node) |
1145 | if (pdd->dev == dev) { | 1145 | if (pdd->dev == dev) { |
1146 | ret = -EINVAL; | 1146 | ret = -EINVAL; |
1147 | goto out; | 1147 | goto out; |
1148 | } | 1148 | } |
1149 | 1149 | ||
1150 | gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL); | 1150 | gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL); |
1151 | if (!gpd_data) { | 1151 | if (!gpd_data) { |
1152 | ret = -ENOMEM; | 1152 | ret = -ENOMEM; |
1153 | goto out; | 1153 | goto out; |
1154 | } | 1154 | } |
1155 | 1155 | ||
1156 | genpd->device_count++; | 1156 | genpd->device_count++; |
1157 | 1157 | ||
1158 | dev->pm_domain = &genpd->domain; | 1158 | dev->pm_domain = &genpd->domain; |
1159 | dev_pm_get_subsys_data(dev); | 1159 | dev_pm_get_subsys_data(dev); |
1160 | dev->power.subsys_data->domain_data = &gpd_data->base; | 1160 | dev->power.subsys_data->domain_data = &gpd_data->base; |
1161 | gpd_data->base.dev = dev; | 1161 | gpd_data->base.dev = dev; |
1162 | gpd_data->need_restore = false; | 1162 | gpd_data->need_restore = false; |
1163 | list_add_tail(&gpd_data->base.list_node, &genpd->dev_list); | 1163 | list_add_tail(&gpd_data->base.list_node, &genpd->dev_list); |
1164 | if (td) | 1164 | if (td) |
1165 | gpd_data->td = *td; | 1165 | gpd_data->td = *td; |
1166 | 1166 | ||
1167 | out: | 1167 | out: |
1168 | genpd_release_lock(genpd); | 1168 | genpd_release_lock(genpd); |
1169 | 1169 | ||
1170 | return ret; | 1170 | return ret; |
1171 | } | 1171 | } |
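A hedged usage sketch from machine setup code; the foo_* objects are hypothetical, and passing td as NULL is allowed (the device's timing data then stays zeroed):

	static struct generic_pm_domain foo_pd;	/* hypothetical domain */
	static struct platform_device foo_pdev;	/* hypothetical device */

	static void __init foo_attach_device(void)
	{
		int ret;

		ret = __pm_genpd_add_device(&foo_pd, &foo_pdev.dev, NULL);
		if (ret)
			pr_err("foo: adding device to PM domain failed (%d)\n", ret);
	}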
1172 | 1172 | ||
1173 | /** | 1173 | /** |
1174 | * pm_genpd_remove_device - Remove a device from an I/O PM domain. | 1174 | * pm_genpd_remove_device - Remove a device from an I/O PM domain. |
1175 | * @genpd: PM domain to remove the device from. | 1175 | * @genpd: PM domain to remove the device from. |
1176 | * @dev: Device to be removed. | 1176 | * @dev: Device to be removed. |
1177 | */ | 1177 | */ |
1178 | int pm_genpd_remove_device(struct generic_pm_domain *genpd, | 1178 | int pm_genpd_remove_device(struct generic_pm_domain *genpd, |
1179 | struct device *dev) | 1179 | struct device *dev) |
1180 | { | 1180 | { |
1181 | struct pm_domain_data *pdd; | 1181 | struct pm_domain_data *pdd; |
1182 | int ret = -EINVAL; | 1182 | int ret = -EINVAL; |
1183 | 1183 | ||
1184 | dev_dbg(dev, "%s()\n", __func__); | 1184 | dev_dbg(dev, "%s()\n", __func__); |
1185 | 1185 | ||
1186 | if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev)) | 1186 | if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev)) |
1187 | return -EINVAL; | 1187 | return -EINVAL; |
1188 | 1188 | ||
1189 | genpd_acquire_lock(genpd); | 1189 | genpd_acquire_lock(genpd); |
1190 | 1190 | ||
1191 | if (genpd->prepared_count > 0) { | 1191 | if (genpd->prepared_count > 0) { |
1192 | ret = -EAGAIN; | 1192 | ret = -EAGAIN; |
1193 | goto out; | 1193 | goto out; |
1194 | } | 1194 | } |
1195 | 1195 | ||
1196 | list_for_each_entry(pdd, &genpd->dev_list, list_node) { | 1196 | list_for_each_entry(pdd, &genpd->dev_list, list_node) { |
1197 | if (pdd->dev != dev) | 1197 | if (pdd->dev != dev) |
1198 | continue; | 1198 | continue; |
1199 | 1199 | ||
1200 | list_del_init(&pdd->list_node); | 1200 | list_del_init(&pdd->list_node); |
1201 | pdd->dev = NULL; | 1201 | pdd->dev = NULL; |
1202 | dev_pm_put_subsys_data(dev); | 1202 | dev_pm_put_subsys_data(dev); |
1203 | dev->pm_domain = NULL; | 1203 | dev->pm_domain = NULL; |
1204 | kfree(to_gpd_data(pdd)); | 1204 | kfree(to_gpd_data(pdd)); |
1205 | 1205 | ||
1206 | genpd->device_count--; | 1206 | genpd->device_count--; |
1207 | 1207 | ||
1208 | ret = 0; | 1208 | ret = 0; |
1209 | break; | 1209 | break; |
1210 | } | 1210 | } |
1211 | 1211 | ||
1212 | out: | 1212 | out: |
1213 | genpd_release_lock(genpd); | 1213 | genpd_release_lock(genpd); |
1214 | 1214 | ||
1215 | return ret; | 1215 | return ret; |
1216 | } | 1216 | } |
1217 | 1217 | ||
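A minimal usage sketch (all names hypothetical) of attaching a device to a domain and detaching it again; this assumes the pm_genpd_add_device() wrapper in <linux/pm_domain.h> that calls __pm_genpd_add_device() with a NULL gpd_timing_data:

#include <linux/platform_device.h>
#include <linux/pm_domain.h>

static struct generic_pm_domain my_pd;		/* hypothetical domain */

static int my_board_attach(struct platform_device *pdev)
{
	/* Route the device's PM operations through my_pd;
	 * -EAGAIN means a system sleep transition is in progress. */
	return pm_genpd_add_device(&my_pd, &pdev->dev);
}

static void my_board_detach(struct platform_device *pdev)
{
	/* Also fails with -EAGAIN while the domain has prepared devices. */
	pm_genpd_remove_device(&my_pd, &pdev->dev);
}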
1218 | /** | 1218 | /** |
1219 | * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain. | 1219 | * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain. |
1220 | * @genpd: Master PM domain to add the subdomain to. | 1220 | * @genpd: Master PM domain to add the subdomain to. |
1221 | * @subdomain: Subdomain to be added. | 1221 | * @subdomain: Subdomain to be added. |
1222 | */ | 1222 | */ |
1223 | int pm_genpd_add_subdomain(struct generic_pm_domain *genpd, | 1223 | int pm_genpd_add_subdomain(struct generic_pm_domain *genpd, |
1224 | struct generic_pm_domain *subdomain) | 1224 | struct generic_pm_domain *subdomain) |
1225 | { | 1225 | { |
1226 | struct gpd_link *link; | 1226 | struct gpd_link *link; |
1227 | int ret = 0; | 1227 | int ret = 0; |
1228 | 1228 | ||
1229 | if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)) | 1229 | if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)) |
1230 | return -EINVAL; | 1230 | return -EINVAL; |
1231 | 1231 | ||
1232 | start: | 1232 | start: |
1233 | genpd_acquire_lock(genpd); | 1233 | genpd_acquire_lock(genpd); |
1234 | mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING); | 1234 | mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING); |
1235 | 1235 | ||
1236 | if (subdomain->status != GPD_STATE_POWER_OFF | 1236 | if (subdomain->status != GPD_STATE_POWER_OFF |
1237 | && subdomain->status != GPD_STATE_ACTIVE) { | 1237 | && subdomain->status != GPD_STATE_ACTIVE) { |
1238 | mutex_unlock(&subdomain->lock); | 1238 | mutex_unlock(&subdomain->lock); |
1239 | genpd_release_lock(genpd); | 1239 | genpd_release_lock(genpd); |
1240 | goto start; | 1240 | goto start; |
1241 | } | 1241 | } |
1242 | 1242 | ||
1243 | if (genpd->status == GPD_STATE_POWER_OFF | 1243 | if (genpd->status == GPD_STATE_POWER_OFF |
1244 | && subdomain->status != GPD_STATE_POWER_OFF) { | 1244 | && subdomain->status != GPD_STATE_POWER_OFF) { |
1245 | ret = -EINVAL; | 1245 | ret = -EINVAL; |
1246 | goto out; | 1246 | goto out; |
1247 | } | 1247 | } |
1248 | 1248 | ||
1249 | list_for_each_entry(link, &genpd->slave_links, slave_node) { | 1249 | list_for_each_entry(link, &genpd->slave_links, slave_node) { |
1250 | if (link->slave == subdomain && link->master == genpd) { | 1250 | if (link->slave == subdomain && link->master == genpd) { |
1251 | ret = -EINVAL; | 1251 | ret = -EINVAL; |
1252 | goto out; | 1252 | goto out; |
1253 | } | 1253 | } |
1254 | } | 1254 | } |
1255 | 1255 | ||
1256 | link = kzalloc(sizeof(*link), GFP_KERNEL); | 1256 | link = kzalloc(sizeof(*link), GFP_KERNEL); |
1257 | if (!link) { | 1257 | if (!link) { |
1258 | ret = -ENOMEM; | 1258 | ret = -ENOMEM; |
1259 | goto out; | 1259 | goto out; |
1260 | } | 1260 | } |
1261 | link->master = genpd; | 1261 | link->master = genpd; |
1262 | list_add_tail(&link->master_node, &genpd->master_links); | 1262 | list_add_tail(&link->master_node, &genpd->master_links); |
1263 | link->slave = subdomain; | 1263 | link->slave = subdomain; |
1264 | list_add_tail(&link->slave_node, &subdomain->slave_links); | 1264 | list_add_tail(&link->slave_node, &subdomain->slave_links); |
1265 | if (subdomain->status != GPD_STATE_POWER_OFF) | 1265 | if (subdomain->status != GPD_STATE_POWER_OFF) |
1266 | genpd_sd_counter_inc(genpd); | 1266 | genpd_sd_counter_inc(genpd); |
1267 | 1267 | ||
1268 | out: | 1268 | out: |
1269 | mutex_unlock(&subdomain->lock); | 1269 | mutex_unlock(&subdomain->lock); |
1270 | genpd_release_lock(genpd); | 1270 | genpd_release_lock(genpd); |
1271 | 1271 | ||
1272 | return ret; | 1272 | return ret; |
1273 | } | 1273 | } |
1274 | 1274 | ||
1275 | /** | 1275 | /** |
1276 | * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain. | 1276 | * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain. |
1277 | * @genpd: Master PM domain to remove the subdomain from. | 1277 | * @genpd: Master PM domain to remove the subdomain from. |
1278 | * @subdomain: Subdomain to be removed. | 1278 | * @subdomain: Subdomain to be removed. |
1279 | */ | 1279 | */ |
1280 | int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, | 1280 | int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, |
1281 | struct generic_pm_domain *subdomain) | 1281 | struct generic_pm_domain *subdomain) |
1282 | { | 1282 | { |
1283 | struct gpd_link *link; | 1283 | struct gpd_link *link; |
1284 | int ret = -EINVAL; | 1284 | int ret = -EINVAL; |
1285 | 1285 | ||
1286 | if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)) | 1286 | if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)) |
1287 | return -EINVAL; | 1287 | return -EINVAL; |
1288 | 1288 | ||
1289 | start: | 1289 | start: |
1290 | genpd_acquire_lock(genpd); | 1290 | genpd_acquire_lock(genpd); |
1291 | 1291 | ||
1292 | list_for_each_entry(link, &genpd->master_links, master_node) { | 1292 | list_for_each_entry(link, &genpd->master_links, master_node) { |
1293 | if (link->slave != subdomain) | 1293 | if (link->slave != subdomain) |
1294 | continue; | 1294 | continue; |
1295 | 1295 | ||
1296 | mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING); | 1296 | mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING); |
1297 | 1297 | ||
1298 | if (subdomain->status != GPD_STATE_POWER_OFF | 1298 | if (subdomain->status != GPD_STATE_POWER_OFF |
1299 | && subdomain->status != GPD_STATE_ACTIVE) { | 1299 | && subdomain->status != GPD_STATE_ACTIVE) { |
1300 | mutex_unlock(&subdomain->lock); | 1300 | mutex_unlock(&subdomain->lock); |
1301 | genpd_release_lock(genpd); | 1301 | genpd_release_lock(genpd); |
1302 | goto start; | 1302 | goto start; |
1303 | } | 1303 | } |
1304 | 1304 | ||
1305 | list_del(&link->master_node); | 1305 | list_del(&link->master_node); |
1306 | list_del(&link->slave_node); | 1306 | list_del(&link->slave_node); |
1307 | kfree(link); | 1307 | kfree(link); |
1308 | if (subdomain->status != GPD_STATE_POWER_OFF) | 1308 | if (subdomain->status != GPD_STATE_POWER_OFF) |
1309 | genpd_sd_counter_dec(genpd); | 1309 | genpd_sd_counter_dec(genpd); |
1310 | 1310 | ||
1311 | mutex_unlock(&subdomain->lock); | 1311 | mutex_unlock(&subdomain->lock); |
1312 | 1312 | ||
1313 | ret = 0; | 1313 | ret = 0; |
1314 | break; | 1314 | break; |
1315 | } | 1315 | } |
1316 | 1316 | ||
1317 | genpd_release_lock(genpd); | 1317 | genpd_release_lock(genpd); |
1318 | 1318 | ||
1319 | return ret; | 1319 | return ret; |
1320 | } | 1320 | } |
1321 | 1321 | ||
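The intended call sequence, sketched with hypothetical names: initialize both domains, then link them so the subdomain can only be powered while its master is on (which is why the code above rejects an active subdomain under a powered-off master with -EINVAL):

#include <linux/kernel.h>
#include <linux/pm_domain.h>

static struct generic_pm_domain parent_pd;	/* hypothetical master */
static struct generic_pm_domain child_pd;	/* hypothetical subdomain */

static void __init my_domains_init(void)
{
	/* A real setup would also fill in .power_on/.power_off first. */
	pm_genpd_init(&parent_pd, NULL, true);
	pm_genpd_init(&child_pd, NULL, true);

	/* child_pd becomes a slave link of parent_pd (the master). */
	if (pm_genpd_add_subdomain(&parent_pd, &child_pd))
		pr_warn("PM domain linking failed\n");
}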
1322 | /** | 1322 | /** |
1323 | * pm_genpd_add_callbacks - Add PM domain callbacks to a given device. | 1323 | * pm_genpd_add_callbacks - Add PM domain callbacks to a given device. |
1324 | * @dev: Device to add the callbacks to. | 1324 | * @dev: Device to add the callbacks to. |
1325 | * @ops: Set of callbacks to add. | 1325 | * @ops: Set of callbacks to add. |
1326 | * @td: Timing data to add to the device along with the callbacks (optional). | 1326 | * @td: Timing data to add to the device along with the callbacks (optional). |
1327 | */ | 1327 | */ |
1328 | int pm_genpd_add_callbacks(struct device *dev, struct gpd_dev_ops *ops, | 1328 | int pm_genpd_add_callbacks(struct device *dev, struct gpd_dev_ops *ops, |
1329 | struct gpd_timing_data *td) | 1329 | struct gpd_timing_data *td) |
1330 | { | 1330 | { |
1331 | struct pm_domain_data *pdd; | 1331 | struct pm_domain_data *pdd; |
1332 | int ret = 0; | 1332 | int ret = 0; |
1333 | 1333 | ||
1334 | if (!(dev && dev->power.subsys_data && ops)) | 1334 | if (!(dev && dev->power.subsys_data && ops)) |
1335 | return -EINVAL; | 1335 | return -EINVAL; |
1336 | 1336 | ||
1337 | pm_runtime_disable(dev); | 1337 | pm_runtime_disable(dev); |
1338 | device_pm_lock(); | 1338 | device_pm_lock(); |
1339 | 1339 | ||
1340 | pdd = dev->power.subsys_data->domain_data; | 1340 | pdd = dev->power.subsys_data->domain_data; |
1341 | if (pdd) { | 1341 | if (pdd) { |
1342 | struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd); | 1342 | struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd); |
1343 | 1343 | ||
1344 | gpd_data->ops = *ops; | 1344 | gpd_data->ops = *ops; |
1345 | if (td) | 1345 | if (td) |
1346 | gpd_data->td = *td; | 1346 | gpd_data->td = *td; |
1347 | } else { | 1347 | } else { |
1348 | ret = -EINVAL; | 1348 | ret = -EINVAL; |
1349 | } | 1349 | } |
1350 | 1350 | ||
1351 | device_pm_unlock(); | 1351 | device_pm_unlock(); |
1352 | pm_runtime_enable(dev); | 1352 | pm_runtime_enable(dev); |
1353 | 1353 | ||
1354 | return ret; | 1354 | return ret; |
1355 | } | 1355 | } |
1356 | EXPORT_SYMBOL_GPL(pm_genpd_add_callbacks); | 1356 | EXPORT_SYMBOL_GPL(pm_genpd_add_callbacks); |
1357 | 1357 | ||
1358 | /** | 1358 | /** |
1359 | * __pm_genpd_remove_callbacks - Remove PM domain callbacks from a given device. | 1359 | * __pm_genpd_remove_callbacks - Remove PM domain callbacks from a given device. |
1360 | * @dev: Device to remove the callbacks from. | 1360 | * @dev: Device to remove the callbacks from. |
1361 | * @clear_td: If set, clear the device's timing data too. | 1361 | * @clear_td: If set, clear the device's timing data too. |
1362 | */ | 1362 | */ |
1363 | int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td) | 1363 | int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td) |
1364 | { | 1364 | { |
1365 | struct pm_domain_data *pdd; | 1365 | struct pm_domain_data *pdd; |
1366 | int ret = 0; | 1366 | int ret = 0; |
1367 | 1367 | ||
1368 | if (!(dev && dev->power.subsys_data)) | 1368 | if (!(dev && dev->power.subsys_data)) |
1369 | return -EINVAL; | 1369 | return -EINVAL; |
1370 | 1370 | ||
1371 | pm_runtime_disable(dev); | 1371 | pm_runtime_disable(dev); |
1372 | device_pm_lock(); | 1372 | device_pm_lock(); |
1373 | 1373 | ||
1374 | pdd = dev->power.subsys_data->domain_data; | 1374 | pdd = dev->power.subsys_data->domain_data; |
1375 | if (pdd) { | 1375 | if (pdd) { |
1376 | struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd); | 1376 | struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd); |
1377 | 1377 | ||
1378 | gpd_data->ops = (struct gpd_dev_ops){ 0 }; | 1378 | gpd_data->ops = (struct gpd_dev_ops){ 0 }; |
1379 | if (clear_td) | 1379 | if (clear_td) |
1380 | gpd_data->td = (struct gpd_timing_data){ 0 }; | 1380 | gpd_data->td = (struct gpd_timing_data){ 0 }; |
1381 | } else { | 1381 | } else { |
1382 | ret = -EINVAL; | 1382 | ret = -EINVAL; |
1383 | } | 1383 | } |
1384 | 1384 | ||
1385 | device_pm_unlock(); | 1385 | device_pm_unlock(); |
1386 | pm_runtime_enable(dev); | 1386 | pm_runtime_enable(dev); |
1387 | 1387 | ||
1388 | return ret; | 1388 | return ret; |
1389 | } | 1389 | } |
1390 | EXPORT_SYMBOL_GPL(__pm_genpd_remove_callbacks); | 1390 | EXPORT_SYMBOL_GPL(__pm_genpd_remove_callbacks); |
1391 | 1391 | ||
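A sketch of how a driver might use these hooks once its device is already in a domain (so dev->power.subsys_data is populated); my_save/my_restore and the surrounding names are hypothetical:

static int my_save(struct device *dev)
{
	/* quiesce the hardware before the domain may lose power */
	return 0;
}

static int my_restore(struct device *dev)
{
	/* reprogram the hardware after the domain is powered up again */
	return 0;
}

static struct gpd_dev_ops my_dev_ops = {
	.save_state	= my_save,
	.restore_state	= my_restore,
};

static int my_register_pm_callbacks(struct device *dev)
{
	/* The ops struct is copied; a NULL td keeps existing timing data. */
	return pm_genpd_add_callbacks(dev, &my_dev_ops, NULL);
}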
1392 | /* Default device callbacks for generic PM domains. */ | 1392 | /* Default device callbacks for generic PM domains. */ |
1393 | 1393 | ||
1394 | /** | 1394 | /** |
1395 | * pm_genpd_default_save_state - Default "save device state" for PM domains. | 1395 | * pm_genpd_default_save_state - Default "save device state" for PM domains. |
1396 | * @dev: Device to handle. | 1396 | * @dev: Device to handle. |
1397 | */ | 1397 | */ |
1398 | static int pm_genpd_default_save_state(struct device *dev) | 1398 | static int pm_genpd_default_save_state(struct device *dev) |
1399 | { | 1399 | { |
1400 | int (*cb)(struct device *__dev); | 1400 | int (*cb)(struct device *__dev); |
1401 | struct device_driver *drv = dev->driver; | 1401 | struct device_driver *drv = dev->driver; |
1402 | 1402 | ||
1403 | cb = dev_gpd_data(dev)->ops.save_state; | 1403 | cb = dev_gpd_data(dev)->ops.save_state; |
1404 | if (cb) | 1404 | if (cb) |
1405 | return cb(dev); | 1405 | return cb(dev); |
1406 | 1406 | ||
1407 | if (drv && drv->pm && drv->pm->runtime_suspend) | 1407 | if (drv && drv->pm && drv->pm->runtime_suspend) |
1408 | return drv->pm->runtime_suspend(dev); | 1408 | return drv->pm->runtime_suspend(dev); |
1409 | 1409 | ||
1410 | return 0; | 1410 | return 0; |
1411 | } | 1411 | } |
1412 | 1412 | ||
1413 | /** | 1413 | /** |
1414 | * pm_genpd_default_restore_state - Default "restore device state" for PM domains. | 1414 | * pm_genpd_default_restore_state - Default "restore device state" for PM domains. |
1415 | * @dev: Device to handle. | 1415 | * @dev: Device to handle. |
1416 | */ | 1416 | */ |
1417 | static int pm_genpd_default_restore_state(struct device *dev) | 1417 | static int pm_genpd_default_restore_state(struct device *dev) |
1418 | { | 1418 | { |
1419 | int (*cb)(struct device *__dev); | 1419 | int (*cb)(struct device *__dev); |
1420 | struct device_driver *drv = dev->driver; | 1420 | struct device_driver *drv = dev->driver; |
1421 | 1421 | ||
1422 | cb = dev_gpd_data(dev)->ops.restore_state; | 1422 | cb = dev_gpd_data(dev)->ops.restore_state; |
1423 | if (cb) | 1423 | if (cb) |
1424 | return cb(dev); | 1424 | return cb(dev); |
1425 | 1425 | ||
1426 | if (drv && drv->pm && drv->pm->runtime_resume) | 1426 | if (drv && drv->pm && drv->pm->runtime_resume) |
1427 | return drv->pm->runtime_resume(dev); | 1427 | return drv->pm->runtime_resume(dev); |
1428 | 1428 | ||
1429 | return 0; | 1429 | return 0; |
1430 | } | 1430 | } |
1431 | 1431 | ||
1432 | #ifdef CONFIG_PM_SLEEP | ||
1433 | |||
1432 | /** | 1434 | /** |
1433 | * pm_genpd_default_suspend - Default "device suspend" for PM domains. | 1435 | * pm_genpd_default_suspend - Default "device suspend" for PM domains. |
1434 | * @dev: Device to handle. | 1436 | * @dev: Device to handle. |
1435 | */ | 1437 | */ |
1436 | static int pm_genpd_default_suspend(struct device *dev) | 1438 | static int pm_genpd_default_suspend(struct device *dev) |
1437 | { | 1439 | { |
1438 | int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.suspend; | 1440 | int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.suspend; |
1439 | 1441 | ||
1440 | return cb ? cb(dev) : pm_generic_suspend(dev); | 1442 | return cb ? cb(dev) : pm_generic_suspend(dev); |
1441 | } | 1443 | } |
1442 | 1444 | ||
1443 | /** | 1445 | /** |
1444 | * pm_genpd_default_suspend_late - Default "late device suspend" for PM domains. | 1446 | * pm_genpd_default_suspend_late - Default "late device suspend" for PM domains. |
1445 | * @dev: Device to handle. | 1447 | * @dev: Device to handle. |
1446 | */ | 1448 | */ |
1447 | static int pm_genpd_default_suspend_late(struct device *dev) | 1449 | static int pm_genpd_default_suspend_late(struct device *dev) |
1448 | { | 1450 | { |
1449 | int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.suspend_late; | 1451 | int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.suspend_late; |
1450 | 1452 | ||
1451 | return cb ? cb(dev) : pm_generic_suspend_noirq(dev); | 1453 | return cb ? cb(dev) : pm_generic_suspend_noirq(dev); |
1452 | } | 1454 | } |
1453 | 1455 | ||
1454 | /** | 1456 | /** |
1455 | * pm_genpd_default_resume_early - Default "early device resume" for PM domains. | 1457 | * pm_genpd_default_resume_early - Default "early device resume" for PM domains. |
1456 | * @dev: Device to handle. | 1458 | * @dev: Device to handle. |
1457 | */ | 1459 | */ |
1458 | static int pm_genpd_default_resume_early(struct device *dev) | 1460 | static int pm_genpd_default_resume_early(struct device *dev) |
1459 | { | 1461 | { |
1460 | int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.resume_early; | 1462 | int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.resume_early; |
1461 | 1463 | ||
1462 | return cb ? cb(dev) : pm_generic_resume_noirq(dev); | 1464 | return cb ? cb(dev) : pm_generic_resume_noirq(dev); |
1463 | } | 1465 | } |
1464 | 1466 | ||
1465 | /** | 1467 | /** |
1466 | * pm_genpd_default_resume - Default "device resume" for PM domains. | 1468 | * pm_genpd_default_resume - Default "device resume" for PM domains. |
1467 | * @dev: Device to handle. | 1469 | * @dev: Device to handle. |
1468 | */ | 1470 | */ |
1469 | static int pm_genpd_default_resume(struct device *dev) | 1471 | static int pm_genpd_default_resume(struct device *dev) |
1470 | { | 1472 | { |
1471 | int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.resume; | 1473 | int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.resume; |
1472 | 1474 | ||
1473 | return cb ? cb(dev) : pm_generic_resume(dev); | 1475 | return cb ? cb(dev) : pm_generic_resume(dev); |
1474 | } | 1476 | } |
1475 | 1477 | ||
1476 | /** | 1478 | /** |
1477 | * pm_genpd_default_freeze - Default "device freeze" for PM domains. | 1479 | * pm_genpd_default_freeze - Default "device freeze" for PM domains. |
1478 | * @dev: Device to handle. | 1480 | * @dev: Device to handle. |
1479 | */ | 1481 | */ |
1480 | static int pm_genpd_default_freeze(struct device *dev) | 1482 | static int pm_genpd_default_freeze(struct device *dev) |
1481 | { | 1483 | { |
1482 | int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.freeze; | 1484 | int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.freeze; |
1483 | 1485 | ||
1484 | return cb ? cb(dev) : pm_generic_freeze(dev); | 1486 | return cb ? cb(dev) : pm_generic_freeze(dev); |
1485 | } | 1487 | } |
1486 | 1488 | ||
1487 | /** | 1489 | /** |
1488 | * pm_genpd_default_freeze_late - Default "late device freeze" for PM domains. | 1490 | * pm_genpd_default_freeze_late - Default "late device freeze" for PM domains. |
1489 | * @dev: Device to handle. | 1491 | * @dev: Device to handle. |
1490 | */ | 1492 | */ |
1491 | static int pm_genpd_default_freeze_late(struct device *dev) | 1493 | static int pm_genpd_default_freeze_late(struct device *dev) |
1492 | { | 1494 | { |
1493 | int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.freeze_late; | 1495 | int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.freeze_late; |
1494 | 1496 | ||
1495 | return cb ? cb(dev) : pm_generic_freeze_noirq(dev); | 1497 | return cb ? cb(dev) : pm_generic_freeze_noirq(dev); |
1496 | } | 1498 | } |
1497 | 1499 | ||
1498 | /** | 1500 | /** |
1499 | * pm_genpd_default_thaw_early - Default "early device thaw" for PM domains. | 1501 | * pm_genpd_default_thaw_early - Default "early device thaw" for PM domains. |
1500 | * @dev: Device to handle. | 1502 | * @dev: Device to handle. |
1501 | */ | 1503 | */ |
1502 | static int pm_genpd_default_thaw_early(struct device *dev) | 1504 | static int pm_genpd_default_thaw_early(struct device *dev) |
1503 | { | 1505 | { |
1504 | int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.thaw_early; | 1506 | int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.thaw_early; |
1505 | 1507 | ||
1506 | return cb ? cb(dev) : pm_generic_thaw_noirq(dev); | 1508 | return cb ? cb(dev) : pm_generic_thaw_noirq(dev); |
1507 | } | 1509 | } |
1508 | 1510 | ||
1509 | /** | 1511 | /** |
1510 | * pm_genpd_default_thaw - Default "device thaw" for PM domains. | 1512 | * pm_genpd_default_thaw - Default "device thaw" for PM domains. |
1511 | * @dev: Device to handle. | 1513 | * @dev: Device to handle. |
1512 | */ | 1514 | */ |
1513 | static int pm_genpd_default_thaw(struct device *dev) | 1515 | static int pm_genpd_default_thaw(struct device *dev) |
1514 | { | 1516 | { |
1515 | int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.thaw; | 1517 | int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.thaw; |
1516 | 1518 | ||
1517 | return cb ? cb(dev) : pm_generic_thaw(dev); | 1519 | return cb ? cb(dev) : pm_generic_thaw(dev); |
1518 | } | 1520 | } |
1521 | |||
1522 | #else /* !CONFIG_PM_SLEEP */ | ||
1523 | |||
1524 | #define pm_genpd_default_suspend NULL | ||
1525 | #define pm_genpd_default_suspend_late NULL | ||
1526 | #define pm_genpd_default_resume_early NULL | ||
1527 | #define pm_genpd_default_resume NULL | ||
1528 | #define pm_genpd_default_freeze NULL | ||
1529 | #define pm_genpd_default_freeze_late NULL | ||
1530 | #define pm_genpd_default_thaw_early NULL | ||
1531 | #define pm_genpd_default_thaw NULL | ||
1532 | |||
1533 | #endif /* !CONFIG_PM_SLEEP */ | ||
1519 | 1534 | ||
1520 | /** | 1535 | /** |
1521 | * pm_genpd_init - Initialize a generic I/O PM domain object. | 1536 | * pm_genpd_init - Initialize a generic I/O PM domain object. |
1522 | * @genpd: PM domain object to initialize. | 1537 | * @genpd: PM domain object to initialize. |
1523 | * @gov: PM domain governor to associate with the domain (may be NULL). | 1538 | * @gov: PM domain governor to associate with the domain (may be NULL). |
1524 | * @is_off: Initial power state of the domain (true means powered off). | 1539 | * @is_off: Initial power state of the domain (true means powered off). |
1525 | */ | 1540 | */ |
1526 | void pm_genpd_init(struct generic_pm_domain *genpd, | 1541 | void pm_genpd_init(struct generic_pm_domain *genpd, |
1527 | struct dev_power_governor *gov, bool is_off) | 1542 | struct dev_power_governor *gov, bool is_off) |
1528 | { | 1543 | { |
1529 | if (IS_ERR_OR_NULL(genpd)) | 1544 | if (IS_ERR_OR_NULL(genpd)) |
1530 | return; | 1545 | return; |
1531 | 1546 | ||
1532 | INIT_LIST_HEAD(&genpd->master_links); | 1547 | INIT_LIST_HEAD(&genpd->master_links); |
1533 | INIT_LIST_HEAD(&genpd->slave_links); | 1548 | INIT_LIST_HEAD(&genpd->slave_links); |
1534 | INIT_LIST_HEAD(&genpd->dev_list); | 1549 | INIT_LIST_HEAD(&genpd->dev_list); |
1535 | mutex_init(&genpd->lock); | 1550 | mutex_init(&genpd->lock); |
1536 | genpd->gov = gov; | 1551 | genpd->gov = gov; |
1537 | INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn); | 1552 | INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn); |
1538 | genpd->in_progress = 0; | 1553 | genpd->in_progress = 0; |
1539 | atomic_set(&genpd->sd_count, 0); | 1554 | atomic_set(&genpd->sd_count, 0); |
1540 | genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE; | 1555 | genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE; |
1541 | init_waitqueue_head(&genpd->status_wait_queue); | 1556 | init_waitqueue_head(&genpd->status_wait_queue); |
1542 | genpd->poweroff_task = NULL; | 1557 | genpd->poweroff_task = NULL; |
1543 | genpd->resume_count = 0; | 1558 | genpd->resume_count = 0; |
1544 | genpd->device_count = 0; | 1559 | genpd->device_count = 0; |
1545 | genpd->suspended_count = 0; | 1560 | genpd->suspended_count = 0; |
1546 | genpd->max_off_time_ns = -1; | 1561 | genpd->max_off_time_ns = -1; |
1547 | genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend; | 1562 | genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend; |
1548 | genpd->domain.ops.runtime_resume = pm_genpd_runtime_resume; | 1563 | genpd->domain.ops.runtime_resume = pm_genpd_runtime_resume; |
1549 | genpd->domain.ops.runtime_idle = pm_generic_runtime_idle; | 1564 | genpd->domain.ops.runtime_idle = pm_generic_runtime_idle; |
1550 | genpd->domain.ops.prepare = pm_genpd_prepare; | 1565 | genpd->domain.ops.prepare = pm_genpd_prepare; |
1551 | genpd->domain.ops.suspend = pm_genpd_suspend; | 1566 | genpd->domain.ops.suspend = pm_genpd_suspend; |
1552 | genpd->domain.ops.suspend_noirq = pm_genpd_suspend_noirq; | 1567 | genpd->domain.ops.suspend_noirq = pm_genpd_suspend_noirq; |
1553 | genpd->domain.ops.resume_noirq = pm_genpd_resume_noirq; | 1568 | genpd->domain.ops.resume_noirq = pm_genpd_resume_noirq; |
1554 | genpd->domain.ops.resume = pm_genpd_resume; | 1569 | genpd->domain.ops.resume = pm_genpd_resume; |
1555 | genpd->domain.ops.freeze = pm_genpd_freeze; | 1570 | genpd->domain.ops.freeze = pm_genpd_freeze; |
1556 | genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq; | 1571 | genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq; |
1557 | genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq; | 1572 | genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq; |
1558 | genpd->domain.ops.thaw = pm_genpd_thaw; | 1573 | genpd->domain.ops.thaw = pm_genpd_thaw; |
1559 | genpd->domain.ops.poweroff = pm_genpd_suspend; | 1574 | genpd->domain.ops.poweroff = pm_genpd_suspend; |
1560 | genpd->domain.ops.poweroff_noirq = pm_genpd_suspend_noirq; | 1575 | genpd->domain.ops.poweroff_noirq = pm_genpd_suspend_noirq; |
1561 | genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq; | 1576 | genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq; |
1562 | genpd->domain.ops.restore = pm_genpd_resume; | 1577 | genpd->domain.ops.restore = pm_genpd_resume; |
1563 | genpd->domain.ops.complete = pm_genpd_complete; | 1578 | genpd->domain.ops.complete = pm_genpd_complete; |
1564 | genpd->dev_ops.save_state = pm_genpd_default_save_state; | 1579 | genpd->dev_ops.save_state = pm_genpd_default_save_state; |
1565 | genpd->dev_ops.restore_state = pm_genpd_default_restore_state; | 1580 | genpd->dev_ops.restore_state = pm_genpd_default_restore_state; |
1566 | genpd->dev_ops.suspend = pm_genpd_default_suspend; | 1581 | genpd->dev_ops.suspend = pm_genpd_default_suspend; |
1567 | genpd->dev_ops.suspend_late = pm_genpd_default_suspend_late; | 1582 | genpd->dev_ops.suspend_late = pm_genpd_default_suspend_late; |
1568 | genpd->dev_ops.resume_early = pm_genpd_default_resume_early; | 1583 | genpd->dev_ops.resume_early = pm_genpd_default_resume_early; |
1569 | genpd->dev_ops.resume = pm_genpd_default_resume; | 1584 | genpd->dev_ops.resume = pm_genpd_default_resume; |
1570 | genpd->dev_ops.freeze = pm_genpd_default_freeze; | 1585 | genpd->dev_ops.freeze = pm_genpd_default_freeze; |
1571 | genpd->dev_ops.freeze_late = pm_genpd_default_freeze_late; | 1586 | genpd->dev_ops.freeze_late = pm_genpd_default_freeze_late; |
1572 | genpd->dev_ops.thaw_early = pm_genpd_default_thaw_early; | 1587 | genpd->dev_ops.thaw_early = pm_genpd_default_thaw_early; |
1573 | genpd->dev_ops.thaw = pm_genpd_default_thaw; | 1588 | genpd->dev_ops.thaw = pm_genpd_default_thaw; |
1574 | mutex_lock(&gpd_list_lock); | 1589 | mutex_lock(&gpd_list_lock); |
1575 | list_add(&genpd->gpd_list_node, &gpd_list); | 1590 | list_add(&genpd->gpd_list_node, &gpd_list); |
1576 | mutex_unlock(&gpd_list_lock); | 1591 | mutex_unlock(&gpd_list_lock); |
1577 | } | 1592 | } |
1578 | 1593 |
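Putting the pieces together, a minimal hypothetical platform domain: pm_genpd_init() installs all of the domain.ops and dev_ops defaults listed above, so a platform only has to supply the actual power switching:

#include <linux/pm_domain.h>

static int my_pd_power_off(struct generic_pm_domain *domain)
{
	/* cut power to the hardware island, e.g. via a system controller */
	return 0;
}

static int my_pd_power_on(struct generic_pm_domain *domain)
{
	/* restore power and wait until the island is usable again */
	return 0;
}

static struct generic_pm_domain my_pd = {
	.power_off	= my_pd_power_off,
	.power_on	= my_pd_power_on,
};

static void __init my_pd_setup(void)
{
	/* gov == NULL: no governor; is_off == true: starts powered off */
	pm_genpd_init(&my_pd, NULL, true);
}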
drivers/base/power/domain_governor.c
1 | /* | 1 | /* |
2 | * drivers/base/power/domain_governor.c - Governors for device PM domains. | 2 | * drivers/base/power/domain_governor.c - Governors for device PM domains. |
3 | * | 3 | * |
4 | * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp. | 4 | * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp. |
5 | * | 5 | * |
6 | * This file is released under the GPLv2. | 6 | * This file is released under the GPLv2. |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #include <linux/init.h> | 9 | #include <linux/init.h> |
10 | #include <linux/kernel.h> | 10 | #include <linux/kernel.h> |
11 | #include <linux/pm_domain.h> | 11 | #include <linux/pm_domain.h> |
12 | #include <linux/pm_qos.h> | 12 | #include <linux/pm_qos.h> |
13 | #include <linux/hrtimer.h> | 13 | #include <linux/hrtimer.h> |
14 | 14 | ||
15 | #ifdef CONFIG_PM_RUNTIME | ||
16 | |||
15 | /** | 17 | /** |
16 | * default_stop_ok - Default PM domain governor routine for stopping devices. | 18 | * default_stop_ok - Default PM domain governor routine for stopping devices. |
17 | * @dev: Device to check. | 19 | * @dev: Device to check. |
18 | */ | 20 | */ |
19 | bool default_stop_ok(struct device *dev) | 21 | bool default_stop_ok(struct device *dev) |
20 | { | 22 | { |
21 | struct gpd_timing_data *td = &dev_gpd_data(dev)->td; | 23 | struct gpd_timing_data *td = &dev_gpd_data(dev)->td; |
22 | 24 | ||
23 | dev_dbg(dev, "%s()\n", __func__); | 25 | dev_dbg(dev, "%s()\n", __func__); |
24 | 26 | ||
25 | if (dev->power.max_time_suspended_ns < 0 || td->break_even_ns == 0) | 27 | if (dev->power.max_time_suspended_ns < 0 || td->break_even_ns == 0) |
26 | return true; | 28 | return true; |
27 | 29 | ||
28 | return td->stop_latency_ns + td->start_latency_ns < td->break_even_ns | 30 | return td->stop_latency_ns + td->start_latency_ns < td->break_even_ns |
29 | && td->break_even_ns < dev->power.max_time_suspended_ns; | 31 | && td->break_even_ns < dev->power.max_time_suspended_ns; |
30 | } | 32 | } |
31 | 33 | ||
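As a worked example with illustrative numbers: for td->stop_latency_ns = 20000, td->start_latency_ns = 30000 and td->break_even_ns = 100000, the sum 20000 + 30000 = 50000 is below the break-even time, so stopping is approved exactly when the device may stay suspended for more than 100 us (break_even_ns < max_time_suspended_ns).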
32 | /** | 34 | /** |
33 | * default_power_down_ok - Default generic PM domain power off governor routine. | 35 | * default_power_down_ok - Default generic PM domain power off governor routine. |
34 | * @pd: PM domain to check. | 36 | * @pd: PM domain to check. |
35 | * | 37 | * |
36 | * This routine must be executed under the PM domain's lock. | 38 | * This routine must be executed under the PM domain's lock. |
37 | */ | 39 | */ |
38 | static bool default_power_down_ok(struct dev_pm_domain *pd) | 40 | static bool default_power_down_ok(struct dev_pm_domain *pd) |
39 | { | 41 | { |
40 | struct generic_pm_domain *genpd = pd_to_genpd(pd); | 42 | struct generic_pm_domain *genpd = pd_to_genpd(pd); |
41 | struct gpd_link *link; | 43 | struct gpd_link *link; |
42 | struct pm_domain_data *pdd; | 44 | struct pm_domain_data *pdd; |
43 | s64 min_dev_off_time_ns; | 45 | s64 min_dev_off_time_ns; |
44 | s64 off_on_time_ns; | 46 | s64 off_on_time_ns; |
45 | ktime_t time_now = ktime_get(); | 47 | ktime_t time_now = ktime_get(); |
46 | 48 | ||
47 | off_on_time_ns = genpd->power_off_latency_ns + | 49 | off_on_time_ns = genpd->power_off_latency_ns + |
48 | genpd->power_on_latency_ns; | 50 | genpd->power_on_latency_ns; |
49 | /* | 51 | /* |
50 | * It doesn't make sense to remove power from the domain if saving | 52 | * It doesn't make sense to remove power from the domain if saving |
51 | * the state of all devices in it and the power off/power on operations | 53 | * the state of all devices in it and the power off/power on operations |
52 | * take too much time. | 54 | * take too much time. |
53 | * | 55 | * |
54 | * All devices in this domain have been stopped already at this point. | 56 | * All devices in this domain have been stopped already at this point. |
55 | */ | 57 | */ |
56 | list_for_each_entry(pdd, &genpd->dev_list, list_node) { | 58 | list_for_each_entry(pdd, &genpd->dev_list, list_node) { |
57 | if (pdd->dev->driver) | 59 | if (pdd->dev->driver) |
58 | off_on_time_ns += | 60 | off_on_time_ns += |
59 | to_gpd_data(pdd)->td.save_state_latency_ns; | 61 | to_gpd_data(pdd)->td.save_state_latency_ns; |
60 | } | 62 | } |
61 | 63 | ||
62 | /* | 64 | /* |
63 | * Check if subdomains can be off for enough time. | 65 | * Check if subdomains can be off for enough time. |
64 | * | 66 | * |
65 | * All subdomains have been powered off already at this point. | 67 | * All subdomains have been powered off already at this point. |
66 | */ | 68 | */ |
67 | list_for_each_entry(link, &genpd->master_links, master_node) { | 69 | list_for_each_entry(link, &genpd->master_links, master_node) { |
68 | struct generic_pm_domain *sd = link->slave; | 70 | struct generic_pm_domain *sd = link->slave; |
69 | s64 sd_max_off_ns = sd->max_off_time_ns; | 71 | s64 sd_max_off_ns = sd->max_off_time_ns; |
70 | 72 | ||
71 | if (sd_max_off_ns < 0) | 73 | if (sd_max_off_ns < 0) |
72 | continue; | 74 | continue; |
73 | 75 | ||
74 | sd_max_off_ns -= ktime_to_ns(ktime_sub(time_now, | 76 | sd_max_off_ns -= ktime_to_ns(ktime_sub(time_now, |
75 | sd->power_off_time)); | 77 | sd->power_off_time)); |
76 | /* | 78 | /* |
77 | * Check if the subdomain is allowed to be off long enough for | 79 | * Check if the subdomain is allowed to be off long enough for |
78 | * the current domain to turn off and on (that's how much time | 80 | * the current domain to turn off and on (that's how much time |
79 | * it will have to wait worst case). | 81 | * it will have to wait worst case). |
80 | */ | 82 | */ |
81 | if (sd_max_off_ns <= off_on_time_ns) | 83 | if (sd_max_off_ns <= off_on_time_ns) |
82 | return false; | 84 | return false; |
83 | } | 85 | } |
84 | 86 | ||
85 | /* | 87 | /* |
86 | * Check if the devices in the domain can be off for enough time. | 88 | * Check if the devices in the domain can be off for enough time. |
87 | */ | 89 | */ |
88 | min_dev_off_time_ns = -1; | 90 | min_dev_off_time_ns = -1; |
89 | list_for_each_entry(pdd, &genpd->dev_list, list_node) { | 91 | list_for_each_entry(pdd, &genpd->dev_list, list_node) { |
90 | struct gpd_timing_data *td; | 92 | struct gpd_timing_data *td; |
91 | struct device *dev = pdd->dev; | 93 | struct device *dev = pdd->dev; |
92 | s64 dev_off_time_ns; | 94 | s64 dev_off_time_ns; |
93 | 95 | ||
94 | if (!dev->driver || dev->power.max_time_suspended_ns < 0) | 96 | if (!dev->driver || dev->power.max_time_suspended_ns < 0) |
95 | continue; | 97 | continue; |
96 | 98 | ||
97 | td = &to_gpd_data(pdd)->td; | 99 | td = &to_gpd_data(pdd)->td; |
98 | dev_off_time_ns = dev->power.max_time_suspended_ns - | 100 | dev_off_time_ns = dev->power.max_time_suspended_ns - |
99 | (td->start_latency_ns + td->restore_state_latency_ns + | 101 | (td->start_latency_ns + td->restore_state_latency_ns + |
100 | ktime_to_ns(ktime_sub(time_now, | 102 | ktime_to_ns(ktime_sub(time_now, |
101 | dev->power.suspend_time))); | 103 | dev->power.suspend_time))); |
102 | if (dev_off_time_ns <= off_on_time_ns) | 104 | if (dev_off_time_ns <= off_on_time_ns) |
103 | return false; | 105 | return false; |
104 | 106 | ||
105 | if (min_dev_off_time_ns > dev_off_time_ns | 107 | if (min_dev_off_time_ns > dev_off_time_ns |
106 | || min_dev_off_time_ns < 0) | 108 | || min_dev_off_time_ns < 0) |
107 | min_dev_off_time_ns = dev_off_time_ns; | 109 | min_dev_off_time_ns = dev_off_time_ns; |
108 | } | 110 | } |
109 | 111 | ||
110 | if (min_dev_off_time_ns < 0) { | 112 | if (min_dev_off_time_ns < 0) { |
111 | /* | 113 | /* |
112 | * There are no latency constraints, so the domain can spend | 114 | * There are no latency constraints, so the domain can spend |
113 | * arbitrary time in the "off" state. | 115 | * arbitrary time in the "off" state. |
114 | */ | 116 | */ |
115 | genpd->max_off_time_ns = -1; | 117 | genpd->max_off_time_ns = -1; |
116 | return true; | 118 | return true; |
117 | } | 119 | } |
118 | 120 | ||
119 | /* | 121 | /* |
120 | * The difference between the computed minimum delta and the time needed | 122 | * The difference between the computed minimum delta and the time needed |
121 | * to turn the domain on is the maximum theoretical time this domain can | 123 | * to turn the domain on is the maximum theoretical time this domain can |
122 | * spend in the "off" state. | 124 | * spend in the "off" state. |
123 | */ | 125 | */ |
124 | min_dev_off_time_ns -= genpd->power_on_latency_ns; | 126 | min_dev_off_time_ns -= genpd->power_on_latency_ns; |
125 | 127 | ||
126 | /* | 128 | /* |
127 | * If the difference between the computed minimum delta and the time | 129 | * If the difference between the computed minimum delta and the time |
128 | * needed to turn the domain off and back on is smaller than the | 130 | * needed to turn the domain off and back on is smaller than the |
129 | * domain's power break even time, removing power from the domain is not | 131 | * domain's power break even time, removing power from the domain is not |
130 | * worth it. | 132 | * worth it. |
131 | */ | 133 | */ |
132 | if (genpd->break_even_ns > | 134 | if (genpd->break_even_ns > |
133 | min_dev_off_time_ns - genpd->power_off_latency_ns) | 135 | min_dev_off_time_ns - genpd->power_off_latency_ns) |
134 | return false; | 136 | return false; |
135 | 137 | ||
136 | genpd->max_off_time_ns = min_dev_off_time_ns; | 138 | genpd->max_off_time_ns = min_dev_off_time_ns; |
137 | return true; | 139 | return true; |
138 | } | 140 | } |
139 | 141 | ||
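An illustrative walk-through with made-up latencies: power_off of 100 us plus power_on of 200 us plus one device's save_state of 50 us gives off_on_time_ns = 350 us. If that device may be suspended for at most 2 ms, has start + restore latencies of 30 us + 70 us and has already been suspended for 400 us, its dev_off_time_ns is 2000 - (30 + 70 + 400) = 1500 us, which exceeds 350 us. Subtracting the 200 us power-on latency leaves 1300 us, and since break_even_ns = 500 us does not exceed 1300 - 100 = 1200 us, the governor allows the power-down and sets max_off_time_ns to 1.3 ms.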
140 | struct dev_power_governor simple_qos_governor = { | ||
141 | .stop_ok = default_stop_ok, | ||
142 | .power_down_ok = default_power_down_ok, | ||
143 | }; | ||
144 | |||
145 | static bool always_on_power_down_ok(struct dev_pm_domain *domain) | 142 | static bool always_on_power_down_ok(struct dev_pm_domain *domain) |
146 | { | 143 | { |
147 | return false; | 144 | return false; |
148 | } | 145 | } |
146 | |||
147 | #else /* !CONFIG_PM_RUNTIME */ | ||
148 | |||
149 | bool default_stop_ok(struct device *dev) | ||
150 | { | ||
151 | return false; | ||
152 | } | ||
153 | |||
154 | #define default_power_down_ok NULL | ||
155 | #define always_on_power_down_ok NULL | ||
156 | |||
157 | #endif /* !CONFIG_PM_RUNTIME */ | ||
158 | |||
159 | struct dev_power_governor simple_qos_governor = { | ||
160 | .stop_ok = default_stop_ok, | ||
161 | .power_down_ok = default_power_down_ok, | ||
162 | }; | ||
149 | 163 | ||
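With CONFIG_PM_RUNTIME unset, the #else branch above makes default_stop_ok() refuse all stops and leaves .power_down_ok NULL, so the latency bookkeeping compiles away while simple_qos_governor still links. A domain opts into the QoS-aware policy by passing the governor at init time, e.g. (sketch, with a hypothetical my_pd):

	pm_genpd_init(&my_pd, &simple_qos_governor, true);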
150 | /** | 164 | /** |
151 | * pm_genpd_gov_always_on - A governor implementing an always-on policy | 165 | * pm_genpd_gov_always_on - A governor implementing an always-on policy |
152 | */ | 166 | */ |
kernel/Makefile
1 | # | 1 | # |
2 | # Makefile for the linux kernel. | 2 | # Makefile for the linux kernel. |
3 | # | 3 | # |
4 | 4 | ||
5 | obj-y = fork.o exec_domain.o panic.o printk.o \ | 5 | obj-y = fork.o exec_domain.o panic.o printk.o \ |
6 | cpu.o exit.o itimer.o time.o softirq.o resource.o \ | 6 | cpu.o exit.o itimer.o time.o softirq.o resource.o \ |
7 | sysctl.o sysctl_binary.o capability.o ptrace.o timer.o user.o \ | 7 | sysctl.o sysctl_binary.o capability.o ptrace.o timer.o user.o \ |
8 | signal.o sys.o kmod.o workqueue.o pid.o \ | 8 | signal.o sys.o kmod.o workqueue.o pid.o \ |
9 | rcupdate.o extable.o params.o posix-timers.o \ | 9 | rcupdate.o extable.o params.o posix-timers.o \ |
10 | kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o \ | 10 | kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o \ |
11 | hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \ | 11 | hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \ |
12 | notifier.o ksysfs.o cred.o \ | 12 | notifier.o ksysfs.o cred.o \ |
13 | async.o range.o groups.o | 13 | async.o range.o groups.o |
14 | 14 | ||
15 | ifdef CONFIG_FUNCTION_TRACER | 15 | ifdef CONFIG_FUNCTION_TRACER |
16 | # Do not trace debug files and internal ftrace files | 16 | # Do not trace debug files and internal ftrace files |
17 | CFLAGS_REMOVE_lockdep.o = -pg | 17 | CFLAGS_REMOVE_lockdep.o = -pg |
18 | CFLAGS_REMOVE_lockdep_proc.o = -pg | 18 | CFLAGS_REMOVE_lockdep_proc.o = -pg |
19 | CFLAGS_REMOVE_mutex-debug.o = -pg | 19 | CFLAGS_REMOVE_mutex-debug.o = -pg |
20 | CFLAGS_REMOVE_rtmutex-debug.o = -pg | 20 | CFLAGS_REMOVE_rtmutex-debug.o = -pg |
21 | CFLAGS_REMOVE_cgroup-debug.o = -pg | 21 | CFLAGS_REMOVE_cgroup-debug.o = -pg |
22 | CFLAGS_REMOVE_irq_work.o = -pg | 22 | CFLAGS_REMOVE_irq_work.o = -pg |
23 | endif | 23 | endif |
24 | 24 | ||
25 | obj-y += sched/ | 25 | obj-y += sched/ |
26 | obj-y += power/ | ||
26 | 27 | ||
27 | obj-$(CONFIG_FREEZER) += freezer.o | 28 | obj-$(CONFIG_FREEZER) += freezer.o |
28 | obj-$(CONFIG_PROFILING) += profile.o | 29 | obj-$(CONFIG_PROFILING) += profile.o |
29 | obj-$(CONFIG_SYSCTL_SYSCALL_CHECK) += sysctl_check.o | 30 | obj-$(CONFIG_SYSCTL_SYSCALL_CHECK) += sysctl_check.o |
30 | obj-$(CONFIG_STACKTRACE) += stacktrace.o | 31 | obj-$(CONFIG_STACKTRACE) += stacktrace.o |
31 | obj-y += time/ | 32 | obj-y += time/ |
32 | obj-$(CONFIG_DEBUG_MUTEXES) += mutex-debug.o | 33 | obj-$(CONFIG_DEBUG_MUTEXES) += mutex-debug.o |
33 | obj-$(CONFIG_LOCKDEP) += lockdep.o | 34 | obj-$(CONFIG_LOCKDEP) += lockdep.o |
34 | ifeq ($(CONFIG_PROC_FS),y) | 35 | ifeq ($(CONFIG_PROC_FS),y) |
35 | obj-$(CONFIG_LOCKDEP) += lockdep_proc.o | 36 | obj-$(CONFIG_LOCKDEP) += lockdep_proc.o |
36 | endif | 37 | endif |
37 | obj-$(CONFIG_FUTEX) += futex.o | 38 | obj-$(CONFIG_FUTEX) += futex.o |
38 | ifeq ($(CONFIG_COMPAT),y) | 39 | ifeq ($(CONFIG_COMPAT),y) |
39 | obj-$(CONFIG_FUTEX) += futex_compat.o | 40 | obj-$(CONFIG_FUTEX) += futex_compat.o |
40 | endif | 41 | endif |
41 | obj-$(CONFIG_RT_MUTEXES) += rtmutex.o | 42 | obj-$(CONFIG_RT_MUTEXES) += rtmutex.o |
42 | obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmutex-debug.o | 43 | obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmutex-debug.o |
43 | obj-$(CONFIG_RT_MUTEX_TESTER) += rtmutex-tester.o | 44 | obj-$(CONFIG_RT_MUTEX_TESTER) += rtmutex-tester.o |
44 | obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o | 45 | obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o |
45 | obj-$(CONFIG_SMP) += smp.o | 46 | obj-$(CONFIG_SMP) += smp.o |
46 | ifneq ($(CONFIG_SMP),y) | 47 | ifneq ($(CONFIG_SMP),y) |
47 | obj-y += up.o | 48 | obj-y += up.o |
48 | endif | 49 | endif |
49 | obj-$(CONFIG_SMP) += spinlock.o | 50 | obj-$(CONFIG_SMP) += spinlock.o |
50 | obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock.o | 51 | obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock.o |
51 | obj-$(CONFIG_PROVE_LOCKING) += spinlock.o | 52 | obj-$(CONFIG_PROVE_LOCKING) += spinlock.o |
52 | obj-$(CONFIG_UID16) += uid16.o | 53 | obj-$(CONFIG_UID16) += uid16.o |
53 | obj-$(CONFIG_MODULES) += module.o | 54 | obj-$(CONFIG_MODULES) += module.o |
54 | obj-$(CONFIG_KALLSYMS) += kallsyms.o | 55 | obj-$(CONFIG_KALLSYMS) += kallsyms.o |
55 | obj-$(CONFIG_PM) += power/ | ||
56 | obj-$(CONFIG_FREEZER) += power/ | ||
57 | obj-$(CONFIG_BSD_PROCESS_ACCT) += acct.o | 56 | obj-$(CONFIG_BSD_PROCESS_ACCT) += acct.o |
58 | obj-$(CONFIG_KEXEC) += kexec.o | 57 | obj-$(CONFIG_KEXEC) += kexec.o |
59 | obj-$(CONFIG_BACKTRACE_SELF_TEST) += backtracetest.o | 58 | obj-$(CONFIG_BACKTRACE_SELF_TEST) += backtracetest.o |
60 | obj-$(CONFIG_COMPAT) += compat.o | 59 | obj-$(CONFIG_COMPAT) += compat.o |
61 | obj-$(CONFIG_CGROUPS) += cgroup.o | 60 | obj-$(CONFIG_CGROUPS) += cgroup.o |
62 | obj-$(CONFIG_CGROUP_FREEZER) += cgroup_freezer.o | 61 | obj-$(CONFIG_CGROUP_FREEZER) += cgroup_freezer.o |
63 | obj-$(CONFIG_CPUSETS) += cpuset.o | 62 | obj-$(CONFIG_CPUSETS) += cpuset.o |
64 | obj-$(CONFIG_UTS_NS) += utsname.o | 63 | obj-$(CONFIG_UTS_NS) += utsname.o |
65 | obj-$(CONFIG_USER_NS) += user_namespace.o | 64 | obj-$(CONFIG_USER_NS) += user_namespace.o |
66 | obj-$(CONFIG_PID_NS) += pid_namespace.o | 65 | obj-$(CONFIG_PID_NS) += pid_namespace.o |
67 | obj-$(CONFIG_IKCONFIG) += configs.o | 66 | obj-$(CONFIG_IKCONFIG) += configs.o |
68 | obj-$(CONFIG_RESOURCE_COUNTERS) += res_counter.o | 67 | obj-$(CONFIG_RESOURCE_COUNTERS) += res_counter.o |
69 | obj-$(CONFIG_SMP) += stop_machine.o | 68 | obj-$(CONFIG_SMP) += stop_machine.o |
70 | obj-$(CONFIG_KPROBES_SANITY_TEST) += test_kprobes.o | 69 | obj-$(CONFIG_KPROBES_SANITY_TEST) += test_kprobes.o |
71 | obj-$(CONFIG_AUDIT) += audit.o auditfilter.o | 70 | obj-$(CONFIG_AUDIT) += audit.o auditfilter.o |
72 | obj-$(CONFIG_AUDITSYSCALL) += auditsc.o | 71 | obj-$(CONFIG_AUDITSYSCALL) += auditsc.o |
73 | obj-$(CONFIG_AUDIT_WATCH) += audit_watch.o | 72 | obj-$(CONFIG_AUDIT_WATCH) += audit_watch.o |
74 | obj-$(CONFIG_AUDIT_TREE) += audit_tree.o | 73 | obj-$(CONFIG_AUDIT_TREE) += audit_tree.o |
75 | obj-$(CONFIG_GCOV_KERNEL) += gcov/ | 74 | obj-$(CONFIG_GCOV_KERNEL) += gcov/ |
76 | obj-$(CONFIG_KPROBES) += kprobes.o | 75 | obj-$(CONFIG_KPROBES) += kprobes.o |
77 | obj-$(CONFIG_KGDB) += debug/ | 76 | obj-$(CONFIG_KGDB) += debug/ |
78 | obj-$(CONFIG_DETECT_HUNG_TASK) += hung_task.o | 77 | obj-$(CONFIG_DETECT_HUNG_TASK) += hung_task.o |
79 | obj-$(CONFIG_LOCKUP_DETECTOR) += watchdog.o | 78 | obj-$(CONFIG_LOCKUP_DETECTOR) += watchdog.o |
80 | obj-$(CONFIG_GENERIC_HARDIRQS) += irq/ | 79 | obj-$(CONFIG_GENERIC_HARDIRQS) += irq/ |
81 | obj-$(CONFIG_SECCOMP) += seccomp.o | 80 | obj-$(CONFIG_SECCOMP) += seccomp.o |
82 | obj-$(CONFIG_RCU_TORTURE_TEST) += rcutorture.o | 81 | obj-$(CONFIG_RCU_TORTURE_TEST) += rcutorture.o |
83 | obj-$(CONFIG_TREE_RCU) += rcutree.o | 82 | obj-$(CONFIG_TREE_RCU) += rcutree.o |
84 | obj-$(CONFIG_TREE_PREEMPT_RCU) += rcutree.o | 83 | obj-$(CONFIG_TREE_PREEMPT_RCU) += rcutree.o |
85 | obj-$(CONFIG_TREE_RCU_TRACE) += rcutree_trace.o | 84 | obj-$(CONFIG_TREE_RCU_TRACE) += rcutree_trace.o |
86 | obj-$(CONFIG_TINY_RCU) += rcutiny.o | 85 | obj-$(CONFIG_TINY_RCU) += rcutiny.o |
87 | obj-$(CONFIG_TINY_PREEMPT_RCU) += rcutiny.o | 86 | obj-$(CONFIG_TINY_PREEMPT_RCU) += rcutiny.o |
88 | obj-$(CONFIG_RELAY) += relay.o | 87 | obj-$(CONFIG_RELAY) += relay.o |
89 | obj-$(CONFIG_SYSCTL) += utsname_sysctl.o | 88 | obj-$(CONFIG_SYSCTL) += utsname_sysctl.o |
90 | obj-$(CONFIG_TASK_DELAY_ACCT) += delayacct.o | 89 | obj-$(CONFIG_TASK_DELAY_ACCT) += delayacct.o |
91 | obj-$(CONFIG_TASKSTATS) += taskstats.o tsacct.o | 90 | obj-$(CONFIG_TASKSTATS) += taskstats.o tsacct.o |
92 | obj-$(CONFIG_TRACEPOINTS) += tracepoint.o | 91 | obj-$(CONFIG_TRACEPOINTS) += tracepoint.o |
93 | obj-$(CONFIG_LATENCYTOP) += latencytop.o | 92 | obj-$(CONFIG_LATENCYTOP) += latencytop.o |
94 | obj-$(CONFIG_BINFMT_ELF) += elfcore.o | 93 | obj-$(CONFIG_BINFMT_ELF) += elfcore.o |
95 | obj-$(CONFIG_COMPAT_BINFMT_ELF) += elfcore.o | 94 | obj-$(CONFIG_COMPAT_BINFMT_ELF) += elfcore.o |
96 | obj-$(CONFIG_BINFMT_ELF_FDPIC) += elfcore.o | 95 | obj-$(CONFIG_BINFMT_ELF_FDPIC) += elfcore.o |
97 | obj-$(CONFIG_FUNCTION_TRACER) += trace/ | 96 | obj-$(CONFIG_FUNCTION_TRACER) += trace/ |
98 | obj-$(CONFIG_TRACING) += trace/ | 97 | obj-$(CONFIG_TRACING) += trace/ |
99 | obj-$(CONFIG_X86_DS) += trace/ | 98 | obj-$(CONFIG_X86_DS) += trace/ |
100 | obj-$(CONFIG_RING_BUFFER) += trace/ | 99 | obj-$(CONFIG_RING_BUFFER) += trace/ |
101 | obj-$(CONFIG_TRACEPOINTS) += trace/ | 100 | obj-$(CONFIG_TRACEPOINTS) += trace/ |
102 | obj-$(CONFIG_IRQ_WORK) += irq_work.o | 101 | obj-$(CONFIG_IRQ_WORK) += irq_work.o |
103 | obj-$(CONFIG_CPU_PM) += cpu_pm.o | 102 | obj-$(CONFIG_CPU_PM) += cpu_pm.o |
104 | 103 | ||
105 | obj-$(CONFIG_PERF_EVENTS) += events/ | 104 | obj-$(CONFIG_PERF_EVENTS) += events/ |
106 | 105 | ||
107 | obj-$(CONFIG_USER_RETURN_NOTIFIER) += user-return-notifier.o | 106 | obj-$(CONFIG_USER_RETURN_NOTIFIER) += user-return-notifier.o |
108 | obj-$(CONFIG_PADATA) += padata.o | 107 | obj-$(CONFIG_PADATA) += padata.o |
109 | obj-$(CONFIG_CRASH_DUMP) += crash_dump.o | 108 | obj-$(CONFIG_CRASH_DUMP) += crash_dump.o |
110 | obj-$(CONFIG_JUMP_LABEL) += jump_label.o | 109 | obj-$(CONFIG_JUMP_LABEL) += jump_label.o |
111 | 110 | ||
112 | $(obj)/configs.o: $(obj)/config_data.h | 111 | $(obj)/configs.o: $(obj)/config_data.h |
113 | 112 | ||
114 | # config_data.h contains the same information as ikconfig.h but gzipped. | 113 | # config_data.h contains the same information as ikconfig.h but gzipped. |
115 | # Info from config_data can be extracted from /proc/config* | 114 | # Info from config_data can be extracted from /proc/config* |
116 | targets += config_data.gz | 115 | targets += config_data.gz |
117 | $(obj)/config_data.gz: $(KCONFIG_CONFIG) FORCE | 116 | $(obj)/config_data.gz: $(KCONFIG_CONFIG) FORCE |
118 | $(call if_changed,gzip) | 117 | $(call if_changed,gzip) |
119 | 118 | ||
120 | filechk_ikconfiggz = (echo "static const char kernel_config_data[] __used = MAGIC_START"; cat $< | scripts/bin2c; echo "MAGIC_END;") | 119 | filechk_ikconfiggz = (echo "static const char kernel_config_data[] __used = MAGIC_START"; cat $< | scripts/bin2c; echo "MAGIC_END;") |
121 | targets += config_data.h | 120 | targets += config_data.h |
122 | $(obj)/config_data.h: $(obj)/config_data.gz FORCE | 121 | $(obj)/config_data.h: $(obj)/config_data.gz FORCE |
123 | $(call filechk,ikconfiggz) | 122 | $(call filechk,ikconfiggz) |
124 | 123 | ||
125 | $(obj)/time.o: $(obj)/timeconst.h | 124 | $(obj)/time.o: $(obj)/timeconst.h |
126 | 125 | ||
127 | quiet_cmd_timeconst = TIMEC $@ | 126 | quiet_cmd_timeconst = TIMEC $@ |
128 | cmd_timeconst = $(PERL) $< $(CONFIG_HZ) > $@ | 127 | cmd_timeconst = $(PERL) $< $(CONFIG_HZ) > $@ |
129 | targets += timeconst.h | 128 | targets += timeconst.h |
130 | $(obj)/timeconst.h: $(src)/timeconst.pl FORCE | 129 | $(obj)/timeconst.h: $(src)/timeconst.pl FORCE |
131 | $(call if_changed,timeconst) | 130 | $(call if_changed,timeconst) |
kernel/power/swap.c
1 | /* | 1 | /* |
2 | * linux/kernel/power/swap.c | 2 | * linux/kernel/power/swap.c |
3 | * | 3 | * |
4 | * This file provides functions for reading the suspend image from | 4 | * This file provides functions for reading the suspend image from |
5 | * and writing it to a swap partition. | 5 | * and writing it to a swap partition. |
6 | * | 6 | * |
7 | * Copyright (C) 1998,2001-2005 Pavel Machek <pavel@ucw.cz> | 7 | * Copyright (C) 1998,2001-2005 Pavel Machek <pavel@ucw.cz> |
8 | * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl> | 8 | * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl> |
9 | * Copyright (C) 2010 Bojan Smojver <bojan@rexursive.com> | 9 | * Copyright (C) 2010 Bojan Smojver <bojan@rexursive.com> |
10 | * | 10 | * |
11 | * This file is released under the GPLv2. | 11 | * This file is released under the GPLv2. |
12 | * | 12 | * |
13 | */ | 13 | */ |
14 | 14 | ||
15 | #include <linux/module.h> | 15 | #include <linux/module.h> |
16 | #include <linux/file.h> | 16 | #include <linux/file.h> |
17 | #include <linux/delay.h> | 17 | #include <linux/delay.h> |
18 | #include <linux/bitops.h> | 18 | #include <linux/bitops.h> |
19 | #include <linux/genhd.h> | 19 | #include <linux/genhd.h> |
20 | #include <linux/device.h> | 20 | #include <linux/device.h> |
21 | #include <linux/bio.h> | 21 | #include <linux/bio.h> |
22 | #include <linux/blkdev.h> | 22 | #include <linux/blkdev.h> |
23 | #include <linux/swap.h> | 23 | #include <linux/swap.h> |
24 | #include <linux/swapops.h> | 24 | #include <linux/swapops.h> |
25 | #include <linux/pm.h> | 25 | #include <linux/pm.h> |
26 | #include <linux/slab.h> | 26 | #include <linux/slab.h> |
27 | #include <linux/lzo.h> | 27 | #include <linux/lzo.h> |
28 | #include <linux/vmalloc.h> | 28 | #include <linux/vmalloc.h> |
29 | #include <linux/cpumask.h> | 29 | #include <linux/cpumask.h> |
30 | #include <linux/atomic.h> | 30 | #include <linux/atomic.h> |
31 | #include <linux/kthread.h> | 31 | #include <linux/kthread.h> |
32 | #include <linux/crc32.h> | 32 | #include <linux/crc32.h> |
33 | 33 | ||
34 | #include "power.h" | 34 | #include "power.h" |
35 | 35 | ||
36 | #define HIBERNATE_SIG "S1SUSPEND" | 36 | #define HIBERNATE_SIG "S1SUSPEND" |
37 | 37 | ||
38 | /* | 38 | /* |
39 | * The swap map is a data structure used for keeping track of each page | 39 | * The swap map is a data structure used for keeping track of each page |
40 | * written to a swap partition. It consists of many swap_map_page | 40 | * written to a swap partition. It consists of many swap_map_page |
41 | * structures, each containing an array of MAP_PAGE_ENTRIES swap entries. | 41 | * structures, each containing an array of MAP_PAGE_ENTRIES swap entries. |
42 | * These structures are stored on the swap and linked together with the | 42 | * These structures are stored on the swap and linked together with the |
43 | * help of the .next_swap member. | 43 | * help of the .next_swap member. |
44 | * | 44 | * |
45 | * The swap map is created during suspend. The swap map pages are | 45 | * The swap map is created during suspend. The swap map pages are |
46 | * allocated and populated one at a time, so we only need one memory | 46 | * allocated and populated one at a time, so we only need one memory |
47 | * page to set up the entire structure. | 47 | * page to set up the entire structure. |
48 | * | 48 | * |
49 | * During resume we read all of the swap_map_page structures into a list. | 49 | * During resume we read all of the swap_map_page structures into a list. |
50 | */ | 50 | */ |
51 | 51 | ||
52 | #define MAP_PAGE_ENTRIES (PAGE_SIZE / sizeof(sector_t) - 1) | 52 | #define MAP_PAGE_ENTRIES (PAGE_SIZE / sizeof(sector_t) - 1) |
53 | 53 | ||
54 | struct swap_map_page { | 54 | struct swap_map_page { |
55 | sector_t entries[MAP_PAGE_ENTRIES]; | 55 | sector_t entries[MAP_PAGE_ENTRIES]; |
56 | sector_t next_swap; | 56 | sector_t next_swap; |
57 | }; | 57 | }; |
58 | 58 | ||
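The sizing makes each map page fill exactly one page of memory: with 4 KiB pages and an 8-byte sector_t (a common configuration), MAP_PAGE_ENTRIES is 4096 / 8 - 1 = 511 entries, the last sector_t slot being taken by next_swap.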
59 | struct swap_map_page_list { | 59 | struct swap_map_page_list { |
60 | struct swap_map_page *map; | 60 | struct swap_map_page *map; |
61 | struct swap_map_page_list *next; | 61 | struct swap_map_page_list *next; |
62 | }; | 62 | }; |
63 | 63 | ||
64 | /** | 64 | /** |
65 | * The swap_map_handle structure is used for handling swap in | 65 | * The swap_map_handle structure is used for handling swap in |
66 | * a file-alike way | 66 | * a file-alike way |
67 | */ | 67 | */ |
68 | 68 | ||
69 | struct swap_map_handle { | 69 | struct swap_map_handle { |
70 | struct swap_map_page *cur; | 70 | struct swap_map_page *cur; |
71 | struct swap_map_page_list *maps; | 71 | struct swap_map_page_list *maps; |
72 | sector_t cur_swap; | 72 | sector_t cur_swap; |
73 | sector_t first_sector; | 73 | sector_t first_sector; |
74 | unsigned int k; | 74 | unsigned int k; |
75 | unsigned long nr_free_pages, written; | 75 | unsigned long nr_free_pages, written; |
76 | u32 crc32; | 76 | u32 crc32; |
77 | }; | 77 | }; |
78 | 78 | ||
79 | struct swsusp_header { | 79 | struct swsusp_header { |
80 | char reserved[PAGE_SIZE - 20 - sizeof(sector_t) - sizeof(int) - | 80 | char reserved[PAGE_SIZE - 20 - sizeof(sector_t) - sizeof(int) - |
81 | sizeof(u32)]; | 81 | sizeof(u32)]; |
82 | u32 crc32; | 82 | u32 crc32; |
83 | sector_t image; | 83 | sector_t image; |
84 | unsigned int flags; /* Flags to pass to the "boot" kernel */ | 84 | unsigned int flags; /* Flags to pass to the "boot" kernel */ |
85 | char orig_sig[10]; | 85 | char orig_sig[10]; |
86 | char sig[10]; | 86 | char sig[10]; |
87 | } __attribute__((packed)); | 87 | } __attribute__((packed)); |
88 | 88 | ||
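The reserved[] array is sized so that the packed structure spans exactly one page, placing sig[] in the final 10 bytes of the header block (the literal 20 covers the two 10-byte signature arrays). A compile-time check along these lines (not in the source) would confirm the layout:

static inline void swsusp_header_layout_check(void)
{
	/* BUILD_BUG_ON() comes from <linux/bug.h> */
	BUILD_BUG_ON(sizeof(struct swsusp_header) != PAGE_SIZE);
}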
89 | static struct swsusp_header *swsusp_header; | 89 | static struct swsusp_header *swsusp_header; |
90 | 90 | ||
91 | /** | 91 | /** |
92 | * The following functions are used for tracing the allocated | 92 | * The following functions are used for tracing the allocated |
93 | * swap pages, so that they can be freed in case of an error. | 93 | * swap pages, so that they can be freed in case of an error. |
94 | */ | 94 | */ |
95 | 95 | ||
96 | struct swsusp_extent { | 96 | struct swsusp_extent { |
97 | struct rb_node node; | 97 | struct rb_node node; |
98 | unsigned long start; | 98 | unsigned long start; |
99 | unsigned long end; | 99 | unsigned long end; |
100 | }; | 100 | }; |
101 | 101 | ||
102 | static struct rb_root swsusp_extents = RB_ROOT; | 102 | static struct rb_root swsusp_extents = RB_ROOT; |
103 | 103 | ||
104 | static int swsusp_extents_insert(unsigned long swap_offset) | 104 | static int swsusp_extents_insert(unsigned long swap_offset) |
105 | { | 105 | { |
106 | struct rb_node **new = &(swsusp_extents.rb_node); | 106 | struct rb_node **new = &(swsusp_extents.rb_node); |
107 | struct rb_node *parent = NULL; | 107 | struct rb_node *parent = NULL; |
108 | struct swsusp_extent *ext; | 108 | struct swsusp_extent *ext; |
109 | 109 | ||
110 | /* Figure out where to put the new node */ | 110 | /* Figure out where to put the new node */ |
111 | while (*new) { | 111 | while (*new) { |
112 | ext = container_of(*new, struct swsusp_extent, node); | 112 | ext = container_of(*new, struct swsusp_extent, node); |
113 | parent = *new; | 113 | parent = *new; |
114 | if (swap_offset < ext->start) { | 114 | if (swap_offset < ext->start) { |
115 | /* Try to merge */ | 115 | /* Try to merge */ |
116 | if (swap_offset == ext->start - 1) { | 116 | if (swap_offset == ext->start - 1) { |
117 | ext->start--; | 117 | ext->start--; |
118 | return 0; | 118 | return 0; |
119 | } | 119 | } |
120 | new = &((*new)->rb_left); | 120 | new = &((*new)->rb_left); |
121 | } else if (swap_offset > ext->end) { | 121 | } else if (swap_offset > ext->end) { |
122 | /* Try to merge */ | 122 | /* Try to merge */ |
123 | if (swap_offset == ext->end + 1) { | 123 | if (swap_offset == ext->end + 1) { |
124 | ext->end++; | 124 | ext->end++; |
125 | return 0; | 125 | return 0; |
126 | } | 126 | } |
127 | new = &((*new)->rb_right); | 127 | new = &((*new)->rb_right); |
128 | } else { | 128 | } else { |
129 | /* It already is in the tree */ | 129 | /* It already is in the tree */ |
130 | return -EINVAL; | 130 | return -EINVAL; |
131 | } | 131 | } |
132 | } | 132 | } |
133 | /* Add the new node and rebalance the tree. */ | 133 | /* Add the new node and rebalance the tree. */ |
134 | ext = kzalloc(sizeof(struct swsusp_extent), GFP_KERNEL); | 134 | ext = kzalloc(sizeof(struct swsusp_extent), GFP_KERNEL); |
135 | if (!ext) | 135 | if (!ext) |
136 | return -ENOMEM; | 136 | return -ENOMEM; |
137 | 137 | ||
138 | ext->start = swap_offset; | 138 | ext->start = swap_offset; |
139 | ext->end = swap_offset; | 139 | ext->end = swap_offset; |
140 | rb_link_node(&ext->node, parent, new); | 140 | rb_link_node(&ext->node, parent, new); |
141 | rb_insert_color(&ext->node, &swsusp_extents); | 141 | rb_insert_color(&ext->node, &swsusp_extents); |
142 | return 0; | 142 | return 0; |
143 | } | 143 | } |
144 | 144 | ||
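To see the merge logic at work: inserting offsets 10, 11 and 13 first creates the extent [10, 10], then grows it to [10, 11] via the ext->end + 1 case, and finally adds a separate [13, 13]. Inserting 12 afterwards extends the first extent to [10, 12]; note that adjacent extents are never coalesced with each other, so [10, 12] and [13, 13] remain two nodes.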
145 | /** | 145 | /** |
146 | * alloc_swapdev_block - allocate a swap page and register that it has | 146 | * alloc_swapdev_block - allocate a swap page and register that it has |
147 | * been allocated, so that it can be freed in case of an error. | 147 | * been allocated, so that it can be freed in case of an error. |
148 | */ | 148 | */ |
149 | 149 | ||
150 | sector_t alloc_swapdev_block(int swap) | 150 | sector_t alloc_swapdev_block(int swap) |
151 | { | 151 | { |
152 | unsigned long offset; | 152 | unsigned long offset; |
153 | 153 | ||
154 | offset = swp_offset(get_swap_page_of_type(swap)); | 154 | offset = swp_offset(get_swap_page_of_type(swap)); |
155 | if (offset) { | 155 | if (offset) { |
156 | if (swsusp_extents_insert(offset)) | 156 | if (swsusp_extents_insert(offset)) |
157 | swap_free(swp_entry(swap, offset)); | 157 | swap_free(swp_entry(swap, offset)); |
158 | else | 158 | else |
159 | return swapdev_block(swap, offset); | 159 | return swapdev_block(swap, offset); |
160 | } | 160 | } |
161 | return 0; | 161 | return 0; |
162 | } | 162 | } |
163 | 163 | ||
164 | /** | 164 | /** |
165 | * free_all_swap_pages - free swap pages allocated for saving image data. | 165 | * free_all_swap_pages - free swap pages allocated for saving image data. |
166 | * It also frees the extents used to register which swap entries had been | 166 | * It also frees the extents used to register which swap entries had been |
167 | * allocated. | 167 | * allocated. |
168 | */ | 168 | */ |
169 | 169 | ||
170 | void free_all_swap_pages(int swap) | 170 | void free_all_swap_pages(int swap) |
171 | { | 171 | { |
172 | struct rb_node *node; | 172 | struct rb_node *node; |
173 | 173 | ||
174 | while ((node = swsusp_extents.rb_node)) { | 174 | while ((node = swsusp_extents.rb_node)) { |
175 | struct swsusp_extent *ext; | 175 | struct swsusp_extent *ext; |
176 | unsigned long offset; | 176 | unsigned long offset; |
177 | 177 | ||
178 | ext = container_of(node, struct swsusp_extent, node); | 178 | ext = container_of(node, struct swsusp_extent, node); |
179 | rb_erase(node, &swsusp_extents); | 179 | rb_erase(node, &swsusp_extents); |
180 | for (offset = ext->start; offset <= ext->end; offset++) | 180 | for (offset = ext->start; offset <= ext->end; offset++) |
181 | swap_free(swp_entry(swap, offset)); | 181 | swap_free(swp_entry(swap, offset)); |
182 | 182 | ||
183 | kfree(ext); | 183 | kfree(ext); |
184 | } | 184 | } |
185 | } | 185 | } |
186 | 186 | ||
187 | int swsusp_swap_in_use(void) | 187 | int swsusp_swap_in_use(void) |
188 | { | 188 | { |
189 | return (swsusp_extents.rb_node != NULL); | 189 | return (swsusp_extents.rb_node != NULL); |
190 | } | 190 | } |
191 | 191 | ||
192 | /* | 192 | /* |
193 | * General things | 193 | * General things |
194 | */ | 194 | */ |
195 | 195 | ||
196 | static unsigned short root_swap = 0xffff; | 196 | static unsigned short root_swap = 0xffff; |
197 | struct block_device *hib_resume_bdev; | 197 | struct block_device *hib_resume_bdev; |
198 | 198 | ||
199 | /* | 199 | /* |
200 | * Saving part | 200 | * Saving part |
201 | */ | 201 | */ |
202 | 202 | ||
203 | static int mark_swapfiles(struct swap_map_handle *handle, unsigned int flags) | 203 | static int mark_swapfiles(struct swap_map_handle *handle, unsigned int flags) |
204 | { | 204 | { |
205 | int error; | 205 | int error; |
206 | 206 | ||
207 | hib_bio_read_page(swsusp_resume_block, swsusp_header, NULL); | 207 | hib_bio_read_page(swsusp_resume_block, swsusp_header, NULL); |
208 | if (!memcmp("SWAP-SPACE",swsusp_header->sig, 10) || | 208 | if (!memcmp("SWAP-SPACE",swsusp_header->sig, 10) || |
209 | !memcmp("SWAPSPACE2",swsusp_header->sig, 10)) { | 209 | !memcmp("SWAPSPACE2",swsusp_header->sig, 10)) { |
210 | memcpy(swsusp_header->orig_sig,swsusp_header->sig, 10); | 210 | memcpy(swsusp_header->orig_sig,swsusp_header->sig, 10); |
211 | memcpy(swsusp_header->sig, HIBERNATE_SIG, 10); | 211 | memcpy(swsusp_header->sig, HIBERNATE_SIG, 10); |
212 | swsusp_header->image = handle->first_sector; | 212 | swsusp_header->image = handle->first_sector; |
213 | swsusp_header->flags = flags; | 213 | swsusp_header->flags = flags; |
214 | if (flags & SF_CRC32_MODE) | 214 | if (flags & SF_CRC32_MODE) |
215 | swsusp_header->crc32 = handle->crc32; | 215 | swsusp_header->crc32 = handle->crc32; |
216 | error = hib_bio_write_page(swsusp_resume_block, | 216 | error = hib_bio_write_page(swsusp_resume_block, |
217 | swsusp_header, NULL); | 217 | swsusp_header, NULL); |
218 | } else { | 218 | } else { |
219 | printk(KERN_ERR "PM: Swap header not found!\n"); | 219 | printk(KERN_ERR "PM: Swap header not found!\n"); |
220 | error = -ENODEV; | 220 | error = -ENODEV; |
221 | } | 221 | } |
222 | return error; | 222 | return error; |
223 | } | 223 | } |
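
mark_swapfiles() claims the swap area for the image by rewriting its signature: sig[] is the last member of the packed swsusp_header above, so it occupies the final 10 bytes of the swap area's first page, where mkswap puts "SWAP-SPACE" or "SWAPSPACE2"; the original string is parked in orig_sig so resume can restore it. A read-only userspace sketch that classifies such a signature (assumptions: 4 KiB pages, and HIBERNATE_SIG being "S1SUSPEND" as in kernel/power/power.h of this vintage):

/* build: cc -std=c99 -o swapsig swapsig.c; run: ./swapsig /dev/sdXN */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#define PG 4096				/* assumed PAGE_SIZE */

int main(int argc, char **argv)
{
	unsigned char page[PG];
	int fd;

	if (argc != 2 || (fd = open(argv[1], O_RDONLY)) < 0)
		return 1;
	if (read(fd, page, PG) != PG)	/* first page of the swap area */
		return 1;
	close(fd);

	const unsigned char *sig = page + PG - 10;	/* last 10 bytes */
	if (!memcmp(sig, "SWAP-SPACE", 10) || !memcmp(sig, "SWAPSPACE2", 10))
		puts("plain swap signature");
	else if (!memcmp(sig, "S1SUSPEND", 10))		/* HIBERNATE_SIG + NUL */
		puts("hibernation image present");
	else
		puts("no recognizable signature");
	return 0;
}
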
224 | 224 | ||
225 | /** | 225 | /** |
226 | * swsusp_swap_check - check if the resume device is a swap device | 226 | * swsusp_swap_check - check if the resume device is a swap device |
227 | * and get its index (if so) | 227 | * and get its index (if so) |
228 | * | 228 | * |
229 | * This is called before saving the image. | 229 | * This is called before saving the image. |
230 | */ | 230 | */ |
231 | static int swsusp_swap_check(void) | 231 | static int swsusp_swap_check(void) |
232 | { | 232 | { |
233 | int res; | 233 | int res; |
234 | 234 | ||
235 | res = swap_type_of(swsusp_resume_device, swsusp_resume_block, | 235 | res = swap_type_of(swsusp_resume_device, swsusp_resume_block, |
236 | &hib_resume_bdev); | 236 | &hib_resume_bdev); |
237 | if (res < 0) | 237 | if (res < 0) |
238 | return res; | 238 | return res; |
239 | 239 | ||
240 | root_swap = res; | 240 | root_swap = res; |
241 | res = blkdev_get(hib_resume_bdev, FMODE_WRITE, NULL); | 241 | res = blkdev_get(hib_resume_bdev, FMODE_WRITE, NULL); |
242 | if (res) | 242 | if (res) |
243 | return res; | 243 | return res; |
244 | 244 | ||
245 | res = set_blocksize(hib_resume_bdev, PAGE_SIZE); | 245 | res = set_blocksize(hib_resume_bdev, PAGE_SIZE); |
246 | if (res < 0) | 246 | if (res < 0) |
247 | blkdev_put(hib_resume_bdev, FMODE_WRITE); | 247 | blkdev_put(hib_resume_bdev, FMODE_WRITE); |
248 | 248 | ||
249 | return res; | 249 | return res; |
250 | } | 250 | } |
251 | 251 | ||
252 | /** | 252 | /** |
253 | * write_page - Write one page to given swap location. | 253 | * write_page - Write one page to given swap location. |
254 | * @buf: Address we're writing. | 254 | * @buf: Address we're writing. |
255 | * @offset: Offset of the swap page we're writing to. | 255 | * @offset: Offset of the swap page we're writing to. |
256 | * @bio_chain: Link the next write BIO here | 256 | * @bio_chain: Link the next write BIO here |
257 | */ | 257 | */ |
258 | 258 | ||
259 | static int write_page(void *buf, sector_t offset, struct bio **bio_chain) | 259 | static int write_page(void *buf, sector_t offset, struct bio **bio_chain) |
260 | { | 260 | { |
261 | void *src; | 261 | void *src; |
262 | int ret; | 262 | int ret; |
263 | 263 | ||
264 | if (!offset) | 264 | if (!offset) |
265 | return -ENOSPC; | 265 | return -ENOSPC; |
266 | 266 | ||
267 | if (bio_chain) { | 267 | if (bio_chain) { |
268 | src = (void *)__get_free_page(__GFP_WAIT | __GFP_HIGH); | 268 | src = (void *)__get_free_page(__GFP_WAIT | __GFP_HIGH); |
269 | if (src) { | 269 | if (src) { |
270 | copy_page(src, buf); | 270 | copy_page(src, buf); |
271 | } else { | 271 | } else { |
272 | ret = hib_wait_on_bio_chain(bio_chain); /* Free pages */ | 272 | ret = hib_wait_on_bio_chain(bio_chain); /* Free pages */ |
273 | if (ret) | 273 | if (ret) |
274 | return ret; | 274 | return ret; |
275 | src = (void *)__get_free_page(__GFP_WAIT | __GFP_HIGH); | 275 | src = (void *)__get_free_page(__GFP_WAIT | __GFP_HIGH); |
276 | if (src) { | 276 | if (src) { |
277 | copy_page(src, buf); | 277 | copy_page(src, buf); |
278 | } else { | 278 | } else { |
279 | WARN_ON_ONCE(1); | 279 | WARN_ON_ONCE(1); |
280 | bio_chain = NULL; /* Go synchronous */ | 280 | bio_chain = NULL; /* Go synchronous */ |
281 | src = buf; | 281 | src = buf; |
282 | } | 282 | } |
283 | } | 283 | } |
284 | } else { | 284 | } else { |
285 | src = buf; | 285 | src = buf; |
286 | } | 286 | } |
287 | return hib_bio_write_page(offset, src, bio_chain); | 287 | return hib_bio_write_page(offset, src, bio_chain); |
288 | } | 288 | } |
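
The fallback ladder here matters: the caller may reuse @buf immediately after return, so the asynchronous path must snapshot it into a freshly allocated page; when allocation fails, waiting on the bio chain both detects I/O errors and releases the copies made earlier, and only if a second allocation still fails does the write degrade to synchronous I/O on @buf itself. A runnable userspace analogue of the copy-or-go-synchronous decision, with "async" simulated by a deferred queue (every name below is an illustrative stand-in, not kernel API):

/* build: cc -std=c99 -o fallback fallback.c */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PG 4096

struct chain { void *buf; struct chain *next; };

static int queue_async(void *copy, struct chain **chain)
{
	struct chain *c = malloc(sizeof(*c));

	if (!c) {
		free(copy);
		return -1;
	}
	c->buf = copy;
	c->next = *chain;		/* LIFO; ordering is irrelevant here */
	*chain = c;
	return 0;
}

static int write_sync(FILE *out, const void *buf)
{
	return fwrite(buf, 1, PG, out) == PG ? 0 : -1;
}

/* Mirrors write_page(): private copy + async queue when possible,
 * otherwise synchronous I/O on the caller's buffer. */
static int submit_page(FILE *out, const void *buf, struct chain **chain)
{
	if (chain) {
		void *copy = malloc(PG);

		if (copy) {
			memcpy(copy, buf, PG);	/* caller may now reuse buf */
			return queue_async(copy, chain);
		}
		/* the kernel first drains the chain and retries here */
	}
	return write_sync(out, buf);		/* go synchronous */
}

static int flush_chain(FILE *out, struct chain **chain)
{
	int err = 0;

	while (*chain) {
		struct chain *c = *chain;

		*chain = c->next;
		if (write_sync(out, c->buf))
			err = -1;
		free(c->buf);
		free(c);
	}
	return err;
}

int main(void)
{
	static char page[PG] = "payload";
	struct chain *chain = NULL;

	return submit_page(stdout, page, &chain) || flush_chain(stdout, &chain);
}
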
289 | 289 | ||
290 | static void release_swap_writer(struct swap_map_handle *handle) | 290 | static void release_swap_writer(struct swap_map_handle *handle) |
291 | { | 291 | { |
292 | if (handle->cur) | 292 | if (handle->cur) |
293 | free_page((unsigned long)handle->cur); | 293 | free_page((unsigned long)handle->cur); |
294 | handle->cur = NULL; | 294 | handle->cur = NULL; |
295 | } | 295 | } |
296 | 296 | ||
297 | static int get_swap_writer(struct swap_map_handle *handle) | 297 | static int get_swap_writer(struct swap_map_handle *handle) |
298 | { | 298 | { |
299 | int ret; | 299 | int ret; |
300 | 300 | ||
301 | ret = swsusp_swap_check(); | 301 | ret = swsusp_swap_check(); |
302 | if (ret) { | 302 | if (ret) { |
303 | if (ret != -ENOSPC) | 303 | if (ret != -ENOSPC) |
304 | printk(KERN_ERR "PM: Cannot find swap device, try " | 304 | printk(KERN_ERR "PM: Cannot find swap device, try " |
305 | "swapon -a.\n"); | 305 | "swapon -a.\n"); |
306 | return ret; | 306 | return ret; |
307 | } | 307 | } |
308 | handle->cur = (struct swap_map_page *)get_zeroed_page(GFP_KERNEL); | 308 | handle->cur = (struct swap_map_page *)get_zeroed_page(GFP_KERNEL); |
309 | if (!handle->cur) { | 309 | if (!handle->cur) { |
310 | ret = -ENOMEM; | 310 | ret = -ENOMEM; |
311 | goto err_close; | 311 | goto err_close; |
312 | } | 312 | } |
313 | handle->cur_swap = alloc_swapdev_block(root_swap); | 313 | handle->cur_swap = alloc_swapdev_block(root_swap); |
314 | if (!handle->cur_swap) { | 314 | if (!handle->cur_swap) { |
315 | ret = -ENOSPC; | 315 | ret = -ENOSPC; |
316 | goto err_rel; | 316 | goto err_rel; |
317 | } | 317 | } |
318 | handle->k = 0; | 318 | handle->k = 0; |
319 | handle->nr_free_pages = nr_free_pages() >> 1; | 319 | handle->nr_free_pages = nr_free_pages() >> 1; |
320 | handle->written = 0; | 320 | handle->written = 0; |
321 | handle->first_sector = handle->cur_swap; | 321 | handle->first_sector = handle->cur_swap; |
322 | return 0; | 322 | return 0; |
323 | err_rel: | 323 | err_rel: |
324 | release_swap_writer(handle); | 324 | release_swap_writer(handle); |
325 | err_close: | 325 | err_close: |
326 | swsusp_close(FMODE_WRITE); | 326 | swsusp_close(FMODE_WRITE); |
327 | return ret; | 327 | return ret; |
328 | } | 328 | } |
329 | 329 | ||
330 | static int swap_write_page(struct swap_map_handle *handle, void *buf, | 330 | static int swap_write_page(struct swap_map_handle *handle, void *buf, |
331 | struct bio **bio_chain) | 331 | struct bio **bio_chain) |
332 | { | 332 | { |
333 | int error = 0; | 333 | int error = 0; |
334 | sector_t offset; | 334 | sector_t offset; |
335 | 335 | ||
336 | if (!handle->cur) | 336 | if (!handle->cur) |
337 | return -EINVAL; | 337 | return -EINVAL; |
338 | offset = alloc_swapdev_block(root_swap); | 338 | offset = alloc_swapdev_block(root_swap); |
339 | error = write_page(buf, offset, bio_chain); | 339 | error = write_page(buf, offset, bio_chain); |
340 | if (error) | 340 | if (error) |
341 | return error; | 341 | return error; |
342 | handle->cur->entries[handle->k++] = offset; | 342 | handle->cur->entries[handle->k++] = offset; |
343 | if (handle->k >= MAP_PAGE_ENTRIES) { | 343 | if (handle->k >= MAP_PAGE_ENTRIES) { |
344 | offset = alloc_swapdev_block(root_swap); | 344 | offset = alloc_swapdev_block(root_swap); |
345 | if (!offset) | 345 | if (!offset) |
346 | return -ENOSPC; | 346 | return -ENOSPC; |
347 | handle->cur->next_swap = offset; | 347 | handle->cur->next_swap = offset; |
348 | error = write_page(handle->cur, handle->cur_swap, bio_chain); | 348 | error = write_page(handle->cur, handle->cur_swap, bio_chain); |
349 | if (error) | 349 | if (error) |
350 | goto out; | 350 | goto out; |
351 | clear_page(handle->cur); | 351 | clear_page(handle->cur); |
352 | handle->cur_swap = offset; | 352 | handle->cur_swap = offset; |
353 | handle->k = 0; | 353 | handle->k = 0; |
354 | } | 354 | } |
355 | if (bio_chain && ++handle->written > handle->nr_free_pages) { | 355 | if (bio_chain && ++handle->written > handle->nr_free_pages) { |
356 | error = hib_wait_on_bio_chain(bio_chain); | 356 | error = hib_wait_on_bio_chain(bio_chain); |
357 | if (error) | 357 | if (error) |
358 | goto out; | 358 | goto out; |
359 | handle->written = 0; | 359 | handle->written = 0; |
360 | } | 360 | } |
361 | out: | 361 | out: |
362 | return error; | 362 | return error; |
363 | } | 363 | } |
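
swap_write_page() interleaves an index with the data: each data page gets its own swap slot recorded in entries[], and once entries[] fills, a further slot is reserved for the next index page so the current one can be flushed with next_swap already pointing at it. The on-disk index being built has this shape (reconstructed from the usage here; MAP_PAGE_ENTRIES would be (PAGE_SIZE / sizeof(sector_t)) - 1, i.e. 511 with 4 KiB pages and 8-byte sectors, which is a configuration assumption):

/* One page of the image index, chained through the swap area. */
struct swap_map_page {
	sector_t entries[MAP_PAGE_ENTRIES]; /* where each data page went */
	sector_t next_swap;	/* sector of the next index page; 0 ends */
};
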
364 | 364 | ||
365 | static int flush_swap_writer(struct swap_map_handle *handle) | 365 | static int flush_swap_writer(struct swap_map_handle *handle) |
366 | { | 366 | { |
367 | if (handle->cur && handle->cur_swap) | 367 | if (handle->cur && handle->cur_swap) |
368 | return write_page(handle->cur, handle->cur_swap, NULL); | 368 | return write_page(handle->cur, handle->cur_swap, NULL); |
369 | else | 369 | else |
370 | return -EINVAL; | 370 | return -EINVAL; |
371 | } | 371 | } |
372 | 372 | ||
373 | static int swap_writer_finish(struct swap_map_handle *handle, | 373 | static int swap_writer_finish(struct swap_map_handle *handle, |
374 | unsigned int flags, int error) | 374 | unsigned int flags, int error) |
375 | { | 375 | { |
376 | if (!error) { | 376 | if (!error) { |
377 | flush_swap_writer(handle); | 377 | flush_swap_writer(handle); |
378 | printk(KERN_INFO "PM: S"); | 378 | printk(KERN_INFO "PM: S"); |
379 | error = mark_swapfiles(handle, flags); | 379 | error = mark_swapfiles(handle, flags); |
380 | printk("|\n"); | 380 | printk("|\n"); |
381 | } | 381 | } |
382 | 382 | ||
383 | if (error) | 383 | if (error) |
384 | free_all_swap_pages(root_swap); | 384 | free_all_swap_pages(root_swap); |
385 | release_swap_writer(handle); | 385 | release_swap_writer(handle); |
386 | swsusp_close(FMODE_WRITE); | 386 | swsusp_close(FMODE_WRITE); |
387 | 387 | ||
388 | return error; | 388 | return error; |
389 | } | 389 | } |
390 | 390 | ||
391 | /* We need to remember how much compressed data we need to read. */ | 391 | /* We need to remember how much compressed data we need to read. */ |
392 | #define LZO_HEADER sizeof(size_t) | 392 | #define LZO_HEADER sizeof(size_t) |
393 | 393 | ||
394 | /* Number of pages/bytes we'll compress at one time. */ | 394 | /* Number of pages/bytes we'll compress at one time. */ |
395 | #define LZO_UNC_PAGES 32 | 395 | #define LZO_UNC_PAGES 32 |
396 | #define LZO_UNC_SIZE (LZO_UNC_PAGES * PAGE_SIZE) | 396 | #define LZO_UNC_SIZE (LZO_UNC_PAGES * PAGE_SIZE) |
397 | 397 | ||
398 | /* Number of pages/bytes we need for compressed data (worst case). */ | 398 | /* Number of pages/bytes we need for compressed data (worst case). */ |
399 | #define LZO_CMP_PAGES DIV_ROUND_UP(lzo1x_worst_compress(LZO_UNC_SIZE) + \ | 399 | #define LZO_CMP_PAGES DIV_ROUND_UP(lzo1x_worst_compress(LZO_UNC_SIZE) + \ |
400 | LZO_HEADER, PAGE_SIZE) | 400 | LZO_HEADER, PAGE_SIZE) |
401 | #define LZO_CMP_SIZE (LZO_CMP_PAGES * PAGE_SIZE) | 401 | #define LZO_CMP_SIZE (LZO_CMP_PAGES * PAGE_SIZE) |
402 | 402 | ||
403 | /* Maximum number of threads for compression/decompression. */ | 403 | /* Maximum number of threads for compression/decompression. */ |
404 | #define LZO_THREADS 3 | 404 | #define LZO_THREADS 3 |
405 | 405 | ||
406 | /* Maximum number of pages for read buffering. */ | 406 | /* Maximum number of pages for read buffering. */ |
407 | #define LZO_READ_PAGES (MAP_PAGE_ENTRIES * 8) | 407 | #define LZO_READ_PAGES (MAP_PAGE_ENTRIES * 8) |
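
These constants pin down the worst-case geometry of one compressed block. A standalone check of the arithmetic (assuming 4 KiB pages, an 8-byte size_t, and the worst-case bound from <linux/lzo.h>, lzo1x_worst_compress(x) = x + x/16 + 64 + 3):

/* build: cc -std=c99 -o lzogeom lzogeom.c */
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define LZO_HEADER	sizeof(size_t)
#define LZO_UNC_PAGES	32
#define LZO_UNC_SIZE	(LZO_UNC_PAGES * PAGE_SIZE)
#define WORST(x)	((x) + ((x) / 16) + 64 + 3)	/* lzo1x bound */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define LZO_CMP_PAGES	DIV_ROUND_UP(WORST(LZO_UNC_SIZE) + LZO_HEADER, \
				     PAGE_SIZE)

int main(void)
{
	/* 131072 uncompressed bytes can grow to 139331 + 8 header bytes,
	 * i.e. 35 pages -- three more than went in, hence the padding. */
	printf("unc: %lu bytes (%d pages)\n",
	       (unsigned long)LZO_UNC_SIZE, LZO_UNC_PAGES);
	printf("cmp worst case: %lu bytes -> %lu pages\n",
	       (unsigned long)(WORST(LZO_UNC_SIZE) + LZO_HEADER),
	       (unsigned long)LZO_CMP_PAGES);
	return 0;
}
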
408 | 408 | ||
409 | 409 | ||
410 | /** | 410 | /** |
411 | * save_image - save the suspend image data | 411 | * save_image - save the suspend image data |
412 | */ | 412 | */ |
413 | 413 | ||
414 | static int save_image(struct swap_map_handle *handle, | 414 | static int save_image(struct swap_map_handle *handle, |
415 | struct snapshot_handle *snapshot, | 415 | struct snapshot_handle *snapshot, |
416 | unsigned int nr_to_write) | 416 | unsigned int nr_to_write) |
417 | { | 417 | { |
418 | unsigned int m; | 418 | unsigned int m; |
419 | int ret; | 419 | int ret; |
420 | int nr_pages; | 420 | int nr_pages; |
421 | int err2; | 421 | int err2; |
422 | struct bio *bio; | 422 | struct bio *bio; |
423 | struct timeval start; | 423 | struct timeval start; |
424 | struct timeval stop; | 424 | struct timeval stop; |
425 | 425 | ||
426 | printk(KERN_INFO "PM: Saving image data pages (%u pages) ... ", | 426 | printk(KERN_INFO "PM: Saving image data pages (%u pages) ... ", |
427 | nr_to_write); | 427 | nr_to_write); |
428 | m = nr_to_write / 100; | 428 | m = nr_to_write / 100; |
429 | if (!m) | 429 | if (!m) |
430 | m = 1; | 430 | m = 1; |
431 | nr_pages = 0; | 431 | nr_pages = 0; |
432 | bio = NULL; | 432 | bio = NULL; |
433 | do_gettimeofday(&start); | 433 | do_gettimeofday(&start); |
434 | while (1) { | 434 | while (1) { |
435 | ret = snapshot_read_next(snapshot); | 435 | ret = snapshot_read_next(snapshot); |
436 | if (ret <= 0) | 436 | if (ret <= 0) |
437 | break; | 437 | break; |
438 | ret = swap_write_page(handle, data_of(*snapshot), &bio); | 438 | ret = swap_write_page(handle, data_of(*snapshot), &bio); |
439 | if (ret) | 439 | if (ret) |
440 | break; | 440 | break; |
441 | if (!(nr_pages % m)) | 441 | if (!(nr_pages % m)) |
442 | printk(KERN_CONT "\b\b\b\b%3d%%", nr_pages / m); | 442 | printk(KERN_CONT "\b\b\b\b%3d%%", nr_pages / m); |
443 | nr_pages++; | 443 | nr_pages++; |
444 | } | 444 | } |
445 | err2 = hib_wait_on_bio_chain(&bio); | 445 | err2 = hib_wait_on_bio_chain(&bio); |
446 | do_gettimeofday(&stop); | 446 | do_gettimeofday(&stop); |
447 | if (!ret) | 447 | if (!ret) |
448 | ret = err2; | 448 | ret = err2; |
449 | if (!ret) | 449 | if (!ret) |
450 | printk(KERN_CONT "\b\b\b\bdone\n"); | 450 | printk(KERN_CONT "\b\b\b\bdone\n"); |
451 | else | 451 | else |
452 | printk(KERN_CONT "\n"); | 452 | printk(KERN_CONT "\n"); |
453 | swsusp_show_speed(&start, &stop, nr_to_write, "Wrote"); | 453 | swsusp_show_speed(&start, &stop, nr_to_write, "Wrote"); |
454 | return ret; | 454 | return ret; |
455 | } | 455 | } |
456 | 456 | ||
457 | /** | 457 | /** |
458 | * Structure used for CRC32. | 458 | * Structure used for CRC32. |
459 | */ | 459 | */ |
460 | struct crc_data { | 460 | struct crc_data { |
461 | struct task_struct *thr; /* thread */ | 461 | struct task_struct *thr; /* thread */ |
462 | atomic_t ready; /* ready to start flag */ | 462 | atomic_t ready; /* ready to start flag */ |
463 | atomic_t stop; /* ready to stop flag */ | 463 | atomic_t stop; /* ready to stop flag */ |
464 | unsigned run_threads; /* nr current threads */ | 464 | unsigned run_threads; /* nr current threads */ |
465 | wait_queue_head_t go; /* start crc update */ | 465 | wait_queue_head_t go; /* start crc update */ |
466 | wait_queue_head_t done; /* crc update done */ | 466 | wait_queue_head_t done; /* crc update done */ |
467 | u32 *crc32; /* points to handle's crc32 */ | 467 | u32 *crc32; /* points to handle's crc32 */ |
468 | size_t *unc_len[LZO_THREADS]; /* uncompressed lengths */ | 468 | size_t *unc_len[LZO_THREADS]; /* uncompressed lengths */ |
469 | unsigned char *unc[LZO_THREADS]; /* uncompressed data */ | 469 | unsigned char *unc[LZO_THREADS]; /* uncompressed data */ |
470 | }; | 470 | }; |
471 | 471 | ||
472 | /** | 472 | /** |
473 | * CRC32 update function that runs in its own thread. | 473 | * CRC32 update function that runs in its own thread. |
474 | */ | 474 | */ |
475 | static int crc32_threadfn(void *data) | 475 | static int crc32_threadfn(void *data) |
476 | { | 476 | { |
477 | struct crc_data *d = data; | 477 | struct crc_data *d = data; |
478 | unsigned i; | 478 | unsigned i; |
479 | 479 | ||
480 | while (1) { | 480 | while (1) { |
481 | wait_event(d->go, atomic_read(&d->ready) || | 481 | wait_event(d->go, atomic_read(&d->ready) || |
482 | kthread_should_stop()); | 482 | kthread_should_stop()); |
483 | if (kthread_should_stop()) { | 483 | if (kthread_should_stop()) { |
484 | d->thr = NULL; | 484 | d->thr = NULL; |
485 | atomic_set(&d->stop, 1); | 485 | atomic_set(&d->stop, 1); |
486 | wake_up(&d->done); | 486 | wake_up(&d->done); |
487 | break; | 487 | break; |
488 | } | 488 | } |
489 | atomic_set(&d->ready, 0); | 489 | atomic_set(&d->ready, 0); |
490 | 490 | ||
491 | for (i = 0; i < d->run_threads; i++) | 491 | for (i = 0; i < d->run_threads; i++) |
492 | *d->crc32 = crc32_le(*d->crc32, | 492 | *d->crc32 = crc32_le(*d->crc32, |
493 | d->unc[i], *d->unc_len[i]); | 493 | d->unc[i], *d->unc_len[i]); |
494 | atomic_set(&d->stop, 1); | 494 | atomic_set(&d->stop, 1); |
495 | wake_up(&d->done); | 495 | wake_up(&d->done); |
496 | } | 496 | } |
497 | return 0; | 497 | return 0; |
498 | } | 498 | } |
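
The handshake in crc32_threadfn() is a pair of waitqueues driven by two flags: the producer sets ready and wakes go, the worker does one batch, sets stop and wakes done, and kthread_should_stop() doubles as the exit signal. A pthread rendition of the same two-way handshake, with a mutex/condvar pair standing in for wait_event()/wake_up() plus atomics (all names below are illustrative):

/* build: cc -std=c99 -pthread -o worker worker.c */
#include <pthread.h>
#include <stdio.h>

struct worker {
	pthread_mutex_t lock;
	pthread_cond_t go, done;
	int ready, stop, quit;
	unsigned long sum;		/* stand-in for the CRC32 state */
	const unsigned char *buf;
	size_t len;
};

static void *workfn(void *arg)
{
	struct worker *w = arg;

	pthread_mutex_lock(&w->lock);
	for (;;) {
		while (!w->ready && !w->quit)	/* wait_event(d->go, ...) */
			pthread_cond_wait(&w->go, &w->lock);
		if (w->quit)			/* kthread_should_stop() */
			break;
		w->ready = 0;
		for (size_t i = 0; i < w->len; i++)
			w->sum += w->buf[i];	/* one batch of "CRC" work */
		w->stop = 1;			/* atomic_set(&d->stop, 1) */
		pthread_cond_signal(&w->done);	/* wake_up(&d->done) */
	}
	w->stop = 1;
	pthread_cond_signal(&w->done);
	pthread_mutex_unlock(&w->lock);
	return NULL;
}

int main(void)
{
	static const unsigned char data[] = "hibernate";
	struct worker w = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.go = PTHREAD_COND_INITIALIZER,
		.done = PTHREAD_COND_INITIALIZER,
		.buf = data, .len = sizeof(data) - 1,
	};
	pthread_t thr;

	pthread_create(&thr, NULL, workfn, &w);

	pthread_mutex_lock(&w.lock);
	w.ready = 1;				/* hand the worker one batch */
	pthread_cond_signal(&w.go);
	while (!w.stop)				/* wait_event(d->done, ...) */
		pthread_cond_wait(&w.done, &w.lock);
	w.stop = 0;
	w.quit = 1;				/* ask the worker to exit */
	pthread_cond_signal(&w.go);
	pthread_mutex_unlock(&w.lock);

	pthread_join(thr, NULL);
	printf("sum = %lu\n", w.sum);
	return 0;
}
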
499 | /** | 499 | /** |
500 | * Structure used for LZO data compression. | 500 | * Structure used for LZO data compression. |
501 | */ | 501 | */ |
502 | struct cmp_data { | 502 | struct cmp_data { |
503 | struct task_struct *thr; /* thread */ | 503 | struct task_struct *thr; /* thread */ |
504 | atomic_t ready; /* ready to start flag */ | 504 | atomic_t ready; /* ready to start flag */ |
505 | atomic_t stop; /* ready to stop flag */ | 505 | atomic_t stop; /* ready to stop flag */ |
506 | int ret; /* return code */ | 506 | int ret; /* return code */ |
507 | wait_queue_head_t go; /* start compression */ | 507 | wait_queue_head_t go; /* start compression */ |
508 | wait_queue_head_t done; /* compression done */ | 508 | wait_queue_head_t done; /* compression done */ |
509 | size_t unc_len; /* uncompressed length */ | 509 | size_t unc_len; /* uncompressed length */ |
510 | size_t cmp_len; /* compressed length */ | 510 | size_t cmp_len; /* compressed length */ |
511 | unsigned char unc[LZO_UNC_SIZE]; /* uncompressed buffer */ | 511 | unsigned char unc[LZO_UNC_SIZE]; /* uncompressed buffer */ |
512 | unsigned char cmp[LZO_CMP_SIZE]; /* compressed buffer */ | 512 | unsigned char cmp[LZO_CMP_SIZE]; /* compressed buffer */ |
513 | unsigned char wrk[LZO1X_1_MEM_COMPRESS]; /* compression workspace */ | 513 | unsigned char wrk[LZO1X_1_MEM_COMPRESS]; /* compression workspace */ |
514 | }; | 514 | }; |
515 | 515 | ||
516 | /** | 516 | /** |
517 | * Compression function that runs in its own thread. | 517 | * Compression function that runs in its own thread. |
518 | */ | 518 | */ |
519 | static int lzo_compress_threadfn(void *data) | 519 | static int lzo_compress_threadfn(void *data) |
520 | { | 520 | { |
521 | struct cmp_data *d = data; | 521 | struct cmp_data *d = data; |
522 | 522 | ||
523 | while (1) { | 523 | while (1) { |
524 | wait_event(d->go, atomic_read(&d->ready) || | 524 | wait_event(d->go, atomic_read(&d->ready) || |
525 | kthread_should_stop()); | 525 | kthread_should_stop()); |
526 | if (kthread_should_stop()) { | 526 | if (kthread_should_stop()) { |
527 | d->thr = NULL; | 527 | d->thr = NULL; |
528 | d->ret = -1; | 528 | d->ret = -1; |
529 | atomic_set(&d->stop, 1); | 529 | atomic_set(&d->stop, 1); |
530 | wake_up(&d->done); | 530 | wake_up(&d->done); |
531 | break; | 531 | break; |
532 | } | 532 | } |
533 | atomic_set(&d->ready, 0); | 533 | atomic_set(&d->ready, 0); |
534 | 534 | ||
535 | d->ret = lzo1x_1_compress(d->unc, d->unc_len, | 535 | d->ret = lzo1x_1_compress(d->unc, d->unc_len, |
536 | d->cmp + LZO_HEADER, &d->cmp_len, | 536 | d->cmp + LZO_HEADER, &d->cmp_len, |
537 | d->wrk); | 537 | d->wrk); |
538 | atomic_set(&d->stop, 1); | 538 | atomic_set(&d->stop, 1); |
539 | wake_up(&d->done); | 539 | wake_up(&d->done); |
540 | } | 540 | } |
541 | return 0; | 541 | return 0; |
542 | } | 542 | } |
543 | 543 | ||
544 | /** | 544 | /** |
545 | * save_image_lzo - Save the suspend image data compressed with LZO. | 545 | * save_image_lzo - Save the suspend image data compressed with LZO. |
546 | * @handle: Swap map handle to use for saving the image. | 546 | * @handle: Swap map handle to use for saving the image. |
547 | * @snapshot: Image to read data from. | 547 | * @snapshot: Image to read data from. |
548 | * @nr_to_write: Number of pages to save. | 548 | * @nr_to_write: Number of pages to save. |
549 | */ | 549 | */ |
550 | static int save_image_lzo(struct swap_map_handle *handle, | 550 | static int save_image_lzo(struct swap_map_handle *handle, |
551 | struct snapshot_handle *snapshot, | 551 | struct snapshot_handle *snapshot, |
552 | unsigned int nr_to_write) | 552 | unsigned int nr_to_write) |
553 | { | 553 | { |
554 | unsigned int m; | 554 | unsigned int m; |
555 | int ret = 0; | 555 | int ret = 0; |
556 | int nr_pages; | 556 | int nr_pages; |
557 | int err2; | 557 | int err2; |
558 | struct bio *bio; | 558 | struct bio *bio; |
559 | struct timeval start; | 559 | struct timeval start; |
560 | struct timeval stop; | 560 | struct timeval stop; |
561 | size_t off; | 561 | size_t off; |
562 | unsigned thr, run_threads, nr_threads; | 562 | unsigned thr, run_threads, nr_threads; |
563 | unsigned char *page = NULL; | 563 | unsigned char *page = NULL; |
564 | struct cmp_data *data = NULL; | 564 | struct cmp_data *data = NULL; |
565 | struct crc_data *crc = NULL; | 565 | struct crc_data *crc = NULL; |
566 | 566 | ||
567 | /* | 567 | /* |
568 | * We limit the number of compression threads to keep the memory | 568 | * We limit the number of compression threads to keep the memory |
569 | * footprint bounded. | 569 | * footprint bounded. |
570 | */ | 570 | */ |
571 | nr_threads = num_online_cpus() - 1; | 571 | nr_threads = num_online_cpus() - 1; |
572 | nr_threads = clamp_val(nr_threads, 1, LZO_THREADS); | 572 | nr_threads = clamp_val(nr_threads, 1, LZO_THREADS); |
573 | 573 | ||
574 | page = (void *)__get_free_page(__GFP_WAIT | __GFP_HIGH); | 574 | page = (void *)__get_free_page(__GFP_WAIT | __GFP_HIGH); |
575 | if (!page) { | 575 | if (!page) { |
576 | printk(KERN_ERR "PM: Failed to allocate LZO page\n"); | 576 | printk(KERN_ERR "PM: Failed to allocate LZO page\n"); |
577 | ret = -ENOMEM; | 577 | ret = -ENOMEM; |
578 | goto out_clean; | 578 | goto out_clean; |
579 | } | 579 | } |
580 | 580 | ||
581 | data = vmalloc(sizeof(*data) * nr_threads); | 581 | data = vmalloc(sizeof(*data) * nr_threads); |
582 | if (!data) { | 582 | if (!data) { |
583 | printk(KERN_ERR "PM: Failed to allocate LZO data\n"); | 583 | printk(KERN_ERR "PM: Failed to allocate LZO data\n"); |
584 | ret = -ENOMEM; | 584 | ret = -ENOMEM; |
585 | goto out_clean; | 585 | goto out_clean; |
586 | } | 586 | } |
587 | for (thr = 0; thr < nr_threads; thr++) | 587 | for (thr = 0; thr < nr_threads; thr++) |
588 | memset(&data[thr], 0, offsetof(struct cmp_data, go)); | 588 | memset(&data[thr], 0, offsetof(struct cmp_data, go)); |
589 | 589 | ||
590 | crc = kmalloc(sizeof(*crc), GFP_KERNEL); | 590 | crc = kmalloc(sizeof(*crc), GFP_KERNEL); |
591 | if (!crc) { | 591 | if (!crc) { |
592 | printk(KERN_ERR "PM: Failed to allocate crc\n"); | 592 | printk(KERN_ERR "PM: Failed to allocate crc\n"); |
593 | ret = -ENOMEM; | 593 | ret = -ENOMEM; |
594 | goto out_clean; | 594 | goto out_clean; |
595 | } | 595 | } |
596 | memset(crc, 0, offsetof(struct crc_data, go)); | 596 | memset(crc, 0, offsetof(struct crc_data, go)); |
597 | 597 | ||
598 | /* | 598 | /* |
599 | * Start the compression threads. | 599 | * Start the compression threads. |
600 | */ | 600 | */ |
601 | for (thr = 0; thr < nr_threads; thr++) { | 601 | for (thr = 0; thr < nr_threads; thr++) { |
602 | init_waitqueue_head(&data[thr].go); | 602 | init_waitqueue_head(&data[thr].go); |
603 | init_waitqueue_head(&data[thr].done); | 603 | init_waitqueue_head(&data[thr].done); |
604 | 604 | ||
605 | data[thr].thr = kthread_run(lzo_compress_threadfn, | 605 | data[thr].thr = kthread_run(lzo_compress_threadfn, |
606 | &data[thr], | 606 | &data[thr], |
607 | "image_compress/%u", thr); | 607 | "image_compress/%u", thr); |
608 | if (IS_ERR(data[thr].thr)) { | 608 | if (IS_ERR(data[thr].thr)) { |
609 | data[thr].thr = NULL; | 609 | data[thr].thr = NULL; |
610 | printk(KERN_ERR | 610 | printk(KERN_ERR |
611 | "PM: Cannot start compression threads\n"); | 611 | "PM: Cannot start compression threads\n"); |
612 | ret = -ENOMEM; | 612 | ret = -ENOMEM; |
613 | goto out_clean; | 613 | goto out_clean; |
614 | } | 614 | } |
615 | } | 615 | } |
616 | 616 | ||
617 | /* | 617 | /* |
618 | * Adjust number of free pages after all allocations have been done. | 618 | * Adjust number of free pages after all allocations have been done. |
619 | * We don't want to run out of pages when writing. | 619 | * We don't want to run out of pages when writing. |
620 | */ | 620 | */ |
621 | handle->nr_free_pages = nr_free_pages() >> 1; | 621 | handle->nr_free_pages = nr_free_pages() >> 1; |
622 | 622 | ||
623 | /* | 623 | /* |
624 | * Start the CRC32 thread. | 624 | * Start the CRC32 thread. |
625 | */ | 625 | */ |
626 | init_waitqueue_head(&crc->go); | 626 | init_waitqueue_head(&crc->go); |
627 | init_waitqueue_head(&crc->done); | 627 | init_waitqueue_head(&crc->done); |
628 | 628 | ||
629 | handle->crc32 = 0; | 629 | handle->crc32 = 0; |
630 | crc->crc32 = &handle->crc32; | 630 | crc->crc32 = &handle->crc32; |
631 | for (thr = 0; thr < nr_threads; thr++) { | 631 | for (thr = 0; thr < nr_threads; thr++) { |
632 | crc->unc[thr] = data[thr].unc; | 632 | crc->unc[thr] = data[thr].unc; |
633 | crc->unc_len[thr] = &data[thr].unc_len; | 633 | crc->unc_len[thr] = &data[thr].unc_len; |
634 | } | 634 | } |
635 | 635 | ||
636 | crc->thr = kthread_run(crc32_threadfn, crc, "image_crc32"); | 636 | crc->thr = kthread_run(crc32_threadfn, crc, "image_crc32"); |
637 | if (IS_ERR(crc->thr)) { | 637 | if (IS_ERR(crc->thr)) { |
638 | crc->thr = NULL; | 638 | crc->thr = NULL; |
639 | printk(KERN_ERR "PM: Cannot start CRC32 thread\n"); | 639 | printk(KERN_ERR "PM: Cannot start CRC32 thread\n"); |
640 | ret = -ENOMEM; | 640 | ret = -ENOMEM; |
641 | goto out_clean; | 641 | goto out_clean; |
642 | } | 642 | } |
643 | 643 | ||
644 | printk(KERN_INFO | 644 | printk(KERN_INFO |
645 | "PM: Using %u thread(s) for compression.\n" | 645 | "PM: Using %u thread(s) for compression.\n" |
646 | "PM: Compressing and saving image data (%u pages) ... ", | 646 | "PM: Compressing and saving image data (%u pages) ... ", |
647 | nr_threads, nr_to_write); | 647 | nr_threads, nr_to_write); |
648 | m = nr_to_write / 100; | 648 | m = nr_to_write / 100; |
649 | if (!m) | 649 | if (!m) |
650 | m = 1; | 650 | m = 1; |
651 | nr_pages = 0; | 651 | nr_pages = 0; |
652 | bio = NULL; | 652 | bio = NULL; |
653 | do_gettimeofday(&start); | 653 | do_gettimeofday(&start); |
654 | for (;;) { | 654 | for (;;) { |
655 | for (thr = 0; thr < nr_threads; thr++) { | 655 | for (thr = 0; thr < nr_threads; thr++) { |
656 | for (off = 0; off < LZO_UNC_SIZE; off += PAGE_SIZE) { | 656 | for (off = 0; off < LZO_UNC_SIZE; off += PAGE_SIZE) { |
657 | ret = snapshot_read_next(snapshot); | 657 | ret = snapshot_read_next(snapshot); |
658 | if (ret < 0) | 658 | if (ret < 0) |
659 | goto out_finish; | 659 | goto out_finish; |
660 | 660 | ||
661 | if (!ret) | 661 | if (!ret) |
662 | break; | 662 | break; |
663 | 663 | ||
664 | memcpy(data[thr].unc + off, | 664 | memcpy(data[thr].unc + off, |
665 | data_of(*snapshot), PAGE_SIZE); | 665 | data_of(*snapshot), PAGE_SIZE); |
666 | 666 | ||
667 | if (!(nr_pages % m)) | 667 | if (!(nr_pages % m)) |
668 | printk(KERN_CONT "\b\b\b\b%3d%%", | 668 | printk(KERN_CONT "\b\b\b\b%3d%%", |
669 | nr_pages / m); | 669 | nr_pages / m); |
670 | nr_pages++; | 670 | nr_pages++; |
671 | } | 671 | } |
672 | if (!off) | 672 | if (!off) |
673 | break; | 673 | break; |
674 | 674 | ||
675 | data[thr].unc_len = off; | 675 | data[thr].unc_len = off; |
676 | 676 | ||
677 | atomic_set(&data[thr].ready, 1); | 677 | atomic_set(&data[thr].ready, 1); |
678 | wake_up(&data[thr].go); | 678 | wake_up(&data[thr].go); |
679 | } | 679 | } |
680 | 680 | ||
681 | if (!thr) | 681 | if (!thr) |
682 | break; | 682 | break; |
683 | 683 | ||
684 | crc->run_threads = thr; | 684 | crc->run_threads = thr; |
685 | atomic_set(&crc->ready, 1); | 685 | atomic_set(&crc->ready, 1); |
686 | wake_up(&crc->go); | 686 | wake_up(&crc->go); |
687 | 687 | ||
688 | for (run_threads = thr, thr = 0; thr < run_threads; thr++) { | 688 | for (run_threads = thr, thr = 0; thr < run_threads; thr++) { |
689 | wait_event(data[thr].done, | 689 | wait_event(data[thr].done, |
690 | atomic_read(&data[thr].stop)); | 690 | atomic_read(&data[thr].stop)); |
691 | atomic_set(&data[thr].stop, 0); | 691 | atomic_set(&data[thr].stop, 0); |
692 | 692 | ||
693 | ret = data[thr].ret; | 693 | ret = data[thr].ret; |
694 | 694 | ||
695 | if (ret < 0) { | 695 | if (ret < 0) { |
696 | printk(KERN_ERR "PM: LZO compression failed\n"); | 696 | printk(KERN_ERR "PM: LZO compression failed\n"); |
697 | goto out_finish; | 697 | goto out_finish; |
698 | } | 698 | } |
699 | 699 | ||
700 | if (unlikely(!data[thr].cmp_len || | 700 | if (unlikely(!data[thr].cmp_len || |
701 | data[thr].cmp_len > | 701 | data[thr].cmp_len > |
702 | lzo1x_worst_compress(data[thr].unc_len))) { | 702 | lzo1x_worst_compress(data[thr].unc_len))) { |
703 | printk(KERN_ERR | 703 | printk(KERN_ERR |
704 | "PM: Invalid LZO compressed length\n"); | 704 | "PM: Invalid LZO compressed length\n"); |
705 | ret = -1; | 705 | ret = -1; |
706 | goto out_finish; | 706 | goto out_finish; |
707 | } | 707 | } |
708 | 708 | ||
709 | *(size_t *)data[thr].cmp = data[thr].cmp_len; | 709 | *(size_t *)data[thr].cmp = data[thr].cmp_len; |
710 | 710 | ||
711 | /* | 711 | /* |
712 | * Given we are writing one page at a time to disk, we | 712 | * Given we are writing one page at a time to disk, we |
713 | * copy that much from the buffer, although the last | 713 | * copy that much from the buffer, although the last |
714 | * bit will likely be smaller than a full page. This is | 714 | * bit will likely be smaller than a full page. This is |
715 | * OK - we saved the length of the compressed data, so | 715 | * OK - we saved the length of the compressed data, so |
716 | * any garbage at the end will be discarded when we | 716 | * any garbage at the end will be discarded when we |
717 | * read it. | 717 | * read it. |
718 | */ | 718 | */ |
719 | for (off = 0; | 719 | for (off = 0; |
720 | off < LZO_HEADER + data[thr].cmp_len; | 720 | off < LZO_HEADER + data[thr].cmp_len; |
721 | off += PAGE_SIZE) { | 721 | off += PAGE_SIZE) { |
722 | memcpy(page, data[thr].cmp + off, PAGE_SIZE); | 722 | memcpy(page, data[thr].cmp + off, PAGE_SIZE); |
723 | 723 | ||
724 | ret = swap_write_page(handle, page, &bio); | 724 | ret = swap_write_page(handle, page, &bio); |
725 | if (ret) | 725 | if (ret) |
726 | goto out_finish; | 726 | goto out_finish; |
727 | } | 727 | } |
728 | } | 728 | } |
729 | 729 | ||
730 | wait_event(crc->done, atomic_read(&crc->stop)); | 730 | wait_event(crc->done, atomic_read(&crc->stop)); |
731 | atomic_set(&crc->stop, 0); | 731 | atomic_set(&crc->stop, 0); |
732 | } | 732 | } |
733 | 733 | ||
734 | out_finish: | 734 | out_finish: |
735 | err2 = hib_wait_on_bio_chain(&bio); | 735 | err2 = hib_wait_on_bio_chain(&bio); |
736 | do_gettimeofday(&stop); | 736 | do_gettimeofday(&stop); |
737 | if (!ret) | 737 | if (!ret) |
738 | ret = err2; | 738 | ret = err2; |
739 | if (!ret) { | 739 | if (!ret) { |
740 | printk(KERN_CONT "\b\b\b\bdone\n"); | 740 | printk(KERN_CONT "\b\b\b\bdone\n"); |
741 | } else { | 741 | } else { |
742 | printk(KERN_CONT "\n"); | 742 | printk(KERN_CONT "\n"); |
743 | } | 743 | } |
744 | swsusp_show_speed(&start, &stop, nr_to_write, "Wrote"); | 744 | swsusp_show_speed(&start, &stop, nr_to_write, "Wrote"); |
745 | out_clean: | 745 | out_clean: |
746 | if (crc) { | 746 | if (crc) { |
747 | if (crc->thr) | 747 | if (crc->thr) |
748 | kthread_stop(crc->thr); | 748 | kthread_stop(crc->thr); |
749 | kfree(crc); | 749 | kfree(crc); |
750 | } | 750 | } |
751 | if (data) { | 751 | if (data) { |
752 | for (thr = 0; thr < nr_threads; thr++) | 752 | for (thr = 0; thr < nr_threads; thr++) |
753 | if (data[thr].thr) | 753 | if (data[thr].thr) |
754 | kthread_stop(data[thr].thr); | 754 | kthread_stop(data[thr].thr); |
755 | vfree(data); | 755 | vfree(data); |
756 | } | 756 | } |
757 | if (page) free_page((unsigned long)page); | 757 | if (page) free_page((unsigned long)page); |
758 | 758 | ||
759 | return ret; | 759 | return ret; |
760 | } | 760 | } |
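
A note on the clamp at the top of this function: num_online_cpus() - 1 deliberately leaves one CPU to run the read/copy loop that feeds the workers, and clamp_val(..., 1, LZO_THREADS) pins the result to [1, 3]. So a uniprocessor still gets one compressor thread (the bare subtraction would yield zero), a quad-core gets three, and larger machines stay at three, since every extra thread costs another struct cmp_data, a few hundred kilobytes of buffers per the definition above.
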
761 | 761 | ||
762 | /** | 762 | /** |
763 | * enough_swap - Make sure we have enough swap to save the image. | 763 | * enough_swap - Make sure we have enough swap to save the image. |
764 | * | 764 | * |
765 | * Returns TRUE or FALSE after checking the total amount of swap | 765 | * Returns TRUE or FALSE after checking the total amount of swap |
766 | * space available on the resume partition. | 766 | * space available on the resume partition. |
767 | */ | 767 | */ |
768 | 768 | ||
769 | static int enough_swap(unsigned int nr_pages, unsigned int flags) | 769 | static int enough_swap(unsigned int nr_pages, unsigned int flags) |
770 | { | 770 | { |
771 | unsigned int free_swap = count_swap_pages(root_swap, 1); | 771 | unsigned int free_swap = count_swap_pages(root_swap, 1); |
772 | unsigned int required; | 772 | unsigned int required; |
773 | 773 | ||
774 | pr_debug("PM: Free swap pages: %u\n", free_swap); | 774 | pr_debug("PM: Free swap pages: %u\n", free_swap); |
775 | 775 | ||
776 | required = PAGES_FOR_IO + ((flags & SF_NOCOMPRESS_MODE) ? | 776 | required = PAGES_FOR_IO + nr_pages; |
777 | nr_pages : (nr_pages * LZO_CMP_PAGES) / LZO_UNC_PAGES + 1); | ||
778 | return free_swap > required; | 777 | return free_swap > required; |
779 | } | 778 | } |
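
With the change shown here (right-hand column), enough_swap() models only the uncompressed case: required = PAGES_FOR_IO + nr_pages. A worked example, assuming 4 KiB pages and the usual definition of PAGES_FOR_IO as 4 MiB worth of pages (1024): an uncompressed 1 GiB image has nr_pages = 262144, so the check wants 263168 free swap pages, about 1028 MiB. The deleted left-hand expression instead scaled nr_pages by LZO_CMP_PAGES / LZO_UNC_PAGES (35/32) to bound the compressed case; since swsusp_write() below now calls enough_swap() only under SF_NOCOMPRESS_MODE, a compressed image that really does outgrow swap fails with -ENOSPC when alloc_swapdev_block() returns 0 in write_page().
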
780 | 779 | ||
781 | /** | 780 | /** |
782 | * swsusp_write - Write entire image and metadata. | 781 | * swsusp_write - Write entire image and metadata. |
783 | * @flags: flags to pass to the "boot" kernel in the image header | 782 | * @flags: flags to pass to the "boot" kernel in the image header |
784 | * | 783 | * |
785 | * It is important _NOT_ to umount filesystems at this point. We want | 784 | * It is important _NOT_ to umount filesystems at this point. We want |
786 | them synced (in case something goes wrong) but we DO NOT want to mark | 785 | them synced (in case something goes wrong) but we DO NOT want to mark |
787 | the filesystems clean: they are not. (And it does not matter; if we | 786 | the filesystems clean: they are not. (And it does not matter; if we |
788 | resume correctly, we'll mark the system clean anyway.) | 787 | resume correctly, we'll mark the system clean anyway.) |
789 | */ | 788 | */ |
790 | 789 | ||
791 | int swsusp_write(unsigned int flags) | 790 | int swsusp_write(unsigned int flags) |
792 | { | 791 | { |
793 | struct swap_map_handle handle; | 792 | struct swap_map_handle handle; |
794 | struct snapshot_handle snapshot; | 793 | struct snapshot_handle snapshot; |
795 | struct swsusp_info *header; | 794 | struct swsusp_info *header; |
796 | unsigned long pages; | 795 | unsigned long pages; |
797 | int error; | 796 | int error; |
798 | 797 | ||
799 | pages = snapshot_get_image_size(); | 798 | pages = snapshot_get_image_size(); |
800 | error = get_swap_writer(&handle); | 799 | error = get_swap_writer(&handle); |
801 | if (error) { | 800 | if (error) { |
802 | printk(KERN_ERR "PM: Cannot get swap writer\n"); | 801 | printk(KERN_ERR "PM: Cannot get swap writer\n"); |
803 | return error; | 802 | return error; |
804 | } | 803 | } |
805 | if (!enough_swap(pages, flags)) { | 804 | if (flags & SF_NOCOMPRESS_MODE) { |
806 | printk(KERN_ERR "PM: Not enough free swap\n"); | 805 | if (!enough_swap(pages, flags)) { |
807 | error = -ENOSPC; | 806 | printk(KERN_ERR "PM: Not enough free swap\n"); |
808 | goto out_finish; | 807 | error = -ENOSPC; |
808 | goto out_finish; | ||
809 | } | ||
809 | } | 810 | } |
810 | memset(&snapshot, 0, sizeof(struct snapshot_handle)); | 811 | memset(&snapshot, 0, sizeof(struct snapshot_handle)); |
811 | error = snapshot_read_next(&snapshot); | 812 | error = snapshot_read_next(&snapshot); |
812 | if (error < PAGE_SIZE) { | 813 | if (error < PAGE_SIZE) { |
813 | if (error >= 0) | 814 | if (error >= 0) |
814 | error = -EFAULT; | 815 | error = -EFAULT; |
815 | 816 | ||
816 | goto out_finish; | 817 | goto out_finish; |
817 | } | 818 | } |
818 | header = (struct swsusp_info *)data_of(snapshot); | 819 | header = (struct swsusp_info *)data_of(snapshot); |
819 | error = swap_write_page(&handle, header, NULL); | 820 | error = swap_write_page(&handle, header, NULL); |
820 | if (!error) { | 821 | if (!error) { |
821 | error = (flags & SF_NOCOMPRESS_MODE) ? | 822 | error = (flags & SF_NOCOMPRESS_MODE) ? |
822 | save_image(&handle, &snapshot, pages - 1) : | 823 | save_image(&handle, &snapshot, pages - 1) : |
823 | save_image_lzo(&handle, &snapshot, pages - 1); | 824 | save_image_lzo(&handle, &snapshot, pages - 1); |
824 | } | 825 | } |
825 | out_finish: | 826 | out_finish: |
826 | error = swap_writer_finish(&handle, flags, error); | 827 | error = swap_writer_finish(&handle, flags, error); |
827 | return error; | 828 | return error; |
828 | } | 829 | } |
829 | 830 | ||
830 | /** | 831 | /** |
831 | * The following functions allow us to read data using a swap map | 832 | * The following functions allow us to read data using a swap map |
832 | * in a file-like way. | 833 | * in a file-like way. |
833 | */ | 834 | */ |
834 | 835 | ||
835 | static void release_swap_reader(struct swap_map_handle *handle) | 836 | static void release_swap_reader(struct swap_map_handle *handle) |
836 | { | 837 | { |
837 | struct swap_map_page_list *tmp; | 838 | struct swap_map_page_list *tmp; |
838 | 839 | ||
839 | while (handle->maps) { | 840 | while (handle->maps) { |
840 | if (handle->maps->map) | 841 | if (handle->maps->map) |
841 | free_page((unsigned long)handle->maps->map); | 842 | free_page((unsigned long)handle->maps->map); |
842 | tmp = handle->maps; | 843 | tmp = handle->maps; |
843 | handle->maps = handle->maps->next; | 844 | handle->maps = handle->maps->next; |
844 | kfree(tmp); | 845 | kfree(tmp); |
845 | } | 846 | } |
846 | handle->cur = NULL; | 847 | handle->cur = NULL; |
847 | } | 848 | } |
848 | 849 | ||
849 | static int get_swap_reader(struct swap_map_handle *handle, | 850 | static int get_swap_reader(struct swap_map_handle *handle, |
850 | unsigned int *flags_p) | 851 | unsigned int *flags_p) |
851 | { | 852 | { |
852 | int error; | 853 | int error; |
853 | struct swap_map_page_list *tmp, *last; | 854 | struct swap_map_page_list *tmp, *last; |
854 | sector_t offset; | 855 | sector_t offset; |
855 | 856 | ||
856 | *flags_p = swsusp_header->flags; | 857 | *flags_p = swsusp_header->flags; |
857 | 858 | ||
858 | if (!swsusp_header->image) /* how can this happen? */ | 859 | if (!swsusp_header->image) /* how can this happen? */ |
859 | return -EINVAL; | 860 | return -EINVAL; |
860 | 861 | ||
861 | handle->cur = NULL; | 862 | handle->cur = NULL; |
862 | last = handle->maps = NULL; | 863 | last = handle->maps = NULL; |
863 | offset = swsusp_header->image; | 864 | offset = swsusp_header->image; |
864 | while (offset) { | 865 | while (offset) { |
865 | tmp = kmalloc(sizeof(*handle->maps), GFP_KERNEL); | 866 | tmp = kmalloc(sizeof(*handle->maps), GFP_KERNEL); |
866 | if (!tmp) { | 867 | if (!tmp) { |
867 | release_swap_reader(handle); | 868 | release_swap_reader(handle); |
868 | return -ENOMEM; | 869 | return -ENOMEM; |
869 | } | 870 | } |
870 | memset(tmp, 0, sizeof(*tmp)); | 871 | memset(tmp, 0, sizeof(*tmp)); |
871 | if (!handle->maps) | 872 | if (!handle->maps) |
872 | handle->maps = tmp; | 873 | handle->maps = tmp; |
873 | if (last) | 874 | if (last) |
874 | last->next = tmp; | 875 | last->next = tmp; |
875 | last = tmp; | 876 | last = tmp; |
876 | 877 | ||
877 | tmp->map = (struct swap_map_page *) | 878 | tmp->map = (struct swap_map_page *) |
878 | __get_free_page(__GFP_WAIT | __GFP_HIGH); | 879 | __get_free_page(__GFP_WAIT | __GFP_HIGH); |
879 | if (!tmp->map) { | 880 | if (!tmp->map) { |
880 | release_swap_reader(handle); | 881 | release_swap_reader(handle); |
881 | return -ENOMEM; | 882 | return -ENOMEM; |
882 | } | 883 | } |
883 | 884 | ||
884 | error = hib_bio_read_page(offset, tmp->map, NULL); | 885 | error = hib_bio_read_page(offset, tmp->map, NULL); |
885 | if (error) { | 886 | if (error) { |
886 | release_swap_reader(handle); | 887 | release_swap_reader(handle); |
887 | return error; | 888 | return error; |
888 | } | 889 | } |
889 | offset = tmp->map->next_swap; | 890 | offset = tmp->map->next_swap; |
890 | } | 891 | } |
891 | handle->k = 0; | 892 | handle->k = 0; |
892 | handle->cur = handle->maps->map; | 893 | handle->cur = handle->maps->map; |
893 | return 0; | 894 | return 0; |
894 | } | 895 | } |
895 | 896 | ||
896 | static int swap_read_page(struct swap_map_handle *handle, void *buf, | 897 | static int swap_read_page(struct swap_map_handle *handle, void *buf, |
897 | struct bio **bio_chain) | 898 | struct bio **bio_chain) |
898 | { | 899 | { |
899 | sector_t offset; | 900 | sector_t offset; |
900 | int error; | 901 | int error; |
901 | struct swap_map_page_list *tmp; | 902 | struct swap_map_page_list *tmp; |
902 | 903 | ||
903 | if (!handle->cur) | 904 | if (!handle->cur) |
904 | return -EINVAL; | 905 | return -EINVAL; |
905 | offset = handle->cur->entries[handle->k]; | 906 | offset = handle->cur->entries[handle->k]; |
906 | if (!offset) | 907 | if (!offset) |
907 | return -EFAULT; | 908 | return -EFAULT; |
908 | error = hib_bio_read_page(offset, buf, bio_chain); | 909 | error = hib_bio_read_page(offset, buf, bio_chain); |
909 | if (error) | 910 | if (error) |
910 | return error; | 911 | return error; |
911 | if (++handle->k >= MAP_PAGE_ENTRIES) { | 912 | if (++handle->k >= MAP_PAGE_ENTRIES) { |
912 | handle->k = 0; | 913 | handle->k = 0; |
913 | free_page((unsigned long)handle->maps->map); | 914 | free_page((unsigned long)handle->maps->map); |
914 | tmp = handle->maps; | 915 | tmp = handle->maps; |
915 | handle->maps = handle->maps->next; | 916 | handle->maps = handle->maps->next; |
916 | kfree(tmp); | 917 | kfree(tmp); |
917 | if (!handle->maps) | 918 | if (!handle->maps) |
918 | release_swap_reader(handle); | 919 | release_swap_reader(handle); |
919 | else | 920 | else |
920 | handle->cur = handle->maps->map; | 921 | handle->cur = handle->maps->map; |
921 | } | 922 | } |
922 | return error; | 923 | return error; |
923 | } | 924 | } |
924 | 925 | ||
925 | static int swap_reader_finish(struct swap_map_handle *handle) | 926 | static int swap_reader_finish(struct swap_map_handle *handle) |
926 | { | 927 | { |
927 | release_swap_reader(handle); | 928 | release_swap_reader(handle); |
928 | 929 | ||
929 | return 0; | 930 | return 0; |
930 | } | 931 | } |
931 | 932 | ||
932 | /** | 933 | /** |
933 | * load_image - load the image using the swap map handle | 934 | * load_image - load the image using the swap map handle |
934 | * @handle and the snapshot handle @snapshot | 935 | * @handle and the snapshot handle @snapshot |
935 | * (assume there are @nr_to_read pages to load) | 936 | * (assume there are @nr_to_read pages to load) |
936 | */ | 937 | */ |
937 | 938 | ||
938 | static int load_image(struct swap_map_handle *handle, | 939 | static int load_image(struct swap_map_handle *handle, |
939 | struct snapshot_handle *snapshot, | 940 | struct snapshot_handle *snapshot, |
940 | unsigned int nr_to_read) | 941 | unsigned int nr_to_read) |
941 | { | 942 | { |
942 | unsigned int m; | 943 | unsigned int m; |
943 | int ret = 0; | 944 | int ret = 0; |
944 | struct timeval start; | 945 | struct timeval start; |
945 | struct timeval stop; | 946 | struct timeval stop; |
946 | struct bio *bio; | 947 | struct bio *bio; |
947 | int err2; | 948 | int err2; |
948 | unsigned nr_pages; | 949 | unsigned nr_pages; |
949 | 950 | ||
950 | printk(KERN_INFO "PM: Loading image data pages (%u pages) ... ", | 951 | printk(KERN_INFO "PM: Loading image data pages (%u pages) ... ", |
951 | nr_to_read); | 952 | nr_to_read); |
952 | m = nr_to_read / 100; | 953 | m = nr_to_read / 100; |
953 | if (!m) | 954 | if (!m) |
954 | m = 1; | 955 | m = 1; |
955 | nr_pages = 0; | 956 | nr_pages = 0; |
956 | bio = NULL; | 957 | bio = NULL; |
957 | do_gettimeofday(&start); | 958 | do_gettimeofday(&start); |
958 | for ( ; ; ) { | 959 | for ( ; ; ) { |
959 | ret = snapshot_write_next(snapshot); | 960 | ret = snapshot_write_next(snapshot); |
960 | if (ret <= 0) | 961 | if (ret <= 0) |
961 | break; | 962 | break; |
962 | ret = swap_read_page(handle, data_of(*snapshot), &bio); | 963 | ret = swap_read_page(handle, data_of(*snapshot), &bio); |
963 | if (ret) | 964 | if (ret) |
964 | break; | 965 | break; |
965 | if (snapshot->sync_read) | 966 | if (snapshot->sync_read) |
966 | ret = hib_wait_on_bio_chain(&bio); | 967 | ret = hib_wait_on_bio_chain(&bio); |
967 | if (ret) | 968 | if (ret) |
968 | break; | 969 | break; |
969 | if (!(nr_pages % m)) | 970 | if (!(nr_pages % m)) |
970 | printk("\b\b\b\b%3d%%", nr_pages / m); | 971 | printk("\b\b\b\b%3d%%", nr_pages / m); |
971 | nr_pages++; | 972 | nr_pages++; |
972 | } | 973 | } |
973 | err2 = hib_wait_on_bio_chain(&bio); | 974 | err2 = hib_wait_on_bio_chain(&bio); |
974 | do_gettimeofday(&stop); | 975 | do_gettimeofday(&stop); |
975 | if (!ret) | 976 | if (!ret) |
976 | ret = err2; | 977 | ret = err2; |
977 | if (!ret) { | 978 | if (!ret) { |
978 | printk("\b\b\b\bdone\n"); | 979 | printk("\b\b\b\bdone\n"); |
979 | snapshot_write_finalize(snapshot); | 980 | snapshot_write_finalize(snapshot); |
980 | if (!snapshot_image_loaded(snapshot)) | 981 | if (!snapshot_image_loaded(snapshot)) |
981 | ret = -ENODATA; | 982 | ret = -ENODATA; |
982 | } else | 983 | } else |
983 | printk("\n"); | 984 | printk("\n"); |
984 | swsusp_show_speed(&start, &stop, nr_to_read, "Read"); | 985 | swsusp_show_speed(&start, &stop, nr_to_read, "Read"); |
985 | return ret; | 986 | return ret; |
986 | } | 987 | } |
987 | 988 | ||
988 | /** | 989 | /** |
989 | * Structure used for LZO data decompression. | 990 | * Structure used for LZO data decompression. |
990 | */ | 991 | */ |
991 | struct dec_data { | 992 | struct dec_data { |
992 | struct task_struct *thr; /* thread */ | 993 | struct task_struct *thr; /* thread */ |
993 | atomic_t ready; /* ready to start flag */ | 994 | atomic_t ready; /* ready to start flag */ |
994 | atomic_t stop; /* ready to stop flag */ | 995 | atomic_t stop; /* ready to stop flag */ |
995 | int ret; /* return code */ | 996 | int ret; /* return code */ |
996 | wait_queue_head_t go; /* start decompression */ | 997 | wait_queue_head_t go; /* start decompression */ |
997 | wait_queue_head_t done; /* decompression done */ | 998 | wait_queue_head_t done; /* decompression done */ |
998 | size_t unc_len; /* uncompressed length */ | 999 | size_t unc_len; /* uncompressed length */ |
999 | size_t cmp_len; /* compressed length */ | 1000 | size_t cmp_len; /* compressed length */ |
1000 | unsigned char unc[LZO_UNC_SIZE]; /* uncompressed buffer */ | 1001 | unsigned char unc[LZO_UNC_SIZE]; /* uncompressed buffer */ |
1001 | unsigned char cmp[LZO_CMP_SIZE]; /* compressed buffer */ | 1002 | unsigned char cmp[LZO_CMP_SIZE]; /* compressed buffer */ |
1002 | }; | 1003 | }; |
1003 | 1004 | ||
1004 | /** | 1005 | /** |
1005 | * Decompression function that runs in its own thread. | 1006 | * Decompression function that runs in its own thread. |
1006 | */ | 1007 | */ |
1007 | static int lzo_decompress_threadfn(void *data) | 1008 | static int lzo_decompress_threadfn(void *data) |
1008 | { | 1009 | { |
1009 | struct dec_data *d = data; | 1010 | struct dec_data *d = data; |
1010 | 1011 | ||
1011 | while (1) { | 1012 | while (1) { |
1012 | wait_event(d->go, atomic_read(&d->ready) || | 1013 | wait_event(d->go, atomic_read(&d->ready) || |
1013 | kthread_should_stop()); | 1014 | kthread_should_stop()); |
1014 | if (kthread_should_stop()) { | 1015 | if (kthread_should_stop()) { |
1015 | d->thr = NULL; | 1016 | d->thr = NULL; |
1016 | d->ret = -1; | 1017 | d->ret = -1; |
1017 | atomic_set(&d->stop, 1); | 1018 | atomic_set(&d->stop, 1); |
1018 | wake_up(&d->done); | 1019 | wake_up(&d->done); |
1019 | break; | 1020 | break; |
1020 | } | 1021 | } |
1021 | atomic_set(&d->ready, 0); | 1022 | atomic_set(&d->ready, 0); |
1022 | 1023 | ||
1023 | d->unc_len = LZO_UNC_SIZE; | 1024 | d->unc_len = LZO_UNC_SIZE; |
1024 | d->ret = lzo1x_decompress_safe(d->cmp + LZO_HEADER, d->cmp_len, | 1025 | d->ret = lzo1x_decompress_safe(d->cmp + LZO_HEADER, d->cmp_len, |
1025 | d->unc, &d->unc_len); | 1026 | d->unc, &d->unc_len); |
1026 | atomic_set(&d->stop, 1); | 1027 | atomic_set(&d->stop, 1); |
1027 | wake_up(&d->done); | 1028 | wake_up(&d->done); |
1028 | } | 1029 | } |
1029 | return 0; | 1030 | return 0; |
1030 | } | 1031 | } |
1031 | 1032 | ||
1032 | /** | 1033 | /** |
1033 | * load_image_lzo - Load compressed image data and decompress it with LZO. | 1034 | * load_image_lzo - Load compressed image data and decompress it with LZO. |
1034 | * @handle: Swap map handle to use for loading data. | 1035 | * @handle: Swap map handle to use for loading data. |
1035 | * @snapshot: Image to copy uncompressed data into. | 1036 | * @snapshot: Image to copy uncompressed data into. |
1036 | * @nr_to_read: Number of pages to load. | 1037 | * @nr_to_read: Number of pages to load. |
1037 | */ | 1038 | */ |
1038 | static int load_image_lzo(struct swap_map_handle *handle, | 1039 | static int load_image_lzo(struct swap_map_handle *handle, |
1039 | struct snapshot_handle *snapshot, | 1040 | struct snapshot_handle *snapshot, |
1040 | unsigned int nr_to_read) | 1041 | unsigned int nr_to_read) |
1041 | { | 1042 | { |
1042 | unsigned int m; | 1043 | unsigned int m; |
1043 | int ret = 0; | 1044 | int ret = 0; |
1044 | int eof = 0; | 1045 | int eof = 0; |
1045 | struct bio *bio; | 1046 | struct bio *bio; |
1046 | struct timeval start; | 1047 | struct timeval start; |
1047 | struct timeval stop; | 1048 | struct timeval stop; |
1048 | unsigned nr_pages; | 1049 | unsigned nr_pages; |
1049 | size_t off; | 1050 | size_t off; |
1050 | unsigned i, thr, run_threads, nr_threads; | 1051 | unsigned i, thr, run_threads, nr_threads; |
1051 | unsigned ring = 0, pg = 0, ring_size = 0, | 1052 | unsigned ring = 0, pg = 0, ring_size = 0, |
1052 | have = 0, want, need, asked = 0; | 1053 | have = 0, want, need, asked = 0; |
1053 | unsigned long read_pages; | 1054 | unsigned long read_pages; |
1054 | unsigned char **page = NULL; | 1055 | unsigned char **page = NULL; |
1055 | struct dec_data *data = NULL; | 1056 | struct dec_data *data = NULL; |
1056 | struct crc_data *crc = NULL; | 1057 | struct crc_data *crc = NULL; |
1057 | 1058 | ||
1058 | /* | 1059 | /* |
1059 | * We limit the number of decompression threads to keep the memory | 1060 | * We limit the number of decompression threads to keep the memory |
1060 | * footprint bounded. | 1061 | * footprint bounded. |
1061 | */ | 1062 | */ |
1062 | nr_threads = num_online_cpus() - 1; | 1063 | nr_threads = num_online_cpus() - 1; |
1063 | nr_threads = clamp_val(nr_threads, 1, LZO_THREADS); | 1064 | nr_threads = clamp_val(nr_threads, 1, LZO_THREADS); |
1064 | 1065 | ||
1065 | page = vmalloc(sizeof(*page) * LZO_READ_PAGES); | 1066 | page = vmalloc(sizeof(*page) * LZO_READ_PAGES); |
1066 | if (!page) { | 1067 | if (!page) { |
1067 | printk(KERN_ERR "PM: Failed to allocate LZO page\n"); | 1068 | printk(KERN_ERR "PM: Failed to allocate LZO page\n"); |
1068 | ret = -ENOMEM; | 1069 | ret = -ENOMEM; |
1069 | goto out_clean; | 1070 | goto out_clean; |
1070 | } | 1071 | } |
1071 | 1072 | ||
1072 | data = vmalloc(sizeof(*data) * nr_threads); | 1073 | data = vmalloc(sizeof(*data) * nr_threads); |
1073 | if (!data) { | 1074 | if (!data) { |
1074 | printk(KERN_ERR "PM: Failed to allocate LZO data\n"); | 1075 | printk(KERN_ERR "PM: Failed to allocate LZO data\n"); |
1075 | ret = -ENOMEM; | 1076 | ret = -ENOMEM; |
1076 | goto out_clean; | 1077 | goto out_clean; |
1077 | } | 1078 | } |
1078 | for (thr = 0; thr < nr_threads; thr++) | 1079 | for (thr = 0; thr < nr_threads; thr++) |
1079 | memset(&data[thr], 0, offsetof(struct dec_data, go)); | 1080 | memset(&data[thr], 0, offsetof(struct dec_data, go)); |
1080 | 1081 | ||
1081 | crc = kmalloc(sizeof(*crc), GFP_KERNEL); | 1082 | crc = kmalloc(sizeof(*crc), GFP_KERNEL); |
1082 | if (!crc) { | 1083 | if (!crc) { |
1083 | printk(KERN_ERR "PM: Failed to allocate crc\n"); | 1084 | printk(KERN_ERR "PM: Failed to allocate crc\n"); |
1084 | ret = -ENOMEM; | 1085 | ret = -ENOMEM; |
1085 | goto out_clean; | 1086 | goto out_clean; |
1086 | } | 1087 | } |
1087 | memset(crc, 0, offsetof(struct crc_data, go)); | 1088 | memset(crc, 0, offsetof(struct crc_data, go)); |

        /*
         * Start the decompression threads.
         */
        for (thr = 0; thr < nr_threads; thr++) {
                init_waitqueue_head(&data[thr].go);
                init_waitqueue_head(&data[thr].done);

                data[thr].thr = kthread_run(lzo_decompress_threadfn,
                                            &data[thr],
                                            "image_decompress/%u", thr);
                if (IS_ERR(data[thr].thr)) {
                        data[thr].thr = NULL;
                        printk(KERN_ERR
                               "PM: Cannot start decompression threads\n");
                        ret = -ENOMEM;
                        goto out_clean;
                }
        }

        /*
         * Start the CRC32 thread.
         */
        init_waitqueue_head(&crc->go);
        init_waitqueue_head(&crc->done);

        handle->crc32 = 0;
        crc->crc32 = &handle->crc32;
        for (thr = 0; thr < nr_threads; thr++) {
                crc->unc[thr] = data[thr].unc;
                crc->unc_len[thr] = &data[thr].unc_len;
        }

        crc->thr = kthread_run(crc32_threadfn, crc, "image_crc32");
        if (IS_ERR(crc->thr)) {
                crc->thr = NULL;
                printk(KERN_ERR "PM: Cannot start CRC32 thread\n");
                ret = -ENOMEM;
                goto out_clean;
        }
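        /*
         * The CRC32 thread checksums the workers' output buffers in place
         * (crc->unc[] aliases data[].unc above), so checksumming can overlap
         * both decompression and the next batch of reads.
         */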

        /*
         * Adjust number of pages for read buffering, in case we are short.
         */
        read_pages = (nr_free_pages() - snapshot_get_image_size()) >> 1;
        read_pages = clamp_val(read_pages, LZO_CMP_PAGES, LZO_READ_PAGES);

        for (i = 0; i < read_pages; i++) {
                page[i] = (void *)__get_free_page(i < LZO_CMP_PAGES ?
                                                  __GFP_WAIT | __GFP_HIGH :
                                                  __GFP_WAIT);
                if (!page[i]) {
                        if (i < LZO_CMP_PAGES) {
                                ring_size = i;
                                printk(KERN_ERR
                                       "PM: Failed to allocate LZO pages\n");
                                ret = -ENOMEM;
                                goto out_clean;
                        } else {
                                break;
                        }
                }
        }
        want = ring_size = i;
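        /*
         * The first LZO_CMP_PAGES pages are needed to assemble even a single
         * compressed block, so they may dip into reserves (__GFP_HIGH); the
         * remaining pages are opportunistic read-ahead, and failing to get
         * them merely shrinks the ring.
         */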

        printk(KERN_INFO
                "PM: Using %u thread(s) for decompression.\n"
                "PM: Loading and decompressing image data (%u pages) ... ",
                nr_threads, nr_to_read);
        m = nr_to_read / 100;
        if (!m)
                m = 1;
        nr_pages = 0;
        bio = NULL;
        do_gettimeofday(&start);

        ret = snapshot_write_next(snapshot);
        if (ret <= 0)
                goto out_finish;
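        /*
         * This first snapshot_write_next() call primes the snapshot buffer;
         * a non-positive result means there is nowhere to put image data, so
         * bail out before submitting any I/O.  'm' above is the step used to
         * print progress in whole percent.
         */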

        for (;;) {
                for (i = 0; !eof && i < want; i++) {
                        ret = swap_read_page(handle, page[ring], &bio);
                        if (ret) {
                                /*
                                 * On real read error, finish. On end of data,
                                 * set EOF flag and just exit the read loop.
                                 */
                                if (handle->cur &&
                                    handle->cur->entries[handle->k]) {
                                        goto out_finish;
                                } else {
                                        eof = 1;
                                        break;
                                }
                        }
                        if (++ring >= ring_size)
                                ring = 0;
                }
                asked += i;
                want -= i;

                /*
                 * We are out of data, wait for some more.
                 */
                if (!have) {
                        if (!asked)
                                break;

                        ret = hib_wait_on_bio_chain(&bio);
                        if (ret)
                                goto out_finish;
                        have += asked;
                        asked = 0;
                        if (eof)
                                eof = 2;
                }
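                /*
                 * eof == 1 records that swap ran out of data mid-read; it is
                 * promoted to 2 only once the outstanding bios have
                 * completed, i.e. every page that exists is in the ring.  A
                 * block that still cannot be assembled after that (checked
                 * below) means the image is truncated.
                 */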

                if (crc->run_threads) {
                        wait_event(crc->done, atomic_read(&crc->stop));
                        atomic_set(&crc->stop, 0);
                        crc->run_threads = 0;
                }

                for (thr = 0; have && thr < nr_threads; thr++) {
                        data[thr].cmp_len = *(size_t *)page[pg];
                        if (unlikely(!data[thr].cmp_len ||
                                     data[thr].cmp_len >
                                     lzo1x_worst_compress(LZO_UNC_SIZE))) {
                                printk(KERN_ERR
                                       "PM: Invalid LZO compressed length\n");
                                ret = -1;
                                goto out_finish;
                        }
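                        /*
                         * lzo1x_worst_compress() is the largest size LZO can
                         * expand LZO_UNC_SIZE bytes to, so a stored length of
                         * zero or beyond that bound can only be corruption.
                         */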

                        need = DIV_ROUND_UP(data[thr].cmp_len + LZO_HEADER,
                                            PAGE_SIZE);
                        if (need > have) {
                                if (eof > 1) {
                                        ret = -1;
                                        goto out_finish;
                                }
                                break;
                        }

                        for (off = 0;
                             off < LZO_HEADER + data[thr].cmp_len;
                             off += PAGE_SIZE) {
                                memcpy(data[thr].cmp + off,
                                       page[pg], PAGE_SIZE);
                                have--;
                                want++;
                                if (++pg >= ring_size)
                                        pg = 0;
                        }

                        atomic_set(&data[thr].ready, 1);
                        wake_up(&data[thr].go);
                }
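                /*
                 * Each page copied out of the ring above frees a slot for
                 * the next read (have--, want++); the consumer index 'pg'
                 * wraps modulo ring_size just like the producer index 'ring'.
                 */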

                /*
                 * Wait for more data while we are decompressing.
                 */
                if (have < LZO_CMP_PAGES && asked) {
                        ret = hib_wait_on_bio_chain(&bio);
                        if (ret)
                                goto out_finish;
                        have += asked;
                        asked = 0;
                        if (eof)
                                eof = 2;
                }

                for (run_threads = thr, thr = 0; thr < run_threads; thr++) {
                        wait_event(data[thr].done,
                                   atomic_read(&data[thr].stop));
                        atomic_set(&data[thr].stop, 0);

                        ret = data[thr].ret;

                        if (ret < 0) {
                                printk(KERN_ERR
                                       "PM: LZO decompression failed\n");
                                goto out_finish;
                        }

                        if (unlikely(!data[thr].unc_len ||
                                     data[thr].unc_len > LZO_UNC_SIZE ||
                                     data[thr].unc_len & (PAGE_SIZE - 1))) {
                                printk(KERN_ERR
                                       "PM: Invalid LZO uncompressed length\n");
                                ret = -1;
                                goto out_finish;
                        }

                        for (off = 0;
                             off < data[thr].unc_len; off += PAGE_SIZE) {
                                memcpy(data_of(*snapshot),
                                       data[thr].unc + off, PAGE_SIZE);

                                if (!(nr_pages % m))
                                        printk("\b\b\b\b%3d%%", nr_pages / m);
                                nr_pages++;

                                ret = snapshot_write_next(snapshot);
                                if (ret <= 0) {
                                        crc->run_threads = thr + 1;
                                        atomic_set(&crc->ready, 1);
                                        wake_up(&crc->go);
                                        goto out_finish;
                                }
                        }
                }

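                /*
                 * All 'thr' buffers decompressed in this round are complete;
                 * hand them to the CRC32 thread so the checksum is computed
                 * while the next round of reads is in flight.
                 */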
                crc->run_threads = thr;
                atomic_set(&crc->ready, 1);
                wake_up(&crc->go);
        }

out_finish:
        if (crc->run_threads) {
                wait_event(crc->done, atomic_read(&crc->stop));
                atomic_set(&crc->stop, 0);
        }
        do_gettimeofday(&stop);
        if (!ret) {
                printk("\b\b\b\bdone\n");
                snapshot_write_finalize(snapshot);
                if (!snapshot_image_loaded(snapshot))
                        ret = -ENODATA;
                if (!ret) {
                        if (swsusp_header->flags & SF_CRC32_MODE) {
                                if (handle->crc32 != swsusp_header->crc32) {
                                        printk(KERN_ERR
                                               "PM: Invalid image CRC32!\n");
                                        ret = -ENODATA;
                                }
                        }
                }
        } else
                printk("\n");
        swsusp_show_speed(&start, &stop, nr_to_read, "Read");
out_clean:
        for (i = 0; i < ring_size; i++)
                free_page((unsigned long)page[i]);
        if (crc) {
                if (crc->thr)
                        kthread_stop(crc->thr);
                kfree(crc);
        }
        if (data) {
                for (thr = 0; thr < nr_threads; thr++)
                        if (data[thr].thr)
                                kthread_stop(data[thr].thr);
                vfree(data);
        }
        vfree(page);    /* vfree(NULL) is a no-op, so no check is needed */

        return ret;
}

/**
 * swsusp_read - read the hibernation image.
 * @flags_p: memory location to store the flags passed by the "frozen"
 *           kernel in the image header.
 */

int swsusp_read(unsigned int *flags_p)
{
        int error;
        struct swap_map_handle handle;
        struct snapshot_handle snapshot;
        struct swsusp_info *header;

        memset(&snapshot, 0, sizeof(struct snapshot_handle));
        error = snapshot_write_next(&snapshot);
        if (error < PAGE_SIZE)
                return error < 0 ? error : -EFAULT;
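        /*
         * The first snapshot_write_next() call must yield a full page to
         * hold the swsusp_info header; anything shorter is treated as a
         * failure.
         */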
        header = (struct swsusp_info *)data_of(snapshot);
        error = get_swap_reader(&handle, flags_p);
        if (error)
                goto end;
        error = swap_read_page(&handle, header, NULL);
        if (!error) {
                error = (*flags_p & SF_NOCOMPRESS_MODE) ?
                        load_image(&handle, &snapshot, header->pages - 1) :
                        load_image_lzo(&handle, &snapshot, header->pages - 1);
        }
        swap_reader_finish(&handle);
end:
        if (!error)
                pr_debug("PM: Image successfully loaded\n");
        else
                pr_debug("PM: Error %d resuming\n", error);
        return error;
}

/**
 * swsusp_check - Check for swsusp signature in the resume device
 */

int swsusp_check(void)
{
        int error;

        hib_resume_bdev = blkdev_get_by_dev(swsusp_resume_device,
                                            FMODE_READ, NULL);
        if (!IS_ERR(hib_resume_bdev)) {
                set_blocksize(hib_resume_bdev, PAGE_SIZE);
                clear_page(swsusp_header);
                error = hib_bio_read_page(swsusp_resume_block,
                                          swsusp_header, NULL);
                if (error)
                        goto put;

                if (!memcmp(HIBERNATE_SIG, swsusp_header->sig, 10)) {
                        memcpy(swsusp_header->sig, swsusp_header->orig_sig, 10);
                        /* Reset swap signature now */
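                        /*
                         * Writing the original swap signature back before
                         * resuming ensures an interrupted resume cannot find
                         * and load this image a second time.
                         */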
                        error = hib_bio_write_page(swsusp_resume_block,
                                                   swsusp_header, NULL);
                } else {
                        error = -EINVAL;
                }

put:
                if (error)
                        blkdev_put(hib_resume_bdev, FMODE_READ);
                else
                        pr_debug("PM: Image signature found, resuming\n");
        } else {
                error = PTR_ERR(hib_resume_bdev);
        }

        if (error)
                pr_debug("PM: Image not found (code %d)\n", error);

        return error;
}

/**
 * swsusp_close - close swap device.
 */

void swsusp_close(fmode_t mode)
{
        if (IS_ERR(hib_resume_bdev)) {
                pr_debug("PM: Image device not initialised\n");
                return;
        }

        blkdev_put(hib_resume_bdev, mode);
}

static int swsusp_header_init(void)
{
        swsusp_header = (struct swsusp_header *)__get_free_page(GFP_KERNEL);
        if (!swsusp_header)
                panic("Could not allocate memory for swsusp_header\n");
        return 0;
}
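/*
 * The signature page is allocated at core_initcall time, well before any
 * resume attempt can need it; allocation failure this early in boot is
 * unrecoverable, hence the panic().
 */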

core_initcall(swsusp_header_init);