Commit 6715045ddc7472a22be5e49d4047d2d89b391f45

Authored by Rafael J. Wysocki
1 parent 0109c2c48d

PM / Hibernate: Avoid hitting OOM during preallocation of memory

There is a problem in hibernate_preallocate_memory(): it calls
preallocate_image_memory() with an argument that may be greater than
the total number of available non-highmem memory pages.  In that
case the OOM condition is guaranteed to trigger, which in turn can
cause a significant slowdown during hibernation.

To avoid that, make preallocate_image_memory() adjust its argument
before calling preallocate_image_pages(), so that the total number of
saveable non-highmem pages left is not less than the minimum size of
a hibernation image.  Change hibernate_preallocate_memory() to try to
allocate from highmem if the number of pages allocated by
preallocate_image_memory() is too low.

Modify free_unnecessary_pages() to take all possible memory
allocation patterns into account.
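
The adjustment can be sketched as follows (a minimal illustration of the
clamping described above, not necessarily the literal patch;
preallocate_image_pages(), alloc_normal and GFP_IMAGE are names used by the
preallocation code further down in snapshot.c, and avail_normal stands for
the total number of non-highmem pages available for the image):

    static unsigned long preallocate_image_memory(unsigned long nr_pages,
                                                  unsigned long avail_normal)
    {
            unsigned long alloc;

            /* Have all usable non-highmem pages been allocated already? */
            if (avail_normal <= alloc_normal)
                    return 0;

            /* Clamp the request to what non-highmem can still provide. */
            alloc = avail_normal - alloc_normal;
            if (nr_pages < alloc)
                    alloc = nr_pages;

            return preallocate_image_pages(alloc, GFP_IMAGE);
    }

On top of that, hibernate_preallocate_memory() can compare the number of
pages actually obtained with the number it asked for and fall back to
preallocate_image_highmem() for the difference.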

Reported-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
Tested-by: M. Vefa Bicakci <bicave@superonline.com>

Showing 1 changed file with 65 additions and 20 deletions

kernel/power/snapshot.c
/*
 * linux/kernel/power/snapshot.c
 *
 * This file provides system snapshot/restore functionality for swsusp.
 *
 * Copyright (C) 1998-2005 Pavel Machek <pavel@ucw.cz>
 * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
 *
 * This file is released under the GPLv2.
 *
 */

#include <linux/version.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/suspend.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/pm.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/syscalls.h>
#include <linux/console.h>
#include <linux/highmem.h>
#include <linux/list.h>
#include <linux/slab.h>

#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

#include "power.h"

static int swsusp_page_is_free(struct page *);
static void swsusp_set_page_forbidden(struct page *);
static void swsusp_unset_page_forbidden(struct page *);

/*
 * Preferred image size in bytes (tunable via /sys/power/image_size).
 * When it is set to N, swsusp will do its best to ensure the image
 * size will not exceed N bytes, but if that is impossible, it will
 * try to create the smallest image possible.
 */
unsigned long image_size = 500 * 1024 * 1024;

/* List of PBEs needed for restoring the pages that were allocated before
 * the suspend and included in the suspend image, but have also been
 * allocated by the "resume" kernel, so their contents cannot be written
 * directly to their "original" page frames.
 */
struct pbe *restore_pblist;

/* Pointer to an auxiliary buffer (1 page) */
static void *buffer;

/**
 * @safe_needed - on resume, for storing the PBE list and the image,
 * we can only use memory pages that do not conflict with the pages
 * used before suspend. The unsafe pages have PageNosaveFree set
 * and we count them using unsafe_pages.
 *
 * Each allocated image page is marked as PageNosave and PageNosaveFree
 * so that swsusp_free() can release it.
 */

#define PG_ANY 0
#define PG_SAFE 1
#define PG_UNSAFE_CLEAR 1
#define PG_UNSAFE_KEEP 0
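/*
 * Illustrative pairings of the flags above, as used later in this file:
 * get_safe_page() passes PG_SAFE to get_image_page() so that only pages
 * not conflicting with the image are returned during resume, while
 * create_basic_memory_bitmaps() calls memory_bm_create(bm, GFP_KERNEL,
 * PG_ANY) and frees the bitmaps with memory_bm_free(bm, PG_UNSAFE_CLEAR).
 */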

static unsigned int allocated_unsafe_pages;

static void *get_image_page(gfp_t gfp_mask, int safe_needed)
{
        void *res;

        res = (void *)get_zeroed_page(gfp_mask);
        if (safe_needed)
                while (res && swsusp_page_is_free(virt_to_page(res))) {
                        /* The page is unsafe, mark it for swsusp_free() */
                        swsusp_set_page_forbidden(virt_to_page(res));
                        allocated_unsafe_pages++;
                        res = (void *)get_zeroed_page(gfp_mask);
                }
        if (res) {
                swsusp_set_page_forbidden(virt_to_page(res));
                swsusp_set_page_free(virt_to_page(res));
        }
        return res;
}

unsigned long get_safe_page(gfp_t gfp_mask)
{
        return (unsigned long)get_image_page(gfp_mask, PG_SAFE);
}

static struct page *alloc_image_page(gfp_t gfp_mask)
{
        struct page *page;

        page = alloc_page(gfp_mask);
        if (page) {
                swsusp_set_page_forbidden(page);
                swsusp_set_page_free(page);
        }
        return page;
}

/**
 * free_image_page - free page represented by @addr, allocated with
 * get_image_page (page flags set by it must be cleared)
 */

static inline void free_image_page(void *addr, int clear_nosave_free)
{
        struct page *page;

        BUG_ON(!virt_addr_valid(addr));

        page = virt_to_page(addr);

        swsusp_unset_page_forbidden(page);
        if (clear_nosave_free)
                swsusp_unset_page_free(page);

        __free_page(page);
}

/* struct linked_page is used to build chains of pages */

#define LINKED_PAGE_DATA_SIZE (PAGE_SIZE - sizeof(void *))

struct linked_page {
        struct linked_page *next;
        char data[LINKED_PAGE_DATA_SIZE];
} __attribute__((packed));

static inline void
free_list_of_pages(struct linked_page *list, int clear_page_nosave)
{
        while (list) {
                struct linked_page *lp = list->next;

                free_image_page(list, clear_page_nosave);
                list = lp;
        }
}

/**
 * struct chain_allocator is used for allocating small objects out of
 * a linked list of pages called 'the chain'.
 *
 * The chain grows whenever there is no room for a new object in
 * the current page. The allocated objects cannot be freed individually.
 * It is only possible to free them all at once, by freeing the entire
 * chain.
 *
 * NOTE: The chain allocator may be inefficient if the allocated objects
 * are not much smaller than PAGE_SIZE.
 */

struct chain_allocator {
        struct linked_page *chain;      /* the chain */
        unsigned int used_space;        /* total size of objects allocated out
                                         * of the current page
                                         */
        gfp_t gfp_mask;         /* mask for allocating pages */
        int safe_needed;        /* if set, only "safe" pages are allocated */
};

static void
chain_init(struct chain_allocator *ca, gfp_t gfp_mask, int safe_needed)
{
        ca->chain = NULL;
        ca->used_space = LINKED_PAGE_DATA_SIZE;
        ca->gfp_mask = gfp_mask;
        ca->safe_needed = safe_needed;
}

static void *chain_alloc(struct chain_allocator *ca, unsigned int size)
{
        void *ret;

        if (LINKED_PAGE_DATA_SIZE - ca->used_space < size) {
                struct linked_page *lp;

                lp = get_image_page(ca->gfp_mask, ca->safe_needed);
                if (!lp)
                        return NULL;

                lp->next = ca->chain;
                ca->chain = lp;
                ca->used_space = 0;
        }
        ret = ca->chain->data + ca->used_space;
        ca->used_space += size;
        return ret;
}
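/*
 * Illustrative use of the chain allocator (a sketch based on how this file
 * uses it in memory_bm_create(); 'struct example_object' is hypothetical):
 *
 *      struct chain_allocator ca;
 *      struct example_object *obj;
 *
 *      chain_init(&ca, GFP_KERNEL, PG_ANY);
 *      obj = chain_alloc(&ca, sizeof(struct example_object));
 *      ...
 *      free_list_of_pages(ca.chain, PG_UNSAFE_KEEP);
 *
 * Note that chain_init() deliberately sets used_space to
 * LINKED_PAGE_DATA_SIZE, so the first chain_alloc() always allocates the
 * first page of the chain.
 */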

/**
 * Data types related to memory bitmaps.
 *
 * Memory bitmap is a structure consisting of many linked lists of
 * objects. The main list's elements are of type struct zone_bitmap
 * and each of them corresponds to one zone. For each zone bitmap
 * object there is a list of objects of type struct bm_block that
 * represent the blocks of the bitmap in which information is stored.
 *
 * struct memory_bitmap contains a pointer to the main list of zone
 * bitmap objects, a struct bm_position used for browsing the bitmap,
 * and a pointer to the list of pages used for allocating all of the
 * zone bitmap objects and bitmap block objects.
 *
 * NOTE: It has to be possible to lay out the bitmap in memory
 * using only allocations of order 0. Additionally, the bitmap is
 * designed to work with an arbitrary number of zones (this is over the
 * top for now, but let's avoid making unnecessary assumptions ;-).
 *
 * struct zone_bitmap contains a pointer to a list of bitmap block
 * objects and a pointer to the bitmap block object that has been
 * most recently used for setting bits. Additionally, it contains the
 * pfns that correspond to the start and end of the represented zone.
 *
 * struct bm_block contains a pointer to the memory page in which
 * information is stored (in the form of a block of bitmap).
 * It also contains the pfns that correspond to the start and end of
 * the represented memory area.
 */

#define BM_END_OF_MAP (~0UL)

#define BM_BITS_PER_BLOCK (PAGE_SIZE * BITS_PER_BYTE)
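/*
 * Worked example (illustrative): with 4 KiB pages, BM_BITS_PER_BLOCK is
 * 4096 * 8 = 32768, so one bm_block covers 32768 page frames, i.e. 128 MiB
 * of memory. Tracking 4 GiB (1048576 page frames) therefore takes
 * DIV_ROUND_UP(1048576, 32768) = 32 bitmap blocks.
 */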

struct bm_block {
        struct list_head hook;  /* hook into a list of bitmap blocks */
        unsigned long start_pfn;        /* pfn represented by the first bit */
        unsigned long end_pfn;  /* pfn represented by the last bit plus 1 */
        unsigned long *data;    /* bitmap representing pages */
};

static inline unsigned long bm_block_bits(struct bm_block *bb)
{
        return bb->end_pfn - bb->start_pfn;
}

/* struct bm_position is used for browsing memory bitmaps */

struct bm_position {
        struct bm_block *block;
        int bit;
};

struct memory_bitmap {
        struct list_head blocks;        /* list of bitmap blocks */
        struct linked_page *p_list;     /* list of pages used to store zone
                                         * bitmap objects and bitmap block
                                         * objects
                                         */
        struct bm_position cur; /* most recently used bit position */
};

/* Functions that operate on memory bitmaps */

static void memory_bm_position_reset(struct memory_bitmap *bm)
{
        bm->cur.block = list_entry(bm->blocks.next, struct bm_block, hook);
        bm->cur.bit = 0;
}

static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free);

/**
 * create_bm_block_list - create a list of block bitmap objects
 * @pages - number of pages to track
 * @list - list to put the allocated blocks into
 * @ca - chain allocator to be used for allocating memory
 */
static int create_bm_block_list(unsigned long pages,
                                struct list_head *list,
                                struct chain_allocator *ca)
{
        unsigned int nr_blocks = DIV_ROUND_UP(pages, BM_BITS_PER_BLOCK);

        while (nr_blocks-- > 0) {
                struct bm_block *bb;

                bb = chain_alloc(ca, sizeof(struct bm_block));
                if (!bb)
                        return -ENOMEM;
                list_add(&bb->hook, list);
        }

        return 0;
}

struct mem_extent {
        struct list_head hook;
        unsigned long start;
        unsigned long end;
};

/**
 * free_mem_extents - free a list of memory extents
 * @list - list of extents to empty
 */
static void free_mem_extents(struct list_head *list)
{
        struct mem_extent *ext, *aux;

        list_for_each_entry_safe(ext, aux, list, hook) {
                list_del(&ext->hook);
                kfree(ext);
        }
}

/**
 * create_mem_extents - create a list of memory extents representing
 * contiguous ranges of PFNs
 * @list - list to put the extents into
 * @gfp_mask - mask to use for memory allocations
 */
static int create_mem_extents(struct list_head *list, gfp_t gfp_mask)
{
        struct zone *zone;

        INIT_LIST_HEAD(list);

        for_each_populated_zone(zone) {
                unsigned long zone_start, zone_end;
                struct mem_extent *ext, *cur, *aux;

                zone_start = zone->zone_start_pfn;
                zone_end = zone->zone_start_pfn + zone->spanned_pages;

                list_for_each_entry(ext, list, hook)
                        if (zone_start <= ext->end)
                                break;

                if (&ext->hook == list || zone_end < ext->start) {
                        /* New extent is necessary */
                        struct mem_extent *new_ext;

                        new_ext = kzalloc(sizeof(struct mem_extent), gfp_mask);
                        if (!new_ext) {
                                free_mem_extents(list);
                                return -ENOMEM;
                        }
                        new_ext->start = zone_start;
                        new_ext->end = zone_end;
                        list_add_tail(&new_ext->hook, &ext->hook);
                        continue;
                }

                /* Merge this zone's range of PFNs with the existing one */
                if (zone_start < ext->start)
                        ext->start = zone_start;
                if (zone_end > ext->end)
                        ext->end = zone_end;

                /* More merging may be possible */
                cur = ext;
                list_for_each_entry_safe_continue(cur, aux, list, hook) {
                        if (zone_end < cur->start)
                                break;
                        if (zone_end < cur->end)
                                ext->end = cur->end;
                        list_del(&cur->hook);
                        kfree(cur);
                }
        }

        return 0;
}
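/*
 * Illustrative merge (hypothetical numbers): given already-recorded extents
 * [0, 16) and [24, 40), a zone spanning pfns [8, 32) overlaps both, so the
 * loop above extends the first extent to [0, 40) and frees the second.
 */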

/**
 * memory_bm_create - allocate memory for a memory bitmap
 */
static int
memory_bm_create(struct memory_bitmap *bm, gfp_t gfp_mask, int safe_needed)
{
        struct chain_allocator ca;
        struct list_head mem_extents;
        struct mem_extent *ext;
        int error;

        chain_init(&ca, gfp_mask, safe_needed);
        INIT_LIST_HEAD(&bm->blocks);

        error = create_mem_extents(&mem_extents, gfp_mask);
        if (error)
                return error;

        list_for_each_entry(ext, &mem_extents, hook) {
                struct bm_block *bb;
                unsigned long pfn = ext->start;
                unsigned long pages = ext->end - ext->start;

                bb = list_entry(bm->blocks.prev, struct bm_block, hook);

                error = create_bm_block_list(pages, bm->blocks.prev, &ca);
                if (error)
                        goto Error;

                list_for_each_entry_continue(bb, &bm->blocks, hook) {
                        bb->data = get_image_page(gfp_mask, safe_needed);
                        if (!bb->data) {
                                error = -ENOMEM;
                                goto Error;
                        }

                        bb->start_pfn = pfn;
                        if (pages >= BM_BITS_PER_BLOCK) {
                                pfn += BM_BITS_PER_BLOCK;
                                pages -= BM_BITS_PER_BLOCK;
                        } else {
                                /* This is executed only once in the loop */
                                pfn += pages;
                        }
                        bb->end_pfn = pfn;
                }
        }

        bm->p_list = ca.chain;
        memory_bm_position_reset(bm);
 Exit:
        free_mem_extents(&mem_extents);
        return error;

 Error:
        bm->p_list = ca.chain;
        memory_bm_free(bm, PG_UNSAFE_CLEAR);
        goto Exit;
}

/**
 * memory_bm_free - free memory occupied by the memory bitmap @bm
 */
static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free)
{
        struct bm_block *bb;

        list_for_each_entry(bb, &bm->blocks, hook)
                if (bb->data)
                        free_image_page(bb->data, clear_nosave_free);

        free_list_of_pages(bm->p_list, clear_nosave_free);

        INIT_LIST_HEAD(&bm->blocks);
}

/**
 * memory_bm_find_bit - find the bit in the bitmap @bm that corresponds
 * to the given pfn. The cur.block and cur.bit members of @bm are updated.
 */
static int memory_bm_find_bit(struct memory_bitmap *bm, unsigned long pfn,
                              void **addr, unsigned int *bit_nr)
{
        struct bm_block *bb;

        /*
         * Check if the pfn corresponds to the current bitmap block and find
         * the block where it fits if this is not the case.
         */
        bb = bm->cur.block;
        if (pfn < bb->start_pfn)
                list_for_each_entry_continue_reverse(bb, &bm->blocks, hook)
                        if (pfn >= bb->start_pfn)
                                break;

        if (pfn >= bb->end_pfn)
                list_for_each_entry_continue(bb, &bm->blocks, hook)
                        if (pfn >= bb->start_pfn && pfn < bb->end_pfn)
                                break;

        if (&bb->hook == &bm->blocks)
                return -EFAULT;

        /* The block has been found */
        bm->cur.block = bb;
        pfn -= bb->start_pfn;
        bm->cur.bit = pfn + 1;
        *bit_nr = pfn;
        *addr = bb->data;
        return 0;
}

static void memory_bm_set_bit(struct memory_bitmap *bm, unsigned long pfn)
{
        void *addr;
        unsigned int bit;
        int error;

        error = memory_bm_find_bit(bm, pfn, &addr, &bit);
        BUG_ON(error);
        set_bit(bit, addr);
}

static int mem_bm_set_bit_check(struct memory_bitmap *bm, unsigned long pfn)
{
        void *addr;
        unsigned int bit;
        int error;

        error = memory_bm_find_bit(bm, pfn, &addr, &bit);
        if (!error)
                set_bit(bit, addr);
        return error;
}

static void memory_bm_clear_bit(struct memory_bitmap *bm, unsigned long pfn)
{
        void *addr;
        unsigned int bit;
        int error;

        error = memory_bm_find_bit(bm, pfn, &addr, &bit);
        BUG_ON(error);
        clear_bit(bit, addr);
}

static int memory_bm_test_bit(struct memory_bitmap *bm, unsigned long pfn)
{
        void *addr;
        unsigned int bit;
        int error;

        error = memory_bm_find_bit(bm, pfn, &addr, &bit);
        BUG_ON(error);
        return test_bit(bit, addr);
}

static bool memory_bm_pfn_present(struct memory_bitmap *bm, unsigned long pfn)
{
        void *addr;
        unsigned int bit;

        return !memory_bm_find_bit(bm, pfn, &addr, &bit);
}

/**
 * memory_bm_next_pfn - find the pfn that corresponds to the next set bit
 * in the bitmap @bm. If the pfn cannot be found, BM_END_OF_MAP is
 * returned.
 *
 * It is required to run memory_bm_position_reset() before the first call to
 * this function.
 */

static unsigned long memory_bm_next_pfn(struct memory_bitmap *bm)
{
        struct bm_block *bb;
        int bit;

        bb = bm->cur.block;
        do {
                bit = bm->cur.bit;
                bit = find_next_bit(bb->data, bm_block_bits(bb), bit);
                if (bit < bm_block_bits(bb))
                        goto Return_pfn;

                bb = list_entry(bb->hook.next, struct bm_block, hook);
                bm->cur.block = bb;
                bm->cur.bit = 0;
        } while (&bb->hook != &bm->blocks);

        memory_bm_position_reset(bm);
        return BM_END_OF_MAP;

 Return_pfn:
        bm->cur.bit = bit + 1;
        return bb->start_pfn + bit;
}
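/*
 * Typical iteration idiom (a sketch based on how copy_data_pages() later in
 * this file walks a bitmap):
 *
 *      unsigned long pfn;
 *
 *      memory_bm_position_reset(bm);
 *      for (;;) {
 *              pfn = memory_bm_next_pfn(bm);
 *              if (pfn == BM_END_OF_MAP)
 *                      break;
 *              ... use pfn ...
 *      }
 */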

/**
 * This structure represents a range of page frames the contents of which
 * should not be saved during the suspend.
 */

struct nosave_region {
        struct list_head list;
        unsigned long start_pfn;
        unsigned long end_pfn;
};

static LIST_HEAD(nosave_regions);

/**
 * register_nosave_region - register a range of page frames the contents
 * of which should not be saved during the suspend (to be used in the early
 * initialization code)
 */

void __init
__register_nosave_region(unsigned long start_pfn, unsigned long end_pfn,
                         int use_kmalloc)
{
        struct nosave_region *region;

        if (start_pfn >= end_pfn)
                return;

        if (!list_empty(&nosave_regions)) {
                /* Try to extend the previous region (they should be sorted) */
                region = list_entry(nosave_regions.prev,
                                    struct nosave_region, list);
                if (region->end_pfn == start_pfn) {
                        region->end_pfn = end_pfn;
                        goto Report;
                }
        }
        if (use_kmalloc) {
                /* during init, this shouldn't fail */
                region = kmalloc(sizeof(struct nosave_region), GFP_KERNEL);
                BUG_ON(!region);
        } else
                /* This allocation cannot fail */
                region = alloc_bootmem(sizeof(struct nosave_region));
        region->start_pfn = start_pfn;
        region->end_pfn = end_pfn;
        list_add_tail(&region->list, &nosave_regions);
 Report:
        printk(KERN_INFO "PM: Registered nosave memory: %016lx - %016lx\n",
                start_pfn << PAGE_SHIFT, end_pfn << PAGE_SHIFT);
}
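/*
 * Illustrative call (a sketch; the pfn range is hypothetical): early arch
 * setup code would exclude a firmware-reserved area from the image with
 *
 *      register_nosave_region(PFN_DOWN(fw_start), PFN_UP(fw_end));
 *
 * where register_nosave_region() is the wrapper around
 * __register_nosave_region() declared in <linux/suspend.h>.
 */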

/*
 * Set bits in this map correspond to the page frames the contents of which
 * should not be saved during the suspend.
 */
static struct memory_bitmap *forbidden_pages_map;

/* Set bits in this map correspond to free page frames. */
static struct memory_bitmap *free_pages_map;

/*
 * Each page frame allocated for creating the image is marked by setting the
 * corresponding bits in forbidden_pages_map and free_pages_map simultaneously
 */

void swsusp_set_page_free(struct page *page)
{
        if (free_pages_map)
                memory_bm_set_bit(free_pages_map, page_to_pfn(page));
}

static int swsusp_page_is_free(struct page *page)
{
        return free_pages_map ?
                memory_bm_test_bit(free_pages_map, page_to_pfn(page)) : 0;
}

void swsusp_unset_page_free(struct page *page)
{
        if (free_pages_map)
                memory_bm_clear_bit(free_pages_map, page_to_pfn(page));
}

static void swsusp_set_page_forbidden(struct page *page)
{
        if (forbidden_pages_map)
                memory_bm_set_bit(forbidden_pages_map, page_to_pfn(page));
}

int swsusp_page_is_forbidden(struct page *page)
{
        return forbidden_pages_map ?
                memory_bm_test_bit(forbidden_pages_map, page_to_pfn(page)) : 0;
}

static void swsusp_unset_page_forbidden(struct page *page)
{
        if (forbidden_pages_map)
                memory_bm_clear_bit(forbidden_pages_map, page_to_pfn(page));
}

/**
 * mark_nosave_pages - set bits corresponding to the page frames the
 * contents of which should not be saved in a given bitmap.
 */

static void mark_nosave_pages(struct memory_bitmap *bm)
{
        struct nosave_region *region;

        if (list_empty(&nosave_regions))
                return;

        list_for_each_entry(region, &nosave_regions, list) {
                unsigned long pfn;

                pr_debug("PM: Marking nosave pages: %016lx - %016lx\n",
                                region->start_pfn << PAGE_SHIFT,
                                region->end_pfn << PAGE_SHIFT);

                for (pfn = region->start_pfn; pfn < region->end_pfn; pfn++)
                        if (pfn_valid(pfn)) {
                                /*
                                 * It is safe to ignore the result of
                                 * mem_bm_set_bit_check() here, since we won't
                                 * touch the PFNs for which the error is
                                 * returned anyway.
                                 */
                                mem_bm_set_bit_check(bm, pfn);
                        }
        }
}

/**
 * create_basic_memory_bitmaps - create bitmaps needed for marking page
 * frames that should not be saved and free page frames. The pointers
 * forbidden_pages_map and free_pages_map are only modified if everything
 * goes well, because we don't want the bits to be used before both bitmaps
 * are set up.
 */

int create_basic_memory_bitmaps(void)
{
        struct memory_bitmap *bm1, *bm2;
        int error = 0;

        BUG_ON(forbidden_pages_map || free_pages_map);

        bm1 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
        if (!bm1)
                return -ENOMEM;

        error = memory_bm_create(bm1, GFP_KERNEL, PG_ANY);
        if (error)
                goto Free_first_object;

        bm2 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
        if (!bm2)
                goto Free_first_bitmap;

        error = memory_bm_create(bm2, GFP_KERNEL, PG_ANY);
        if (error)
                goto Free_second_object;

        forbidden_pages_map = bm1;
        free_pages_map = bm2;
        mark_nosave_pages(forbidden_pages_map);

        pr_debug("PM: Basic memory bitmaps created\n");

        return 0;

 Free_second_object:
        kfree(bm2);
 Free_first_bitmap:
        memory_bm_free(bm1, PG_UNSAFE_CLEAR);
 Free_first_object:
        kfree(bm1);
        return -ENOMEM;
}

/**
 * free_basic_memory_bitmaps - free memory bitmaps allocated by
 * create_basic_memory_bitmaps(). The auxiliary pointers are necessary
 * so that the bitmaps themselves are not referred to while they are being
 * freed.
 */

void free_basic_memory_bitmaps(void)
{
        struct memory_bitmap *bm1, *bm2;

        BUG_ON(!(forbidden_pages_map && free_pages_map));

        bm1 = forbidden_pages_map;
        bm2 = free_pages_map;
        forbidden_pages_map = NULL;
        free_pages_map = NULL;
        memory_bm_free(bm1, PG_UNSAFE_CLEAR);
        kfree(bm1);
        memory_bm_free(bm2, PG_UNSAFE_CLEAR);
        kfree(bm2);

        pr_debug("PM: Basic memory bitmaps freed\n");
}

/**
 * snapshot_additional_pages - estimate the number of additional pages that
 * will be needed for setting up the suspend image data structures for the
 * given zone (usually the returned value is greater than the exact number)
 */

unsigned int snapshot_additional_pages(struct zone *zone)
{
        unsigned int res;

        res = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
        res += DIV_ROUND_UP(res * sizeof(struct bm_block), PAGE_SIZE);
        return 2 * res;
}
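/*
 * Worked example (illustrative, assuming 64-bit with 4 KiB pages): for a
 * zone spanning 1048576 page frames, res = DIV_ROUND_UP(1048576, 32768) =
 * 32 bitmap blocks, plus DIV_ROUND_UP(32 * sizeof(struct bm_block),
 * PAGE_SIZE) = 1 page for the bm_block objects themselves (sizeof(struct
 * bm_block) is 40 bytes under these assumptions), giving 2 * 33 = 66 pages;
 * the factor of two covers the two bitmaps (orig_bm and copy_bm).
 */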
801 801
802 #ifdef CONFIG_HIGHMEM 802 #ifdef CONFIG_HIGHMEM
803 /** 803 /**
804 * count_free_highmem_pages - compute the total number of free highmem 804 * count_free_highmem_pages - compute the total number of free highmem
805 * pages, system-wide. 805 * pages, system-wide.
806 */ 806 */
807 807
808 static unsigned int count_free_highmem_pages(void) 808 static unsigned int count_free_highmem_pages(void)
809 { 809 {
810 struct zone *zone; 810 struct zone *zone;
811 unsigned int cnt = 0; 811 unsigned int cnt = 0;
812 812
813 for_each_populated_zone(zone) 813 for_each_populated_zone(zone)
814 if (is_highmem(zone)) 814 if (is_highmem(zone))
815 cnt += zone_page_state(zone, NR_FREE_PAGES); 815 cnt += zone_page_state(zone, NR_FREE_PAGES);
816 816
817 return cnt; 817 return cnt;
818 } 818 }
819 819
820 /** 820 /**
821 * saveable_highmem_page - Determine whether a highmem page should be 821 * saveable_highmem_page - Determine whether a highmem page should be
822 * included in the suspend image. 822 * included in the suspend image.
823 * 823 *
824 * We should save the page if it isn't Nosave or NosaveFree, or Reserved, 824 * We should save the page if it isn't Nosave or NosaveFree, or Reserved,
825 * and it isn't a part of a free chunk of pages. 825 * and it isn't a part of a free chunk of pages.
826 */ 826 */
827 static struct page *saveable_highmem_page(struct zone *zone, unsigned long pfn) 827 static struct page *saveable_highmem_page(struct zone *zone, unsigned long pfn)
828 { 828 {
829 struct page *page; 829 struct page *page;
830 830
831 if (!pfn_valid(pfn)) 831 if (!pfn_valid(pfn))
832 return NULL; 832 return NULL;
833 833
834 page = pfn_to_page(pfn); 834 page = pfn_to_page(pfn);
835 if (page_zone(page) != zone) 835 if (page_zone(page) != zone)
836 return NULL; 836 return NULL;
837 837
838 BUG_ON(!PageHighMem(page)); 838 BUG_ON(!PageHighMem(page));
839 839
840 if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page) || 840 if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page) ||
841 PageReserved(page)) 841 PageReserved(page))
842 return NULL; 842 return NULL;
843 843
844 return page; 844 return page;
845 } 845 }
846 846
847 /** 847 /**
848 * count_highmem_pages - compute the total number of saveable highmem 848 * count_highmem_pages - compute the total number of saveable highmem
849 * pages. 849 * pages.
850 */ 850 */
851 851
852 static unsigned int count_highmem_pages(void) 852 static unsigned int count_highmem_pages(void)
853 { 853 {
854 struct zone *zone; 854 struct zone *zone;
855 unsigned int n = 0; 855 unsigned int n = 0;
856 856
857 for_each_populated_zone(zone) { 857 for_each_populated_zone(zone) {
858 unsigned long pfn, max_zone_pfn; 858 unsigned long pfn, max_zone_pfn;
859 859
860 if (!is_highmem(zone)) 860 if (!is_highmem(zone))
861 continue; 861 continue;
862 862
863 mark_free_pages(zone); 863 mark_free_pages(zone);
864 max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages; 864 max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
865 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) 865 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
866 if (saveable_highmem_page(zone, pfn)) 866 if (saveable_highmem_page(zone, pfn))
867 n++; 867 n++;
868 } 868 }
869 return n; 869 return n;
870 } 870 }
871 #else 871 #else
872 static inline void *saveable_highmem_page(struct zone *z, unsigned long p) 872 static inline void *saveable_highmem_page(struct zone *z, unsigned long p)
873 { 873 {
874 return NULL; 874 return NULL;
875 } 875 }
876 #endif /* CONFIG_HIGHMEM */ 876 #endif /* CONFIG_HIGHMEM */
877 877
878 /** 878 /**
879 * saveable_page - Determine whether a non-highmem page should be included 879 * saveable_page - Determine whether a non-highmem page should be included
880 * in the suspend image. 880 * in the suspend image.
881 * 881 *
882 * We should save the page if it isn't Nosave, and is not in the range 882 * We should save the page if it isn't Nosave, and is not in the range
883 * of pages statically defined as 'unsaveable', and it isn't a part of 883 * of pages statically defined as 'unsaveable', and it isn't a part of
884 * a free chunk of pages. 884 * a free chunk of pages.
885 */ 885 */
886 static struct page *saveable_page(struct zone *zone, unsigned long pfn) 886 static struct page *saveable_page(struct zone *zone, unsigned long pfn)
887 { 887 {
888 struct page *page; 888 struct page *page;
889 889
890 if (!pfn_valid(pfn)) 890 if (!pfn_valid(pfn))
891 return NULL; 891 return NULL;
892 892
893 page = pfn_to_page(pfn); 893 page = pfn_to_page(pfn);
894 if (page_zone(page) != zone) 894 if (page_zone(page) != zone)
895 return NULL; 895 return NULL;
896 896
897 BUG_ON(PageHighMem(page)); 897 BUG_ON(PageHighMem(page));
898 898
899 if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page)) 899 if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page))
900 return NULL; 900 return NULL;
901 901
902 if (PageReserved(page) 902 if (PageReserved(page)
903 && (!kernel_page_present(page) || pfn_is_nosave(pfn))) 903 && (!kernel_page_present(page) || pfn_is_nosave(pfn)))
904 return NULL; 904 return NULL;
905 905
906 return page; 906 return page;
907 } 907 }
908 908
909 /** 909 /**
910 * count_data_pages - compute the total number of saveable non-highmem 910 * count_data_pages - compute the total number of saveable non-highmem
911 * pages. 911 * pages.
912 */ 912 */
913 913
914 static unsigned int count_data_pages(void) 914 static unsigned int count_data_pages(void)
915 { 915 {
916 struct zone *zone; 916 struct zone *zone;
917 unsigned long pfn, max_zone_pfn; 917 unsigned long pfn, max_zone_pfn;
918 unsigned int n = 0; 918 unsigned int n = 0;
919 919
920 for_each_populated_zone(zone) { 920 for_each_populated_zone(zone) {
921 if (is_highmem(zone)) 921 if (is_highmem(zone))
922 continue; 922 continue;
923 923
924 mark_free_pages(zone); 924 mark_free_pages(zone);
925 max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages; 925 max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
926 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) 926 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
927 if (saveable_page(zone, pfn)) 927 if (saveable_page(zone, pfn))
928 n++; 928 n++;
929 } 929 }
930 return n; 930 return n;
931 } 931 }
932 932
933 /* This is needed, because copy_page and memcpy are not usable for copying 933 /* This is needed, because copy_page and memcpy are not usable for copying
934 * task structs. 934 * task structs.
935 */ 935 */
936 static inline void do_copy_page(long *dst, long *src) 936 static inline void do_copy_page(long *dst, long *src)
937 { 937 {
938 int n; 938 int n;
939 939
940 for (n = PAGE_SIZE / sizeof(long); n; n--) 940 for (n = PAGE_SIZE / sizeof(long); n; n--)
941 *dst++ = *src++; 941 *dst++ = *src++;
942 } 942 }
943 943
944 944
945 /** 945 /**
946 * safe_copy_page - check if the page we are going to copy is marked as 946 * safe_copy_page - check if the page we are going to copy is marked as
947 * present in the kernel page tables (this always is the case if 947 * present in the kernel page tables (this always is the case if
948 * CONFIG_DEBUG_PAGEALLOC is not set and in that case 948 * CONFIG_DEBUG_PAGEALLOC is not set and in that case
949 * kernel_page_present() always returns 'true'). 949 * kernel_page_present() always returns 'true').
950 */ 950 */
951 static void safe_copy_page(void *dst, struct page *s_page) 951 static void safe_copy_page(void *dst, struct page *s_page)
952 { 952 {
953 if (kernel_page_present(s_page)) { 953 if (kernel_page_present(s_page)) {
954 do_copy_page(dst, page_address(s_page)); 954 do_copy_page(dst, page_address(s_page));
955 } else { 955 } else {
956 kernel_map_pages(s_page, 1, 1); 956 kernel_map_pages(s_page, 1, 1);
957 do_copy_page(dst, page_address(s_page)); 957 do_copy_page(dst, page_address(s_page));
958 kernel_map_pages(s_page, 1, 0); 958 kernel_map_pages(s_page, 1, 0);
959 } 959 }
960 } 960 }
961 961
962 962
963 #ifdef CONFIG_HIGHMEM 963 #ifdef CONFIG_HIGHMEM
964 static inline struct page * 964 static inline struct page *
965 page_is_saveable(struct zone *zone, unsigned long pfn) 965 page_is_saveable(struct zone *zone, unsigned long pfn)
966 { 966 {
967 return is_highmem(zone) ? 967 return is_highmem(zone) ?
968 saveable_highmem_page(zone, pfn) : saveable_page(zone, pfn); 968 saveable_highmem_page(zone, pfn) : saveable_page(zone, pfn);
969 } 969 }
970 970
971 static void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn) 971 static void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
972 { 972 {
973 struct page *s_page, *d_page; 973 struct page *s_page, *d_page;
974 void *src, *dst; 974 void *src, *dst;
975 975
976 s_page = pfn_to_page(src_pfn); 976 s_page = pfn_to_page(src_pfn);
977 d_page = pfn_to_page(dst_pfn); 977 d_page = pfn_to_page(dst_pfn);
978 if (PageHighMem(s_page)) { 978 if (PageHighMem(s_page)) {
979 src = kmap_atomic(s_page, KM_USER0); 979 src = kmap_atomic(s_page, KM_USER0);
980 dst = kmap_atomic(d_page, KM_USER1); 980 dst = kmap_atomic(d_page, KM_USER1);
981 do_copy_page(dst, src); 981 do_copy_page(dst, src);
982 kunmap_atomic(src, KM_USER0); 982 kunmap_atomic(src, KM_USER0);
983 kunmap_atomic(dst, KM_USER1); 983 kunmap_atomic(dst, KM_USER1);
984 } else { 984 } else {
985 if (PageHighMem(d_page)) { 985 if (PageHighMem(d_page)) {
986 /* Page pointed to by src may contain some kernel 986 /* Page pointed to by src may contain some kernel
987 * data modified by kmap_atomic() 987 * data modified by kmap_atomic()
988 */ 988 */
989 safe_copy_page(buffer, s_page); 989 safe_copy_page(buffer, s_page);
990 dst = kmap_atomic(d_page, KM_USER0); 990 dst = kmap_atomic(d_page, KM_USER0);
991 memcpy(dst, buffer, PAGE_SIZE); 991 memcpy(dst, buffer, PAGE_SIZE);
992 kunmap_atomic(dst, KM_USER0); 992 kunmap_atomic(dst, KM_USER0);
993 } else { 993 } else {
994 safe_copy_page(page_address(d_page), s_page); 994 safe_copy_page(page_address(d_page), s_page);
995 } 995 }
996 } 996 }
997 } 997 }
998 #else 998 #else
999 #define page_is_saveable(zone, pfn) saveable_page(zone, pfn) 999 #define page_is_saveable(zone, pfn) saveable_page(zone, pfn)
1000 1000
1001 static inline void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn) 1001 static inline void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
1002 { 1002 {
1003 safe_copy_page(page_address(pfn_to_page(dst_pfn)), 1003 safe_copy_page(page_address(pfn_to_page(dst_pfn)),
1004 pfn_to_page(src_pfn)); 1004 pfn_to_page(src_pfn));
1005 } 1005 }
1006 #endif /* CONFIG_HIGHMEM */ 1006 #endif /* CONFIG_HIGHMEM */
1007 1007
1008 static void 1008 static void
1009 copy_data_pages(struct memory_bitmap *copy_bm, struct memory_bitmap *orig_bm) 1009 copy_data_pages(struct memory_bitmap *copy_bm, struct memory_bitmap *orig_bm)
1010 { 1010 {
1011 struct zone *zone; 1011 struct zone *zone;
1012 unsigned long pfn; 1012 unsigned long pfn;
1013 1013
1014 for_each_populated_zone(zone) { 1014 for_each_populated_zone(zone) {
1015 unsigned long max_zone_pfn; 1015 unsigned long max_zone_pfn;
1016 1016
1017 mark_free_pages(zone); 1017 mark_free_pages(zone);
1018 max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages; 1018 max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
1019 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) 1019 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
1020 if (page_is_saveable(zone, pfn)) 1020 if (page_is_saveable(zone, pfn))
1021 memory_bm_set_bit(orig_bm, pfn); 1021 memory_bm_set_bit(orig_bm, pfn);
1022 } 1022 }
1023 memory_bm_position_reset(orig_bm); 1023 memory_bm_position_reset(orig_bm);
1024 memory_bm_position_reset(copy_bm); 1024 memory_bm_position_reset(copy_bm);
1025 for(;;) { 1025 for(;;) {
1026 pfn = memory_bm_next_pfn(orig_bm); 1026 pfn = memory_bm_next_pfn(orig_bm);
1027 if (unlikely(pfn == BM_END_OF_MAP)) 1027 if (unlikely(pfn == BM_END_OF_MAP))
1028 break; 1028 break;
1029 copy_data_page(memory_bm_next_pfn(copy_bm), pfn); 1029 copy_data_page(memory_bm_next_pfn(copy_bm), pfn);
1030 } 1030 }
1031 } 1031 }
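
copy_data_pages() walks the two bitmaps in lockstep, pairing the n-th saveable pfn with the n-th preallocated frame. A toy model of that pairing, with the bitmaps reduced to pfn arrays and a locally defined end-of-map sentinel (all values made up):

#include <stdio.h>

#define BM_END_OF_MAP   (~0UL)  /* local sentinel, standing in for the real one */

int main(void)
{
        /* pfns of saveable pages and of the preallocated copy frames */
        unsigned long orig[] = { 10, 11, 42, BM_END_OF_MAP };
        unsigned long copy[] = { 900, 901, 902, BM_END_OF_MAP };

        for (int i = 0; orig[i] != BM_END_OF_MAP; i++)
                /* the real loop calls copy_data_page(copy[i], orig[i]) */
                printf("copy pfn %lu -> frame %lu\n", orig[i], copy[i]);
        return 0;
}
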
1032 1032
1033 /* Total number of image pages */ 1033 /* Total number of image pages */
1034 static unsigned int nr_copy_pages; 1034 static unsigned int nr_copy_pages;
1035 /* Number of pages needed for saving the original pfns of the image pages */ 1035 /* Number of pages needed for saving the original pfns of the image pages */
1036 static unsigned int nr_meta_pages; 1036 static unsigned int nr_meta_pages;
1037 /* 1037 /*
1038 * Numbers of normal and highmem page frames allocated for hibernation image 1038 * Numbers of normal and highmem page frames allocated for hibernation image
1039 * before suspending devices. 1039 * before suspending devices.
1040 */ 1040 */
1041 unsigned int alloc_normal, alloc_highmem; 1041 unsigned int alloc_normal, alloc_highmem;
1042 /* 1042 /*
1043 * Memory bitmap used for marking saveable pages (during hibernation) or 1043 * Memory bitmap used for marking saveable pages (during hibernation) or
1044 * hibernation image pages (during restore) 1044 * hibernation image pages (during restore)
1045 */ 1045 */
1046 static struct memory_bitmap orig_bm; 1046 static struct memory_bitmap orig_bm;
1047 /* 1047 /*
1048 * Memory bitmap used during hibernation for marking allocated page frames that 1048 * Memory bitmap used during hibernation for marking allocated page frames that
1049 * will contain copies of saveable pages. During restore it is initially used 1049 * will contain copies of saveable pages. During restore it is initially used
1050 * for marking hibernation image pages, but then the set bits from it are 1050 * for marking hibernation image pages, but then the set bits from it are
1051 * duplicated in @orig_bm and it is released. On highmem systems it is next 1051 * duplicated in @orig_bm and it is released. On highmem systems it is next
1052 * used for marking "safe" highmem pages, but it has to be reinitialized for 1052 * used for marking "safe" highmem pages, but it has to be reinitialized for
1053 * this purpose. 1053 * this purpose.
1054 */ 1054 */
1055 static struct memory_bitmap copy_bm; 1055 static struct memory_bitmap copy_bm;
1056 1056
1057 /** 1057 /**
1058 * swsusp_free - free pages allocated for the suspend. 1058 * swsusp_free - free pages allocated for the suspend.
1059 * 1059 *
1060 * Suspend pages are allocated before the atomic copy is made, so we 1060 * Suspend pages are allocated before the atomic copy is made, so we
1061 * need to release them after the resume. 1061 * need to release them after the resume.
1062 */ 1062 */
1063 1063
1064 void swsusp_free(void) 1064 void swsusp_free(void)
1065 { 1065 {
1066 struct zone *zone; 1066 struct zone *zone;
1067 unsigned long pfn, max_zone_pfn; 1067 unsigned long pfn, max_zone_pfn;
1068 1068
1069 for_each_populated_zone(zone) { 1069 for_each_populated_zone(zone) {
1070 max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages; 1070 max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
1071 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) 1071 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
1072 if (pfn_valid(pfn)) { 1072 if (pfn_valid(pfn)) {
1073 struct page *page = pfn_to_page(pfn); 1073 struct page *page = pfn_to_page(pfn);
1074 1074
1075 if (swsusp_page_is_forbidden(page) && 1075 if (swsusp_page_is_forbidden(page) &&
1076 swsusp_page_is_free(page)) { 1076 swsusp_page_is_free(page)) {
1077 swsusp_unset_page_forbidden(page); 1077 swsusp_unset_page_forbidden(page);
1078 swsusp_unset_page_free(page); 1078 swsusp_unset_page_free(page);
1079 __free_page(page); 1079 __free_page(page);
1080 } 1080 }
1081 } 1081 }
1082 } 1082 }
1083 nr_copy_pages = 0; 1083 nr_copy_pages = 0;
1084 nr_meta_pages = 0; 1084 nr_meta_pages = 0;
1085 restore_pblist = NULL; 1085 restore_pblist = NULL;
1086 buffer = NULL; 1086 buffer = NULL;
1087 alloc_normal = 0; 1087 alloc_normal = 0;
1088 alloc_highmem = 0; 1088 alloc_highmem = 0;
1089 hibernation_thaw_swap(); 1089 hibernation_thaw_swap();
1090 } 1090 }
1091 1091
1092 /* Helper functions used for the shrinking of memory. */ 1092 /* Helper functions used for the shrinking of memory. */
1093 1093
1094 #define GFP_IMAGE (GFP_KERNEL | __GFP_NOWARN) 1094 #define GFP_IMAGE (GFP_KERNEL | __GFP_NOWARN)
1095 1095
1096 /** 1096 /**
1097 * preallocate_image_pages - Allocate a number of pages for hibernation image 1097 * preallocate_image_pages - Allocate a number of pages for hibernation image
1098 * @nr_pages: Number of page frames to allocate. 1098 * @nr_pages: Number of page frames to allocate.
1099 * @mask: GFP flags to use for the allocation. 1099 * @mask: GFP flags to use for the allocation.
1100 * 1100 *
1101 * Return value: Number of page frames actually allocated 1101 * Return value: Number of page frames actually allocated
1102 */ 1102 */
1103 static unsigned long preallocate_image_pages(unsigned long nr_pages, gfp_t mask) 1103 static unsigned long preallocate_image_pages(unsigned long nr_pages, gfp_t mask)
1104 { 1104 {
1105 unsigned long nr_alloc = 0; 1105 unsigned long nr_alloc = 0;
1106 1106
1107 while (nr_pages > 0) { 1107 while (nr_pages > 0) {
1108 struct page *page; 1108 struct page *page;
1109 1109
1110 page = alloc_image_page(mask); 1110 page = alloc_image_page(mask);
1111 if (!page) 1111 if (!page)
1112 break; 1112 break;
1113 memory_bm_set_bit(&copy_bm, page_to_pfn(page)); 1113 memory_bm_set_bit(&copy_bm, page_to_pfn(page));
1114 if (PageHighMem(page)) 1114 if (PageHighMem(page))
1115 alloc_highmem++; 1115 alloc_highmem++;
1116 else 1116 else
1117 alloc_normal++; 1117 alloc_normal++;
1118 nr_pages--; 1118 nr_pages--;
1119 nr_alloc++; 1119 nr_alloc++;
1120 } 1120 }
1121 1121
1122 return nr_alloc; 1122 return nr_alloc;
1123 } 1123 }
1124 1124
1125 static unsigned long preallocate_image_memory(unsigned long nr_pages) 1125 static unsigned long preallocate_image_memory(unsigned long nr_pages,
1126 unsigned long avail_normal)
1126 { 1127 {
1127 return preallocate_image_pages(nr_pages, GFP_IMAGE); 1128 unsigned long alloc;
1129
1130 if (avail_normal <= alloc_normal)
1131 return 0;
1132
1133 alloc = avail_normal - alloc_normal;
1134 if (nr_pages < alloc)
1135 alloc = nr_pages;
1136
1137 return preallocate_image_pages(alloc, GFP_IMAGE);
1128 } 1138 }
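
The reworked preallocate_image_memory() above never hands preallocate_image_pages() more than what is left of the non-highmem budget. A standalone sketch of that clamping arithmetic, using made-up page counts (not part of the commit):

#include <stdio.h>

static unsigned long alloc_normal;      /* non-highmem pages already taken */

static unsigned long clamp_request(unsigned long nr_pages,
                                   unsigned long avail_normal)
{
        unsigned long alloc;

        if (avail_normal <= alloc_normal)
                return 0;               /* the budget is exhausted */

        alloc = avail_normal - alloc_normal;
        if (nr_pages < alloc)
                alloc = nr_pages;       /* the request fits: honour it */

        return alloc;                   /* preallocate_image_pages(alloc, ...) */
}

int main(void)
{
        alloc_normal = 3000;
        /* Asking for 5000 pages with a budget of 4000 - 3000 = 1000 left
         * yields 1000; the old code would have tried to allocate all 5000
         * and driven the system toward the OOM killer.
         */
        printf("%lu\n", clamp_request(5000, 4000));
        return 0;
}
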
1129 1139
1130 #ifdef CONFIG_HIGHMEM 1140 #ifdef CONFIG_HIGHMEM
1131 static unsigned long preallocate_image_highmem(unsigned long nr_pages) 1141 static unsigned long preallocate_image_highmem(unsigned long nr_pages)
1132 { 1142 {
1133 return preallocate_image_pages(nr_pages, GFP_IMAGE | __GFP_HIGHMEM); 1143 return preallocate_image_pages(nr_pages, GFP_IMAGE | __GFP_HIGHMEM);
1134 } 1144 }
1135 1145
1136 /** 1146 /**
1137 * __fraction - Compute (an approximation of) x * (multiplier / base) 1147 * __fraction - Compute (an approximation of) x * (multiplier / base)
1138 */ 1148 */
1139 static unsigned long __fraction(u64 x, u64 multiplier, u64 base) 1149 static unsigned long __fraction(u64 x, u64 multiplier, u64 base)
1140 { 1150 {
1141 x *= multiplier; 1151 x *= multiplier;
1142 do_div(x, base); 1152 do_div(x, base);
1143 return (unsigned long)x; 1153 return (unsigned long)x;
1144 } 1154 }
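
__fraction() scales x by multiplier/base entirely in 64-bit integers; do_div() is the kernel's 64-by-32 division helper, so plain u64 division stands in for it in this illustrative sketch (numbers hypothetical):

#include <stdint.h>
#include <stdio.h>

static unsigned long fraction(uint64_t x, uint64_t multiplier, uint64_t base)
{
        x *= multiplier;        /* may wrap for huge inputs, as in the kernel */
        return (unsigned long)(x / base);
}

int main(void)
{
        /* Splitting 1200 image pages in proportion to 300 highmem pages
         * out of 900 usable in total: 1200 * 300 / 900 = 400.
         */
        printf("%lu\n", fraction(1200, 300, 900));
        return 0;
}
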
1145 1155
1146 static unsigned long preallocate_highmem_fraction(unsigned long nr_pages, 1156 static unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
1147 unsigned long highmem, 1157 unsigned long highmem,
1148 unsigned long total) 1158 unsigned long total)
1149 { 1159 {
1150 unsigned long alloc = __fraction(nr_pages, highmem, total); 1160 unsigned long alloc = __fraction(nr_pages, highmem, total);
1151 1161
1152 return preallocate_image_pages(alloc, GFP_IMAGE | __GFP_HIGHMEM); 1162 return preallocate_image_pages(alloc, GFP_IMAGE | __GFP_HIGHMEM);
1153 } 1163 }
1154 #else /* CONFIG_HIGHMEM */ 1164 #else /* CONFIG_HIGHMEM */
1155 static inline unsigned long preallocate_image_highmem(unsigned long nr_pages) 1165 static inline unsigned long preallocate_image_highmem(unsigned long nr_pages)
1156 { 1166 {
1157 return 0; 1167 return 0;
1158 } 1168 }
1159 1169
1160 static inline unsigned long preallocate_highmem_fraction(unsigned long nr_pages, 1170 static inline unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
1161 unsigned long highmem, 1171 unsigned long highmem,
1162 unsigned long total) 1172 unsigned long total)
1163 { 1173 {
1164 return 0; 1174 return 0;
1165 } 1175 }
1166 #endif /* CONFIG_HIGHMEM */ 1176 #endif /* CONFIG_HIGHMEM */
1167 1177
1168 /** 1178 /**
1169 * free_unnecessary_pages - Release preallocated pages not needed for the image 1179 * free_unnecessary_pages - Release preallocated pages not needed for the image
1170 */ 1180 */
1171 static void free_unnecessary_pages(void) 1181 static void free_unnecessary_pages(void)
1172 { 1182 {
1173 unsigned long save_highmem, to_free_normal, to_free_highmem; 1183 unsigned long save, to_free_normal, to_free_highmem;
1174 1184
1175 to_free_normal = alloc_normal - count_data_pages(); 1185 save = count_data_pages();
1176 save_highmem = count_highmem_pages(); 1186 if (alloc_normal >= save) {
1177 if (alloc_highmem > save_highmem) { 1187 to_free_normal = alloc_normal - save;
1178 to_free_highmem = alloc_highmem - save_highmem; 1188 save = 0;
1179 } else { 1189 } else {
1190 to_free_normal = 0;
1191 save -= alloc_normal;
1192 }
1193 save += count_highmem_pages();
1194 if (alloc_highmem >= save) {
1195 to_free_highmem = alloc_highmem - save;
1196 } else {
1180 to_free_highmem = 0; 1197 to_free_highmem = 0;
1181 to_free_normal -= save_highmem - alloc_highmem; 1198 to_free_normal -= save - alloc_highmem;
1182 } 1199 }
1183 1200
1184 memory_bm_position_reset(&copy_bm); 1201 memory_bm_position_reset(&copy_bm);
1185 1202
1186 while (to_free_normal > 0 || to_free_highmem > 0) { 1203 while (to_free_normal > 0 || to_free_highmem > 0) {
1187 unsigned long pfn = memory_bm_next_pfn(&copy_bm); 1204 unsigned long pfn = memory_bm_next_pfn(&copy_bm);
1188 struct page *page = pfn_to_page(pfn); 1205 struct page *page = pfn_to_page(pfn);
1189 1206
1190 if (PageHighMem(page)) { 1207 if (PageHighMem(page)) {
1191 if (!to_free_highmem) 1208 if (!to_free_highmem)
1192 continue; 1209 continue;
1193 to_free_highmem--; 1210 to_free_highmem--;
1194 alloc_highmem--; 1211 alloc_highmem--;
1195 } else { 1212 } else {
1196 if (!to_free_normal) 1213 if (!to_free_normal)
1197 continue; 1214 continue;
1198 to_free_normal--; 1215 to_free_normal--;
1199 alloc_normal--; 1216 alloc_normal--;
1200 } 1217 }
1201 memory_bm_clear_bit(&copy_bm, pfn); 1218 memory_bm_clear_bit(&copy_bm, pfn);
1202 swsusp_unset_page_forbidden(page); 1219 swsusp_unset_page_forbidden(page);
1203 swsusp_unset_page_free(page); 1220 swsusp_unset_page_free(page);
1204 __free_page(page); 1221 __free_page(page);
1205 } 1222 }
1206 } 1223 }
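
The new accounting above charges saveable data pages against the normal allocations first and carries any shortfall over to the highmem side. A standalone model of the same arithmetic, with hypothetical counts chosen so that part of the data must be copied into highmem:

#include <stdio.h>

int main(void)
{
        unsigned long alloc_normal = 800, alloc_highmem = 700;
        unsigned long save = 1000;      /* count_data_pages() */
        unsigned long to_free_normal, to_free_highmem;

        if (alloc_normal >= save) {
                to_free_normal = alloc_normal - save;
                save = 0;
        } else {
                to_free_normal = 0;
                save -= alloc_normal;   /* 200 data pages lack a normal frame */
        }
        save += 400;                    /* count_highmem_pages() */
        if (alloc_highmem >= save) {
                to_free_highmem = alloc_highmem - save;
        } else {
                to_free_highmem = 0;
                to_free_normal -= save - alloc_highmem;
        }
        /* save is now 600: the 200 stranded data pages will be copied into
         * highmem, so only 700 - 600 = 100 highmem pages are surplus.
         */
        printf("free %lu normal, %lu highmem\n",
               to_free_normal, to_free_highmem);
        return 0;
}
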
1207 1224
1208 /** 1225 /**
1209 * minimum_image_size - Estimate the minimum acceptable size of an image 1226 * minimum_image_size - Estimate the minimum acceptable size of an image
1210 * @saveable: Number of saveable pages in the system. 1227 * @saveable: Number of saveable pages in the system.
1211 * 1228 *
1212 * We do not want to try too hard to free memory, so estimate the minimum 1229 * We do not want to try too hard to free memory, so estimate the minimum
1213 * acceptable size of a hibernation image and use it as the lower limit for 1230 * acceptable size of a hibernation image and use it as the lower limit for
1214 * preallocating memory. 1231 * preallocating memory.
1215 * 1232 *
1216 * We assume that the minimum image size should be proportional to 1233 * We assume that the minimum image size should be proportional to
1217 * 1234 *
1218 * [number of saveable pages] - [number of pages that can be freed in theory] 1235 * [number of saveable pages] - [number of pages that can be freed in theory]
1219 * 1236 *
1220 * where the second term is the sum of (1) reclaimable slab pages, (2) active 1237 * where the second term is the sum of (1) reclaimable slab pages, (2) active
1221 * and (3) inactive anonymous pages, (4) active and (5) inactive file pages, 1238 * and (3) inactive anonymous pages, (4) active and (5) inactive file pages,
1222 * minus mapped file pages. 1239 * minus mapped file pages.
1223 */ 1240 */
1224 static unsigned long minimum_image_size(unsigned long saveable) 1241 static unsigned long minimum_image_size(unsigned long saveable)
1225 { 1242 {
1226 unsigned long size; 1243 unsigned long size;
1227 1244
1228 size = global_page_state(NR_SLAB_RECLAIMABLE) 1245 size = global_page_state(NR_SLAB_RECLAIMABLE)
1229 + global_page_state(NR_ACTIVE_ANON) 1246 + global_page_state(NR_ACTIVE_ANON)
1230 + global_page_state(NR_INACTIVE_ANON) 1247 + global_page_state(NR_INACTIVE_ANON)
1231 + global_page_state(NR_ACTIVE_FILE) 1248 + global_page_state(NR_ACTIVE_FILE)
1232 + global_page_state(NR_INACTIVE_FILE) 1249 + global_page_state(NR_INACTIVE_FILE)
1233 - global_page_state(NR_FILE_MAPPED); 1250 - global_page_state(NR_FILE_MAPPED);
1234 1251
1235 return saveable <= size ? 0 : saveable - size; 1252 return saveable <= size ? 0 : saveable - size;
1236 } 1253 }
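
As a worked example of the estimate above, the same arithmetic as a standalone program with hypothetical VM counter values (all in pages):

#include <stdio.h>

int main(void)
{
        unsigned long saveable = 100000;
        unsigned long slab_reclaimable = 8000;
        unsigned long anon = 20000 + 10000;     /* active + inactive */
        unsigned long file = 30000 + 25000;     /* active + inactive */
        unsigned long file_mapped = 5000;

        unsigned long freeable =
                slab_reclaimable + anon + file - file_mapped;   /* 88000 */
        unsigned long min_size =
                saveable <= freeable ? 0 : saveable - freeable; /* 12000 */

        printf("minimum image size: %lu pages\n", min_size);
        return 0;
}
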
1237 1254
1238 /** 1255 /**
1239 * hibernate_preallocate_memory - Preallocate memory for hibernation image 1256 * hibernate_preallocate_memory - Preallocate memory for hibernation image
1240 * 1257 *
1241 * To create a hibernation image it is necessary to make a copy of every page 1258 * To create a hibernation image it is necessary to make a copy of every page
1242 * frame in use. We also need a number of page frames to be free during 1259 * frame in use. We also need a number of page frames to be free during
1243 * hibernation for allocations made while saving the image and for device 1260 * hibernation for allocations made while saving the image and for device
1244 * drivers, in case they need to allocate memory from their hibernation 1261 * drivers, in case they need to allocate memory from their hibernation
1245 * callbacks (these two numbers are given by PAGES_FOR_IO and SPARE_PAGES, 1262 * callbacks (these two numbers are given by PAGES_FOR_IO and SPARE_PAGES,
1246 * respectively, both of which are rough estimates). To make this happen, we 1263 * respectively, both of which are rough estimates). To make this happen, we
1247 * compute the total number of available page frames and allocate at least 1264 * compute the total number of available page frames and allocate at least
1248 * 1265 *
1249 * ([page frames total] + PAGES_FOR_IO + [metadata pages]) / 2 + 2 * SPARE_PAGES 1266 * ([page frames total] + PAGES_FOR_IO + [metadata pages]) / 2 + 2 * SPARE_PAGES
1250 * 1267 *
1251 * of them, which corresponds to the maximum size of a hibernation image. 1268 * of them, which corresponds to the maximum size of a hibernation image.
1252 * 1269 *
1253 * If image_size is set below the number following from the above formula, 1270 * If image_size is set below the number following from the above formula,
1254 * the preallocation of memory is continued until the total number of saveable 1271 * the preallocation of memory is continued until the total number of saveable
1255 * pages in the system is below the requested image size or the minimum 1272 * pages in the system is below the requested image size or the minimum
1256 * acceptable image size returned by minimum_image_size(), whichever is greater. 1273 * acceptable image size returned by minimum_image_size(), whichever is greater.
1257 */ 1274 */
1258 int hibernate_preallocate_memory(void) 1275 int hibernate_preallocate_memory(void)
1259 { 1276 {
1260 struct zone *zone; 1277 struct zone *zone;
1261 unsigned long saveable, size, max_size, count, highmem, pages = 0; 1278 unsigned long saveable, size, max_size, count, highmem, pages = 0;
1262 unsigned long alloc, save_highmem, pages_highmem; 1279 unsigned long alloc, save_highmem, pages_highmem, avail_normal;
1263 struct timeval start, stop; 1280 struct timeval start, stop;
1264 int error; 1281 int error;
1265 1282
1266 printk(KERN_INFO "PM: Preallocating image memory... "); 1283 printk(KERN_INFO "PM: Preallocating image memory... ");
1267 do_gettimeofday(&start); 1284 do_gettimeofday(&start);
1268 1285
1269 error = memory_bm_create(&orig_bm, GFP_IMAGE, PG_ANY); 1286 error = memory_bm_create(&orig_bm, GFP_IMAGE, PG_ANY);
1270 if (error) 1287 if (error)
1271 goto err_out; 1288 goto err_out;
1272 1289
1273 error = memory_bm_create(&copy_bm, GFP_IMAGE, PG_ANY); 1290 error = memory_bm_create(&copy_bm, GFP_IMAGE, PG_ANY);
1274 if (error) 1291 if (error)
1275 goto err_out; 1292 goto err_out;
1276 1293
1277 alloc_normal = 0; 1294 alloc_normal = 0;
1278 alloc_highmem = 0; 1295 alloc_highmem = 0;
1279 1296
1280 /* Count the number of saveable data pages. */ 1297 /* Count the number of saveable data pages. */
1281 save_highmem = count_highmem_pages(); 1298 save_highmem = count_highmem_pages();
1282 saveable = count_data_pages(); 1299 saveable = count_data_pages();
1283 1300
1284 /* 1301 /*
1285 * Compute the total number of page frames we can use (count) and the 1302 * Compute the total number of page frames we can use (count) and the
1286 * number of pages needed for image metadata (size). 1303 * number of pages needed for image metadata (size).
1287 */ 1304 */
1288 count = saveable; 1305 count = saveable;
1289 saveable += save_highmem; 1306 saveable += save_highmem;
1290 highmem = save_highmem; 1307 highmem = save_highmem;
1291 size = 0; 1308 size = 0;
1292 for_each_populated_zone(zone) { 1309 for_each_populated_zone(zone) {
1293 size += snapshot_additional_pages(zone); 1310 size += snapshot_additional_pages(zone);
1294 if (is_highmem(zone)) 1311 if (is_highmem(zone))
1295 highmem += zone_page_state(zone, NR_FREE_PAGES); 1312 highmem += zone_page_state(zone, NR_FREE_PAGES);
1296 else 1313 else
1297 count += zone_page_state(zone, NR_FREE_PAGES); 1314 count += zone_page_state(zone, NR_FREE_PAGES);
1298 } 1315 }
1316 avail_normal = count;
1299 count += highmem; 1317 count += highmem;
1300 count -= totalreserve_pages; 1318 count -= totalreserve_pages;
1301 1319
1302 /* Compute the maximum number of saveable pages to leave in memory. */ 1320 /* Compute the maximum number of saveable pages to leave in memory. */
1303 max_size = (count - (size + PAGES_FOR_IO)) / 2 - 2 * SPARE_PAGES; 1321 max_size = (count - (size + PAGES_FOR_IO)) / 2 - 2 * SPARE_PAGES;
1304 size = DIV_ROUND_UP(image_size, PAGE_SIZE); 1322 size = DIV_ROUND_UP(image_size, PAGE_SIZE);
1305 if (size > max_size) 1323 if (size > max_size)
1306 size = max_size; 1324 size = max_size;
1307 /* 1325 /*
1308 * If the maximum is not less than the current number of saveable pages 1326 * If the maximum is not less than the current number of saveable pages
1309 * in memory, allocate page frames for the image and we're done. 1327 * in memory, allocate page frames for the image and we're done.
1310 */ 1328 */
1311 if (size >= saveable) { 1329 if (size >= saveable) {
1312 pages = preallocate_image_highmem(save_highmem); 1330 pages = preallocate_image_highmem(save_highmem);
1313 pages += preallocate_image_memory(saveable - pages); 1331 pages += preallocate_image_memory(saveable - pages, avail_normal);
1314 goto out; 1332 goto out;
1315 } 1333 }
1316 1334
1317 /* Estimate the minimum size of the image. */ 1335 /* Estimate the minimum size of the image. */
1318 pages = minimum_image_size(saveable); 1336 pages = minimum_image_size(saveable);
1337 /*
1338 * To avoid excessive pressure on the normal zone, leave room in it to
1339 * accommodate an image of the minimum size (unless it's already too
1340 * small, in which case don't preallocate pages from it at all).
1341 */
1342 if (avail_normal > pages)
1343 avail_normal -= pages;
1344 else
1345 avail_normal = 0;
1319 if (size < pages) 1346 if (size < pages)
1320 size = min_t(unsigned long, pages, max_size); 1347 size = min_t(unsigned long, pages, max_size);
1321 1348
1322 /* 1349 /*
1323 * Let the memory management subsystem know that we're going to need a 1350 * Let the memory management subsystem know that we're going to need a
1324 * large number of page frames to allocate and make it free some memory. 1351 * large number of page frames to allocate and make it free some memory.
1325 * NOTE: If this is not done, performance will be hurt badly in some 1352 * NOTE: If this is not done, performance will be hurt badly in some
1326 * test cases. 1353 * test cases.
1327 */ 1354 */
1328 shrink_all_memory(saveable - size); 1355 shrink_all_memory(saveable - size);
1329 1356
1330 /* 1357 /*
1331 * The number of saveable pages in memory was too high, so apply some 1358 * The number of saveable pages in memory was too high, so apply some
1332 * pressure to decrease it. First, make room for the largest possible 1359 * pressure to decrease it. First, make room for the largest possible
1333 * image and fail if that doesn't work. Next, try to decrease the size 1360 * image and fail if that doesn't work. Next, try to decrease the size
1334 * of the image as much as indicated by 'size' using allocations from 1361 * of the image as much as indicated by 'size' using allocations from
1335 * highmem and non-highmem zones separately. 1362 * highmem and non-highmem zones separately.
1336 */ 1363 */
1337 pages_highmem = preallocate_image_highmem(highmem / 2); 1364 pages_highmem = preallocate_image_highmem(highmem / 2);
1338 alloc = (count - max_size) - pages_highmem; 1365 alloc = (count - max_size) - pages_highmem;
1339 pages = preallocate_image_memory(alloc); 1366 pages = preallocate_image_memory(alloc, avail_normal);
1340 if (pages < alloc) 1367 if (pages < alloc) {
1341 goto err_out; 1368 /* We have exhausted non-highmem pages, try highmem. */
1342 size = max_size - size; 1369 alloc -= pages;
1343 alloc = size; 1370 pages += pages_highmem;
1344 size = preallocate_highmem_fraction(size, highmem, count); 1371 pages_highmem = preallocate_image_highmem(alloc);
1345 pages_highmem += size; 1372 if (pages_highmem < alloc)
1346 alloc -= size; 1373 goto err_out;
1347 pages += preallocate_image_memory(alloc); 1374 pages += pages_highmem;
1348 pages += pages_highmem; 1375 /*
1376 * size is the desired number of saveable pages to leave in
1377 * memory, so try to preallocate (all memory - size) pages.
1378 */
1379 alloc = (count - pages) - size;
1380 pages += preallocate_image_highmem(alloc);
1381 } else {
1382 /*
1383 * There are approximately max_size saveable pages at this point
1384 * and we want to reduce this number down to size.
1385 */
1386 alloc = max_size - size;
1387 size = preallocate_highmem_fraction(alloc, highmem, count);
1388 pages_highmem += size;
1389 alloc -= size;
1390 size = preallocate_image_memory(alloc, avail_normal);
1391 pages_highmem += preallocate_image_highmem(alloc - size);
1392 pages += pages_highmem + size;
1393 }
1349 1394
1350 /* 1395 /*
1351 * We only need as many page frames for the image as there are saveable 1396 * We only need as many page frames for the image as there are saveable
1352 * pages in memory, but we have allocated more. Release the excessive 1397 * pages in memory, but we have allocated more. Release the excessive
1353 * ones now. 1398 * ones now.
1354 */ 1399 */
1355 free_unnecessary_pages(); 1400 free_unnecessary_pages();
1356 1401
1357 out: 1402 out:
1358 do_gettimeofday(&stop); 1403 do_gettimeofday(&stop);
1359 printk(KERN_CONT "done (allocated %lu pages)\n", pages); 1404 printk(KERN_CONT "done (allocated %lu pages)\n", pages);
1360 swsusp_show_speed(&start, &stop, pages, "Allocated"); 1405 swsusp_show_speed(&start, &stop, pages, "Allocated");
1361 1406
1362 return 0; 1407 return 0;
1363 1408
1364 err_out: 1409 err_out:
1365 printk(KERN_CONT "\n"); 1410 printk(KERN_CONT "\n");
1366 swsusp_free(); 1411 swsusp_free();
1367 return -ENOMEM; 1412 return -ENOMEM;
1368 } 1413 }
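
The sizing formula from the comment above can be checked with a standalone sketch; the PAGES_FOR_IO and SPARE_PAGES values below are made up, not the kernel's:

#include <stdio.h>

#define PAGES_FOR_IO    1024    /* hypothetical */
#define SPARE_PAGES     256     /* hypothetical */

int main(void)
{
        unsigned long count = 250000;   /* usable page frames */
        unsigned long meta = 500;       /* snapshot_additional_pages() total */
        unsigned long size = 60000;     /* DIV_ROUND_UP(image_size, PAGE_SIZE) */

        unsigned long max_size =
                (count - (meta + PAGES_FOR_IO)) / 2 - 2 * SPARE_PAGES;

        if (size > max_size)
                size = max_size;
        /* max_size = (250000 - 1524) / 2 - 512 = 123726, so the requested
         * 60000 pages stand and preallocation aims at count - 60000 frames.
         */
        printf("max_size = %lu, size = %lu\n", max_size, size);
        return 0;
}
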
1369 1414
1370 #ifdef CONFIG_HIGHMEM 1415 #ifdef CONFIG_HIGHMEM
1371 /** 1416 /**
1372 * count_pages_for_highmem - compute the number of non-highmem pages 1417 * count_pages_for_highmem - compute the number of non-highmem pages
1373 * that will be necessary for creating copies of highmem pages. 1418 * that will be necessary for creating copies of highmem pages.
1374 */ 1419 */
1375 1420
1376 static unsigned int count_pages_for_highmem(unsigned int nr_highmem) 1421 static unsigned int count_pages_for_highmem(unsigned int nr_highmem)
1377 { 1422 {
1378 unsigned int free_highmem = count_free_highmem_pages() + alloc_highmem; 1423 unsigned int free_highmem = count_free_highmem_pages() + alloc_highmem;
1379 1424
1380 if (free_highmem >= nr_highmem) 1425 if (free_highmem >= nr_highmem)
1381 nr_highmem = 0; 1426 nr_highmem = 0;
1382 else 1427 else
1383 nr_highmem -= free_highmem; 1428 nr_highmem -= free_highmem;
1384 1429
1385 return nr_highmem; 1430 return nr_highmem;
1386 } 1431 }
1387 #else 1432 #else
1388 static unsigned int 1433 static unsigned int
1389 count_pages_for_highmem(unsigned int nr_highmem) { return 0; } 1434 count_pages_for_highmem(unsigned int nr_highmem) { return 0; }
1390 #endif /* CONFIG_HIGHMEM */ 1435 #endif /* CONFIG_HIGHMEM */
1391 1436
1392 /** 1437 /**
1393 * enough_free_mem - Make sure we have enough free memory for the 1438 * enough_free_mem - Make sure we have enough free memory for the
1394 * snapshot image. 1439 * snapshot image.
1395 */ 1440 */
1396 1441
1397 static int enough_free_mem(unsigned int nr_pages, unsigned int nr_highmem) 1442 static int enough_free_mem(unsigned int nr_pages, unsigned int nr_highmem)
1398 { 1443 {
1399 struct zone *zone; 1444 struct zone *zone;
1400 unsigned int free = alloc_normal; 1445 unsigned int free = alloc_normal;
1401 1446
1402 for_each_populated_zone(zone) 1447 for_each_populated_zone(zone)
1403 if (!is_highmem(zone)) 1448 if (!is_highmem(zone))
1404 free += zone_page_state(zone, NR_FREE_PAGES); 1449 free += zone_page_state(zone, NR_FREE_PAGES);
1405 1450
1406 nr_pages += count_pages_for_highmem(nr_highmem); 1451 nr_pages += count_pages_for_highmem(nr_highmem);
1407 pr_debug("PM: Normal pages needed: %u + %u, available pages: %u\n", 1452 pr_debug("PM: Normal pages needed: %u + %u, available pages: %u\n",
1408 nr_pages, PAGES_FOR_IO, free); 1453 nr_pages, PAGES_FOR_IO, free);
1409 1454
1410 return free > nr_pages + PAGES_FOR_IO; 1455 return free > nr_pages + PAGES_FOR_IO;
1411 } 1456 }
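
A standalone model of the enough_free_mem() check, folding the highmem shortfall into the normal-page demand as count_pages_for_highmem() does; all counts and the PAGES_FOR_IO value are hypothetical:

#include <stdio.h>

#define PAGES_FOR_IO    1024u   /* hypothetical */

int main(void)
{
        unsigned int nr_pages = 50000, nr_highmem = 8000;
        unsigned int free_highmem = 6000;       /* incl. alloc_highmem */
        unsigned int free_normal = 54000;       /* incl. alloc_normal */

        /* 2000 highmem pages have no highmem frame for their copy, so
         * they add to the demand on the normal zone.
         */
        if (free_highmem < nr_highmem)
                nr_pages += nr_highmem - free_highmem;

        printf("need %u + %u, have %u: %s\n", nr_pages, PAGES_FOR_IO,
               free_normal,
               free_normal > nr_pages + PAGES_FOR_IO ? "enough" : "not enough");
        return 0;
}
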
1412 1457
1413 #ifdef CONFIG_HIGHMEM 1458 #ifdef CONFIG_HIGHMEM
1414 /** 1459 /**
1415 * get_highmem_buffer - if there are some highmem pages in the suspend 1460 * get_highmem_buffer - if there are some highmem pages in the suspend
1416 * image, we may need the buffer to copy them and/or load their data. 1461 * image, we may need the buffer to copy them and/or load their data.
1417 */ 1462 */
1418 1463
1419 static inline int get_highmem_buffer(int safe_needed) 1464 static inline int get_highmem_buffer(int safe_needed)
1420 { 1465 {
1421 buffer = get_image_page(GFP_ATOMIC | __GFP_COLD, safe_needed); 1466 buffer = get_image_page(GFP_ATOMIC | __GFP_COLD, safe_needed);
1422 return buffer ? 0 : -ENOMEM; 1467 return buffer ? 0 : -ENOMEM;
1423 } 1468 }
1424 1469
1425 /** 1470 /**
1426 * alloc_highmem_pages - allocate some highmem pages for the image. 1471 * alloc_highmem_pages - allocate some highmem pages for the image.
1427 * Try to allocate as many pages as needed, but if the number of free 1472 * Try to allocate as many pages as needed, but if the number of free
1428 * highmem pages is less than that, allocate them all. 1473 * highmem pages is less than that, allocate them all.
1429 */ 1474 */
1430 1475
1431 static inline unsigned int 1476 static inline unsigned int
1432 alloc_highmem_pages(struct memory_bitmap *bm, unsigned int nr_highmem) 1477 alloc_highmem_pages(struct memory_bitmap *bm, unsigned int nr_highmem)
1433 { 1478 {
1434 unsigned int to_alloc = count_free_highmem_pages(); 1479 unsigned int to_alloc = count_free_highmem_pages();
1435 1480
1436 if (to_alloc > nr_highmem) 1481 if (to_alloc > nr_highmem)
1437 to_alloc = nr_highmem; 1482 to_alloc = nr_highmem;
1438 1483
1439 nr_highmem -= to_alloc; 1484 nr_highmem -= to_alloc;
1440 while (to_alloc-- > 0) { 1485 while (to_alloc-- > 0) {
1441 struct page *page; 1486 struct page *page;
1442 1487
1443 page = alloc_image_page(__GFP_HIGHMEM); 1488 page = alloc_image_page(__GFP_HIGHMEM);
1444 memory_bm_set_bit(bm, page_to_pfn(page)); 1489 memory_bm_set_bit(bm, page_to_pfn(page));
1445 } 1490 }
1446 return nr_highmem; 1491 return nr_highmem;
1447 } 1492 }
1448 #else 1493 #else
1449 static inline int get_highmem_buffer(int safe_needed) { return 0; } 1494 static inline int get_highmem_buffer(int safe_needed) { return 0; }
1450 1495
1451 static inline unsigned int 1496 static inline unsigned int
1452 alloc_highmem_pages(struct memory_bitmap *bm, unsigned int n) { return 0; } 1497 alloc_highmem_pages(struct memory_bitmap *bm, unsigned int n) { return 0; }
1453 #endif /* CONFIG_HIGHMEM */ 1498 #endif /* CONFIG_HIGHMEM */
1454 1499
1455 /** 1500 /**
1456 * swsusp_alloc - allocate memory for the suspend image 1501 * swsusp_alloc - allocate memory for the suspend image
1457 * 1502 *
1458 * We first try to allocate as many highmem pages as there are 1503 * We first try to allocate as many highmem pages as there are
1459 * saveable highmem pages in the system. If that fails, we allocate 1504 * saveable highmem pages in the system. If that fails, we allocate
1460 * non-highmem pages for the copies of the remaining highmem ones. 1505 * non-highmem pages for the copies of the remaining highmem ones.
1461 * 1506 *
1462 * In this approach it is likely that the copies of highmem pages will 1507 * In this approach it is likely that the copies of highmem pages will
1463 * also be located in the high memory, because of the way in which 1508 * also be located in the high memory, because of the way in which
1464 * copy_data_pages() works. 1509 * copy_data_pages() works.
1465 */ 1510 */
1466 1511
1467 static int 1512 static int
1468 swsusp_alloc(struct memory_bitmap *orig_bm, struct memory_bitmap *copy_bm, 1513 swsusp_alloc(struct memory_bitmap *orig_bm, struct memory_bitmap *copy_bm,
1469 unsigned int nr_pages, unsigned int nr_highmem) 1514 unsigned int nr_pages, unsigned int nr_highmem)
1470 { 1515 {
1471 int error = 0; 1516 int error = 0;
1472 1517
1473 if (nr_highmem > 0) { 1518 if (nr_highmem > 0) {
1474 error = get_highmem_buffer(PG_ANY); 1519 error = get_highmem_buffer(PG_ANY);
1475 if (error) 1520 if (error)
1476 goto err_out; 1521 goto err_out;
1477 if (nr_highmem > alloc_highmem) { 1522 if (nr_highmem > alloc_highmem) {
1478 nr_highmem -= alloc_highmem; 1523 nr_highmem -= alloc_highmem;
1479 nr_pages += alloc_highmem_pages(copy_bm, nr_highmem); 1524 nr_pages += alloc_highmem_pages(copy_bm, nr_highmem);
1480 } 1525 }
1481 } 1526 }
1482 if (nr_pages > alloc_normal) { 1527 if (nr_pages > alloc_normal) {
1483 nr_pages -= alloc_normal; 1528 nr_pages -= alloc_normal;
1484 while (nr_pages-- > 0) { 1529 while (nr_pages-- > 0) {
1485 struct page *page; 1530 struct page *page;
1486 1531
1487 page = alloc_image_page(GFP_ATOMIC | __GFP_COLD); 1532 page = alloc_image_page(GFP_ATOMIC | __GFP_COLD);
1488 if (!page) 1533 if (!page)
1489 goto err_out; 1534 goto err_out;
1490 memory_bm_set_bit(copy_bm, page_to_pfn(page)); 1535 memory_bm_set_bit(copy_bm, page_to_pfn(page));
1491 } 1536 }
1492 } 1537 }
1493 1538
1494 return 0; 1539 return 0;
1495 1540
1496 err_out: 1541 err_out:
1497 swsusp_free(); 1542 swsusp_free();
1498 return error; 1543 return error;
1499 } 1544 }
1500 1545
1501 asmlinkage int swsusp_save(void) 1546 asmlinkage int swsusp_save(void)
1502 { 1547 {
1503 unsigned int nr_pages, nr_highmem; 1548 unsigned int nr_pages, nr_highmem;
1504 1549
1505 printk(KERN_INFO "PM: Creating hibernation image:\n"); 1550 printk(KERN_INFO "PM: Creating hibernation image:\n");
1506 1551
1507 drain_local_pages(NULL); 1552 drain_local_pages(NULL);
1508 nr_pages = count_data_pages(); 1553 nr_pages = count_data_pages();
1509 nr_highmem = count_highmem_pages(); 1554 nr_highmem = count_highmem_pages();
1510 printk(KERN_INFO "PM: Need to copy %u pages\n", nr_pages + nr_highmem); 1555 printk(KERN_INFO "PM: Need to copy %u pages\n", nr_pages + nr_highmem);
1511 1556
1512 if (!enough_free_mem(nr_pages, nr_highmem)) { 1557 if (!enough_free_mem(nr_pages, nr_highmem)) {
1513 printk(KERN_ERR "PM: Not enough free memory\n"); 1558 printk(KERN_ERR "PM: Not enough free memory\n");
1514 return -ENOMEM; 1559 return -ENOMEM;
1515 } 1560 }
1516 1561
1517 if (swsusp_alloc(&orig_bm, &copy_bm, nr_pages, nr_highmem)) { 1562 if (swsusp_alloc(&orig_bm, &copy_bm, nr_pages, nr_highmem)) {
1518 printk(KERN_ERR "PM: Memory allocation failed\n"); 1563 printk(KERN_ERR "PM: Memory allocation failed\n");
1519 return -ENOMEM; 1564 return -ENOMEM;
1520 } 1565 }
1521 1566
1522 /* During allocation of the suspend pagedir, new cold pages may appear. 1567 /* During allocation of the suspend pagedir, new cold pages may appear.
1523 * Kill them. 1568 * Kill them.
1524 */ 1569 */
1525 drain_local_pages(NULL); 1570 drain_local_pages(NULL);
1526 copy_data_pages(&copy_bm, &orig_bm); 1571 copy_data_pages(&copy_bm, &orig_bm);
1527 1572
1528 /* 1573 /*
1529 * End of critical section. From now on, we can write to memory, 1574 * End of critical section. From now on, we can write to memory,
1530 * but we should not touch disk. This especially means we must _not_ 1575 * but we should not touch disk. This especially means we must _not_
1531 * touch swap space! Except we must write out our image, of course. 1576 * touch swap space! Except we must write out our image, of course.
1532 */ 1577 */
1533 1578
1534 nr_pages += nr_highmem; 1579 nr_pages += nr_highmem;
1535 nr_copy_pages = nr_pages; 1580 nr_copy_pages = nr_pages;
1536 nr_meta_pages = DIV_ROUND_UP(nr_pages * sizeof(long), PAGE_SIZE); 1581 nr_meta_pages = DIV_ROUND_UP(nr_pages * sizeof(long), PAGE_SIZE);
1537 1582
1538 printk(KERN_INFO "PM: Hibernation image created (%d pages copied)\n", 1583 printk(KERN_INFO "PM: Hibernation image created (%d pages copied)\n",
1539 nr_pages); 1584 nr_pages);
1540 1585
1541 return 0; 1586 return 0;
1542 } 1587 }
1543 1588
1544 #ifndef CONFIG_ARCH_HIBERNATION_HEADER 1589 #ifndef CONFIG_ARCH_HIBERNATION_HEADER
1545 static int init_header_complete(struct swsusp_info *info) 1590 static int init_header_complete(struct swsusp_info *info)
1546 { 1591 {
1547 memcpy(&info->uts, init_utsname(), sizeof(struct new_utsname)); 1592 memcpy(&info->uts, init_utsname(), sizeof(struct new_utsname));
1548 info->version_code = LINUX_VERSION_CODE; 1593 info->version_code = LINUX_VERSION_CODE;
1549 return 0; 1594 return 0;
1550 } 1595 }
1551 1596
1552 static char *check_image_kernel(struct swsusp_info *info) 1597 static char *check_image_kernel(struct swsusp_info *info)
1553 { 1598 {
1554 if (info->version_code != LINUX_VERSION_CODE) 1599 if (info->version_code != LINUX_VERSION_CODE)
1555 return "kernel version"; 1600 return "kernel version";
1556 if (strcmp(info->uts.sysname,init_utsname()->sysname)) 1601 if (strcmp(info->uts.sysname,init_utsname()->sysname))
1557 return "system type"; 1602 return "system type";
1558 if (strcmp(info->uts.release,init_utsname()->release)) 1603 if (strcmp(info->uts.release,init_utsname()->release))
1559 return "kernel release"; 1604 return "kernel release";
1560 if (strcmp(info->uts.version,init_utsname()->version)) 1605 if (strcmp(info->uts.version,init_utsname()->version))
1561 return "version"; 1606 return "version";
1562 if (strcmp(info->uts.machine,init_utsname()->machine)) 1607 if (strcmp(info->uts.machine,init_utsname()->machine))
1563 return "machine"; 1608 return "machine";
1564 return NULL; 1609 return NULL;
1565 } 1610 }
1566 #endif /* CONFIG_ARCH_HIBERNATION_HEADER */ 1611 #endif /* CONFIG_ARCH_HIBERNATION_HEADER */
1567 1612
1568 unsigned long snapshot_get_image_size(void) 1613 unsigned long snapshot_get_image_size(void)
1569 { 1614 {
1570 return nr_copy_pages + nr_meta_pages + 1; 1615 return nr_copy_pages + nr_meta_pages + 1;
1571 } 1616 }
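
snapshot_get_image_size() counts the data pages, the metadata pages holding the packed pfns, and one header page. A standalone sketch of that arithmetic, assuming 4 KiB pages and 8-byte longs:

#include <stdio.h>

#define PAGE_SIZE       4096UL                  /* assumed */
#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

int main(void)
{
        unsigned long nr_copy_pages = 100000;   /* hypothetical */
        /* one 8-byte pfn per data page, packed into whole pages: 196 here */
        unsigned long nr_meta_pages =
                DIV_ROUND_UP(nr_copy_pages * sizeof(long), PAGE_SIZE);

        /* the extra 1 is the swsusp_info header page */
        printf("image size: %lu pages\n", nr_copy_pages + nr_meta_pages + 1);
        return 0;
}
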
1572 1617
1573 static int init_header(struct swsusp_info *info) 1618 static int init_header(struct swsusp_info *info)
1574 { 1619 {
1575 memset(info, 0, sizeof(struct swsusp_info)); 1620 memset(info, 0, sizeof(struct swsusp_info));
1576 info->num_physpages = num_physpages; 1621 info->num_physpages = num_physpages;
1577 info->image_pages = nr_copy_pages; 1622 info->image_pages = nr_copy_pages;
1578 info->pages = snapshot_get_image_size(); 1623 info->pages = snapshot_get_image_size();
1579 info->size = info->pages; 1624 info->size = info->pages;
1580 info->size <<= PAGE_SHIFT; 1625 info->size <<= PAGE_SHIFT;
1581 return init_header_complete(info); 1626 return init_header_complete(info);
1582 } 1627 }
1583 1628
1584 /** 1629 /**
1585 * pack_pfns - pfns corresponding to the set bits found in the bitmap @bm 1630 * pack_pfns - pfns corresponding to the set bits found in the bitmap @bm
1586 * are stored in the array @buf[] (1 page at a time) 1631 * are stored in the array @buf[] (1 page at a time)
1587 */ 1632 */
1588 1633
1589 static inline void 1634 static inline void
1590 pack_pfns(unsigned long *buf, struct memory_bitmap *bm) 1635 pack_pfns(unsigned long *buf, struct memory_bitmap *bm)
1591 { 1636 {
1592 int j; 1637 int j;
1593 1638
1594 for (j = 0; j < PAGE_SIZE / sizeof(long); j++) { 1639 for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
1595 buf[j] = memory_bm_next_pfn(bm); 1640 buf[j] = memory_bm_next_pfn(bm);
1596 if (unlikely(buf[j] == BM_END_OF_MAP)) 1641 if (unlikely(buf[j] == BM_END_OF_MAP))
1597 break; 1642 break;
1598 } 1643 }
1599 } 1644 }
1600 1645
1601 /** 1646 /**
1602 * snapshot_read_next - used for reading the system memory snapshot. 1647 * snapshot_read_next - used for reading the system memory snapshot.
1603 * 1648 *
1604 * On the first call to it @handle should point to a zeroed 1649 * On the first call to it @handle should point to a zeroed
1605 * snapshot_handle structure. The structure gets updated and a pointer 1650 * snapshot_handle structure. The structure gets updated and a pointer
1606 * to it should be passed to this function on each subsequent call. 1651 * to it should be passed to this function on each subsequent call.
1607 * 1652 *
1608 * On success the function returns a positive number. Then, the caller 1653 * On success the function returns a positive number. Then, the caller
1609 * is allowed to read up to the returned number of bytes from the memory 1654 * is allowed to read up to the returned number of bytes from the memory
1610 * location computed by the data_of() macro. 1655 * location computed by the data_of() macro.
1611 * 1656 *
1612 * The function returns 0 to indicate the end of data stream condition, 1657 * The function returns 0 to indicate the end of data stream condition,
1613 * and a negative number is returned on error. In such cases the 1658 * and a negative number is returned on error. In such cases the
1614 * structure pointed to by @handle is not updated and should not be used 1659 * structure pointed to by @handle is not updated and should not be used
1615 * any more. 1660 * any more.
1616 */ 1661 */
1617 1662
1618 int snapshot_read_next(struct snapshot_handle *handle) 1663 int snapshot_read_next(struct snapshot_handle *handle)
1619 { 1664 {
1620 if (handle->cur > nr_meta_pages + nr_copy_pages) 1665 if (handle->cur > nr_meta_pages + nr_copy_pages)
1621 return 0; 1666 return 0;
1622 1667
1623 if (!buffer) { 1668 if (!buffer) {
1624 /* This makes the buffer be freed by swsusp_free() */ 1669 /* This makes the buffer be freed by swsusp_free() */
1625 buffer = get_image_page(GFP_ATOMIC, PG_ANY); 1670 buffer = get_image_page(GFP_ATOMIC, PG_ANY);
1626 if (!buffer) 1671 if (!buffer)
1627 return -ENOMEM; 1672 return -ENOMEM;
1628 } 1673 }
1629 if (!handle->cur) { 1674 if (!handle->cur) {
1630 int error; 1675 int error;
1631 1676
1632 error = init_header((struct swsusp_info *)buffer); 1677 error = init_header((struct swsusp_info *)buffer);
1633 if (error) 1678 if (error)
1634 return error; 1679 return error;
1635 handle->buffer = buffer; 1680 handle->buffer = buffer;
1636 memory_bm_position_reset(&orig_bm); 1681 memory_bm_position_reset(&orig_bm);
1637 memory_bm_position_reset(&copy_bm); 1682 memory_bm_position_reset(&copy_bm);
1638 } else if (handle->cur <= nr_meta_pages) { 1683 } else if (handle->cur <= nr_meta_pages) {
1639 memset(buffer, 0, PAGE_SIZE); 1684 memset(buffer, 0, PAGE_SIZE);
1640 pack_pfns(buffer, &orig_bm); 1685 pack_pfns(buffer, &orig_bm);
1641 } else { 1686 } else {
1642 struct page *page; 1687 struct page *page;
1643 1688
1644 page = pfn_to_page(memory_bm_next_pfn(&copy_bm)); 1689 page = pfn_to_page(memory_bm_next_pfn(&copy_bm));
1645 if (PageHighMem(page)) { 1690 if (PageHighMem(page)) {
1646 /* Highmem pages are copied to the buffer, 1691 /* Highmem pages are copied to the buffer,
1647 * because we can't return with a kmapped 1692 * because we can't return with a kmapped
1648 * highmem page (we may not be called again). 1693 * highmem page (we may not be called again).
1649 */ 1694 */
1650 void *kaddr; 1695 void *kaddr;
1651 1696
1652 kaddr = kmap_atomic(page, KM_USER0); 1697 kaddr = kmap_atomic(page, KM_USER0);
1653 memcpy(buffer, kaddr, PAGE_SIZE); 1698 memcpy(buffer, kaddr, PAGE_SIZE);
1654 kunmap_atomic(kaddr, KM_USER0); 1699 kunmap_atomic(kaddr, KM_USER0);
1655 handle->buffer = buffer; 1700 handle->buffer = buffer;
1656 } else { 1701 } else {
1657 handle->buffer = page_address(page); 1702 handle->buffer = page_address(page);
1658 } 1703 }
1659 } 1704 }
1660 handle->cur++; 1705 handle->cur++;
1661 return PAGE_SIZE; 1706 return PAGE_SIZE;
1662 } 1707 }
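
The calling convention described in the comment above (a positive byte count, 0 at end of stream, negative on error) can be modelled with a toy reader; everything here is hypothetical:

#include <stdio.h>

struct toy_handle {
        int cur;        /* zeroed before the first call, as with @handle */
};

static int toy_read_next(struct toy_handle *h)
{
        if (h->cur >= 3)
                return 0;       /* end of data stream */
        h->cur++;
        return 4096;            /* caller may read this many bytes */
}

int main(void)
{
        struct toy_handle h = { 0 };
        int n;

        while ((n = toy_read_next(&h)) > 0)
                printf("consume %d bytes\n", n);
        return n < 0 ? 1 : 0;   /* a negative return would mean an error */
}
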
1663 1708
1664 /** 1709 /**
1665 * mark_unsafe_pages - mark the pages that cannot be used for storing 1710 * mark_unsafe_pages - mark the pages that cannot be used for storing
1666 * the image during resume, because they conflict with the pages that 1711 * the image during resume, because they conflict with the pages that
1667 * had been used before suspend 1712 * had been used before suspend
1668 */ 1713 */
1669 1714
1670 static int mark_unsafe_pages(struct memory_bitmap *bm) 1715 static int mark_unsafe_pages(struct memory_bitmap *bm)
1671 { 1716 {
1672 struct zone *zone; 1717 struct zone *zone;
1673 unsigned long pfn, max_zone_pfn; 1718 unsigned long pfn, max_zone_pfn;
1674 1719
1675 /* Clear page flags */ 1720 /* Clear page flags */
1676 for_each_populated_zone(zone) { 1721 for_each_populated_zone(zone) {
1677 max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages; 1722 max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
1678 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) 1723 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
1679 if (pfn_valid(pfn)) 1724 if (pfn_valid(pfn))
1680 swsusp_unset_page_free(pfn_to_page(pfn)); 1725 swsusp_unset_page_free(pfn_to_page(pfn));
1681 } 1726 }
1682 1727
1683 /* Mark pages that correspond to the "original" pfns as "unsafe" */ 1728 /* Mark pages that correspond to the "original" pfns as "unsafe" */
1684 memory_bm_position_reset(bm); 1729 memory_bm_position_reset(bm);
1685 do { 1730 do {
1686 pfn = memory_bm_next_pfn(bm); 1731 pfn = memory_bm_next_pfn(bm);
1687 if (likely(pfn != BM_END_OF_MAP)) { 1732 if (likely(pfn != BM_END_OF_MAP)) {
1688 if (likely(pfn_valid(pfn))) 1733 if (likely(pfn_valid(pfn)))
1689 swsusp_set_page_free(pfn_to_page(pfn)); 1734 swsusp_set_page_free(pfn_to_page(pfn));
1690 else 1735 else
1691 return -EFAULT; 1736 return -EFAULT;
1692 } 1737 }
1693 } while (pfn != BM_END_OF_MAP); 1738 } while (pfn != BM_END_OF_MAP);
1694 1739
1695 allocated_unsafe_pages = 0; 1740 allocated_unsafe_pages = 0;
1696 1741
1697 return 0; 1742 return 0;
1698 } 1743 }
1699 1744
1700 static void 1745 static void
1701 duplicate_memory_bitmap(struct memory_bitmap *dst, struct memory_bitmap *src) 1746 duplicate_memory_bitmap(struct memory_bitmap *dst, struct memory_bitmap *src)
1702 { 1747 {
1703 unsigned long pfn; 1748 unsigned long pfn;
1704 1749
1705 memory_bm_position_reset(src); 1750 memory_bm_position_reset(src);
1706 pfn = memory_bm_next_pfn(src); 1751 pfn = memory_bm_next_pfn(src);
1707 while (pfn != BM_END_OF_MAP) { 1752 while (pfn != BM_END_OF_MAP) {
1708 memory_bm_set_bit(dst, pfn); 1753 memory_bm_set_bit(dst, pfn);
1709 pfn = memory_bm_next_pfn(src); 1754 pfn = memory_bm_next_pfn(src);
1710 } 1755 }
1711 } 1756 }
1712 1757
1713 static int check_header(struct swsusp_info *info) 1758 static int check_header(struct swsusp_info *info)
1714 { 1759 {
1715 char *reason; 1760 char *reason;
1716 1761
1717 reason = check_image_kernel(info); 1762 reason = check_image_kernel(info);
1718 if (!reason && info->num_physpages != num_physpages) 1763 if (!reason && info->num_physpages != num_physpages)
1719 reason = "memory size"; 1764 reason = "memory size";
1720 if (reason) { 1765 if (reason) {
1721 printk(KERN_ERR "PM: Image mismatch: %s\n", reason); 1766 printk(KERN_ERR "PM: Image mismatch: %s\n", reason);
1722 return -EPERM; 1767 return -EPERM;
1723 } 1768 }
1724 return 0; 1769 return 0;
1725 } 1770 }
1726 1771
1727 /** 1772 /**
1728 * load_header - check the image header and copy data from it 1773 * load_header - check the image header and copy data from it
1729 */ 1774 */
1730 1775
1731 static int 1776 static int
1732 load_header(struct swsusp_info *info) 1777 load_header(struct swsusp_info *info)
1733 { 1778 {
1734 int error; 1779 int error;
1735 1780
1736 restore_pblist = NULL; 1781 restore_pblist = NULL;
1737 error = check_header(info); 1782 error = check_header(info);
1738 if (!error) { 1783 if (!error) {
1739 nr_copy_pages = info->image_pages; 1784 nr_copy_pages = info->image_pages;
1740 nr_meta_pages = info->pages - info->image_pages - 1; 1785 nr_meta_pages = info->pages - info->image_pages - 1;
1741 } 1786 }
1742 return error; 1787 return error;
1743 } 1788 }
1744 1789
1745 /** 1790 /**
1746 * unpack_orig_pfns - for each element of @buf[] (1 page at a time) set 1791 * unpack_orig_pfns - for each element of @buf[] (1 page at a time) set
1747 * the corresponding bit in the memory bitmap @bm 1792 * the corresponding bit in the memory bitmap @bm
1748 */ 1793 */
1749 static int unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm) 1794 static int unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm)
1750 { 1795 {
1751 int j; 1796 int j;
1752 1797
1753 for (j = 0; j < PAGE_SIZE / sizeof(long); j++) { 1798 for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
1754 if (unlikely(buf[j] == BM_END_OF_MAP)) 1799 if (unlikely(buf[j] == BM_END_OF_MAP))
1755 break; 1800 break;
1756 1801
1757 if (memory_bm_pfn_present(bm, buf[j])) 1802 if (memory_bm_pfn_present(bm, buf[j]))
1758 memory_bm_set_bit(bm, buf[j]); 1803 memory_bm_set_bit(bm, buf[j]);
1759 else 1804 else
1760 return -EFAULT; 1805 return -EFAULT;
1761 } 1806 }
1762 1807
1763 return 0; 1808 return 0;
1764 } 1809 }
1765 1810
1766 /* List of "safe" pages that may be used to store data loaded from the suspend 1811 /* List of "safe" pages that may be used to store data loaded from the suspend
1767 * image 1812 * image
1768 */ 1813 */
1769 static struct linked_page *safe_pages_list; 1814 static struct linked_page *safe_pages_list;
1770 1815
1771 #ifdef CONFIG_HIGHMEM 1816 #ifdef CONFIG_HIGHMEM
1772 /* struct highmem_pbe is used for creating the list of highmem pages that 1817 /* struct highmem_pbe is used for creating the list of highmem pages that
1773 * should be restored atomically during the resume from disk, because the page 1818 * should be restored atomically during the resume from disk, because the page
1774 * frames they occupied before the suspend are in use. 1819 * frames they occupied before the suspend are in use.
1775 */ 1820 */
1776 struct highmem_pbe { 1821 struct highmem_pbe {
1777 struct page *copy_page; /* data is here now */ 1822 struct page *copy_page; /* data is here now */
1778 struct page *orig_page; /* data was here before the suspend */ 1823 struct page *orig_page; /* data was here before the suspend */
1779 struct highmem_pbe *next; 1824 struct highmem_pbe *next;
1780 }; 1825 };
1781 1826
1782 /* List of highmem PBEs needed for restoring the highmem pages that were 1827 /* List of highmem PBEs needed for restoring the highmem pages that were
1783 * allocated before the suspend and included in the suspend image, but have 1828 * allocated before the suspend and included in the suspend image, but have
1784 * also been allocated by the "resume" kernel, so their contents cannot be 1829 * also been allocated by the "resume" kernel, so their contents cannot be
1785 * written directly to their "original" page frames. 1830 * written directly to their "original" page frames.
1786 */ 1831 */
1787 static struct highmem_pbe *highmem_pblist; 1832 static struct highmem_pbe *highmem_pblist;
1788 1833
1789 /** 1834 /**
1790 * count_highmem_image_pages - compute the number of highmem pages in the 1835 * count_highmem_image_pages - compute the number of highmem pages in the
1791 * suspend image. The bits in the memory bitmap @bm that correspond to the 1836 * suspend image. The bits in the memory bitmap @bm that correspond to the
1792 * image pages are assumed to be set. 1837 * image pages are assumed to be set.
1793 */ 1838 */
1794 1839
1795 static unsigned int count_highmem_image_pages(struct memory_bitmap *bm) 1840 static unsigned int count_highmem_image_pages(struct memory_bitmap *bm)
1796 { 1841 {
1797 unsigned long pfn; 1842 unsigned long pfn;
1798 unsigned int cnt = 0; 1843 unsigned int cnt = 0;
1799 1844
1800 memory_bm_position_reset(bm); 1845 memory_bm_position_reset(bm);
1801 pfn = memory_bm_next_pfn(bm); 1846 pfn = memory_bm_next_pfn(bm);
1802 while (pfn != BM_END_OF_MAP) { 1847 while (pfn != BM_END_OF_MAP) {
1803 if (PageHighMem(pfn_to_page(pfn))) 1848 if (PageHighMem(pfn_to_page(pfn)))
1804 cnt++; 1849 cnt++;
1805 1850
1806 pfn = memory_bm_next_pfn(bm); 1851 pfn = memory_bm_next_pfn(bm);
1807 } 1852 }
1808 return cnt; 1853 return cnt;
1809 } 1854 }
1810 1855
1811 /** 1856 /**
1812 * prepare_highmem_image - try to allocate as many highmem pages as 1857 * prepare_highmem_image - try to allocate as many highmem pages as
1813 * there are highmem image pages (@nr_highmem_p points to the variable 1858 * there are highmem image pages (@nr_highmem_p points to the variable
1814 * containing the number of highmem image pages). The pages that are 1859 * containing the number of highmem image pages). The pages that are
1815 * "safe" (ie. will not be overwritten when the suspend image is 1860 * "safe" (ie. will not be overwritten when the suspend image is
1816 * restored) have the corresponding bits set in @bm (it must be 1861 * restored) have the corresponding bits set in @bm (it must be
1817 * uninitialized). 1862 * uninitialized).
1818 * 1863 *
1819 * NOTE: This function should not be called if there are no highmem 1864 * NOTE: This function should not be called if there are no highmem
1820 * image pages. 1865 * image pages.
1821 */ 1866 */
1822 1867
1823 static unsigned int safe_highmem_pages; 1868 static unsigned int safe_highmem_pages;
1824 1869
1825 static struct memory_bitmap *safe_highmem_bm; 1870 static struct memory_bitmap *safe_highmem_bm;
1826 1871
1827 static int 1872 static int
1828 prepare_highmem_image(struct memory_bitmap *bm, unsigned int *nr_highmem_p) 1873 prepare_highmem_image(struct memory_bitmap *bm, unsigned int *nr_highmem_p)
1829 { 1874 {
1830 unsigned int to_alloc; 1875 unsigned int to_alloc;
1831 1876
1832 if (memory_bm_create(bm, GFP_ATOMIC, PG_SAFE)) 1877 if (memory_bm_create(bm, GFP_ATOMIC, PG_SAFE))
1833 return -ENOMEM; 1878 return -ENOMEM;
1834 1879
1835 if (get_highmem_buffer(PG_SAFE)) 1880 if (get_highmem_buffer(PG_SAFE))
1836 return -ENOMEM; 1881 return -ENOMEM;
1837 1882
1838 to_alloc = count_free_highmem_pages(); 1883 to_alloc = count_free_highmem_pages();
1839 if (to_alloc > *nr_highmem_p) 1884 if (to_alloc > *nr_highmem_p)
1840 to_alloc = *nr_highmem_p; 1885 to_alloc = *nr_highmem_p;
1841 else 1886 else
1842 *nr_highmem_p = to_alloc; 1887 *nr_highmem_p = to_alloc;
1843 1888
1844 safe_highmem_pages = 0; 1889 safe_highmem_pages = 0;
1845 while (to_alloc-- > 0) { 1890 while (to_alloc-- > 0) {
1846 struct page *page; 1891 struct page *page;
1847 1892
1848 page = alloc_page(__GFP_HIGHMEM); 1893 page = alloc_page(__GFP_HIGHMEM);
1849 if (!swsusp_page_is_free(page)) { 1894 if (!swsusp_page_is_free(page)) {
1850 /* The page is "safe", set its bit in the bitmap */ 1895 /* The page is "safe", set its bit in the bitmap */
1851 memory_bm_set_bit(bm, page_to_pfn(page)); 1896 memory_bm_set_bit(bm, page_to_pfn(page));
1852 safe_highmem_pages++; 1897 safe_highmem_pages++;
1853 } 1898 }
1854 /* Mark the page as allocated */ 1899 /* Mark the page as allocated */
1855 swsusp_set_page_forbidden(page); 1900 swsusp_set_page_forbidden(page);
1856 swsusp_set_page_free(page); 1901 swsusp_set_page_free(page);
1857 } 1902 }
1858 memory_bm_position_reset(bm); 1903 memory_bm_position_reset(bm);
1859 safe_highmem_bm = bm; 1904 safe_highmem_bm = bm;
1860 return 0; 1905 return 0;
1861 } 1906 }
1862 1907
1863 /** 1908 /**
1864 * get_highmem_page_buffer - for given highmem image page find the buffer 1909 * get_highmem_page_buffer - for given highmem image page find the buffer
1865 * that suspend_write_next() should set for its caller to write to. 1910 * that suspend_write_next() should set for its caller to write to.
1866 * 1911 *
1867 * If the page is to be saved to its "original" page frame or a copy of 1912 * If the page is to be saved to its "original" page frame or a copy of
1868 * the page is to be made in highmem, @buffer is returned. Otherwise, 1913 * the page is to be made in highmem, @buffer is returned. Otherwise,
1869 * the copy of the page is to be made in normal memory, so the address of 1914 * the copy of the page is to be made in normal memory, so the address of
1870 * the copy is returned. 1915 * the copy is returned.
1871 * 1916 *
1872 * If @buffer is returned, the caller of suspend_write_next() will write 1917 * If @buffer is returned, the caller of suspend_write_next() will write
1873 * the page's contents to @buffer, so they will have to be copied to the 1918 * the page's contents to @buffer, so they will have to be copied to the
1874 * right location on the next call to suspend_write_next() and it is done 1919 * right location on the next call to suspend_write_next() and it is done
1875 * with the help of copy_last_highmem_page(). For this purpose, if 1920 * with the help of copy_last_highmem_page(). For this purpose, if
1876 * @buffer is returned, @last_highmem_page is set to the page to which 1921 * @buffer is returned, @last_highmem_page is set to the page to which
1877 * the data will have to be copied from @buffer. 1922 * the data will have to be copied from @buffer.
1878 */ 1923 */
1879 1924
1880 static struct page *last_highmem_page; 1925 static struct page *last_highmem_page;
1881 1926
1882 static void * 1927 static void *
1883 get_highmem_page_buffer(struct page *page, struct chain_allocator *ca) 1928 get_highmem_page_buffer(struct page *page, struct chain_allocator *ca)
1884 { 1929 {
1885 struct highmem_pbe *pbe; 1930 struct highmem_pbe *pbe;
1886 void *kaddr; 1931 void *kaddr;
1887 1932
1888 if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page)) { 1933 if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page)) {
1889 /* We have allocated the "original" page frame and we can 1934 /* We have allocated the "original" page frame and we can
1890 * use it directly to store the loaded page. 1935 * use it directly to store the loaded page.
1891 */ 1936 */
1892 last_highmem_page = page; 1937 last_highmem_page = page;
1893 return buffer; 1938 return buffer;
1894 } 1939 }
1895 /* The "original" page frame has not been allocated and we have to 1940 /* The "original" page frame has not been allocated and we have to
1896 * use a "safe" page frame to store the loaded page. 1941 * use a "safe" page frame to store the loaded page.
1897 */ 1942 */
1898 pbe = chain_alloc(ca, sizeof(struct highmem_pbe)); 1943 pbe = chain_alloc(ca, sizeof(struct highmem_pbe));
1899 if (!pbe) { 1944 if (!pbe) {
1900 swsusp_free(); 1945 swsusp_free();
1901 return ERR_PTR(-ENOMEM); 1946 return ERR_PTR(-ENOMEM);
1902 } 1947 }
1903 pbe->orig_page = page; 1948 pbe->orig_page = page;
1904 if (safe_highmem_pages > 0) { 1949 if (safe_highmem_pages > 0) {
1905 struct page *tmp; 1950 struct page *tmp;
1906 1951
1907 /* Copy of the page will be stored in high memory */ 1952 /* Copy of the page will be stored in high memory */
1908 kaddr = buffer; 1953 kaddr = buffer;
1909 tmp = pfn_to_page(memory_bm_next_pfn(safe_highmem_bm)); 1954 tmp = pfn_to_page(memory_bm_next_pfn(safe_highmem_bm));
1910 safe_highmem_pages--; 1955 safe_highmem_pages--;
1911 last_highmem_page = tmp; 1956 last_highmem_page = tmp;
1912 pbe->copy_page = tmp; 1957 pbe->copy_page = tmp;
1913 } else { 1958 } else {
1914 /* Copy of the page will be stored in normal memory */ 1959 /* Copy of the page will be stored in normal memory */
1915 kaddr = safe_pages_list; 1960 kaddr = safe_pages_list;
1916 safe_pages_list = safe_pages_list->next; 1961 safe_pages_list = safe_pages_list->next;
1917 pbe->copy_page = virt_to_page(kaddr); 1962 pbe->copy_page = virt_to_page(kaddr);
1918 } 1963 }
1919 pbe->next = highmem_pblist; 1964 pbe->next = highmem_pblist;
1920 highmem_pblist = pbe; 1965 highmem_pblist = pbe;
1921 return kaddr; 1966 return kaddr;
1922 } 1967 }
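Editorial note: the contract documented above, compressed into one hypothetical caller-side fragment. In the real flow the memcpy() is performed by the caller of snapshot_write_next() and copy_last_highmem_page() runs on the *next* call; `incoming` is a stand-in for the data source, and `page`/`ca` are assumed in scope:

	void *dst = get_highmem_page_buffer(page, &ca);
	if (!IS_ERR(dst)) {
		memcpy(dst, incoming, PAGE_SIZE);	/* write the page data */
		if (dst == buffer)
			/* data landed in the bounce buffer; flush it to the
			 * frame recorded in last_highmem_page */
			copy_last_highmem_page();
	}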
1923 1968
1924 /** 1969 /**
1925 * copy_last_highmem_page - copy the contents of a highmem image page from 1970 * copy_last_highmem_page - copy the contents of a highmem image page from
1926 * @buffer, where the caller of snapshot_write_next() has placed them, 1971 * @buffer, where the caller of snapshot_write_next() has placed them,
1927 * to the right location represented by @last_highmem_page. 1972 * to the right location represented by @last_highmem_page.
1928 */ 1973 */
1929 1974
1930 static void copy_last_highmem_page(void) 1975 static void copy_last_highmem_page(void)
1931 { 1976 {
1932 if (last_highmem_page) { 1977 if (last_highmem_page) {
1933 void *dst; 1978 void *dst;
1934 1979
1935 dst = kmap_atomic(last_highmem_page, KM_USER0); 1980 dst = kmap_atomic(last_highmem_page, KM_USER0);
1936 memcpy(dst, buffer, PAGE_SIZE); 1981 memcpy(dst, buffer, PAGE_SIZE);
1937 kunmap_atomic(dst, KM_USER0); 1982 kunmap_atomic(dst, KM_USER0);
1938 last_highmem_page = NULL; 1983 last_highmem_page = NULL;
1939 } 1984 }
1940 } 1985 }
1941 1986
1942 static inline int last_highmem_page_copied(void) 1987 static inline int last_highmem_page_copied(void)
1943 { 1988 {
1944 return !last_highmem_page; 1989 return !last_highmem_page;
1945 } 1990 }
1946 1991
1947 static inline void free_highmem_data(void) 1992 static inline void free_highmem_data(void)
1948 { 1993 {
1949 if (safe_highmem_bm) 1994 if (safe_highmem_bm)
1950 memory_bm_free(safe_highmem_bm, PG_UNSAFE_CLEAR); 1995 memory_bm_free(safe_highmem_bm, PG_UNSAFE_CLEAR);
1951 1996
1952 if (buffer) 1997 if (buffer)
1953 free_image_page(buffer, PG_UNSAFE_CLEAR); 1998 free_image_page(buffer, PG_UNSAFE_CLEAR);
1954 } 1999 }
1955 #else 2000 #else
1956 static inline int get_safe_write_buffer(void) { return 0; } 2001 static inline int get_safe_write_buffer(void) { return 0; }
1957 2002
1958 static unsigned int 2003 static unsigned int
1959 count_highmem_image_pages(struct memory_bitmap *bm) { return 0; } 2004 count_highmem_image_pages(struct memory_bitmap *bm) { return 0; }
1960 2005
1961 static inline int 2006 static inline int
1962 prepare_highmem_image(struct memory_bitmap *bm, unsigned int *nr_highmem_p) 2007 prepare_highmem_image(struct memory_bitmap *bm, unsigned int *nr_highmem_p)
1963 { 2008 {
1964 return 0; 2009 return 0;
1965 } 2010 }
1966 2011
1967 static inline void * 2012 static inline void *
1968 get_highmem_page_buffer(struct page *page, struct chain_allocator *ca) 2013 get_highmem_page_buffer(struct page *page, struct chain_allocator *ca)
1969 { 2014 {
1970 return ERR_PTR(-EINVAL); 2015 return ERR_PTR(-EINVAL);
1971 } 2016 }
1972 2017
1973 static inline void copy_last_highmem_page(void) {} 2018 static inline void copy_last_highmem_page(void) {}
1974 static inline int last_highmem_page_copied(void) { return 1; } 2019 static inline int last_highmem_page_copied(void) { return 1; }
1975 static inline void free_highmem_data(void) {} 2020 static inline void free_highmem_data(void) {}
1976 #endif /* CONFIG_HIGHMEM */ 2021 #endif /* CONFIG_HIGHMEM */
1977 2022
1978 /** 2023 /**
1979 * prepare_image - use the memory bitmap @bm to mark the pages that will 2024 * prepare_image - use the memory bitmap @bm to mark the pages that will
1980 * be overwritten in the process of restoring the system memory state 2025 * be overwritten in the process of restoring the system memory state
1981 * from the suspend image ("unsafe" pages) and allocate memory for the 2026 * from the suspend image ("unsafe" pages) and allocate memory for the
1982 * image. 2027 * image.
1983 * 2028 *
1984 * The idea is to allocate a new memory bitmap first and then allocate 2029 * The idea is to allocate a new memory bitmap first and then allocate
1985 * as many pages as needed for the image data, but not to assign these 2030 * as many pages as needed for the image data, but not to assign these
1986 * pages to specific tasks initially. Instead, we just mark them as 2031 * pages to specific tasks initially. Instead, we just mark them as
1987 * allocated and create a list of "safe" pages that will be used 2032 * allocated and create a list of "safe" pages that will be used
1988 * later. On systems with high memory a list of "safe" highmem pages is 2033 * later. On systems with high memory a list of "safe" highmem pages is
1989 * also created. 2034 * also created.
1990 */ 2035 */
1991 2036
1992 #define PBES_PER_LINKED_PAGE (LINKED_PAGE_DATA_SIZE / sizeof(struct pbe)) 2037 #define PBES_PER_LINKED_PAGE (LINKED_PAGE_DATA_SIZE / sizeof(struct pbe))
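Editorial note: for scale, a back-of-the-envelope figure (assuming 4 KiB pages, a three-pointer struct pbe, and LINKED_PAGE_DATA_SIZE == PAGE_SIZE - sizeof(void *), as defined earlier in this file):

	/* 64-bit: (4096 - 8) / 24 == 170 PBEs per linked page */
	/* 32-bit: (4096 - 4) / 12 == 341 PBEs per linked page */

so the DIV_ROUND_UP() sizing in prepare_image() below reserves roughly one safe page per 170 (or 341) image pages.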
1993 2038
1994 static int 2039 static int
1995 prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm) 2040 prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
1996 { 2041 {
1997 unsigned int nr_pages, nr_highmem; 2042 unsigned int nr_pages, nr_highmem;
1998 struct linked_page *sp_list, *lp; 2043 struct linked_page *sp_list, *lp;
1999 int error; 2044 int error;
2000 2045
2001 /* If there is no highmem, the buffer will not be necessary */ 2046 /* If there is no highmem, the buffer will not be necessary */
2002 free_image_page(buffer, PG_UNSAFE_CLEAR); 2047 free_image_page(buffer, PG_UNSAFE_CLEAR);
2003 buffer = NULL; 2048 buffer = NULL;
2004 2049
2005 nr_highmem = count_highmem_image_pages(bm); 2050 nr_highmem = count_highmem_image_pages(bm);
2006 error = mark_unsafe_pages(bm); 2051 error = mark_unsafe_pages(bm);
2007 if (error) 2052 if (error)
2008 goto Free; 2053 goto Free;
2009 2054
2010 error = memory_bm_create(new_bm, GFP_ATOMIC, PG_SAFE); 2055 error = memory_bm_create(new_bm, GFP_ATOMIC, PG_SAFE);
2011 if (error) 2056 if (error)
2012 goto Free; 2057 goto Free;
2013 2058
2014 duplicate_memory_bitmap(new_bm, bm); 2059 duplicate_memory_bitmap(new_bm, bm);
2015 memory_bm_free(bm, PG_UNSAFE_KEEP); 2060 memory_bm_free(bm, PG_UNSAFE_KEEP);
2016 if (nr_highmem > 0) { 2061 if (nr_highmem > 0) {
2017 error = prepare_highmem_image(bm, &nr_highmem); 2062 error = prepare_highmem_image(bm, &nr_highmem);
2018 if (error) 2063 if (error)
2019 goto Free; 2064 goto Free;
2020 } 2065 }
2021 /* Reserve some safe pages for potential later use. 2066 /* Reserve some safe pages for potential later use.
2022 * 2067 *
2023 * NOTE: This way we make sure there will be enough safe pages for the 2068 * NOTE: This way we make sure there will be enough safe pages for the
2024 * chain_alloc() in get_buffer(). It is a bit wasteful, but 2069 * chain_alloc() in get_buffer(). It is a bit wasteful, but
2025 * nr_copy_pages cannot be greater than 50% of the memory anyway. 2070 * nr_copy_pages cannot be greater than 50% of the memory anyway.
2026 */ 2071 */
2027 sp_list = NULL; 2072 sp_list = NULL;
2028 /* nr_copy_pages cannot be less than allocated_unsafe_pages */ 2073 /* nr_copy_pages cannot be less than allocated_unsafe_pages */
2029 nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages; 2074 nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
2030 nr_pages = DIV_ROUND_UP(nr_pages, PBES_PER_LINKED_PAGE); 2075 nr_pages = DIV_ROUND_UP(nr_pages, PBES_PER_LINKED_PAGE);
2031 while (nr_pages > 0) { 2076 while (nr_pages > 0) {
2032 lp = get_image_page(GFP_ATOMIC, PG_SAFE); 2077 lp = get_image_page(GFP_ATOMIC, PG_SAFE);
2033 if (!lp) { 2078 if (!lp) {
2034 error = -ENOMEM; 2079 error = -ENOMEM;
2035 goto Free; 2080 goto Free;
2036 } 2081 }
2037 lp->next = sp_list; 2082 lp->next = sp_list;
2038 sp_list = lp; 2083 sp_list = lp;
2039 nr_pages--; 2084 nr_pages--;
2040 } 2085 }
2041 /* Preallocate memory for the image */ 2086 /* Preallocate memory for the image */
2042 safe_pages_list = NULL; 2087 safe_pages_list = NULL;
2043 nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages; 2088 nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
2044 while (nr_pages > 0) { 2089 while (nr_pages > 0) {
2045 lp = (struct linked_page *)get_zeroed_page(GFP_ATOMIC); 2090 lp = (struct linked_page *)get_zeroed_page(GFP_ATOMIC);
2046 if (!lp) { 2091 if (!lp) {
2047 error = -ENOMEM; 2092 error = -ENOMEM;
2048 goto Free; 2093 goto Free;
2049 } 2094 }
2050 if (!swsusp_page_is_free(virt_to_page(lp))) { 2095 if (!swsusp_page_is_free(virt_to_page(lp))) {
2051 /* The page is "safe", add it to the list */ 2096 /* The page is "safe", add it to the list */
2052 lp->next = safe_pages_list; 2097 lp->next = safe_pages_list;
2053 safe_pages_list = lp; 2098 safe_pages_list = lp;
2054 } 2099 }
2055 /* Mark the page as allocated */ 2100 /* Mark the page as allocated */
2056 swsusp_set_page_forbidden(virt_to_page(lp)); 2101 swsusp_set_page_forbidden(virt_to_page(lp));
2057 swsusp_set_page_free(virt_to_page(lp)); 2102 swsusp_set_page_free(virt_to_page(lp));
2058 nr_pages--; 2103 nr_pages--;
2059 } 2104 }
2060 /* Free the reserved safe pages so that chain_alloc() can use them */ 2105 /* Free the reserved safe pages so that chain_alloc() can use them */
2061 while (sp_list) { 2106 while (sp_list) {
2062 lp = sp_list->next; 2107 lp = sp_list->next;
2063 free_image_page(sp_list, PG_UNSAFE_CLEAR); 2108 free_image_page(sp_list, PG_UNSAFE_CLEAR);
2064 sp_list = lp; 2109 sp_list = lp;
2065 } 2110 }
2066 return 0; 2111 return 0;
2067 2112
2068 Free: 2113 Free:
2069 swsusp_free(); 2114 swsusp_free();
2070 return error; 2115 return error;
2071 } 2116 }
2072 2117
2073 /** 2118 /**
2074 * get_buffer - compute the address that snapshot_write_next() should 2119 * get_buffer - compute the address that snapshot_write_next() should
2075 * set for its caller to write to. 2120 * set for its caller to write to.
2076 */ 2121 */
2077 2122
2078 static void *get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca) 2123 static void *get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca)
2079 { 2124 {
2080 struct pbe *pbe; 2125 struct pbe *pbe;
2081 struct page *page; 2126 struct page *page;
2082 unsigned long pfn = memory_bm_next_pfn(bm); 2127 unsigned long pfn = memory_bm_next_pfn(bm);
2083 2128
2084 if (pfn == BM_END_OF_MAP) 2129 if (pfn == BM_END_OF_MAP)
2085 return ERR_PTR(-EFAULT); 2130 return ERR_PTR(-EFAULT);
2086 2131
2087 page = pfn_to_page(pfn); 2132 page = pfn_to_page(pfn);
2088 if (PageHighMem(page)) 2133 if (PageHighMem(page))
2089 return get_highmem_page_buffer(page, ca); 2134 return get_highmem_page_buffer(page, ca);
2090 2135
2091 if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page)) 2136 if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page))
2092 /* We have allocated the "original" page frame and we can 2137 /* We have allocated the "original" page frame and we can
2093 * use it directly to store the loaded page. 2138 * use it directly to store the loaded page.
2094 */ 2139 */
2095 return page_address(page); 2140 return page_address(page);
2096 2141
2097 /* The "original" page frame has not been allocated and we have to 2142 /* The "original" page frame has not been allocated and we have to
2098 * use a "safe" page frame to store the loaded page. 2143 * use a "safe" page frame to store the loaded page.
2099 */ 2144 */
2100 pbe = chain_alloc(ca, sizeof(struct pbe)); 2145 pbe = chain_alloc(ca, sizeof(struct pbe));
2101 if (!pbe) { 2146 if (!pbe) {
2102 swsusp_free(); 2147 swsusp_free();
2103 return ERR_PTR(-ENOMEM); 2148 return ERR_PTR(-ENOMEM);
2104 } 2149 }
2105 pbe->orig_address = page_address(page); 2150 pbe->orig_address = page_address(page);
2106 pbe->address = safe_pages_list; 2151 pbe->address = safe_pages_list;
2107 safe_pages_list = safe_pages_list->next; 2152 safe_pages_list = safe_pages_list->next;
2108 pbe->next = restore_pblist; 2153 pbe->next = restore_pblist;
2109 restore_pblist = pbe; 2154 restore_pblist = pbe;
2110 return pbe->address; 2155 return pbe->address;
2111 } 2156 }
2112 2157
2113 /** 2158 /**
2114 * snapshot_write_next - used for writing the system memory snapshot. 2159 * snapshot_write_next - used for writing the system memory snapshot.
2115 * 2160 *
2116 * On the first call to it @handle should point to a zeroed 2161 * On the first call to it @handle should point to a zeroed
2117 * snapshot_handle structure. The structure gets updated and a pointer 2162 * snapshot_handle structure. The structure gets updated and a pointer
2118 * to it should be passed to this function on each subsequent call. 2163 * to it should be passed to this function on each subsequent call.
2119 * 2164 *
2120 * On success the function returns a positive number. Then, the caller 2165 * On success the function returns a positive number. Then, the caller
2121 * is allowed to write up to the returned number of bytes to the memory 2166 * is allowed to write up to the returned number of bytes to the memory
2122 * location computed by the data_of() macro. 2167 * location computed by the data_of() macro.
2123 * 2168 *
2124 * The function returns 0 to indicate the "end of file" condition, 2169 * The function returns 0 to indicate the "end of file" condition,
2125 * and a negative number is returned on error. In such cases the 2170 * and a negative number is returned on error. In such cases the
2126 * structure pointed to by @handle is not updated and should not be used 2171 * structure pointed to by @handle is not updated and should not be used
2127 * any more. 2172 * any more.
2128 */ 2173 */
2129 2174
2130 int snapshot_write_next(struct snapshot_handle *handle) 2175 int snapshot_write_next(struct snapshot_handle *handle)
2131 { 2176 {
2132 static struct chain_allocator ca; 2177 static struct chain_allocator ca;
2133 int error = 0; 2178 int error = 0;
2134 2179
2135 /* Check if we have already loaded the entire image */ 2180 /* Check if we have already loaded the entire image */
2136 if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages) 2181 if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages)
2137 return 0; 2182 return 0;
2138 2183
2139 handle->sync_read = 1; 2184 handle->sync_read = 1;
2140 2185
2141 if (!handle->cur) { 2186 if (!handle->cur) {
2142 if (!buffer) 2187 if (!buffer)
2143 /* This makes the buffer be freed by swsusp_free() */ 2188 /* This makes the buffer be freed by swsusp_free() */
2144 buffer = get_image_page(GFP_ATOMIC, PG_ANY); 2189 buffer = get_image_page(GFP_ATOMIC, PG_ANY);
2145 2190
2146 if (!buffer) 2191 if (!buffer)
2147 return -ENOMEM; 2192 return -ENOMEM;
2148 2193
2149 handle->buffer = buffer; 2194 handle->buffer = buffer;
2150 } else if (handle->cur == 1) { 2195 } else if (handle->cur == 1) {
2151 error = load_header(buffer); 2196 error = load_header(buffer);
2152 if (error) 2197 if (error)
2153 return error; 2198 return error;
2154 2199
2155 error = memory_bm_create(&copy_bm, GFP_ATOMIC, PG_ANY); 2200 error = memory_bm_create(&copy_bm, GFP_ATOMIC, PG_ANY);
2156 if (error) 2201 if (error)
2157 return error; 2202 return error;
2158 2203
2159 } else if (handle->cur <= nr_meta_pages + 1) { 2204 } else if (handle->cur <= nr_meta_pages + 1) {
2160 error = unpack_orig_pfns(buffer, &copy_bm); 2205 error = unpack_orig_pfns(buffer, &copy_bm);
2161 if (error) 2206 if (error)
2162 return error; 2207 return error;
2163 2208
2164 if (handle->cur == nr_meta_pages + 1) { 2209 if (handle->cur == nr_meta_pages + 1) {
2165 error = prepare_image(&orig_bm, &copy_bm); 2210 error = prepare_image(&orig_bm, &copy_bm);
2166 if (error) 2211 if (error)
2167 return error; 2212 return error;
2168 2213
2169 chain_init(&ca, GFP_ATOMIC, PG_SAFE); 2214 chain_init(&ca, GFP_ATOMIC, PG_SAFE);
2170 memory_bm_position_reset(&orig_bm); 2215 memory_bm_position_reset(&orig_bm);
2171 restore_pblist = NULL; 2216 restore_pblist = NULL;
2172 handle->buffer = get_buffer(&orig_bm, &ca); 2217 handle->buffer = get_buffer(&orig_bm, &ca);
2173 handle->sync_read = 0; 2218 handle->sync_read = 0;
2174 if (IS_ERR(handle->buffer)) 2219 if (IS_ERR(handle->buffer))
2175 return PTR_ERR(handle->buffer); 2220 return PTR_ERR(handle->buffer);
2176 } 2221 }
2177 } else { 2222 } else {
2178 copy_last_highmem_page(); 2223 copy_last_highmem_page();
2179 handle->buffer = get_buffer(&orig_bm, &ca); 2224 handle->buffer = get_buffer(&orig_bm, &ca);
2180 if (IS_ERR(handle->buffer)) 2225 if (IS_ERR(handle->buffer))
2181 return PTR_ERR(handle->buffer); 2226 return PTR_ERR(handle->buffer);
2182 if (handle->buffer != buffer) 2227 if (handle->buffer != buffer)
2183 handle->sync_read = 0; 2228 handle->sync_read = 0;
2184 } 2229 }
2185 handle->cur++; 2230 handle->cur++;
2186 return PAGE_SIZE; 2231 return PAGE_SIZE;
2187 } 2232 }
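Editorial note: a minimal sketch of the calling convention just described, modeled loosely on the restore loop in kernel/power/swap.c. read_next_page() is a hypothetical stand-in for the real I/O path; snapshot_write_finalize() and snapshot_image_loaded() are the helpers defined just below:

	int load_image_sketch(struct snapshot_handle *handle)
	{
		int ret;

		for (;;) {
			ret = snapshot_write_next(handle);
			if (ret <= 0)
				break;	/* 0: image complete, < 0: error */
			/* fill the buffer the snapshot code handed back */
			ret = read_next_page(data_of(*handle), ret);
			if (ret)
				break;
		}
		if (!ret) {
			snapshot_write_finalize(handle);
			if (!snapshot_image_loaded(handle))
				ret = -ENODATA;
		}
		return ret;
	}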
2188 2233
2189 /** 2234 /**
2190 * snapshot_write_finalize - must be called after the last call to 2235 * snapshot_write_finalize - must be called after the last call to
2191 * snapshot_write_next() in case the last page in the image happens 2236 * snapshot_write_next() in case the last page in the image happens
2192 * to be a highmem page and its contents should be stored in 2237 * to be a highmem page and its contents should be stored in
2193 * highmem. Additionally, it releases the memory that will not be 2238 * highmem. Additionally, it releases the memory that will not be
2194 * used any more. 2239 * used any more.
2195 */ 2240 */
2196 2241
2197 void snapshot_write_finalize(struct snapshot_handle *handle) 2242 void snapshot_write_finalize(struct snapshot_handle *handle)
2198 { 2243 {
2199 copy_last_highmem_page(); 2244 copy_last_highmem_page();
2200 /* Free only if we have loaded the image entirely */ 2245 /* Free only if we have loaded the image entirely */
2201 if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages) { 2246 if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages) {
2202 memory_bm_free(&orig_bm, PG_UNSAFE_CLEAR); 2247 memory_bm_free(&orig_bm, PG_UNSAFE_CLEAR);
2203 free_highmem_data(); 2248 free_highmem_data();
2204 } 2249 }
2205 } 2250 }
2206 2251
2207 int snapshot_image_loaded(struct snapshot_handle *handle) 2252 int snapshot_image_loaded(struct snapshot_handle *handle)
2208 { 2253 {
2209 return !(!nr_copy_pages || !last_highmem_page_copied() || 2254 return !(!nr_copy_pages || !last_highmem_page_copied() ||
2210 handle->cur <= nr_meta_pages + nr_copy_pages); 2255 handle->cur <= nr_meta_pages + nr_copy_pages);
2211 } 2256 }
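Editorial note: the double-negated predicate above reads more easily after applying De Morgan; an equivalent form:

	/* loaded  <=>  nr_copy_pages != 0
	 *          &&  last_highmem_page_copied()
	 *          &&  handle->cur > nr_meta_pages + nr_copy_pages
	 */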
2212 2257
2213 #ifdef CONFIG_HIGHMEM 2258 #ifdef CONFIG_HIGHMEM
2214 /* Assumes that @buf is ready and points to a "safe" page */ 2259 /* Assumes that @buf is ready and points to a "safe" page */
2215 static inline void 2260 static inline void
2216 swap_two_pages_data(struct page *p1, struct page *p2, void *buf) 2261 swap_two_pages_data(struct page *p1, struct page *p2, void *buf)
2217 { 2262 {
2218 void *kaddr1, *kaddr2; 2263 void *kaddr1, *kaddr2;
2219 2264
2220 kaddr1 = kmap_atomic(p1, KM_USER0); 2265 kaddr1 = kmap_atomic(p1, KM_USER0);
2221 kaddr2 = kmap_atomic(p2, KM_USER1); 2266 kaddr2 = kmap_atomic(p2, KM_USER1);
2222 memcpy(buf, kaddr1, PAGE_SIZE); 2267 memcpy(buf, kaddr1, PAGE_SIZE);
2223 memcpy(kaddr1, kaddr2, PAGE_SIZE); 2268 memcpy(kaddr1, kaddr2, PAGE_SIZE);
2224 memcpy(kaddr2, buf, PAGE_SIZE); 2269 memcpy(kaddr2, buf, PAGE_SIZE);
2225 kunmap_atomic(kaddr1, KM_USER0); 2270 kunmap_atomic(kaddr1, KM_USER0);
2226 kunmap_atomic(kaddr2, KM_USER1); 2271 kunmap_atomic(kaddr2, KM_USER1);
2227 } 2272 }
2228 2273
2229 /** 2274 /**
2230 * restore_highmem - for each highmem page that was allocated before 2275 * restore_highmem - for each highmem page that was allocated before
2231 * the suspend and included in the suspend image, and also has been 2276 * the suspend and included in the suspend image, and also has been
2232 * allocated by the "resume" kernel, swap its current (i.e. "before 2277 * allocated by the "resume" kernel, swap its current (i.e. "before
2233 * resume") contents with the previous (i.e. "before suspend") ones. 2278 * resume") contents with the previous (i.e. "before suspend") ones.
2234 * 2279 *
2235 * If the resume eventually fails, we can call this function once 2280 * If the resume eventually fails, we can call this function once
2236 * again and restore the "before resume" highmem state. 2281 * again and restore the "before resume" highmem state.
2237 */ 2282 */
2238 2283
2239 int restore_highmem(void) 2284 int restore_highmem(void)
2240 { 2285 {
2241 struct highmem_pbe *pbe = highmem_pblist; 2286 struct highmem_pbe *pbe = highmem_pblist;
2242 void *buf; 2287 void *buf;
2243 2288
2244 if (!pbe) 2289 if (!pbe)
2245 return 0; 2290 return 0;
2246 2291
2247 buf = get_image_page(GFP_ATOMIC, PG_SAFE); 2292 buf = get_image_page(GFP_ATOMIC, PG_SAFE);
2248 if (!buf) 2293 if (!buf)
2249 return -ENOMEM; 2294 return -ENOMEM;
2250 2295
2251 while (pbe) { 2296 while (pbe) {
2252 swap_two_pages_data(pbe->copy_page, pbe->orig_page, buf); 2297 swap_two_pages_data(pbe->copy_page, pbe->orig_page, buf);
2253 pbe = pbe->next; 2298 pbe = pbe->next;
2254 } 2299 }
2255 free_image_page(buf, PG_UNSAFE_CLEAR); 2300 free_image_page(buf, PG_UNSAFE_CLEAR);
2256 return 0; 2301 return 0;
2257 } 2302 }
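Editorial note: because the swap is its own inverse, a second call to restore_highmem() undoes the first, which is what the comment above relies on. For one page pair:

	/* initial state:      orig = "before resume",  copy = "before suspend"
	 * after first call:   orig = "before suspend", copy = "before resume"
	 * after second call:  orig = "before resume",  copy = "before suspend"
	 */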
2258 #endif /* CONFIG_HIGHMEM */ 2303 #endif /* CONFIG_HIGHMEM */
2259 2304