Blame view

kernel/power/snapshot.c 60.5 KB
25761b6eb   Rafael J. Wysocki   [PATCH] swsusp: m...
1
  /*
96bc7aec2   Pavel Machek   [PATCH] swsusp: r...
2
   * linux/kernel/power/snapshot.c
25761b6eb   Rafael J. Wysocki   [PATCH] swsusp: m...
3
   *
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
4
   * This file provides system snapshot/restore functionality for swsusp.
25761b6eb   Rafael J. Wysocki   [PATCH] swsusp: m...
5
   *
a2531293d   Pavel Machek   update email address
6
   * Copyright (C) 1998-2005 Pavel Machek <pavel@ucw.cz>
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
7
   * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
25761b6eb   Rafael J. Wysocki   [PATCH] swsusp: m...
8
   *
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
9
   * This file is released under the GPLv2.
25761b6eb   Rafael J. Wysocki   [PATCH] swsusp: m...
10
11
   *
   */
f577eb30a   Rafael J. Wysocki   [PATCH] swsusp: l...
12
  #include <linux/version.h>
25761b6eb   Rafael J. Wysocki   [PATCH] swsusp: m...
13
14
15
  #include <linux/module.h>
  #include <linux/mm.h>
  #include <linux/suspend.h>
25761b6eb   Rafael J. Wysocki   [PATCH] swsusp: m...
16
  #include <linux/delay.h>
25761b6eb   Rafael J. Wysocki   [PATCH] swsusp: m...
17
  #include <linux/bitops.h>
25761b6eb   Rafael J. Wysocki   [PATCH] swsusp: m...
18
  #include <linux/spinlock.h>
25761b6eb   Rafael J. Wysocki   [PATCH] swsusp: m...
19
  #include <linux/kernel.h>
25761b6eb   Rafael J. Wysocki   [PATCH] swsusp: m...
20
21
  #include <linux/pm.h>
  #include <linux/device.h>
74dfd666d   Rafael J. Wysocki   swsusp: do not us...
22
  #include <linux/init.h>
25761b6eb   Rafael J. Wysocki   [PATCH] swsusp: m...
23
24
25
26
  #include <linux/bootmem.h>
  #include <linux/syscalls.h>
  #include <linux/console.h>
  #include <linux/highmem.h>
846705deb   Rafael J. Wysocki   Hibernate: Take o...
27
  #include <linux/list.h>
5a0e3ad6a   Tejun Heo   include cleanup: ...
28
  #include <linux/slab.h>
25761b6eb   Rafael J. Wysocki   [PATCH] swsusp: m...
29
30
31
32
33
34
  
  #include <asm/uaccess.h>
  #include <asm/mmu_context.h>
  #include <asm/pgtable.h>
  #include <asm/tlbflush.h>
  #include <asm/io.h>
25761b6eb   Rafael J. Wysocki   [PATCH] swsusp: m...
35
  #include "power.h"
74dfd666d   Rafael J. Wysocki   swsusp: do not us...
36
37
38
  static int swsusp_page_is_free(struct page *);
  static void swsusp_set_page_forbidden(struct page *);
  static void swsusp_unset_page_forbidden(struct page *);
fe419535d   Rafael J. Wysocki   PM/Hibernate: Mov...
39
  /*
ddeb64870   Rafael J. Wysocki   PM / Hibernate: A...
40
41
42
43
44
45
46
47
48
49
50
51
   * Number of bytes to reserve for memory allocations made by device drivers
   * from their ->freeze() and ->freeze_noirq() callbacks so that they don't
   * cause image creation to fail (tunable via /sys/power/reserved_size).
   */
  unsigned long reserved_size;
  
/* Initialize reserved_size to SPARE_PAGES worth of bytes, covering driver
 * allocations made from ->freeze() / ->freeze_noirq() callbacks.
 */
void __init hibernate_reserved_size_init(void)
{
	reserved_size = SPARE_PAGES * PAGE_SIZE;
}
  
  /*
fe419535d   Rafael J. Wysocki   PM/Hibernate: Mov...
52
   * Preferred image size in bytes (tunable via /sys/power/image_size).
1c1be3a94   Rafael J. Wysocki   Revert "PM / Hibe...
53
54
55
   * When it is set to N, swsusp will do its best to ensure the image
   * size will not exceed N bytes, but if that is impossible, it will
   * try to create the smallest image possible.
fe419535d   Rafael J. Wysocki   PM/Hibernate: Mov...
56
   */
ac5c24ec1   Rafael J. Wysocki   PM / Hibernate: M...
57
58
59
60
unsigned long image_size;

/* Default the preferred image size to 2/5 of total RAM. */
void __init hibernate_image_size_init(void)
{
	image_size = ((totalram_pages * 2) / 5) * PAGE_SIZE;
}
fe419535d   Rafael J. Wysocki   PM/Hibernate: Mov...
63

8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
64
65
66
67
68
  /* List of PBEs needed for restoring the pages that were allocated before
   * the suspend and included in the suspend image, but have also been
   * allocated by the "resume" kernel, so their contents cannot be written
   * directly to their "original" page frames.
   */
75534b50c   Rafael J. Wysocki   [PATCH] Change th...
69
  struct pbe *restore_pblist;
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
70
  /* Pointer to an auxiliary buffer (1 page) */
940864dda   Rafael J. Wysocki   [PATCH] swsusp: U...
71
  static void *buffer;
7088a5c00   Rafael J. Wysocki   [PATCH] swsusp: i...
72

f6143aa60   Rafael J. Wysocki   [PATCH] swsusp: R...
73
74
75
  /**
   *	@safe_needed - on resume, for storing the PBE list and the image,
   *	we can only use memory pages that do not conflict with the pages
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
76
77
   *	used before suspend.  The unsafe pages have PageNosaveFree set
   *	and we count them using unsafe_pages.
f6143aa60   Rafael J. Wysocki   [PATCH] swsusp: R...
78
   *
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
79
80
   *	Each allocated image page is marked as PageNosave and PageNosaveFree
   *	so that swsusp_free() can release it.
f6143aa60   Rafael J. Wysocki   [PATCH] swsusp: R...
81
   */
0bcd888d6   Rafael J. Wysocki   [PATCH] swsusp: I...
82
83
84
85
  #define PG_ANY		0
  #define PG_SAFE		1
  #define PG_UNSAFE_CLEAR	1
  #define PG_UNSAFE_KEEP	0
940864dda   Rafael J. Wysocki   [PATCH] swsusp: U...
86
  static unsigned int allocated_unsafe_pages;
f6143aa60   Rafael J. Wysocki   [PATCH] swsusp: R...
87

8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
88
  static void *get_image_page(gfp_t gfp_mask, int safe_needed)
f6143aa60   Rafael J. Wysocki   [PATCH] swsusp: R...
89
90
91
92
93
  {
  	void *res;
  
  	res = (void *)get_zeroed_page(gfp_mask);
  	if (safe_needed)
7be982349   Rafael J. Wysocki   swsusp: use inlin...
94
  		while (res && swsusp_page_is_free(virt_to_page(res))) {
f6143aa60   Rafael J. Wysocki   [PATCH] swsusp: R...
95
  			/* The page is unsafe, mark it for swsusp_free() */
7be982349   Rafael J. Wysocki   swsusp: use inlin...
96
  			swsusp_set_page_forbidden(virt_to_page(res));
940864dda   Rafael J. Wysocki   [PATCH] swsusp: U...
97
  			allocated_unsafe_pages++;
f6143aa60   Rafael J. Wysocki   [PATCH] swsusp: R...
98
99
100
  			res = (void *)get_zeroed_page(gfp_mask);
  		}
  	if (res) {
7be982349   Rafael J. Wysocki   swsusp: use inlin...
101
102
  		swsusp_set_page_forbidden(virt_to_page(res));
  		swsusp_set_page_free(virt_to_page(res));
f6143aa60   Rafael J. Wysocki   [PATCH] swsusp: R...
103
104
105
106
107
108
  	}
  	return res;
  }
  
/* Allocate a "safe" page (one that does not collide with image data loaded
 * before resume) and return its kernel virtual address, or 0 on failure.
 */
unsigned long get_safe_page(gfp_t gfp_mask)
{
	return (unsigned long)get_image_page(gfp_mask, PG_SAFE);
}
5b6d15de2   Rafael J. Wysocki   [PATCH] swsusp: F...
111
112
  static struct page *alloc_image_page(gfp_t gfp_mask)
  {
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
113
114
115
116
  	struct page *page;
  
  	page = alloc_page(gfp_mask);
  	if (page) {
7be982349   Rafael J. Wysocki   swsusp: use inlin...
117
118
  		swsusp_set_page_forbidden(page);
  		swsusp_set_page_free(page);
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
119
120
  	}
  	return page;
f6143aa60   Rafael J. Wysocki   [PATCH] swsusp: R...
121
122
123
124
  }
  
/**
 *	free_image_page - free page represented by @addr, allocated with
 *	get_image_page (page flags set by it must be cleared)
 *	@addr: kernel virtual address of the page to free (must be valid)
 *	@clear_nosave_free: if set, also clear the PageNosaveFree flag
 */

static inline void free_image_page(void *addr, int clear_nosave_free)
{
	struct page *page;

	/* Only addresses from the kernel direct mapping are acceptable here */
	BUG_ON(!virt_addr_valid(addr));

	page = virt_to_page(addr);

	swsusp_unset_page_forbidden(page);
	if (clear_nosave_free)
		swsusp_unset_page_free(page);

	__free_page(page);
}
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
/* struct linked_page is used to build chains of pages */

/* Payload bytes available in one linked_page after the ->next pointer. */
#define LINKED_PAGE_DATA_SIZE	(PAGE_SIZE - sizeof(void *))

struct linked_page {
	struct linked_page *next;	/* next page in the chain, or NULL */
	char data[LINKED_PAGE_DATA_SIZE];	/* storage for small objects */
} __attribute__((packed));

/*
 * Free every page in @list via free_image_page(); @clear_page_nosave is
 * forwarded to it.  The ->next link is read before its page is freed.
 */
static inline void
free_list_of_pages(struct linked_page *list, int clear_page_nosave)
{
	while (list) {
		struct linked_page *lp = list->next;

		free_image_page(list, clear_page_nosave);
		list = lp;
	}
}
  
/**
  *	struct chain_allocator is used for allocating small objects out of
  *	a linked list of pages called 'the chain'.
  *
  *	The chain grows each time when there is no room for a new object in
  *	the current page.  The allocated objects cannot be freed individually.
  *	It is only possible to free them all at once, by freeing the entire
  *	chain.
  *
  *	NOTE: The chain allocator may be inefficient if the allocated objects
  *	are not much smaller than PAGE_SIZE.
  */

struct chain_allocator {
	struct linked_page *chain;	/* the chain */
	unsigned int used_space;	/* total size of objects allocated out
					 * of the current page
					 */
	gfp_t gfp_mask;		/* mask for allocating pages */
	int safe_needed;	/* if set, only "safe" pages are allocated */
};

static void
chain_init(struct chain_allocator *ca, gfp_t gfp_mask, int safe_needed)
{
	/* used_space is set to a full page so that the very first
	 * chain_alloc() call allocates a fresh page immediately.
	 */
	ca->chain = NULL;
	ca->used_space = LINKED_PAGE_DATA_SIZE;
	ca->gfp_mask = gfp_mask;
	ca->safe_needed = safe_needed;
}
  
  static void *chain_alloc(struct chain_allocator *ca, unsigned int size)
  {
  	void *ret;
  
  	if (LINKED_PAGE_DATA_SIZE - ca->used_space < size) {
  		struct linked_page *lp;
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
198
  		lp = get_image_page(ca->gfp_mask, ca->safe_needed);
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
199
200
201
202
203
204
205
206
207
208
209
  		if (!lp)
  			return NULL;
  
  		lp->next = ca->chain;
  		ca->chain = lp;
  		ca->used_space = 0;
  	}
  	ret = ca->chain->data + ca->used_space;
  	ca->used_space += size;
  	return ret;
  }
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
210
211
212
213
214
215
216
  /**
   *	Data types related to memory bitmaps.
   *
 *	Memory bitmap is a structure consisting of many linked lists of
 *	objects.  The main list's elements are of type struct zone_bitmap
 *	and each of them corresponds to one zone.  For each zone bitmap
   *	object there is a list of objects of type struct bm_block that
0d83304c7   Akinobu Mita   pm: hibernation: ...
217
 *	represent each block of the bitmap in which information is stored.
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
   *
   *	struct memory_bitmap contains a pointer to the main list of zone
   *	bitmap objects, a struct bm_position used for browsing the bitmap,
   *	and a pointer to the list of pages used for allocating all of the
   *	zone bitmap objects and bitmap block objects.
   *
   *	NOTE: It has to be possible to lay out the bitmap in memory
   *	using only allocations of order 0.  Additionally, the bitmap is
   *	designed to work with arbitrary number of zones (this is over the
   *	top for now, but let's avoid making unnecessary assumptions ;-).
   *
   *	struct zone_bitmap contains a pointer to a list of bitmap block
   *	objects and a pointer to the bitmap block object that has been
   *	most recently used for setting bits.  Additionally, it contains the
   *	pfns that correspond to the start and end of the represented zone.
   *
   *	struct bm_block contains a pointer to the memory page in which
0d83304c7   Akinobu Mita   pm: hibernation: ...
235
236
237
   *	information is stored (in the form of a block of bitmap)
   *	It also contains the pfns that correspond to the start and end of
   *	the represented memory area.
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
238
239
240
   */
  
/* Sentinel returned by memory_bm_next_pfn() when no set bit remains. */
#define BM_END_OF_MAP	(~0UL)

/* Number of page frames covered by one bm_block's bitmap page. */
#define BM_BITS_PER_BLOCK	(PAGE_SIZE * BITS_PER_BYTE)

struct bm_block {
	struct list_head hook;	/* hook into a list of bitmap blocks */
	unsigned long start_pfn;	/* pfn represented by the first bit */
	unsigned long end_pfn;	/* pfn represented by the last bit plus 1 */
	unsigned long *data;	/* bitmap representing pages */
};

/* Number of bits (page frames) actually used in @bb's bitmap. */
static inline unsigned long bm_block_bits(struct bm_block *bb)
{
	return bb->end_pfn - bb->start_pfn;
}
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
253
254
255
/* struct bm_position is used for browsing memory bitmaps */

struct bm_position {
	struct bm_block *block;	/* bitmap block most recently accessed */
	int bit;		/* next bit to examine within @block */
};
  
/* A memory bitmap: the list of bm_blocks, the pages backing them, and a
 * cursor caching the most recently used position.
 */
struct memory_bitmap {
	struct list_head blocks;	/* list of bitmap blocks */
	struct linked_page *p_list;	/* list of pages used to store zone
					 * bitmap objects and bitmap block
					 * objects
					 */
	struct bm_position cur;	/* most recently used bit position */
};
  
/* Functions that operate on memory bitmaps */

/* Rewind the bitmap cursor to the first bit of the first block. */
static void memory_bm_position_reset(struct memory_bitmap *bm)
{
	bm->cur.block = list_entry(bm->blocks.next, struct bm_block, hook);
	bm->cur.bit = 0;
}
  
  static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free);
  
  /**
   *	create_bm_block_list - create a list of block bitmap objects
8de030732   Wu Fengguang   PM: Trivial fixes
280
   *	@pages - number of pages to track
846705deb   Rafael J. Wysocki   Hibernate: Take o...
281
282
   *	@list - list to put the allocated blocks into
   *	@ca - chain allocator to be used for allocating memory
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
283
   */
846705deb   Rafael J. Wysocki   Hibernate: Take o...
284
285
286
  static int create_bm_block_list(unsigned long pages,
  				struct list_head *list,
  				struct chain_allocator *ca)
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
287
  {
846705deb   Rafael J. Wysocki   Hibernate: Take o...
288
  	unsigned int nr_blocks = DIV_ROUND_UP(pages, BM_BITS_PER_BLOCK);
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
289
290
291
292
293
294
  
  	while (nr_blocks-- > 0) {
  		struct bm_block *bb;
  
  		bb = chain_alloc(ca, sizeof(struct bm_block));
  		if (!bb)
846705deb   Rafael J. Wysocki   Hibernate: Take o...
295
296
  			return -ENOMEM;
  		list_add(&bb->hook, list);
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
297
  	}
846705deb   Rafael J. Wysocki   Hibernate: Take o...
298
299
  
  	return 0;
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
300
  }
846705deb   Rafael J. Wysocki   Hibernate: Take o...
301
302
303
304
305
/* A [start, end) range of PFNs, linked into a sorted list. */
struct mem_extent {
	struct list_head hook;
	unsigned long start;
	unsigned long end;
};

/**
 *	free_mem_extents - free a list of memory extents
 *	@list - list of extents to empty
 */
static void free_mem_extents(struct list_head *list)
{
	struct mem_extent *ext, *aux;

	/* Safe iteration: each extent is unlinked before it is freed */
	list_for_each_entry_safe(ext, aux, list, hook) {
		list_del(&ext->hook);
		kfree(ext);
	}
}
  
/**
 *	create_mem_extents - create a list of memory extents representing
 *	                     contiguous ranges of PFNs
 *	@list - list to put the extents into
 *	@gfp_mask - mask to use for memory allocations
 *
 *	The resulting list is sorted by PFN and its extents are disjoint.
 *	On allocation failure the partially built list is freed and -ENOMEM
 *	is returned.
 */
static int create_mem_extents(struct list_head *list, gfp_t gfp_mask)
{
	struct zone *zone;

	INIT_LIST_HEAD(list);

	for_each_populated_zone(zone) {
		unsigned long zone_start, zone_end;
		struct mem_extent *ext, *cur, *aux;

		zone_start = zone->zone_start_pfn;
		zone_end = zone->zone_start_pfn + zone->spanned_pages;

		/* Find the first extent this zone might overlap or precede */
		list_for_each_entry(ext, list, hook)
			if (zone_start <= ext->end)
				break;

		if (&ext->hook == list || zone_end < ext->start) {
			/* New extent is necessary */
			struct mem_extent *new_ext;

			new_ext = kzalloc(sizeof(struct mem_extent), gfp_mask);
			if (!new_ext) {
				free_mem_extents(list);
				return -ENOMEM;
			}
			new_ext->start = zone_start;
			new_ext->end = zone_end;
			/* Insert before @ext to keep the list sorted */
			list_add_tail(&new_ext->hook, &ext->hook);
			continue;
		}

		/* Merge this zone's range of PFNs with the existing one */
		if (zone_start < ext->start)
			ext->start = zone_start;
		if (zone_end > ext->end)
			ext->end = zone_end;

		/* More merging may be possible */
		cur = ext;
		list_for_each_entry_safe_continue(cur, aux, list, hook) {
			if (zone_end < cur->start)
				break;
			if (zone_end < cur->end)
				ext->end = cur->end;
			/* @cur is now fully covered by @ext, drop it */
			list_del(&cur->hook);
			kfree(cur);
		}
	}

	return 0;
}
  
/**
  *	memory_bm_create - allocate memory for a memory bitmap
  *
  *	Builds one bm_block (with a zeroed bitmap page) per BM_BITS_PER_BLOCK
  *	page frames of every populated memory extent.  Returns 0 on success
  *	or a negative error code; on failure the partially built bitmap is
  *	released via memory_bm_free().
  */
static int
memory_bm_create(struct memory_bitmap *bm, gfp_t gfp_mask, int safe_needed)
{
	struct chain_allocator ca;
	struct list_head mem_extents;
	struct mem_extent *ext;
	int error;

	chain_init(&ca, gfp_mask, safe_needed);
	INIT_LIST_HEAD(&bm->blocks);

	error = create_mem_extents(&mem_extents, gfp_mask);
	if (error)
		return error;

	list_for_each_entry(ext, &mem_extents, hook) {
		struct bm_block *bb;
		unsigned long pfn = ext->start;
		unsigned long pages = ext->end - ext->start;

		/* Remember the current tail so only the new blocks are walked
		 * by the list_for_each_entry_continue() below.
		 */
		bb = list_entry(bm->blocks.prev, struct bm_block, hook);

		error = create_bm_block_list(pages, bm->blocks.prev, &ca);
		if (error)
			goto Error;

		/* Give each new block a bitmap page and a PFN range */
		list_for_each_entry_continue(bb, &bm->blocks, hook) {
			bb->data = get_image_page(gfp_mask, safe_needed);
			if (!bb->data) {
				error = -ENOMEM;
				goto Error;
			}

			bb->start_pfn = pfn;
			if (pages >= BM_BITS_PER_BLOCK) {
				pfn += BM_BITS_PER_BLOCK;
				pages -= BM_BITS_PER_BLOCK;
			} else {
				/* This is executed only once in the loop */
				pfn += pages;
			}
			bb->end_pfn = pfn;
		}
	}

	bm->p_list = ca.chain;
	memory_bm_position_reset(bm);
 Exit:
	free_mem_extents(&mem_extents);
	return error;

 Error:
	bm->p_list = ca.chain;
	memory_bm_free(bm, PG_UNSAFE_CLEAR);
	goto Exit;
}
  
/**
  *	memory_bm_free - free memory occupied by the memory bitmap @bm
  *	@clear_nosave_free: forwarded to free_image_page() /
  *	free_list_of_pages() to optionally clear PageNosaveFree
  */
static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free)
{
	struct bm_block *bb;

	/* Release each block's bitmap page first ... */
	list_for_each_entry(bb, &bm->blocks, hook)
		if (bb->data)
			free_image_page(bb->data, clear_nosave_free);

	/* ... then the chain pages holding the bm_block objects themselves */
	free_list_of_pages(bm->p_list, clear_nosave_free);

	INIT_LIST_HEAD(&bm->blocks);
}
  
/**
 *	memory_bm_find_bit - find the bit in the bitmap @bm that corresponds
 *	to given pfn.  The cur.block member of @bm is updated to the block
 *	containing the pfn, so nearby lookups start from a cached position.
 *	Returns 0 with *@addr/*@bit_nr set, or -EFAULT if @pfn is not
 *	covered by the bitmap.
 */
static int memory_bm_find_bit(struct memory_bitmap *bm, unsigned long pfn,
				void **addr, unsigned int *bit_nr)
{
	struct bm_block *bb;

	/*
	 * Check if the pfn corresponds to the current bitmap block and find
	 * the block where it fits if this is not the case.
	 */
	bb = bm->cur.block;
	if (pfn < bb->start_pfn)
		/* walk backwards from the cached block */
		list_for_each_entry_continue_reverse(bb, &bm->blocks, hook)
			if (pfn >= bb->start_pfn)
				break;

	if (pfn >= bb->end_pfn)
		/* walk forwards from the cached block */
		list_for_each_entry_continue(bb, &bm->blocks, hook)
			if (pfn >= bb->start_pfn && pfn < bb->end_pfn)
				break;

	/* Hitting the list head means no block covers @pfn */
	if (&bb->hook == &bm->blocks)
		return -EFAULT;

	/* The block has been found */
	bm->cur.block = bb;
	pfn -= bb->start_pfn;
	bm->cur.bit = pfn + 1;
	*bit_nr = pfn;
	*addr = bb->data;
	return 0;
}
  
  static void memory_bm_set_bit(struct memory_bitmap *bm, unsigned long pfn)
  {
  	void *addr;
  	unsigned int bit;
a82f7119f   Rafael J. Wysocki   Hibernation: Fix ...
494
  	int error;
74dfd666d   Rafael J. Wysocki   swsusp: do not us...
495

a82f7119f   Rafael J. Wysocki   Hibernation: Fix ...
496
497
  	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
  	BUG_ON(error);
74dfd666d   Rafael J. Wysocki   swsusp: do not us...
498
499
  	set_bit(bit, addr);
  }
a82f7119f   Rafael J. Wysocki   Hibernation: Fix ...
500
501
502
503
504
505
506
507
508
509
510
  static int mem_bm_set_bit_check(struct memory_bitmap *bm, unsigned long pfn)
  {
  	void *addr;
  	unsigned int bit;
  	int error;
  
  	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
  	if (!error)
  		set_bit(bit, addr);
  	return error;
  }
74dfd666d   Rafael J. Wysocki   swsusp: do not us...
511
512
513
514
  static void memory_bm_clear_bit(struct memory_bitmap *bm, unsigned long pfn)
  {
  	void *addr;
  	unsigned int bit;
a82f7119f   Rafael J. Wysocki   Hibernation: Fix ...
515
  	int error;
74dfd666d   Rafael J. Wysocki   swsusp: do not us...
516

a82f7119f   Rafael J. Wysocki   Hibernation: Fix ...
517
518
  	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
  	BUG_ON(error);
74dfd666d   Rafael J. Wysocki   swsusp: do not us...
519
520
521
522
523
524
525
  	clear_bit(bit, addr);
  }
  
  static int memory_bm_test_bit(struct memory_bitmap *bm, unsigned long pfn)
  {
  	void *addr;
  	unsigned int bit;
a82f7119f   Rafael J. Wysocki   Hibernation: Fix ...
526
  	int error;
74dfd666d   Rafael J. Wysocki   swsusp: do not us...
527

a82f7119f   Rafael J. Wysocki   Hibernation: Fix ...
528
529
  	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
  	BUG_ON(error);
74dfd666d   Rafael J. Wysocki   swsusp: do not us...
530
  	return test_bit(bit, addr);
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
531
  }
69643279a   Rafael J. Wysocki   Hibernate: Do not...
532
533
534
535
536
537
538
  static bool memory_bm_pfn_present(struct memory_bitmap *bm, unsigned long pfn)
  {
  	void *addr;
  	unsigned int bit;
  
  	return !memory_bm_find_bit(bm, pfn, &addr, &bit);
  }
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
539
540
541
542
543
544
545
546
547
548
549
/**
 *	memory_bm_next_pfn - find the pfn that corresponds to the next set bit
 *	in the bitmap @bm.  If the pfn cannot be found, BM_END_OF_MAP is
 *	returned.
 *
 *	It is required to run memory_bm_position_reset() before the first call to
 *	this function.
 */

static unsigned long memory_bm_next_pfn(struct memory_bitmap *bm)
{
	struct bm_block *bb;
	int bit;

	bb = bm->cur.block;
	do {
		/* Resume scanning at the cached cursor position */
		bit = bm->cur.bit;
		bit = find_next_bit(bb->data, bm_block_bits(bb), bit);
		if (bit < bm_block_bits(bb))
			goto Return_pfn;

		/* No set bit left in this block, move on to the next one */
		bb = list_entry(bb->hook.next, struct bm_block, hook);
		bm->cur.block = bb;
		bm->cur.bit = 0;
	} while (&bb->hook != &bm->blocks);

	/* End of the bitmap reached: rewind the cursor for the next scan */
	memory_bm_position_reset(bm);
	return BM_END_OF_MAP;
 Return_pfn:
	/* Advance the cursor past the bit being returned */
	bm->cur.bit = bit + 1;
	return bb->start_pfn + bit;
}
  
/**
 *	This structure represents a range of page frames the contents of which
 *	should not be saved during the suspend.
 */

struct nosave_region {
	struct list_head list;	/* hook into nosave_regions */
	unsigned long start_pfn;	/* first page frame of the region */
	unsigned long end_pfn;	/* one past the last page frame */
};

static LIST_HEAD(nosave_regions);
  
  /**
   *	register_nosave_region - register a range of page frames the contents
   *	of which should not be saved during the suspend (to be used in the early
   *	initialization code)
   */
  
  void __init
940d67f6b   Johannes Berg   [POWERPC] swsusp:...
590
591
  __register_nosave_region(unsigned long start_pfn, unsigned long end_pfn,
  			 int use_kmalloc)
74dfd666d   Rafael J. Wysocki   swsusp: do not us...
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
  {
  	struct nosave_region *region;
  
  	if (start_pfn >= end_pfn)
  		return;
  
  	if (!list_empty(&nosave_regions)) {
  		/* Try to extend the previous region (they should be sorted) */
  		region = list_entry(nosave_regions.prev,
  					struct nosave_region, list);
  		if (region->end_pfn == start_pfn) {
  			region->end_pfn = end_pfn;
  			goto Report;
  		}
  	}
940d67f6b   Johannes Berg   [POWERPC] swsusp:...
607
608
609
610
611
612
  	if (use_kmalloc) {
  		/* during init, this shouldn't fail */
  		region = kmalloc(sizeof(struct nosave_region), GFP_KERNEL);
  		BUG_ON(!region);
  	} else
  		/* This allocation cannot fail */
3c1596efe   Jan Beulich   mm: don't use all...
613
  		region = alloc_bootmem(sizeof(struct nosave_region));
74dfd666d   Rafael J. Wysocki   swsusp: do not us...
614
615
616
617
  	region->start_pfn = start_pfn;
  	region->end_pfn = end_pfn;
  	list_add_tail(&region->list, &nosave_regions);
   Report:
23976728a   Rafael J. Wysocki   Hibernation: Upda...
618
619
  	printk(KERN_INFO "PM: Registered nosave memory: %016lx - %016lx
  ",
74dfd666d   Rafael J. Wysocki   swsusp: do not us...
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
  		start_pfn << PAGE_SHIFT, end_pfn << PAGE_SHIFT);
  }
  
  /*
   * Set bits in this map correspond to the page frames the contents of which
   * should not be saved during the suspend.
   */
  static struct memory_bitmap *forbidden_pages_map;
  
  /* Set bits in this map correspond to free page frames. */
  static struct memory_bitmap *free_pages_map;
  
  /*
   * Each page frame allocated for creating the image is marked by setting the
   * corresponding bits in forbidden_pages_map and free_pages_map simultaneously
   */
  
  void swsusp_set_page_free(struct page *page)
  {
  	if (free_pages_map)
  		memory_bm_set_bit(free_pages_map, page_to_pfn(page));
  }
  
  static int swsusp_page_is_free(struct page *page)
  {
  	return free_pages_map ?
  		memory_bm_test_bit(free_pages_map, page_to_pfn(page)) : 0;
  }
  
  void swsusp_unset_page_free(struct page *page)
  {
  	if (free_pages_map)
  		memory_bm_clear_bit(free_pages_map, page_to_pfn(page));
  }
  
  static void swsusp_set_page_forbidden(struct page *page)
  {
  	if (forbidden_pages_map)
  		memory_bm_set_bit(forbidden_pages_map, page_to_pfn(page));
  }
  
  int swsusp_page_is_forbidden(struct page *page)
  {
  	return forbidden_pages_map ?
  		memory_bm_test_bit(forbidden_pages_map, page_to_pfn(page)) : 0;
  }
  
  static void swsusp_unset_page_forbidden(struct page *page)
  {
  	if (forbidden_pages_map)
  		memory_bm_clear_bit(forbidden_pages_map, page_to_pfn(page));
  }
  
  /**
   *	mark_nosave_pages - set bits corresponding to the page frames the
   *	contents of which should not be saved in a given bitmap.
   */
  
  static void mark_nosave_pages(struct memory_bitmap *bm)
  {
  	struct nosave_region *region;
  
  	if (list_empty(&nosave_regions))
  		return;
  
  	list_for_each_entry(region, &nosave_regions, list) {
  		unsigned long pfn;
23976728a   Rafael J. Wysocki   Hibernation: Upda...
687
688
  		pr_debug("PM: Marking nosave pages: %016lx - %016lx
  ",
74dfd666d   Rafael J. Wysocki   swsusp: do not us...
689
690
691
692
  				region->start_pfn << PAGE_SHIFT,
  				region->end_pfn << PAGE_SHIFT);
  
  		for (pfn = region->start_pfn; pfn < region->end_pfn; pfn++)
a82f7119f   Rafael J. Wysocki   Hibernation: Fix ...
693
694
695
696
697
698
699
700
701
  			if (pfn_valid(pfn)) {
  				/*
  				 * It is safe to ignore the result of
  				 * mem_bm_set_bit_check() here, since we won't
  				 * touch the PFNs for which the error is
  				 * returned anyway.
  				 */
  				mem_bm_set_bit_check(bm, pfn);
  			}
74dfd666d   Rafael J. Wysocki   swsusp: do not us...
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
  	}
  }
  
  /**
   *	create_basic_memory_bitmaps - create bitmaps needed for marking page
   *	frames that should not be saved and free page frames.  The pointers
   *	forbidden_pages_map and free_pages_map are only modified if everything
   *	goes well, because we don't want the bits to be used before both bitmaps
   *	are set up.
   */
  
  int create_basic_memory_bitmaps(void)
  {
  	struct memory_bitmap *bm1, *bm2;
  	int error = 0;
  
  	BUG_ON(forbidden_pages_map || free_pages_map);
0709db607   Rafael J. Wysocki   swsusp: use GFP_K...
719
  	bm1 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
74dfd666d   Rafael J. Wysocki   swsusp: do not us...
720
721
  	if (!bm1)
  		return -ENOMEM;
0709db607   Rafael J. Wysocki   swsusp: use GFP_K...
722
  	error = memory_bm_create(bm1, GFP_KERNEL, PG_ANY);
74dfd666d   Rafael J. Wysocki   swsusp: do not us...
723
724
  	if (error)
  		goto Free_first_object;
0709db607   Rafael J. Wysocki   swsusp: use GFP_K...
725
  	bm2 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
74dfd666d   Rafael J. Wysocki   swsusp: do not us...
726
727
  	if (!bm2)
  		goto Free_first_bitmap;
0709db607   Rafael J. Wysocki   swsusp: use GFP_K...
728
  	error = memory_bm_create(bm2, GFP_KERNEL, PG_ANY);
74dfd666d   Rafael J. Wysocki   swsusp: do not us...
729
730
731
732
733
734
  	if (error)
  		goto Free_second_object;
  
  	forbidden_pages_map = bm1;
  	free_pages_map = bm2;
  	mark_nosave_pages(forbidden_pages_map);
23976728a   Rafael J. Wysocki   Hibernation: Upda...
735
736
  	pr_debug("PM: Basic memory bitmaps created
  ");
74dfd666d   Rafael J. Wysocki   swsusp: do not us...
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
  
  	return 0;
  
   Free_second_object:
  	kfree(bm2);
   Free_first_bitmap:
   	memory_bm_free(bm1, PG_UNSAFE_CLEAR);
   Free_first_object:
  	kfree(bm1);
  	return -ENOMEM;
  }
  
  /**
   *	free_basic_memory_bitmaps - free memory bitmaps allocated by
   *	create_basic_memory_bitmaps().  The auxiliary pointers are necessary
   *	so that the bitmaps themselves are not referred to while they are being
   *	freed.
   */
  
  void free_basic_memory_bitmaps(void)
  {
  	struct memory_bitmap *bm1, *bm2;
  
  	BUG_ON(!(forbidden_pages_map && free_pages_map));
  
  	bm1 = forbidden_pages_map;
  	bm2 = free_pages_map;
  	forbidden_pages_map = NULL;
  	free_pages_map = NULL;
  	memory_bm_free(bm1, PG_UNSAFE_CLEAR);
  	kfree(bm1);
  	memory_bm_free(bm2, PG_UNSAFE_CLEAR);
  	kfree(bm2);
23976728a   Rafael J. Wysocki   Hibernation: Upda...
770
771
  	pr_debug("PM: Basic memory bitmaps freed
  ");
74dfd666d   Rafael J. Wysocki   swsusp: do not us...
772
773
774
  }
  
  /**
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
775
776
777
778
779
780
781
782
783
784
785
   *	snapshot_additional_pages - estimate the number of additional pages
   *	be needed for setting up the suspend image data structures for given
   *	zone (usually the returned value is greater than the exact number)
   */
  
  unsigned int snapshot_additional_pages(struct zone *zone)
  {
  	unsigned int res;
  
  	res = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
  	res += DIV_ROUND_UP(res * sizeof(struct bm_block), PAGE_SIZE);
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
786
  	return 2 * res;
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
787
  }
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
788
789
790
791
792
793
794
795
796
797
  #ifdef CONFIG_HIGHMEM
  /**
   *	count_free_highmem_pages - compute the total number of free highmem
   *	pages, system-wide.
   */
  
  static unsigned int count_free_highmem_pages(void)
  {
  	struct zone *zone;
  	unsigned int cnt = 0;
ee99c71c5   KOSAKI Motohiro   mm: introduce for...
798
799
  	for_each_populated_zone(zone)
  		if (is_highmem(zone))
d23ad4232   Christoph Lameter   [PATCH] Use ZVC f...
800
  			cnt += zone_page_state(zone, NR_FREE_PAGES);
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
801
802
803
804
805
806
807
808
809
810
811
  
  	return cnt;
  }
  
  /**
   *	saveable_highmem_page - Determine whether a highmem page should be
   *	included in the suspend image.
   *
   *	We should save the page if it isn't Nosave or NosaveFree, or Reserved,
   *	and it isn't a part of a free chunk of pages.
   */
846705deb   Rafael J. Wysocki   Hibernate: Take o...
812
  static struct page *saveable_highmem_page(struct zone *zone, unsigned long pfn)
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
813
814
815
816
817
818
819
  {
  	struct page *page;
  
  	if (!pfn_valid(pfn))
  		return NULL;
  
  	page = pfn_to_page(pfn);
846705deb   Rafael J. Wysocki   Hibernate: Take o...
820
821
  	if (page_zone(page) != zone)
  		return NULL;
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
822
823
  
  	BUG_ON(!PageHighMem(page));
7be982349   Rafael J. Wysocki   swsusp: use inlin...
824
825
  	if (swsusp_page_is_forbidden(page) ||  swsusp_page_is_free(page) ||
  	    PageReserved(page))
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
826
  		return NULL;
c6968e73b   Stanislaw Gruszka   PM/Hibernate: do ...
827
828
  	if (page_is_guard(page))
  		return NULL;
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
829
830
831
832
833
834
835
  	return page;
  }
  
  /**
   *	count_highmem_pages - compute the total number of saveable highmem
   *	pages.
   */
fe419535d   Rafael J. Wysocki   PM/Hibernate: Mov...
836
  static unsigned int count_highmem_pages(void)
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
837
838
839
  {
  	struct zone *zone;
  	unsigned int n = 0;
98e73dc5d   Gerald Schaefer   PM / Hibernate / ...
840
  	for_each_populated_zone(zone) {
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
841
842
843
844
845
846
847
848
  		unsigned long pfn, max_zone_pfn;
  
  		if (!is_highmem(zone))
  			continue;
  
  		mark_free_pages(zone);
  		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
  		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
846705deb   Rafael J. Wysocki   Hibernate: Take o...
849
  			if (saveable_highmem_page(zone, pfn))
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
850
851
852
853
854
  				n++;
  	}
  	return n;
  }
  #else
846705deb   Rafael J. Wysocki   Hibernate: Take o...
855
856
857
858
/* !CONFIG_HIGHMEM stub: there are no highmem pages to save. */
static inline void *saveable_highmem_page(struct zone *z, unsigned long p)
{
	return NULL;
}
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
859
  #endif /* CONFIG_HIGHMEM */
f6143aa60   Rafael J. Wysocki   [PATCH] swsusp: R...
860
  /**
8a235efad   Rafael J. Wysocki   Hibernation: Hand...
861
862
   *	saveable_page - Determine whether a non-highmem page should be included
   *	in the suspend image.
25761b6eb   Rafael J. Wysocki   [PATCH] swsusp: m...
863
   *
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
864
865
866
   *	We should save the page if it isn't Nosave, and is not in the range
   *	of pages statically defined as 'unsaveable', and it isn't a part of
   *	a free chunk of pages.
25761b6eb   Rafael J. Wysocki   [PATCH] swsusp: m...
867
   */
846705deb   Rafael J. Wysocki   Hibernate: Take o...
868
  static struct page *saveable_page(struct zone *zone, unsigned long pfn)
25761b6eb   Rafael J. Wysocki   [PATCH] swsusp: m...
869
  {
de491861e   Pavel Machek   [PATCH] swsusp: c...
870
  	struct page *page;
25761b6eb   Rafael J. Wysocki   [PATCH] swsusp: m...
871
872
  
  	if (!pfn_valid(pfn))
ae83c5eef   Rafael J. Wysocki   [PATCH] swsusp: c...
873
  		return NULL;
25761b6eb   Rafael J. Wysocki   [PATCH] swsusp: m...
874
875
  
  	page = pfn_to_page(pfn);
846705deb   Rafael J. Wysocki   Hibernate: Take o...
876
877
  	if (page_zone(page) != zone)
  		return NULL;
ae83c5eef   Rafael J. Wysocki   [PATCH] swsusp: c...
878

8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
879
  	BUG_ON(PageHighMem(page));
7be982349   Rafael J. Wysocki   swsusp: use inlin...
880
  	if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page))
ae83c5eef   Rafael J. Wysocki   [PATCH] swsusp: c...
881
  		return NULL;
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
882

8a235efad   Rafael J. Wysocki   Hibernation: Hand...
883
884
  	if (PageReserved(page)
  	    && (!kernel_page_present(page) || pfn_is_nosave(pfn)))
ae83c5eef   Rafael J. Wysocki   [PATCH] swsusp: c...
885
  		return NULL;
25761b6eb   Rafael J. Wysocki   [PATCH] swsusp: m...
886

c6968e73b   Stanislaw Gruszka   PM/Hibernate: do ...
887
888
  	if (page_is_guard(page))
  		return NULL;
ae83c5eef   Rafael J. Wysocki   [PATCH] swsusp: c...
889
  	return page;
25761b6eb   Rafael J. Wysocki   [PATCH] swsusp: m...
890
  }
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
891
892
893
894
  /**
   *	count_data_pages - compute the total number of saveable non-highmem
   *	pages.
   */
fe419535d   Rafael J. Wysocki   PM/Hibernate: Mov...
895
  static unsigned int count_data_pages(void)
25761b6eb   Rafael J. Wysocki   [PATCH] swsusp: m...
896
897
  {
  	struct zone *zone;
ae83c5eef   Rafael J. Wysocki   [PATCH] swsusp: c...
898
  	unsigned long pfn, max_zone_pfn;
dc19d507b   Pavel Machek   [PATCH] swsusp cl...
899
  	unsigned int n = 0;
25761b6eb   Rafael J. Wysocki   [PATCH] swsusp: m...
900

98e73dc5d   Gerald Schaefer   PM / Hibernate / ...
901
  	for_each_populated_zone(zone) {
25761b6eb   Rafael J. Wysocki   [PATCH] swsusp: m...
902
903
  		if (is_highmem(zone))
  			continue;
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
904

25761b6eb   Rafael J. Wysocki   [PATCH] swsusp: m...
905
  		mark_free_pages(zone);
ae83c5eef   Rafael J. Wysocki   [PATCH] swsusp: c...
906
907
  		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
  		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
846705deb   Rafael J. Wysocki   Hibernate: Take o...
908
  			if (saveable_page(zone, pfn))
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
909
  				n++;
25761b6eb   Rafael J. Wysocki   [PATCH] swsusp: m...
910
  	}
a0f496517   Rafael J. Wysocki   [PATCH] swsusp: r...
911
  	return n;
25761b6eb   Rafael J. Wysocki   [PATCH] swsusp: m...
912
  }
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
913
914
915
916
  /* This is needed, because copy_page and memcpy are not usable for copying
   * task structs.
   */
  static inline void do_copy_page(long *dst, long *src)
f623f0db8   Rafael J. Wysocki   [PATCH] swsusp: F...
917
918
  {
  	int n;
f623f0db8   Rafael J. Wysocki   [PATCH] swsusp: F...
919
920
921
  	for (n = PAGE_SIZE / sizeof(long); n; n--)
  		*dst++ = *src++;
  }
8a235efad   Rafael J. Wysocki   Hibernation: Hand...
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
  
  /**
   *	safe_copy_page - check if the page we are going to copy is marked as
   *		present in the kernel page tables (this always is the case if
   *		CONFIG_DEBUG_PAGEALLOC is not set and in that case
   *		kernel_page_present() always returns 'true').
   */
  static void safe_copy_page(void *dst, struct page *s_page)
  {
  	if (kernel_page_present(s_page)) {
  		do_copy_page(dst, page_address(s_page));
  	} else {
  		kernel_map_pages(s_page, 1, 1);
  		do_copy_page(dst, page_address(s_page));
  		kernel_map_pages(s_page, 1, 0);
  	}
  }
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
939
940
941
942
943
  #ifdef CONFIG_HIGHMEM
  static inline struct page *
  page_is_saveable(struct zone *zone, unsigned long pfn)
  {
  	return is_highmem(zone) ?
846705deb   Rafael J. Wysocki   Hibernate: Take o...
944
  		saveable_highmem_page(zone, pfn) : saveable_page(zone, pfn);
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
945
  }
8a235efad   Rafael J. Wysocki   Hibernation: Hand...
946
  static void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
947
948
949
950
951
952
953
954
955
956
  {
  	struct page *s_page, *d_page;
  	void *src, *dst;
  
  	s_page = pfn_to_page(src_pfn);
  	d_page = pfn_to_page(dst_pfn);
  	if (PageHighMem(s_page)) {
  		src = kmap_atomic(s_page, KM_USER0);
  		dst = kmap_atomic(d_page, KM_USER1);
  		do_copy_page(dst, src);
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
957
  		kunmap_atomic(dst, KM_USER1);
61ecdb801   Peter Zijlstra   mm: strictly nest...
958
  		kunmap_atomic(src, KM_USER0);
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
959
  	} else {
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
960
961
962
963
  		if (PageHighMem(d_page)) {
  			/* Page pointed to by src may contain some kernel
  			 * data modified by kmap_atomic()
  			 */
8a235efad   Rafael J. Wysocki   Hibernation: Hand...
964
  			safe_copy_page(buffer, s_page);
baa5835df   Rafael J. Wysocki   Hibernate: Replac...
965
  			dst = kmap_atomic(d_page, KM_USER0);
3ecb01df3   Jan Beulich   use clear_page()/...
966
  			copy_page(dst, buffer);
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
967
968
  			kunmap_atomic(dst, KM_USER0);
  		} else {
8a235efad   Rafael J. Wysocki   Hibernation: Hand...
969
  			safe_copy_page(page_address(d_page), s_page);
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
970
971
972
973
  		}
  	}
  }
  #else
846705deb   Rafael J. Wysocki   Hibernate: Take o...
974
  #define page_is_saveable(zone, pfn)	saveable_page(zone, pfn)
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
975

8a235efad   Rafael J. Wysocki   Hibernation: Hand...
976
  static inline void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
977
  {
8a235efad   Rafael J. Wysocki   Hibernation: Hand...
978
979
  	safe_copy_page(page_address(pfn_to_page(dst_pfn)),
  				pfn_to_page(src_pfn));
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
980
981
  }
  #endif /* CONFIG_HIGHMEM */
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
982
983
  static void
  copy_data_pages(struct memory_bitmap *copy_bm, struct memory_bitmap *orig_bm)
25761b6eb   Rafael J. Wysocki   [PATCH] swsusp: m...
984
985
  {
  	struct zone *zone;
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
986
  	unsigned long pfn;
25761b6eb   Rafael J. Wysocki   [PATCH] swsusp: m...
987

98e73dc5d   Gerald Schaefer   PM / Hibernate / ...
988
  	for_each_populated_zone(zone) {
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
989
  		unsigned long max_zone_pfn;
25761b6eb   Rafael J. Wysocki   [PATCH] swsusp: m...
990
  		mark_free_pages(zone);
ae83c5eef   Rafael J. Wysocki   [PATCH] swsusp: c...
991
  		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
992
  		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
993
  			if (page_is_saveable(zone, pfn))
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
994
  				memory_bm_set_bit(orig_bm, pfn);
25761b6eb   Rafael J. Wysocki   [PATCH] swsusp: m...
995
  	}
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
996
997
  	memory_bm_position_reset(orig_bm);
  	memory_bm_position_reset(copy_bm);
df7c48725   Fengguang Wu   trivial copy_data...
998
  	for(;;) {
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
999
  		pfn = memory_bm_next_pfn(orig_bm);
df7c48725   Fengguang Wu   trivial copy_data...
1000
1001
1002
1003
  		if (unlikely(pfn == BM_END_OF_MAP))
  			break;
  		copy_data_page(memory_bm_next_pfn(copy_bm), pfn);
  	}
25761b6eb   Rafael J. Wysocki   [PATCH] swsusp: m...
1004
  }
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
1005
1006
1007
1008
  /* Total number of image pages */
  static unsigned int nr_copy_pages;
  /* Number of pages needed for saving the original pfns of the image pages */
  static unsigned int nr_meta_pages;
64a473cb7   Rafael J. Wysocki   PM/Hibernate: Do ...
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
  /*
   * Numbers of normal and highmem page frames allocated for hibernation image
   * before suspending devices.
   */
  unsigned int alloc_normal, alloc_highmem;
  /*
   * Memory bitmap used for marking saveable pages (during hibernation) or
   * hibernation image pages (during restore)
   */
  static struct memory_bitmap orig_bm;
  /*
   * Memory bitmap used during hibernation for marking allocated page frames that
   * will contain copies of saveable pages.  During restore it is initially used
   * for marking hibernation image pages, but then the set bits from it are
   * duplicated in @orig_bm and it is released.  On highmem systems it is next
   * used for marking "safe" highmem pages, but it has to be reinitialized for
   * this purpose.
   */
  static struct memory_bitmap copy_bm;
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
1028

25761b6eb   Rafael J. Wysocki   [PATCH] swsusp: m...
1029
  /**
940864dda   Rafael J. Wysocki   [PATCH] swsusp: U...
1030
   *	swsusp_free - free pages allocated for the suspend.
cd560bb2f   Rafael J. Wysocki   [PATCH] swsusp: F...
1031
   *
940864dda   Rafael J. Wysocki   [PATCH] swsusp: U...
1032
1033
   *	Suspend pages are alocated before the atomic copy is made, so we
   *	need to release them after the resume.
25761b6eb   Rafael J. Wysocki   [PATCH] swsusp: m...
1034
1035
1036
1037
1038
   */
  
  void swsusp_free(void)
  {
  	struct zone *zone;
ae83c5eef   Rafael J. Wysocki   [PATCH] swsusp: c...
1039
  	unsigned long pfn, max_zone_pfn;
25761b6eb   Rafael J. Wysocki   [PATCH] swsusp: m...
1040

98e73dc5d   Gerald Schaefer   PM / Hibernate / ...
1041
  	for_each_populated_zone(zone) {
ae83c5eef   Rafael J. Wysocki   [PATCH] swsusp: c...
1042
1043
1044
1045
  		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
  		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
  			if (pfn_valid(pfn)) {
  				struct page *page = pfn_to_page(pfn);
7be982349   Rafael J. Wysocki   swsusp: use inlin...
1046
1047
1048
1049
  				if (swsusp_page_is_forbidden(page) &&
  				    swsusp_page_is_free(page)) {
  					swsusp_unset_page_forbidden(page);
  					swsusp_unset_page_free(page);
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
1050
  					__free_page(page);
25761b6eb   Rafael J. Wysocki   [PATCH] swsusp: m...
1051
1052
1053
  				}
  			}
  	}
f577eb30a   Rafael J. Wysocki   [PATCH] swsusp: l...
1054
1055
  	nr_copy_pages = 0;
  	nr_meta_pages = 0;
75534b50c   Rafael J. Wysocki   [PATCH] Change th...
1056
  	restore_pblist = NULL;
6e1819d61   Rafael J. Wysocki   [PATCH] swsusp: u...
1057
  	buffer = NULL;
64a473cb7   Rafael J. Wysocki   PM/Hibernate: Do ...
1058
1059
  	alloc_normal = 0;
  	alloc_highmem = 0;
25761b6eb   Rafael J. Wysocki   [PATCH] swsusp: m...
1060
  }
4bb334353   Rafael J. Wysocki   PM/Hibernate: Rew...
1061
1062
1063
  /* Helper functions used for the shrinking of memory. */
  
  #define GFP_IMAGE	(GFP_KERNEL | __GFP_NOWARN)
fe419535d   Rafael J. Wysocki   PM/Hibernate: Mov...
1064
  /**
4bb334353   Rafael J. Wysocki   PM/Hibernate: Rew...
1065
1066
1067
   * preallocate_image_pages - Allocate a number of pages for hibernation image
   * @nr_pages: Number of page frames to allocate.
   * @mask: GFP flags to use for the allocation.
fe419535d   Rafael J. Wysocki   PM/Hibernate: Mov...
1068
   *
4bb334353   Rafael J. Wysocki   PM/Hibernate: Rew...
1069
1070
1071
1072
1073
1074
1075
   * Return value: Number of page frames actually allocated
   */
  static unsigned long preallocate_image_pages(unsigned long nr_pages, gfp_t mask)
  {
  	unsigned long nr_alloc = 0;
  
  	while (nr_pages > 0) {
64a473cb7   Rafael J. Wysocki   PM/Hibernate: Do ...
1076
1077
1078
1079
  		struct page *page;
  
  		page = alloc_image_page(mask);
  		if (!page)
4bb334353   Rafael J. Wysocki   PM/Hibernate: Rew...
1080
  			break;
64a473cb7   Rafael J. Wysocki   PM/Hibernate: Do ...
1081
1082
1083
1084
1085
  		memory_bm_set_bit(&copy_bm, page_to_pfn(page));
  		if (PageHighMem(page))
  			alloc_highmem++;
  		else
  			alloc_normal++;
4bb334353   Rafael J. Wysocki   PM/Hibernate: Rew...
1086
1087
1088
1089
1090
1091
  		nr_pages--;
  		nr_alloc++;
  	}
  
  	return nr_alloc;
  }
6715045dd   Rafael J. Wysocki   PM / Hibernate: A...
1092
1093
  static unsigned long preallocate_image_memory(unsigned long nr_pages,
  					      unsigned long avail_normal)
4bb334353   Rafael J. Wysocki   PM/Hibernate: Rew...
1094
  {
6715045dd   Rafael J. Wysocki   PM / Hibernate: A...
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
  	unsigned long alloc;
  
  	if (avail_normal <= alloc_normal)
  		return 0;
  
  	alloc = avail_normal - alloc_normal;
  	if (nr_pages < alloc)
  		alloc = nr_pages;
  
  	return preallocate_image_pages(alloc, GFP_IMAGE);
4bb334353   Rafael J. Wysocki   PM/Hibernate: Rew...
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
  }
  
  #ifdef CONFIG_HIGHMEM
  static unsigned long preallocate_image_highmem(unsigned long nr_pages)
  {
  	return preallocate_image_pages(nr_pages, GFP_IMAGE | __GFP_HIGHMEM);
  }
  
  /**
   *  __fraction - Compute (an approximation of) x * (multiplier / base)
fe419535d   Rafael J. Wysocki   PM/Hibernate: Mov...
1115
   */
4bb334353   Rafael J. Wysocki   PM/Hibernate: Rew...
1116
1117
1118
1119
1120
1121
  static unsigned long __fraction(u64 x, u64 multiplier, u64 base)
  {
  	x *= multiplier;
  	do_div(x, base);
  	return (unsigned long)x;
  }
fe419535d   Rafael J. Wysocki   PM/Hibernate: Mov...
1122

4bb334353   Rafael J. Wysocki   PM/Hibernate: Rew...
1123
1124
1125
  static unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
  						unsigned long highmem,
  						unsigned long total)
fe419535d   Rafael J. Wysocki   PM/Hibernate: Mov...
1126
  {
4bb334353   Rafael J. Wysocki   PM/Hibernate: Rew...
1127
1128
1129
  	unsigned long alloc = __fraction(nr_pages, highmem, total);
  
  	return preallocate_image_pages(alloc, GFP_IMAGE | __GFP_HIGHMEM);
fe419535d   Rafael J. Wysocki   PM/Hibernate: Mov...
1130
  }
4bb334353   Rafael J. Wysocki   PM/Hibernate: Rew...
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143
#else /* CONFIG_HIGHMEM */
/* !CONFIG_HIGHMEM stub: no highmem page frames can be preallocated. */
static inline unsigned long preallocate_image_highmem(unsigned long nr_pages)
{
	return 0;
}

/* !CONFIG_HIGHMEM stub: no highmem fraction to preallocate from. */
static inline unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
						unsigned long highmem,
						unsigned long total)
{
	return 0;
}
#endif /* CONFIG_HIGHMEM */
fe419535d   Rafael J. Wysocki   PM/Hibernate: Mov...
1144

4bb334353   Rafael J. Wysocki   PM/Hibernate: Rew...
1145
  /**
64a473cb7   Rafael J. Wysocki   PM/Hibernate: Do ...
1146
1147
1148
1149
   * free_unnecessary_pages - Release preallocated pages not needed for the image
   */
  static void free_unnecessary_pages(void)
  {
6715045dd   Rafael J. Wysocki   PM / Hibernate: A...
1150
  	unsigned long save, to_free_normal, to_free_highmem;
64a473cb7   Rafael J. Wysocki   PM/Hibernate: Do ...
1151

6715045dd   Rafael J. Wysocki   PM / Hibernate: A...
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
  	save = count_data_pages();
  	if (alloc_normal >= save) {
  		to_free_normal = alloc_normal - save;
  		save = 0;
  	} else {
  		to_free_normal = 0;
  		save -= alloc_normal;
  	}
  	save += count_highmem_pages();
  	if (alloc_highmem >= save) {
  		to_free_highmem = alloc_highmem - save;
64a473cb7   Rafael J. Wysocki   PM/Hibernate: Do ...
1163
1164
  	} else {
  		to_free_highmem = 0;
4d4cf23cd   Rafael J. Wysocki   PM / Hibernate: F...
1165
1166
1167
1168
1169
  		save -= alloc_highmem;
  		if (to_free_normal > save)
  			to_free_normal -= save;
  		else
  			to_free_normal = 0;
64a473cb7   Rafael J. Wysocki   PM/Hibernate: Do ...
1170
1171
1172
  	}
  
  	memory_bm_position_reset(&copy_bm);
a9c9b4429   Rafael J. Wysocki   PM / Hibernate: F...
1173
  	while (to_free_normal > 0 || to_free_highmem > 0) {
64a473cb7   Rafael J. Wysocki   PM/Hibernate: Do ...
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195
  		unsigned long pfn = memory_bm_next_pfn(&copy_bm);
  		struct page *page = pfn_to_page(pfn);
  
  		if (PageHighMem(page)) {
  			if (!to_free_highmem)
  				continue;
  			to_free_highmem--;
  			alloc_highmem--;
  		} else {
  			if (!to_free_normal)
  				continue;
  			to_free_normal--;
  			alloc_normal--;
  		}
  		memory_bm_clear_bit(&copy_bm, pfn);
  		swsusp_unset_page_forbidden(page);
  		swsusp_unset_page_free(page);
  		__free_page(page);
  	}
  }
  
  /**
ef4aede3f   Rafael J. Wysocki   PM/Hibernate: Do ...
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
   * minimum_image_size - Estimate the minimum acceptable size of an image
   * @saveable: Number of saveable pages in the system.
   *
   * We want to avoid attempting to free too much memory too hard, so estimate the
   * minimum acceptable size of a hibernation image to use as the lower limit for
   * preallocating memory.
   *
   * We assume that the minimum image size should be proportional to
   *
   * [number of saveable pages] - [number of pages that can be freed in theory]
   *
   * where the second term is the sum of (1) reclaimable slab pages, (2) active
   * and (3) inactive anonymouns pages, (4) active and (5) inactive file pages,
   * minus mapped file pages.
   */
  static unsigned long minimum_image_size(unsigned long saveable)
  {
  	unsigned long size;
  
  	size = global_page_state(NR_SLAB_RECLAIMABLE)
  		+ global_page_state(NR_ACTIVE_ANON)
  		+ global_page_state(NR_INACTIVE_ANON)
  		+ global_page_state(NR_ACTIVE_FILE)
  		+ global_page_state(NR_INACTIVE_FILE)
  		- global_page_state(NR_FILE_MAPPED);
  
  	return saveable <= size ? 0 : saveable - size;
  }
  
  /**
64a473cb7   Rafael J. Wysocki   PM/Hibernate: Do ...
1226
   * hibernate_preallocate_memory - Preallocate memory for hibernation image
4bb334353   Rafael J. Wysocki   PM/Hibernate: Rew...
1227
1228
1229
1230
1231
   *
   * To create a hibernation image it is necessary to make a copy of every page
   * frame in use.  We also need a number of page frames to be free during
   * hibernation for allocations made while saving the image and for device
   * drivers, in case they need to allocate memory from their hibernation
ddeb64870   Rafael J. Wysocki   PM / Hibernate: A...
1232
1233
1234
1235
   * callbacks (these two numbers are given by PAGES_FOR_IO (which is a rough
   * estimate) and reserverd_size divided by PAGE_SIZE (which is tunable through
   * /sys/power/reserved_size, respectively).  To make this happen, we compute the
   * total number of available page frames and allocate at least
4bb334353   Rafael J. Wysocki   PM/Hibernate: Rew...
1236
   *
ddeb64870   Rafael J. Wysocki   PM / Hibernate: A...
1237
1238
   * ([page frames total] + PAGES_FOR_IO + [metadata pages]) / 2
   *  + 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE)
4bb334353   Rafael J. Wysocki   PM/Hibernate: Rew...
1239
1240
1241
1242
1243
   *
   * of them, which corresponds to the maximum size of a hibernation image.
   *
   * If image_size is set below the number following from the above formula,
   * the preallocation of memory is continued until the total number of saveable
ef4aede3f   Rafael J. Wysocki   PM/Hibernate: Do ...
1244
1245
   * pages in the system is below the requested image size or the minimum
   * acceptable image size returned by minimum_image_size(), whichever is greater.
4bb334353   Rafael J. Wysocki   PM/Hibernate: Rew...
1246
   */
64a473cb7   Rafael J. Wysocki   PM/Hibernate: Do ...
1247
  int hibernate_preallocate_memory(void)
fe419535d   Rafael J. Wysocki   PM/Hibernate: Mov...
1248
  {
fe419535d   Rafael J. Wysocki   PM/Hibernate: Mov...
1249
  	struct zone *zone;
4bb334353   Rafael J. Wysocki   PM/Hibernate: Rew...
1250
  	unsigned long saveable, size, max_size, count, highmem, pages = 0;
6715045dd   Rafael J. Wysocki   PM / Hibernate: A...
1251
  	unsigned long alloc, save_highmem, pages_highmem, avail_normal;
fe419535d   Rafael J. Wysocki   PM/Hibernate: Mov...
1252
  	struct timeval start, stop;
64a473cb7   Rafael J. Wysocki   PM/Hibernate: Do ...
1253
  	int error;
fe419535d   Rafael J. Wysocki   PM/Hibernate: Mov...
1254

64a473cb7   Rafael J. Wysocki   PM/Hibernate: Do ...
1255
  	printk(KERN_INFO "PM: Preallocating image memory... ");
fe419535d   Rafael J. Wysocki   PM/Hibernate: Mov...
1256
  	do_gettimeofday(&start);
fe419535d   Rafael J. Wysocki   PM/Hibernate: Mov...
1257

64a473cb7   Rafael J. Wysocki   PM/Hibernate: Do ...
1258
1259
1260
1261
1262
1263
1264
1265
1266
1267
  	error = memory_bm_create(&orig_bm, GFP_IMAGE, PG_ANY);
  	if (error)
  		goto err_out;
  
  	error = memory_bm_create(&copy_bm, GFP_IMAGE, PG_ANY);
  	if (error)
  		goto err_out;
  
  	alloc_normal = 0;
  	alloc_highmem = 0;
4bb334353   Rafael J. Wysocki   PM/Hibernate: Rew...
1268
  	/* Count the number of saveable data pages. */
64a473cb7   Rafael J. Wysocki   PM/Hibernate: Do ...
1269
  	save_highmem = count_highmem_pages();
4bb334353   Rafael J. Wysocki   PM/Hibernate: Rew...
1270
  	saveable = count_data_pages();
fe419535d   Rafael J. Wysocki   PM/Hibernate: Mov...
1271

4bb334353   Rafael J. Wysocki   PM/Hibernate: Rew...
1272
1273
1274
1275
1276
  	/*
  	 * Compute the total number of page frames we can use (count) and the
  	 * number of pages needed for image metadata (size).
  	 */
  	count = saveable;
64a473cb7   Rafael J. Wysocki   PM/Hibernate: Do ...
1277
1278
  	saveable += save_highmem;
  	highmem = save_highmem;
4bb334353   Rafael J. Wysocki   PM/Hibernate: Rew...
1279
1280
1281
1282
1283
1284
1285
1286
  	size = 0;
  	for_each_populated_zone(zone) {
  		size += snapshot_additional_pages(zone);
  		if (is_highmem(zone))
  			highmem += zone_page_state(zone, NR_FREE_PAGES);
  		else
  			count += zone_page_state(zone, NR_FREE_PAGES);
  	}
6715045dd   Rafael J. Wysocki   PM / Hibernate: A...
1287
  	avail_normal = count;
4bb334353   Rafael J. Wysocki   PM/Hibernate: Rew...
1288
1289
  	count += highmem;
  	count -= totalreserve_pages;
85055dd80   Martin Schwidefsky   PM / Hibernate: I...
1290
1291
  	/* Add number of pages required for page keys (s390 only). */
  	size += page_key_additional_pages(saveable);
4bb334353   Rafael J. Wysocki   PM/Hibernate: Rew...
1292
  	/* Compute the maximum number of saveable pages to leave in memory. */
ddeb64870   Rafael J. Wysocki   PM / Hibernate: A...
1293
1294
  	max_size = (count - (size + PAGES_FOR_IO)) / 2
  			- 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE);
266f1a25e   Rafael J. Wysocki   PM / Hibernate: I...
1295
  	/* Compute the desired number of image pages specified by image_size. */
4bb334353   Rafael J. Wysocki   PM/Hibernate: Rew...
1296
1297
1298
1299
  	size = DIV_ROUND_UP(image_size, PAGE_SIZE);
  	if (size > max_size)
  		size = max_size;
  	/*
266f1a25e   Rafael J. Wysocki   PM / Hibernate: I...
1300
1301
1302
  	 * If the desired number of image pages is at least as large as the
  	 * current number of saveable pages in memory, allocate page frames for
  	 * the image and we're done.
4bb334353   Rafael J. Wysocki   PM/Hibernate: Rew...
1303
  	 */
64a473cb7   Rafael J. Wysocki   PM/Hibernate: Do ...
1304
1305
  	if (size >= saveable) {
  		pages = preallocate_image_highmem(save_highmem);
6715045dd   Rafael J. Wysocki   PM / Hibernate: A...
1306
  		pages += preallocate_image_memory(saveable - pages, avail_normal);
4bb334353   Rafael J. Wysocki   PM/Hibernate: Rew...
1307
  		goto out;
64a473cb7   Rafael J. Wysocki   PM/Hibernate: Do ...
1308
  	}
4bb334353   Rafael J. Wysocki   PM/Hibernate: Rew...
1309

ef4aede3f   Rafael J. Wysocki   PM/Hibernate: Do ...
1310
1311
  	/* Estimate the minimum size of the image. */
  	pages = minimum_image_size(saveable);
6715045dd   Rafael J. Wysocki   PM / Hibernate: A...
1312
1313
1314
1315
1316
1317
1318
1319
1320
  	/*
  	 * To avoid excessive pressure on the normal zone, leave room in it to
  	 * accommodate an image of the minimum size (unless it's already too
  	 * small, in which case don't preallocate pages from it at all).
  	 */
  	if (avail_normal > pages)
  		avail_normal -= pages;
  	else
  		avail_normal = 0;
ef4aede3f   Rafael J. Wysocki   PM/Hibernate: Do ...
1321
1322
  	if (size < pages)
  		size = min_t(unsigned long, pages, max_size);
4bb334353   Rafael J. Wysocki   PM/Hibernate: Rew...
1323
1324
1325
1326
1327
1328
1329
1330
1331
1332
1333
1334
  	/*
  	 * Let the memory management subsystem know that we're going to need a
  	 * large number of page frames to allocate and make it free some memory.
  	 * NOTE: If this is not done, performance will be hurt badly in some
  	 * test cases.
  	 */
  	shrink_all_memory(saveable - size);
  
  	/*
  	 * The number of saveable pages in memory was too high, so apply some
  	 * pressure to decrease it.  First, make room for the largest possible
  	 * image and fail if that doesn't work.  Next, try to decrease the size
ef4aede3f   Rafael J. Wysocki   PM/Hibernate: Do ...
1335
1336
  	 * of the image as much as indicated by 'size' using allocations from
  	 * highmem and non-highmem zones separately.
4bb334353   Rafael J. Wysocki   PM/Hibernate: Rew...
1337
1338
1339
  	 */
  	pages_highmem = preallocate_image_highmem(highmem / 2);
  	alloc = (count - max_size) - pages_highmem;
6715045dd   Rafael J. Wysocki   PM / Hibernate: A...
1340
1341
1342
1343
1344
1345
1346
1347
1348
1349
1350
1351
1352
1353
1354
1355
1356
1357
1358
1359
1360
1361
1362
1363
1364
1365
1366
1367
  	pages = preallocate_image_memory(alloc, avail_normal);
  	if (pages < alloc) {
  		/* We have exhausted non-highmem pages, try highmem. */
  		alloc -= pages;
  		pages += pages_highmem;
  		pages_highmem = preallocate_image_highmem(alloc);
  		if (pages_highmem < alloc)
  			goto err_out;
  		pages += pages_highmem;
  		/*
  		 * size is the desired number of saveable pages to leave in
  		 * memory, so try to preallocate (all memory - size) pages.
  		 */
  		alloc = (count - pages) - size;
  		pages += preallocate_image_highmem(alloc);
  	} else {
  		/*
  		 * There are approximately max_size saveable pages at this point
  		 * and we want to reduce this number down to size.
  		 */
  		alloc = max_size - size;
  		size = preallocate_highmem_fraction(alloc, highmem, count);
  		pages_highmem += size;
  		alloc -= size;
  		size = preallocate_image_memory(alloc, avail_normal);
  		pages_highmem += preallocate_image_highmem(alloc - size);
  		pages += pages_highmem + size;
  	}
4bb334353   Rafael J. Wysocki   PM/Hibernate: Rew...
1368

64a473cb7   Rafael J. Wysocki   PM/Hibernate: Do ...
1369
1370
1371
1372
1373
1374
  	/*
  	 * We only need as many page frames for the image as there are saveable
  	 * pages in memory, but we have allocated more.  Release the excessive
  	 * ones now.
  	 */
  	free_unnecessary_pages();
4bb334353   Rafael J. Wysocki   PM/Hibernate: Rew...
1375
1376
  
   out:
fe419535d   Rafael J. Wysocki   PM/Hibernate: Mov...
1377
  	do_gettimeofday(&stop);
64a473cb7   Rafael J. Wysocki   PM/Hibernate: Do ...
1378
1379
1380
  	printk(KERN_CONT "done (allocated %lu pages)
  ", pages);
  	swsusp_show_speed(&start, &stop, pages, "Allocated");
fe419535d   Rafael J. Wysocki   PM/Hibernate: Mov...
1381
1382
  
  	return 0;
64a473cb7   Rafael J. Wysocki   PM/Hibernate: Do ...
1383
1384
1385
1386
1387
1388
  
   err_out:
  	printk(KERN_CONT "
  ");
  	swsusp_free();
  	return -ENOMEM;
fe419535d   Rafael J. Wysocki   PM/Hibernate: Mov...
1389
  }
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
1390
1391
1392
1393
1394
1395
1396
1397
  #ifdef CONFIG_HIGHMEM
  /**
    *	count_pages_for_highmem - compute the number of non-highmem pages
    *	that will be necessary for creating copies of highmem pages.
    */
  
  static unsigned int count_pages_for_highmem(unsigned int nr_highmem)
  {
64a473cb7   Rafael J. Wysocki   PM/Hibernate: Do ...
1398
  	unsigned int free_highmem = count_free_highmem_pages() + alloc_highmem;
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
1399
1400
1401
1402
1403
1404
1405
1406
1407
1408
1409
1410
  
  	if (free_highmem >= nr_highmem)
  		nr_highmem = 0;
  	else
  		nr_highmem -= free_highmem;
  
  	return nr_highmem;
  }
  #else
  static unsigned int
  count_pages_for_highmem(unsigned int nr_highmem) { return 0; }
  #endif /* CONFIG_HIGHMEM */
25761b6eb   Rafael J. Wysocki   [PATCH] swsusp: m...
1411
1412
  
  /**
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
1413
1414
   *	enough_free_mem - Make sure we have enough free memory for the
   *	snapshot image.
25761b6eb   Rafael J. Wysocki   [PATCH] swsusp: m...
1415
   */
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
1416
  static int enough_free_mem(unsigned int nr_pages, unsigned int nr_highmem)
25761b6eb   Rafael J. Wysocki   [PATCH] swsusp: m...
1417
  {
e5e2fa785   Rafael J. Wysocki   [PATCH] swsusp: f...
1418
  	struct zone *zone;
64a473cb7   Rafael J. Wysocki   PM/Hibernate: Do ...
1419
  	unsigned int free = alloc_normal;
e5e2fa785   Rafael J. Wysocki   [PATCH] swsusp: f...
1420

98e73dc5d   Gerald Schaefer   PM / Hibernate / ...
1421
  	for_each_populated_zone(zone)
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
1422
  		if (!is_highmem(zone))
d23ad4232   Christoph Lameter   [PATCH] Use ZVC f...
1423
  			free += zone_page_state(zone, NR_FREE_PAGES);
940864dda   Rafael J. Wysocki   [PATCH] swsusp: U...
1424

8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
1425
  	nr_pages += count_pages_for_highmem(nr_highmem);
64a473cb7   Rafael J. Wysocki   PM/Hibernate: Do ...
1426
1427
1428
  	pr_debug("PM: Normal pages needed: %u + %u, available pages: %u
  ",
  		nr_pages, PAGES_FOR_IO, free);
940864dda   Rafael J. Wysocki   [PATCH] swsusp: U...
1429

64a473cb7   Rafael J. Wysocki   PM/Hibernate: Do ...
1430
  	return free > nr_pages + PAGES_FOR_IO;
25761b6eb   Rafael J. Wysocki   [PATCH] swsusp: m...
1431
  }
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
1432
1433
1434
1435
1436
1437
1438
1439
1440
1441
1442
1443
1444
1445
1446
1447
1448
1449
1450
#ifdef CONFIG_HIGHMEM
/**
 *	get_highmem_buffer - if there are some highmem pages in the suspend
 *	image, we may need the buffer to copy them and/or load their data.
 *
 *	Allocates the file-scope @buffer page; @safe_needed selects PG_SAFE
 *	vs PG_ANY semantics of get_image_page().  Returns 0 on success or
 *	-ENOMEM if the page could not be allocated.
 */

static inline int get_highmem_buffer(int safe_needed)
{
	buffer = get_image_page(GFP_ATOMIC | __GFP_COLD, safe_needed);
	return buffer ? 0 : -ENOMEM;
}

/**
 *	alloc_highmem_pages - allocate some highmem pages for the image.
 *	Try to allocate as many pages as needed, but if the number of free
 *	highmem pages is lesser than that, allocate them all.
 *
 *	Returns the number of requested pages that could NOT be satisfied
 *	from highmem; the caller covers the remainder with normal pages.
 */

static inline unsigned int
alloc_highmem_pages(struct memory_bitmap *bm, unsigned int nr_highmem)
{
	/* Never request more highmem pages than are currently free. */
	unsigned int to_alloc = count_free_highmem_pages();

	if (to_alloc > nr_highmem)
		to_alloc = nr_highmem;

	nr_highmem -= to_alloc;
	while (to_alloc-- > 0) {
		struct page *page;

		/*
		 * NOTE(review): the result is not checked for NULL before
		 * page_to_pfn(); this appears to rely on the
		 * count_free_highmem_pages() bound above guaranteeing
		 * success — confirm before reusing this pattern.
		 */
		page = alloc_image_page(__GFP_HIGHMEM);
		memory_bm_set_bit(bm, page_to_pfn(page));
	}
	return nr_highmem;
}
#else
static inline int get_highmem_buffer(int safe_needed) { return 0; }

static inline unsigned int
alloc_highmem_pages(struct memory_bitmap *bm, unsigned int n) { return 0; }
#endif /* CONFIG_HIGHMEM */
  
/**
 *	swsusp_alloc - allocate memory for the suspend image
 *
 *	We first try to allocate as many highmem pages as there are
 *	saveable highmem pages in the system.  If that fails, we allocate
 *	non-highmem pages for the copies of the remaining highmem ones.
 *
 *	In this approach it is likely that the copies of highmem pages will
 *	also be located in the high memory, because of the way in which
 *	copy_data_pages() works.
 *
 *	Page frames preallocated earlier (tracked in alloc_highmem and
 *	alloc_normal) are credited against the request before any new
 *	allocation is attempted.  Returns 0 on success or -ENOMEM after
 *	releasing everything via swsusp_free().  (@orig_bm is not used in
 *	this function.)
 */

static int
swsusp_alloc(struct memory_bitmap *orig_bm, struct memory_bitmap *copy_bm,
		unsigned int nr_pages, unsigned int nr_highmem)
{
	if (nr_highmem > 0) {
		/* Buffer page used for copying highmem pages around. */
		if (get_highmem_buffer(PG_ANY))
			goto err_out;
		/* Only allocate what preallocation did not already cover;
		 * pages alloc_highmem_pages() could not get from highmem are
		 * added to the normal-page request below.
		 */
		if (nr_highmem > alloc_highmem) {
			nr_highmem -= alloc_highmem;
			nr_pages += alloc_highmem_pages(copy_bm, nr_highmem);
		}
	}
	if (nr_pages > alloc_normal) {
		nr_pages -= alloc_normal;
		while (nr_pages-- > 0) {
			struct page *page;

			page = alloc_image_page(GFP_ATOMIC | __GFP_COLD);
			if (!page)
				goto err_out;
			memory_bm_set_bit(copy_bm, page_to_pfn(page));
		}
	}

	return 0;

 err_out:
	swsusp_free();
	return -ENOMEM;
}
2e32a43ef   Rafael J. Wysocki   [PATCH] swsusp: g...
1515
/**
 *	swsusp_save - create the hibernation image by copying every saveable
 *	page (lowmem and highmem) into freshly allocated page frames.
 *	Returns 0 on success or -ENOMEM if memory is insufficient.
 */
asmlinkage int swsusp_save(void)
{
	unsigned int nr_pages, nr_highmem;

	printk(KERN_INFO "PM: Creating hibernation image:\n");

	drain_local_pages(NULL);
	nr_pages = count_data_pages();
	nr_highmem = count_highmem_pages();
	printk(KERN_INFO "PM: Need to copy %u pages\n", nr_pages + nr_highmem);

	if (!enough_free_mem(nr_pages, nr_highmem)) {
		printk(KERN_ERR "PM: Not enough free memory\n");
		return -ENOMEM;
	}

	if (swsusp_alloc(&orig_bm, &copy_bm, nr_pages, nr_highmem)) {
		printk(KERN_ERR "PM: Memory allocation failed\n");
		return -ENOMEM;
	}

	/* During allocating of suspend pagedir, new cold pages may appear.
	 * Kill them.
	 */
	drain_local_pages(NULL);
	copy_data_pages(&copy_bm, &orig_bm);

	/*
	 * End of critical section. From now on, we can write to memory,
	 * but we should not touch disk. This specially means we must _not_
	 * touch swap space! Except we must write out our image of course.
	 */

	nr_pages += nr_highmem;
	nr_copy_pages = nr_pages;
	/* One PFN (a long) is stored per copied page in the metadata. */
	nr_meta_pages = DIV_ROUND_UP(nr_pages * sizeof(long), PAGE_SIZE);

	printk(KERN_INFO "PM: Hibernation image created (%d pages copied)\n",
		nr_pages);

	return 0;
}
f577eb30a   Rafael J. Wysocki   [PATCH] swsusp: l...
1560

d307c4a8e   Rafael J. Wysocki   Hibernation: Arbi...
1561
1562
#ifndef CONFIG_ARCH_HIBERNATION_HEADER
/* Generic image-header helpers, used unless the architecture provides its
 * own hibernation header handling.
 */

static int init_header_complete(struct swsusp_info *info)
{
	/* Record the running kernel's identity in the image header. */
	memcpy(&info->uts, init_utsname(), sizeof(struct new_utsname));
	info->version_code = LINUX_VERSION_CODE;
	return 0;
}

/* Return NULL if the image was created by a compatible kernel; otherwise a
 * short string naming the first mismatching property (used in the
 * "PM: Image mismatch" message).
 */
static char *check_image_kernel(struct swsusp_info *info)
{
	if (info->version_code != LINUX_VERSION_CODE)
		return "kernel version";
	if (strcmp(info->uts.sysname,init_utsname()->sysname))
		return "system type";
	if (strcmp(info->uts.release,init_utsname()->release))
		return "kernel release";
	if (strcmp(info->uts.version,init_utsname()->version))
		return "version";
	if (strcmp(info->uts.machine,init_utsname()->machine))
		return "machine";
	return NULL;
}
#endif /* CONFIG_ARCH_HIBERNATION_HEADER */
af508b34d   Rafael J. Wysocki   Hibernation: Intr...
1584
1585
1586
1587
  unsigned long snapshot_get_image_size(void)
  {
  	return nr_copy_pages + nr_meta_pages + 1;
  }
d307c4a8e   Rafael J. Wysocki   Hibernation: Arbi...
1588
1589
1590
/* Fill in the image header: page counts, total byte size and (via
 * init_header_complete()) the kernel identification data.
 */
static int init_header(struct swsusp_info *info)
{
	memset(info, 0, sizeof(struct swsusp_info));
	info->num_physpages = num_physpages;
	info->image_pages = nr_copy_pages;
	info->pages = snapshot_get_image_size();
	/* Image size in bytes: total page count shifted by PAGE_SHIFT. */
	info->size = info->pages;
	info->size <<= PAGE_SHIFT;
	return init_header_complete(info);
}
  
/**
 *	pack_pfns - pfns corresponding to the set bits found in the bitmap @bm
 *	are stored in the array @buf[] (1 page at a time)
 */

static inline void
pack_pfns(unsigned long *buf, struct memory_bitmap *bm)
{
	int j;

	for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
		buf[j] = memory_bm_next_pfn(bm);
		/* A BM_END_OF_MAP entry in the buffer terminates the list. */
		if (unlikely(buf[j] == BM_END_OF_MAP))
			break;
		/* Save page key for data page (s390 only). */
		page_key_read(buf + j);
	}
}
  
/**
 *	snapshot_read_next - used for reading the system memory snapshot.
 *
 *	On the first call to it @handle should point to a zeroed
 *	snapshot_handle structure.  The structure gets updated and a pointer
 *	to it should be passed to this function every next time.
 *
 *	On success the function returns a positive number.  Then, the caller
 *	is allowed to read up to the returned number of bytes from the memory
 *	location computed by the data_of() macro.
 *
 *	The function returns 0 to indicate the end of data stream condition,
 *	and a negative number is returned on error.  In such cases the
 *	structure pointed to by @handle is not updated and should not be used
 *	any more.
 *
 *	Stream layout by @handle->cur: 0 = header, 1..nr_meta_pages = packed
 *	pfn pages, above that = the copied data pages themselves.
 */

int snapshot_read_next(struct snapshot_handle *handle)
{
	if (handle->cur > nr_meta_pages + nr_copy_pages)
		return 0;

	if (!buffer) {
		/* This makes the buffer be freed by swsusp_free() */
		buffer = get_image_page(GFP_ATOMIC, PG_ANY);
		if (!buffer)
			return -ENOMEM;
	}
	if (!handle->cur) {
		int error;

		/* First page of the stream: the image header. */
		error = init_header((struct swsusp_info *)buffer);
		if (error)
			return error;
		handle->buffer = buffer;
		memory_bm_position_reset(&orig_bm);
		memory_bm_position_reset(&copy_bm);
	} else if (handle->cur <= nr_meta_pages) {
		/* Metadata: one page worth of packed pfns from orig_bm. */
		clear_page(buffer);
		pack_pfns(buffer, &orig_bm);
	} else {
		struct page *page;

		page = pfn_to_page(memory_bm_next_pfn(&copy_bm));
		if (PageHighMem(page)) {
			/* Highmem pages are copied to the buffer,
			 * because we can't return with a kmapped
			 * highmem page (we may not be called again).
			 */
			void *kaddr;

			kaddr = kmap_atomic(page, KM_USER0);
			copy_page(buffer, kaddr);
			kunmap_atomic(kaddr, KM_USER0);
			handle->buffer = buffer;
		} else {
			/* Lowmem pages can be handed out directly. */
			handle->buffer = page_address(page);
		}
	}
	handle->cur++;
	return PAGE_SIZE;
}
  
/**
 *	mark_unsafe_pages - mark the pages that cannot be used for storing
 *	the image during resume, because they conflict with the pages that
 *	had been used before suspend
 *
 *	Returns 0 on success, or -EFAULT if the image contains a pfn that
 *	does not exist on the current system.
 */

static int mark_unsafe_pages(struct memory_bitmap *bm)
{
	struct zone *zone;
	unsigned long pfn, max_zone_pfn;

	/* Clear page flags */
	for_each_populated_zone(zone) {
		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (pfn_valid(pfn))
				swsusp_unset_page_free(pfn_to_page(pfn));
	}

	/* Mark pages that correspond to the "original" pfns as "unsafe" */
	memory_bm_position_reset(bm);
	do {
		pfn = memory_bm_next_pfn(bm);
		if (likely(pfn != BM_END_OF_MAP)) {
			if (likely(pfn_valid(pfn)))
				swsusp_set_page_free(pfn_to_page(pfn));
			else
				return -EFAULT;
		}
	} while (pfn != BM_END_OF_MAP);

	/* Restart the count of "safe" replacement pages allocated so far. */
	allocated_unsafe_pages = 0;

	return 0;
}
940864dda   Rafael J. Wysocki   [PATCH] swsusp: U...
1711
1712
  static void
  duplicate_memory_bitmap(struct memory_bitmap *dst, struct memory_bitmap *src)
f577eb30a   Rafael J. Wysocki   [PATCH] swsusp: l...
1713
  {
940864dda   Rafael J. Wysocki   [PATCH] swsusp: U...
1714
1715
1716
1717
1718
1719
1720
  	unsigned long pfn;
  
  	memory_bm_position_reset(src);
  	pfn = memory_bm_next_pfn(src);
  	while (pfn != BM_END_OF_MAP) {
  		memory_bm_set_bit(dst, pfn);
  		pfn = memory_bm_next_pfn(src);
f577eb30a   Rafael J. Wysocki   [PATCH] swsusp: l...
1721
1722
  	}
  }
d307c4a8e   Rafael J. Wysocki   Hibernation: Arbi...
1723
  static int check_header(struct swsusp_info *info)
f577eb30a   Rafael J. Wysocki   [PATCH] swsusp: l...
1724
  {
d307c4a8e   Rafael J. Wysocki   Hibernation: Arbi...
1725
  	char *reason;
f577eb30a   Rafael J. Wysocki   [PATCH] swsusp: l...
1726

d307c4a8e   Rafael J. Wysocki   Hibernation: Arbi...
1727
1728
  	reason = check_image_kernel(info);
  	if (!reason && info->num_physpages != num_physpages)
f577eb30a   Rafael J. Wysocki   [PATCH] swsusp: l...
1729
  		reason = "memory size";
f577eb30a   Rafael J. Wysocki   [PATCH] swsusp: l...
1730
  	if (reason) {
23976728a   Rafael J. Wysocki   Hibernation: Upda...
1731
1732
  		printk(KERN_ERR "PM: Image mismatch: %s
  ", reason);
f577eb30a   Rafael J. Wysocki   [PATCH] swsusp: l...
1733
1734
1735
1736
1737
1738
1739
1740
  		return -EPERM;
  	}
  	return 0;
  }
  
/**
 *	load header - check the image header and copy data from it
 */

static int
load_header(struct swsusp_info *info)
{
	int error;

	restore_pblist = NULL;
	error = check_header(info);
	if (!error) {
		nr_copy_pages = info->image_pages;
		/* Metadata pages = all pages minus data pages and the header. */
		nr_meta_pages = info->pages - info->image_pages - 1;
	}
	return error;
}
  
/**
 *	unpack_orig_pfns - for each element of @buf[] (1 page at a time) set
 *	the corresponding bit in the memory bitmap @bm
 *
 *	Returns 0 on success, or -EFAULT if a pfn from the image is not
 *	present in @bm (ie. does not exist on this system).
 */
static int unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm)
{
	int j;

	for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
		/* BM_END_OF_MAP terminates the packed pfn list early. */
		if (unlikely(buf[j] == BM_END_OF_MAP))
			break;

		/* Extract and buffer page key for data page (s390 only). */
		page_key_memorize(buf + j);

		if (memory_bm_pfn_present(bm, buf[j]))
			memory_bm_set_bit(bm, buf[j]);
		else
			return -EFAULT;
	}

	return 0;
}
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
1775
1776
1777
1778
1779
1780
1781
1782
1783
1784
1785
1786
1787
1788
1789
1790
1791
1792
1793
1794
1795
1796
1797
1798
1799
1800
1801
1802
1803
1804
1805
1806
1807
1808
1809
1810
1811
1812
1813
1814
1815
1816
1817
1818
1819
1820
1821
1822
1823
1824
1825
1826
1827
1828
1829
1830
1831
1832
1833
1834
1835
1836
1837
1838
1839
1840
1841
1842
1843
1844
1845
1846
1847
1848
1849
1850
1851
1852
1853
1854
1855
1856
1857
/* List of "safe" pages that may be used to store data loaded from the suspend
 * image
 */
static struct linked_page *safe_pages_list;

#ifdef CONFIG_HIGHMEM
/* struct highmem_pbe is used for creating the list of highmem pages that
 * should be restored atomically during the resume from disk, because the page
 * frames they have occupied before the suspend are in use.
 */
struct highmem_pbe {
	struct page *copy_page;	/* data is here now */
	struct page *orig_page;	/* data was here before the suspend */
	struct highmem_pbe *next;	/* singly-linked list link */
};

/* List of highmem PBEs needed for restoring the highmem pages that were
 * allocated before the suspend and included in the suspend image, but have
 * also been allocated by the "resume" kernel, so their contents cannot be
 * written directly to their "original" page frames.
 */
static struct highmem_pbe *highmem_pblist;
  
  /**
   *	count_highmem_image_pages - compute the number of highmem pages in the
   *	suspend image.  The bits in the memory bitmap @bm that correspond to the
   *	image pages are assumed to be set.
   */
  
  static unsigned int count_highmem_image_pages(struct memory_bitmap *bm)
  {
  	unsigned long pfn;
  	unsigned int cnt = 0;
  
  	memory_bm_position_reset(bm);
  	pfn = memory_bm_next_pfn(bm);
  	while (pfn != BM_END_OF_MAP) {
  		if (PageHighMem(pfn_to_page(pfn)))
  			cnt++;
  
  		pfn = memory_bm_next_pfn(bm);
  	}
  	return cnt;
  }
  
/**
 *	prepare_highmem_image - try to allocate as many highmem pages as
 *	there are highmem image pages (@nr_highmem_p points to the variable
 *	containing the number of highmem image pages).  The pages that are
 *	"safe" (ie. will not be overwritten when the suspend image is
 *	restored) have the corresponding bits set in @bm (it must be
 *	uninitialized).
 *
 *	NOTE: This function should not be called if there are no highmem
 *	image pages.
 */

static unsigned int safe_highmem_pages;

static struct memory_bitmap *safe_highmem_bm;

static int
prepare_highmem_image(struct memory_bitmap *bm, unsigned int *nr_highmem_p)
{
	unsigned int to_alloc;

	if (memory_bm_create(bm, GFP_ATOMIC, PG_SAFE))
		return -ENOMEM;

	if (get_highmem_buffer(PG_SAFE))
		return -ENOMEM;

	/* Cap the request at the number of currently free highmem pages and
	 * report back via *nr_highmem_p how many we will actually provide.
	 */
	to_alloc = count_free_highmem_pages();
	if (to_alloc > *nr_highmem_p)
		to_alloc = *nr_highmem_p;
	else
		*nr_highmem_p = to_alloc;

	safe_highmem_pages = 0;
	while (to_alloc-- > 0) {
		struct page *page;

		/* NOTE(review): the result of alloc_page() is used without a
		 * NULL check; presumably the count_free_highmem_pages() bound
		 * above guarantees success — confirm before modifying.
		 */
		page = alloc_page(__GFP_HIGHMEM);
		if (!swsusp_page_is_free(page)) {
			/* The page is "safe", set its bit the bitmap */
			memory_bm_set_bit(bm, page_to_pfn(page));
			safe_highmem_pages++;
		}
		/* Mark the page as allocated */
		swsusp_set_page_forbidden(page);
		swsusp_set_page_free(page);
	}
	memory_bm_position_reset(bm);
	safe_highmem_bm = bm;
	return 0;
}
  
/**
 *	get_highmem_page_buffer - for given highmem image page find the buffer
 *	that suspend_write_next() should set for its caller to write to.
 *
 *	If the page is to be saved to its "original" page frame or a copy of
 *	the page is to be made in the highmem, @buffer is returned.  Otherwise,
 *	the copy of the page is to be made in normal memory, so the address of
 *	the copy is returned.
 *
 *	If @buffer is returned, the caller of suspend_write_next() will write
 *	the page's contents to @buffer, so they will have to be copied to the
 *	right location on the next call to suspend_write_next() and it is done
 *	with the help of copy_last_highmem_page().  For this purpose, if
 *	@buffer is returned, @last_highmem page is set to the page to which
 *	the data will have to be copied from @buffer.
 *
 *	Returns ERR_PTR(-ENOMEM) (after swsusp_free()) if a PBE cannot be
 *	allocated.
 */

static struct page *last_highmem_page;

static void *
get_highmem_page_buffer(struct page *page, struct chain_allocator *ca)
{
	struct highmem_pbe *pbe;
	void *kaddr;

	if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page)) {
		/* We have allocated the "original" page frame and we can
		 * use it directly to store the loaded page.
		 */
		last_highmem_page = page;
		return buffer;
	}
	/* The "original" page frame has not been allocated and we have to
	 * use a "safe" page frame to store the loaded page.
	 */
	pbe = chain_alloc(ca, sizeof(struct highmem_pbe));
	if (!pbe) {
		swsusp_free();
		return ERR_PTR(-ENOMEM);
	}
	pbe->orig_page = page;
	if (safe_highmem_pages > 0) {
		struct page *tmp;

		/* Copy of the page will be stored in high memory */
		kaddr = buffer;
		tmp = pfn_to_page(memory_bm_next_pfn(safe_highmem_bm));
		safe_highmem_pages--;
		last_highmem_page = tmp;
		pbe->copy_page = tmp;
	} else {
		/* Copy of the page will be stored in normal memory */
		kaddr = safe_pages_list;
		safe_pages_list = safe_pages_list->next;
		pbe->copy_page = virt_to_page(kaddr);
	}
	pbe->next = highmem_pblist;
	highmem_pblist = pbe;
	return kaddr;
}
  
  /**
   *	copy_last_highmem_page - copy the contents of a highmem image from
   *	@buffer, where the caller of snapshot_write_next() has place them,
   *	to the right location represented by @last_highmem_page .
   */
  
  static void copy_last_highmem_page(void)
  {
  	if (last_highmem_page) {
  		void *dst;
  
  		dst = kmap_atomic(last_highmem_page, KM_USER0);
3ecb01df3   Jan Beulich   use clear_page()/...
1944
  		copy_page(dst, buffer);
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
1945
1946
1947
1948
1949
1950
1951
1952
1953
1954
1955
1956
1957
1958
1959
1960
1961
1962
1963
1964
1965
1966
1967
1968
1969
1970
1971
1972
1973
1974
1975
1976
1977
  		kunmap_atomic(dst, KM_USER0);
  		last_highmem_page = NULL;
  	}
  }
  
  static inline int last_highmem_page_copied(void)
  {
  	return !last_highmem_page;
  }
  
  static inline void free_highmem_data(void)
  {
  	if (safe_highmem_bm)
  		memory_bm_free(safe_highmem_bm, PG_UNSAFE_CLEAR);
  
  	if (buffer)
  		free_image_page(buffer, PG_UNSAFE_CLEAR);
  }
  #else
  static inline int get_safe_write_buffer(void) { return 0; }
  
  static unsigned int
  count_highmem_image_pages(struct memory_bitmap *bm) { return 0; }
  
  static inline int
  prepare_highmem_image(struct memory_bitmap *bm, unsigned int *nr_highmem_p)
  {
  	return 0;
  }
  
  static inline void *
  get_highmem_page_buffer(struct page *page, struct chain_allocator *ca)
  {
69643279a   Rafael J. Wysocki   Hibernate: Do not...
1978
  	return ERR_PTR(-EINVAL);
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
1979
1980
1981
1982
1983
1984
  }
  
  static inline void copy_last_highmem_page(void) {}
  static inline int last_highmem_page_copied(void) { return 1; }
  static inline void free_highmem_data(void) {}
  #endif /* CONFIG_HIGHMEM */
f577eb30a   Rafael J. Wysocki   [PATCH] swsusp: l...
1985
  /**
940864dda   Rafael J. Wysocki   [PATCH] swsusp: U...
1986
1987
1988
1989
   *	prepare_image - use the memory bitmap @bm to mark the pages that will
   *	be overwritten in the process of restoring the system memory state
   *	from the suspend image ("unsafe" pages) and allocate memory for the
   *	image.
968808b89   Rafael J. Wysocki   [PATCH] swsusp: u...
1990
   *
940864dda   Rafael J. Wysocki   [PATCH] swsusp: U...
1991
1992
1993
   *	The idea is to allocate a new memory bitmap first and then allocate
   *	as many pages as needed for the image data, but not to assign these
   *	pages to specific tasks initially.  Instead, we just mark them as
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
1994
1995
1996
   *	allocated and create a lists of "safe" pages that will be used
   *	later.  On systems with high memory a list of "safe" highmem pages is
   *	also created.
f577eb30a   Rafael J. Wysocki   [PATCH] swsusp: l...
1997
   */
940864dda   Rafael J. Wysocki   [PATCH] swsusp: U...
1998
  #define PBES_PER_LINKED_PAGE	(LINKED_PAGE_DATA_SIZE / sizeof(struct pbe))
940864dda   Rafael J. Wysocki   [PATCH] swsusp: U...
1999
2000
  static int
  prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
f577eb30a   Rafael J. Wysocki   [PATCH] swsusp: l...
2001
  {
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
2002
  	unsigned int nr_pages, nr_highmem;
940864dda   Rafael J. Wysocki   [PATCH] swsusp: U...
2003
2004
  	struct linked_page *sp_list, *lp;
  	int error;
f577eb30a   Rafael J. Wysocki   [PATCH] swsusp: l...
2005

8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
2006
2007
2008
2009
2010
  	/* If there is no highmem, the buffer will not be necessary */
  	free_image_page(buffer, PG_UNSAFE_CLEAR);
  	buffer = NULL;
  
  	nr_highmem = count_highmem_image_pages(bm);
940864dda   Rafael J. Wysocki   [PATCH] swsusp: U...
2011
2012
2013
2014
2015
2016
2017
2018
2019
2020
  	error = mark_unsafe_pages(bm);
  	if (error)
  		goto Free;
  
  	error = memory_bm_create(new_bm, GFP_ATOMIC, PG_SAFE);
  	if (error)
  		goto Free;
  
  	duplicate_memory_bitmap(new_bm, bm);
  	memory_bm_free(bm, PG_UNSAFE_KEEP);
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
2021
2022
2023
2024
2025
  	if (nr_highmem > 0) {
  		error = prepare_highmem_image(bm, &nr_highmem);
  		if (error)
  			goto Free;
  	}
940864dda   Rafael J. Wysocki   [PATCH] swsusp: U...
2026
2027
2028
2029
2030
2031
2032
2033
  	/* Reserve some safe pages for potential later use.
  	 *
  	 * NOTE: This way we make sure there will be enough safe pages for the
  	 * chain_alloc() in get_buffer().  It is a bit wasteful, but
  	 * nr_copy_pages cannot be greater than 50% of the memory anyway.
  	 */
  	sp_list = NULL;
  	/* nr_copy_pages cannot be lesser than allocated_unsafe_pages */
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
2034
  	nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
940864dda   Rafael J. Wysocki   [PATCH] swsusp: U...
2035
2036
  	nr_pages = DIV_ROUND_UP(nr_pages, PBES_PER_LINKED_PAGE);
  	while (nr_pages > 0) {
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
2037
  		lp = get_image_page(GFP_ATOMIC, PG_SAFE);
940864dda   Rafael J. Wysocki   [PATCH] swsusp: U...
2038
  		if (!lp) {
f577eb30a   Rafael J. Wysocki   [PATCH] swsusp: l...
2039
  			error = -ENOMEM;
940864dda   Rafael J. Wysocki   [PATCH] swsusp: U...
2040
2041
2042
2043
2044
  			goto Free;
  		}
  		lp->next = sp_list;
  		sp_list = lp;
  		nr_pages--;
f577eb30a   Rafael J. Wysocki   [PATCH] swsusp: l...
2045
  	}
940864dda   Rafael J. Wysocki   [PATCH] swsusp: U...
2046
2047
  	/* Preallocate memory for the image */
  	safe_pages_list = NULL;
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
2048
  	nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
940864dda   Rafael J. Wysocki   [PATCH] swsusp: U...
2049
2050
2051
2052
2053
2054
  	while (nr_pages > 0) {
  		lp = (struct linked_page *)get_zeroed_page(GFP_ATOMIC);
  		if (!lp) {
  			error = -ENOMEM;
  			goto Free;
  		}
7be982349   Rafael J. Wysocki   swsusp: use inlin...
2055
  		if (!swsusp_page_is_free(virt_to_page(lp))) {
940864dda   Rafael J. Wysocki   [PATCH] swsusp: U...
2056
2057
2058
  			/* The page is "safe", add it to the list */
  			lp->next = safe_pages_list;
  			safe_pages_list = lp;
968808b89   Rafael J. Wysocki   [PATCH] swsusp: u...
2059
  		}
940864dda   Rafael J. Wysocki   [PATCH] swsusp: U...
2060
  		/* Mark the page as allocated */
7be982349   Rafael J. Wysocki   swsusp: use inlin...
2061
2062
  		swsusp_set_page_forbidden(virt_to_page(lp));
  		swsusp_set_page_free(virt_to_page(lp));
940864dda   Rafael J. Wysocki   [PATCH] swsusp: U...
2063
  		nr_pages--;
968808b89   Rafael J. Wysocki   [PATCH] swsusp: u...
2064
  	}
940864dda   Rafael J. Wysocki   [PATCH] swsusp: U...
2065
2066
2067
2068
2069
  	/* Free the reserved safe pages so that chain_alloc() can use them */
  	while (sp_list) {
  		lp = sp_list->next;
  		free_image_page(sp_list, PG_UNSAFE_CLEAR);
  		sp_list = lp;
f577eb30a   Rafael J. Wysocki   [PATCH] swsusp: l...
2070
  	}
940864dda   Rafael J. Wysocki   [PATCH] swsusp: U...
2071
  	return 0;
59a493350   Rafael J. Wysocki   [PATCH] swsusp: F...
2072
   Free:
940864dda   Rafael J. Wysocki   [PATCH] swsusp: U...
2073
  	swsusp_free();
f577eb30a   Rafael J. Wysocki   [PATCH] swsusp: l...
2074
2075
  	return error;
  }
940864dda   Rafael J. Wysocki   [PATCH] swsusp: U...
2076
2077
2078
2079
2080
2081
  /**
   *	get_buffer - compute the address that snapshot_write_next() should
   *	set for its caller to write to.
   */
  
  static void *get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca)
968808b89   Rafael J. Wysocki   [PATCH] swsusp: u...
2082
  {
940864dda   Rafael J. Wysocki   [PATCH] swsusp: U...
2083
  	struct pbe *pbe;
69643279a   Rafael J. Wysocki   Hibernate: Do not...
2084
2085
  	struct page *page;
  	unsigned long pfn = memory_bm_next_pfn(bm);
968808b89   Rafael J. Wysocki   [PATCH] swsusp: u...
2086

69643279a   Rafael J. Wysocki   Hibernate: Do not...
2087
2088
2089
2090
  	if (pfn == BM_END_OF_MAP)
  		return ERR_PTR(-EFAULT);
  
  	page = pfn_to_page(pfn);
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
2091
2092
  	if (PageHighMem(page))
  		return get_highmem_page_buffer(page, ca);
7be982349   Rafael J. Wysocki   swsusp: use inlin...
2093
  	if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page))
940864dda   Rafael J. Wysocki   [PATCH] swsusp: U...
2094
2095
  		/* We have allocated the "original" page frame and we can
  		 * use it directly to store the loaded page.
968808b89   Rafael J. Wysocki   [PATCH] swsusp: u...
2096
  		 */
940864dda   Rafael J. Wysocki   [PATCH] swsusp: U...
2097
2098
2099
2100
  		return page_address(page);
  
  	/* The "original" page frame has not been allocated and we have to
  	 * use a "safe" page frame to store the loaded page.
968808b89   Rafael J. Wysocki   [PATCH] swsusp: u...
2101
  	 */
940864dda   Rafael J. Wysocki   [PATCH] swsusp: U...
2102
2103
2104
  	pbe = chain_alloc(ca, sizeof(struct pbe));
  	if (!pbe) {
  		swsusp_free();
69643279a   Rafael J. Wysocki   Hibernate: Do not...
2105
  		return ERR_PTR(-ENOMEM);
940864dda   Rafael J. Wysocki   [PATCH] swsusp: U...
2106
  	}
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
2107
2108
  	pbe->orig_address = page_address(page);
  	pbe->address = safe_pages_list;
940864dda   Rafael J. Wysocki   [PATCH] swsusp: U...
2109
2110
2111
  	safe_pages_list = safe_pages_list->next;
  	pbe->next = restore_pblist;
  	restore_pblist = pbe;
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
2112
  	return pbe->address;
968808b89   Rafael J. Wysocki   [PATCH] swsusp: u...
2113
  }
f577eb30a   Rafael J. Wysocki   [PATCH] swsusp: l...
2114
2115
2116
2117
2118
2119
2120
  /**
   *	snapshot_write_next - used for writing the system memory snapshot.
   *
   *	On the first call to it @handle should point to a zeroed
   *	snapshot_handle structure.  The structure gets updated and a pointer
   *	to it should be passed to this function every next time.
   *
f577eb30a   Rafael J. Wysocki   [PATCH] swsusp: l...
2121
2122
   *	On success the function returns a positive number.  Then, the caller
   *	is allowed to write up to the returned number of bytes to the memory
d3c1b24c5   Jiri Slaby   PM / Hibernate: S...
2123
   *	location computed by the data_of() macro.
f577eb30a   Rafael J. Wysocki   [PATCH] swsusp: l...
2124
2125
2126
2127
2128
2129
   *
   *	The function returns 0 to indicate the "end of file" condition,
   *	and a negative number is returned on error.  In such cases the
   *	structure pointed to by @handle is not updated and should not be used
   *	any more.
   */
d3c1b24c5   Jiri Slaby   PM / Hibernate: S...
2130
  int snapshot_write_next(struct snapshot_handle *handle)
f577eb30a   Rafael J. Wysocki   [PATCH] swsusp: l...
2131
  {
940864dda   Rafael J. Wysocki   [PATCH] swsusp: U...
2132
  	static struct chain_allocator ca;
f577eb30a   Rafael J. Wysocki   [PATCH] swsusp: l...
2133
  	int error = 0;
940864dda   Rafael J. Wysocki   [PATCH] swsusp: U...
2134
  	/* Check if we have already loaded the entire image */
d3c1b24c5   Jiri Slaby   PM / Hibernate: S...
2135
  	if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages)
f577eb30a   Rafael J. Wysocki   [PATCH] swsusp: l...
2136
  		return 0;
940864dda   Rafael J. Wysocki   [PATCH] swsusp: U...
2137

d3c1b24c5   Jiri Slaby   PM / Hibernate: S...
2138
2139
2140
  	handle->sync_read = 1;
  
  	if (!handle->cur) {
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
2141
2142
2143
  		if (!buffer)
  			/* This makes the buffer be freed by swsusp_free() */
  			buffer = get_image_page(GFP_ATOMIC, PG_ANY);
f577eb30a   Rafael J. Wysocki   [PATCH] swsusp: l...
2144
2145
  		if (!buffer)
  			return -ENOMEM;
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
2146

f577eb30a   Rafael J. Wysocki   [PATCH] swsusp: l...
2147
  		handle->buffer = buffer;
d3c1b24c5   Jiri Slaby   PM / Hibernate: S...
2148
2149
2150
2151
  	} else if (handle->cur == 1) {
  		error = load_header(buffer);
  		if (error)
  			return error;
940864dda   Rafael J. Wysocki   [PATCH] swsusp: U...
2152

d3c1b24c5   Jiri Slaby   PM / Hibernate: S...
2153
2154
2155
  		error = memory_bm_create(&copy_bm, GFP_ATOMIC, PG_ANY);
  		if (error)
  			return error;
85055dd80   Martin Schwidefsky   PM / Hibernate: I...
2156
2157
2158
2159
  		/* Allocate buffer for page keys. */
  		error = page_key_alloc(nr_copy_pages);
  		if (error)
  			return error;
d3c1b24c5   Jiri Slaby   PM / Hibernate: S...
2160
2161
2162
2163
  	} else if (handle->cur <= nr_meta_pages + 1) {
  		error = unpack_orig_pfns(buffer, &copy_bm);
  		if (error)
  			return error;
940864dda   Rafael J. Wysocki   [PATCH] swsusp: U...
2164

d3c1b24c5   Jiri Slaby   PM / Hibernate: S...
2165
2166
  		if (handle->cur == nr_meta_pages + 1) {
  			error = prepare_image(&orig_bm, &copy_bm);
69643279a   Rafael J. Wysocki   Hibernate: Do not...
2167
2168
  			if (error)
  				return error;
d3c1b24c5   Jiri Slaby   PM / Hibernate: S...
2169
2170
2171
  			chain_init(&ca, GFP_ATOMIC, PG_SAFE);
  			memory_bm_position_reset(&orig_bm);
  			restore_pblist = NULL;
940864dda   Rafael J. Wysocki   [PATCH] swsusp: U...
2172
  			handle->buffer = get_buffer(&orig_bm, &ca);
d3c1b24c5   Jiri Slaby   PM / Hibernate: S...
2173
  			handle->sync_read = 0;
69643279a   Rafael J. Wysocki   Hibernate: Do not...
2174
2175
  			if (IS_ERR(handle->buffer))
  				return PTR_ERR(handle->buffer);
f577eb30a   Rafael J. Wysocki   [PATCH] swsusp: l...
2176
  		}
f577eb30a   Rafael J. Wysocki   [PATCH] swsusp: l...
2177
  	} else {
d3c1b24c5   Jiri Slaby   PM / Hibernate: S...
2178
  		copy_last_highmem_page();
85055dd80   Martin Schwidefsky   PM / Hibernate: I...
2179
2180
  		/* Restore page key for data page (s390 only). */
  		page_key_write(handle->buffer);
d3c1b24c5   Jiri Slaby   PM / Hibernate: S...
2181
2182
2183
2184
2185
  		handle->buffer = get_buffer(&orig_bm, &ca);
  		if (IS_ERR(handle->buffer))
  			return PTR_ERR(handle->buffer);
  		if (handle->buffer != buffer)
  			handle->sync_read = 0;
f577eb30a   Rafael J. Wysocki   [PATCH] swsusp: l...
2186
  	}
d3c1b24c5   Jiri Slaby   PM / Hibernate: S...
2187
2188
  	handle->cur++;
  	return PAGE_SIZE;
f577eb30a   Rafael J. Wysocki   [PATCH] swsusp: l...
2189
  }
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
2190
2191
2192
2193
2194
2195
2196
2197
2198
2199
2200
  /**
   *	snapshot_write_finalize - must be called after the last call to
   *	snapshot_write_next() in case the last page in the image happens
   *	to be a highmem page and its contents should be stored in the
   *	highmem.  Additionally, it releases the memory that will not be
   *	used any more.
   */
  
  void snapshot_write_finalize(struct snapshot_handle *handle)
  {
  	copy_last_highmem_page();
85055dd80   Martin Schwidefsky   PM / Hibernate: I...
2201
2202
2203
  	/* Restore page key for data page (s390 only). */
  	page_key_write(handle->buffer);
  	page_key_free();
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
2204
  	/* Free only if we have loaded the image entirely */
d3c1b24c5   Jiri Slaby   PM / Hibernate: S...
2205
  	if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages) {
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
2206
2207
2208
2209
  		memory_bm_free(&orig_bm, PG_UNSAFE_CLEAR);
  		free_highmem_data();
  	}
  }
f577eb30a   Rafael J. Wysocki   [PATCH] swsusp: l...
2210
2211
  int snapshot_image_loaded(struct snapshot_handle *handle)
  {
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
2212
  	return !(!nr_copy_pages || !last_highmem_page_copied() ||
940864dda   Rafael J. Wysocki   [PATCH] swsusp: U...
2213
2214
  			handle->cur <= nr_meta_pages + nr_copy_pages);
  }
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
2215
2216
2217
2218
  #ifdef CONFIG_HIGHMEM
  /* Assumes that @buf is ready and points to a "safe" page */
  static inline void
  swap_two_pages_data(struct page *p1, struct page *p2, void *buf)
940864dda   Rafael J. Wysocki   [PATCH] swsusp: U...
2219
  {
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
2220
2221
2222
2223
  	void *kaddr1, *kaddr2;
  
  	kaddr1 = kmap_atomic(p1, KM_USER0);
  	kaddr2 = kmap_atomic(p2, KM_USER1);
3ecb01df3   Jan Beulich   use clear_page()/...
2224
2225
2226
  	copy_page(buf, kaddr1);
  	copy_page(kaddr1, kaddr2);
  	copy_page(kaddr2, buf);
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
2227
  	kunmap_atomic(kaddr2, KM_USER1);
61ecdb801   Peter Zijlstra   mm: strictly nest...
2228
  	kunmap_atomic(kaddr1, KM_USER0);
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
2229
2230
2231
2232
2233
2234
2235
2236
2237
2238
2239
2240
2241
2242
2243
2244
2245
2246
2247
2248
2249
2250
2251
2252
2253
2254
2255
2256
2257
2258
  }
  
  /**
   *	restore_highmem - for each highmem page that was allocated before
   *	the suspend and included in the suspend image, and also has been
   *	allocated by the "resume" kernel swap its current (ie. "before
   *	resume") contents with the previous (ie. "before suspend") one.
   *
   *	If the resume eventually fails, we can call this function once
   *	again and restore the "before resume" highmem state.
   */
  
  int restore_highmem(void)
  {
  	struct highmem_pbe *pbe = highmem_pblist;
  	void *buf;
  
  	if (!pbe)
  		return 0;
  
  	buf = get_image_page(GFP_ATOMIC, PG_SAFE);
  	if (!buf)
  		return -ENOMEM;
  
  	while (pbe) {
  		swap_two_pages_data(pbe->copy_page, pbe->orig_page, buf);
  		pbe = pbe->next;
  	}
  	free_image_page(buf, PG_UNSAFE_CLEAR);
  	return 0;
f577eb30a   Rafael J. Wysocki   [PATCH] swsusp: l...
2259
  }
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
2260
  #endif /* CONFIG_HIGHMEM */