Blame view

kernel/power/snapshot.c 71.6 KB
25761b6eb   Rafael J. Wysocki   [PATCH] swsusp: m...
1
  /*
96bc7aec2   Pavel Machek   [PATCH] swsusp: r...
2
   * linux/kernel/power/snapshot.c
25761b6eb   Rafael J. Wysocki   [PATCH] swsusp: m...
3
   *
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
4
   * This file provides system snapshot/restore functionality for swsusp.
25761b6eb   Rafael J. Wysocki   [PATCH] swsusp: m...
5
   *
a2531293d   Pavel Machek   update email address
6
   * Copyright (C) 1998-2005 Pavel Machek <pavel@ucw.cz>
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
7
   * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
25761b6eb   Rafael J. Wysocki   [PATCH] swsusp: m...
8
   *
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
9
   * This file is released under the GPLv2.
25761b6eb   Rafael J. Wysocki   [PATCH] swsusp: m...
10
11
   *
   */
f577eb30a   Rafael J. Wysocki   [PATCH] swsusp: l...
12
  #include <linux/version.h>
25761b6eb   Rafael J. Wysocki   [PATCH] swsusp: m...
13
14
15
  #include <linux/module.h>
  #include <linux/mm.h>
  #include <linux/suspend.h>
25761b6eb   Rafael J. Wysocki   [PATCH] swsusp: m...
16
  #include <linux/delay.h>
25761b6eb   Rafael J. Wysocki   [PATCH] swsusp: m...
17
  #include <linux/bitops.h>
25761b6eb   Rafael J. Wysocki   [PATCH] swsusp: m...
18
  #include <linux/spinlock.h>
25761b6eb   Rafael J. Wysocki   [PATCH] swsusp: m...
19
  #include <linux/kernel.h>
25761b6eb   Rafael J. Wysocki   [PATCH] swsusp: m...
20
21
  #include <linux/pm.h>
  #include <linux/device.h>
74dfd666d   Rafael J. Wysocki   swsusp: do not us...
22
  #include <linux/init.h>
25761b6eb   Rafael J. Wysocki   [PATCH] swsusp: m...
23
24
25
26
  #include <linux/bootmem.h>
  #include <linux/syscalls.h>
  #include <linux/console.h>
  #include <linux/highmem.h>
846705deb   Rafael J. Wysocki   Hibernate: Take o...
27
  #include <linux/list.h>
5a0e3ad6a   Tejun Heo   include cleanup: ...
28
  #include <linux/slab.h>
52f5684c8   Gideon Israel Dsouza   kernel: use macro...
29
  #include <linux/compiler.h>
db5976058   Tina Ruchandani   PM / Hibernate: M...
30
  #include <linux/ktime.h>
25761b6eb   Rafael J. Wysocki   [PATCH] swsusp: m...
31
32
33
34
35
36
  
  #include <asm/uaccess.h>
  #include <asm/mmu_context.h>
  #include <asm/pgtable.h>
  #include <asm/tlbflush.h>
  #include <asm/io.h>
25761b6eb   Rafael J. Wysocki   [PATCH] swsusp: m...
37
  #include "power.h"
4c0b6c10f   Rafael J. Wysocki   PM / hibernate: I...
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
  #ifdef CONFIG_DEBUG_RODATA
  static bool hibernate_restore_protection;
  static bool hibernate_restore_protection_active;
  
  void enable_restore_image_protection(void)
  {
  	hibernate_restore_protection = true;
  }
  
  static inline void hibernate_restore_protection_begin(void)
  {
  	hibernate_restore_protection_active = hibernate_restore_protection;
  }
  
  static inline void hibernate_restore_protection_end(void)
  {
  	hibernate_restore_protection_active = false;
  }
  
  static inline void hibernate_restore_protect_page(void *page_address)
  {
  	if (hibernate_restore_protection_active)
  		set_memory_ro((unsigned long)page_address, 1);
  }
  
  static inline void hibernate_restore_unprotect_page(void *page_address)
  {
  	if (hibernate_restore_protection_active)
  		set_memory_rw((unsigned long)page_address, 1);
  }
  #else
  static inline void hibernate_restore_protection_begin(void) {}
  static inline void hibernate_restore_protection_end(void) {}
  static inline void hibernate_restore_protect_page(void *page_address) {}
  static inline void hibernate_restore_unprotect_page(void *page_address) {}
  #endif /* CONFIG_DEBUG_RODATA */
74dfd666d   Rafael J. Wysocki   swsusp: do not us...
74
75
76
  static int swsusp_page_is_free(struct page *);
  static void swsusp_set_page_forbidden(struct page *);
  static void swsusp_unset_page_forbidden(struct page *);
fe419535d   Rafael J. Wysocki   PM/Hibernate: Mov...
77
  /*
ddeb64870   Rafael J. Wysocki   PM / Hibernate: A...
78
79
80
81
82
83
84
85
86
87
88
89
   * Number of bytes to reserve for memory allocations made by device drivers
   * from their ->freeze() and ->freeze_noirq() callbacks so that they don't
   * cause image creation to fail (tunable via /sys/power/reserved_size).
   */
unsigned long reserved_size;

/*
 * hibernate_reserved_size_init - Set the default value of reserved_size.
 *
 * Runs at boot (__init); user space may change the value afterwards
 * through /sys/power/reserved_size.
 */
void __init hibernate_reserved_size_init(void)
{
	reserved_size = SPARE_PAGES * PAGE_SIZE;
}
  
  /*
fe419535d   Rafael J. Wysocki   PM/Hibernate: Mov...
90
   * Preferred image size in bytes (tunable via /sys/power/image_size).
1c1be3a94   Rafael J. Wysocki   Revert "PM / Hibe...
91
92
93
   * When it is set to N, swsusp will do its best to ensure the image
   * size will not exceed N bytes, but if that is impossible, it will
   * try to create the smallest image possible.
fe419535d   Rafael J. Wysocki   PM/Hibernate: Mov...
94
   */
ac5c24ec1   Rafael J. Wysocki   PM / Hibernate: M...
95
96
97
98
  unsigned long image_size;
  
  void __init hibernate_image_size_init(void)
  {
1c1be3a94   Rafael J. Wysocki   Revert "PM / Hibe...
99
  	image_size = ((totalram_pages * 2) / 5) * PAGE_SIZE;
ac5c24ec1   Rafael J. Wysocki   PM / Hibernate: M...
100
  }
fe419535d   Rafael J. Wysocki   PM/Hibernate: Mov...
101

ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
102
103
  /*
   * List of PBEs needed for restoring the pages that were allocated before
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
104
105
106
107
   * the suspend and included in the suspend image, but have also been
   * allocated by the "resume" kernel, so their contents cannot be written
   * directly to their "original" page frames.
   */
75534b50c   Rafael J. Wysocki   [PATCH] Change th...
108
  struct pbe *restore_pblist;
9c744481c   Rafael J. Wysocki   PM / hibernate: D...
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
/*
 * struct linked_page - One page of a singly-linked chain of pages.
 *
 * A whole page holds the link to the next page followed by the payload,
 * so no separate bookkeeping allocation is needed.
 */

/* Payload bytes available in each linked page (a page minus the link). */
#define LINKED_PAGE_DATA_SIZE	(PAGE_SIZE - sizeof(void *))

struct linked_page {
	struct linked_page *next;	/* next page in the chain */
	char data[LINKED_PAGE_DATA_SIZE];	/* payload */
} __packed;

/*
 * List of "safe" pages (ie. pages that were not used by the image kernel
 * before hibernation) that may be used as temporary storage for image kernel
 * memory contents.
 */
static struct linked_page *safe_pages_list;
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
124
  /* Pointer to an auxiliary buffer (1 page) */
940864dda   Rafael J. Wysocki   [PATCH] swsusp: U...
125
  static void *buffer;
7088a5c00   Rafael J. Wysocki   [PATCH] swsusp: i...
126

0bcd888d6   Rafael J. Wysocki   [PATCH] swsusp: I...
127
128
129
130
  #define PG_ANY		0
  #define PG_SAFE		1
  #define PG_UNSAFE_CLEAR	1
  #define PG_UNSAFE_KEEP	0
940864dda   Rafael J. Wysocki   [PATCH] swsusp: U...
131
  static unsigned int allocated_unsafe_pages;
f6143aa60   Rafael J. Wysocki   [PATCH] swsusp: R...
132

ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
133
134
135
136
137
138
139
140
141
142
143
144
145
  /**
   * get_image_page - Allocate a page for a hibernation image.
   * @gfp_mask: GFP mask for the allocation.
   * @safe_needed: Get pages that were not used before hibernation (restore only)
   *
   * During image restoration, for storing the PBE list and the image data, we can
   * only use memory pages that do not conflict with the pages used before
   * hibernation.  The "unsafe" pages have PageNosaveFree set and we count them
   * using allocated_unsafe_pages.
   *
   * Each allocated image page is marked as PageNosave and PageNosaveFree so that
   * swsusp_free() can release it.
   */
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
146
  static void *get_image_page(gfp_t gfp_mask, int safe_needed)
f6143aa60   Rafael J. Wysocki   [PATCH] swsusp: R...
147
148
149
150
151
  {
  	void *res;
  
  	res = (void *)get_zeroed_page(gfp_mask);
  	if (safe_needed)
7be982349   Rafael J. Wysocki   swsusp: use inlin...
152
  		while (res && swsusp_page_is_free(virt_to_page(res))) {
f6143aa60   Rafael J. Wysocki   [PATCH] swsusp: R...
153
  			/* The page is unsafe, mark it for swsusp_free() */
7be982349   Rafael J. Wysocki   swsusp: use inlin...
154
  			swsusp_set_page_forbidden(virt_to_page(res));
940864dda   Rafael J. Wysocki   [PATCH] swsusp: U...
155
  			allocated_unsafe_pages++;
f6143aa60   Rafael J. Wysocki   [PATCH] swsusp: R...
156
157
158
  			res = (void *)get_zeroed_page(gfp_mask);
  		}
  	if (res) {
7be982349   Rafael J. Wysocki   swsusp: use inlin...
159
160
  		swsusp_set_page_forbidden(virt_to_page(res));
  		swsusp_set_page_free(virt_to_page(res));
f6143aa60   Rafael J. Wysocki   [PATCH] swsusp: R...
161
162
163
  	}
  	return res;
  }
9c744481c   Rafael J. Wysocki   PM / hibernate: D...
164
165
166
167
168
169
170
171
172
173
174
  static void *__get_safe_page(gfp_t gfp_mask)
  {
  	if (safe_pages_list) {
  		void *ret = safe_pages_list;
  
  		safe_pages_list = safe_pages_list->next;
  		memset(ret, 0, PAGE_SIZE);
  		return ret;
  	}
  	return get_image_page(gfp_mask, PG_SAFE);
  }
f6143aa60   Rafael J. Wysocki   [PATCH] swsusp: R...
175
176
  unsigned long get_safe_page(gfp_t gfp_mask)
  {
9c744481c   Rafael J. Wysocki   PM / hibernate: D...
177
  	return (unsigned long)__get_safe_page(gfp_mask);
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
178
  }
5b6d15de2   Rafael J. Wysocki   [PATCH] swsusp: F...
179
180
  static struct page *alloc_image_page(gfp_t gfp_mask)
  {
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
181
182
183
184
  	struct page *page;
  
  	page = alloc_page(gfp_mask);
  	if (page) {
7be982349   Rafael J. Wysocki   swsusp: use inlin...
185
186
  		swsusp_set_page_forbidden(page);
  		swsusp_set_page_free(page);
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
187
188
  	}
  	return page;
f6143aa60   Rafael J. Wysocki   [PATCH] swsusp: R...
189
  }
307c5971c   Rafael J. Wysocki   PM / hibernate: R...
190
191
192
193
194
195
196
  static void recycle_safe_page(void *page_address)
  {
  	struct linked_page *lp = page_address;
  
  	lp->next = safe_pages_list;
  	safe_pages_list = lp;
  }
f6143aa60   Rafael J. Wysocki   [PATCH] swsusp: R...
197
  /**
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
198
199
200
201
202
203
   * free_image_page - Free a page allocated for hibernation image.
   * @addr: Address of the page to free.
   * @clear_nosave_free: If set, clear the PageNosaveFree bit for the page.
   *
   * The page to free should have been allocated by get_image_page() (page flags
   * set by it are affected).
f6143aa60   Rafael J. Wysocki   [PATCH] swsusp: R...
204
   */
f6143aa60   Rafael J. Wysocki   [PATCH] swsusp: R...
205
206
  static inline void free_image_page(void *addr, int clear_nosave_free)
  {
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
207
208
209
210
211
  	struct page *page;
  
  	BUG_ON(!virt_addr_valid(addr));
  
  	page = virt_to_page(addr);
7be982349   Rafael J. Wysocki   swsusp: use inlin...
212
  	swsusp_unset_page_forbidden(page);
f6143aa60   Rafael J. Wysocki   [PATCH] swsusp: R...
213
  	if (clear_nosave_free)
7be982349   Rafael J. Wysocki   swsusp: use inlin...
214
  		swsusp_unset_page_free(page);
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
215
216
  
  	__free_page(page);
f6143aa60   Rafael J. Wysocki   [PATCH] swsusp: R...
217
  }
efd5a8524   Rafael J. Wysocki   PM / hibernate: C...
218
219
  static inline void free_list_of_pages(struct linked_page *list,
  				      int clear_page_nosave)
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
220
221
222
223
224
225
226
227
  {
  	while (list) {
  		struct linked_page *lp = list->next;
  
  		free_image_page(list, clear_page_nosave);
  		list = lp;
  	}
  }
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
228
229
230
231
232
233
234
235
236
237
238
239
  /*
   * struct chain_allocator is used for allocating small objects out of
   * a linked list of pages called 'the chain'.
   *
   * The chain grows each time when there is no room for a new object in
   * the current page.  The allocated objects cannot be freed individually.
   * It is only possible to free them all at once, by freeing the entire
   * chain.
   *
   * NOTE: The chain allocator may be inefficient if the allocated objects
   * are not much smaller than PAGE_SIZE.
   */
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
240
241
242
  struct chain_allocator {
  	struct linked_page *chain;	/* the chain */
  	unsigned int used_space;	/* total size of objects allocated out
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
243
  					   of the current page */
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
244
245
246
  	gfp_t gfp_mask;		/* mask for allocating pages */
  	int safe_needed;	/* if set, only "safe" pages are allocated */
  };
efd5a8524   Rafael J. Wysocki   PM / hibernate: C...
247
248
  static void chain_init(struct chain_allocator *ca, gfp_t gfp_mask,
  		       int safe_needed)
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
249
250
251
252
253
254
255
256
257
258
259
260
261
  {
  	ca->chain = NULL;
  	ca->used_space = LINKED_PAGE_DATA_SIZE;
  	ca->gfp_mask = gfp_mask;
  	ca->safe_needed = safe_needed;
  }
  
  static void *chain_alloc(struct chain_allocator *ca, unsigned int size)
  {
  	void *ret;
  
  	if (LINKED_PAGE_DATA_SIZE - ca->used_space < size) {
  		struct linked_page *lp;
9c744481c   Rafael J. Wysocki   PM / hibernate: D...
262
263
  		lp = ca->safe_needed ? __get_safe_page(ca->gfp_mask) :
  					get_image_page(ca->gfp_mask, PG_ANY);
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
264
265
266
267
268
269
270
271
272
273
274
  		if (!lp)
  			return NULL;
  
  		lp->next = ca->chain;
  		ca->chain = lp;
  		ca->used_space = 0;
  	}
  	ret = ca->chain->data + ca->used_space;
  	ca->used_space += size;
  	return ret;
  }
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
275
  /**
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
276
   * Data types related to memory bitmaps.
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
277
   *
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
278
279
280
281
282
   * Memory bitmap is a structure consiting of many linked lists of
   * objects.  The main list's elements are of type struct zone_bitmap
   * and each of them corresonds to one zone.  For each zone bitmap
   * object there is a list of objects of type struct bm_block that
   * represent each blocks of bitmap in which information is stored.
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
283
   *
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
284
285
286
287
   * struct memory_bitmap contains a pointer to the main list of zone
   * bitmap objects, a struct bm_position used for browsing the bitmap,
   * and a pointer to the list of pages used for allocating all of the
   * zone bitmap objects and bitmap block objects.
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
288
   *
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
289
290
291
292
   * NOTE: It has to be possible to lay out the bitmap in memory
   * using only allocations of order 0.  Additionally, the bitmap is
   * designed to work with arbitrary number of zones (this is over the
   * top for now, but let's avoid making unnecessary assumptions ;-).
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
293
   *
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
294
295
296
297
   * struct zone_bitmap contains a pointer to a list of bitmap block
   * objects and a pointer to the bitmap block object that has been
   * most recently used for setting bits.  Additionally, it contains the
   * PFNs that correspond to the start and end of the represented zone.
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
298
   *
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
299
300
301
302
   * struct bm_block contains a pointer to the memory page in which
   * information is stored (in the form of a block of bitmap)
   * It also contains the pfns that correspond to the start and end of
   * the represented memory area.
f469f02dc   Joerg Roedel   PM / Hibernate: C...
303
   *
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
304
305
306
   * The memory bitmap is organized as a radix tree to guarantee fast random
   * access to the bits. There is one radix tree for each zone (as returned
   * from create_mem_extents).
f469f02dc   Joerg Roedel   PM / Hibernate: C...
307
   *
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
308
309
310
311
   * One radix tree is represented by one struct mem_zone_bm_rtree. There are
   * two linked lists for the nodes of the tree, one for the inner nodes and
   * one for the leave nodes. The linked leave nodes are used for fast linear
   * access of the memory bitmap.
f469f02dc   Joerg Roedel   PM / Hibernate: C...
312
   *
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
313
   * The struct rtree_node represents one node of the radix tree.
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
314
315
316
   */
  
  #define BM_END_OF_MAP	(~0UL)
8de030732   Wu Fengguang   PM: Trivial fixes
317
  #define BM_BITS_PER_BLOCK	(PAGE_SIZE * BITS_PER_BYTE)
f469f02dc   Joerg Roedel   PM / Hibernate: C...
318
319
  #define BM_BLOCK_SHIFT		(PAGE_SHIFT + 3)
  #define BM_BLOCK_MASK		((1UL << BM_BLOCK_SHIFT) - 1)
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
320

f469f02dc   Joerg Roedel   PM / Hibernate: C...
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
/*
 * struct rtree_node is a wrapper struct to link the nodes
 * of the rtree together for easy linear iteration over
 * bits and easy freeing
 */
struct rtree_node {
	struct list_head list;	/* link in the zone's nodes/leaves list */
	unsigned long *data;	/* one page: child pointers or bitmap bits */
};

/*
 * struct mem_zone_bm_rtree represents a bitmap used for one
 * populated memory zone.
 */
struct mem_zone_bm_rtree {
	struct list_head list;		/* Link Zones together         */
	struct list_head nodes;		/* Radix Tree inner nodes      */
	struct list_head leaves;	/* Radix Tree leaves           */
	unsigned long start_pfn;	/* Zone start page frame       */
	unsigned long end_pfn;		/* Zone end page frame + 1     */
	struct rtree_node *rtree;	/* Radix Tree Root             */
	int levels;			/* Number of Radix Tree Levels */
	unsigned int blocks;		/* Number of Bitmap Blocks     */
};
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
345
346
347
  /* strcut bm_position is used for browsing memory bitmaps */
  
  struct bm_position {
3a20cb177   Joerg Roedel   PM / Hibernate: I...
348
349
350
351
  	struct mem_zone_bm_rtree *zone;
  	struct rtree_node *node;
  	unsigned long node_pfn;
  	int node_bit;
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
352
353
354
  };
  
  struct memory_bitmap {
f469f02dc   Joerg Roedel   PM / Hibernate: C...
355
  	struct list_head zones;
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
356
  	struct linked_page *p_list;	/* list of pages used to store zone
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
357
358
  					   bitmap objects and bitmap block
  					   objects */
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
359
360
361
362
  	struct bm_position cur;	/* most recently used bit position */
  };
  
  /* Functions that operate on memory bitmaps */
f469f02dc   Joerg Roedel   PM / Hibernate: C...
363
364
365
366
367
368
369
  #define BM_ENTRIES_PER_LEVEL	(PAGE_SIZE / sizeof(unsigned long))
  #if BITS_PER_LONG == 32
  #define BM_RTREE_LEVEL_SHIFT	(PAGE_SHIFT - 2)
  #else
  #define BM_RTREE_LEVEL_SHIFT	(PAGE_SHIFT - 3)
  #endif
  #define BM_RTREE_LEVEL_MASK	((1UL << BM_RTREE_LEVEL_SHIFT) - 1)
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
370
371
  /**
   * alloc_rtree_node - Allocate a new node and add it to the radix tree.
f469f02dc   Joerg Roedel   PM / Hibernate: C...
372
   *
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
373
374
375
   * This function is used to allocate inner nodes as well as the
   * leave nodes of the radix tree. It also adds the node to the
   * corresponding linked list passed in by the *list parameter.
f469f02dc   Joerg Roedel   PM / Hibernate: C...
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
   */
  static struct rtree_node *alloc_rtree_node(gfp_t gfp_mask, int safe_needed,
  					   struct chain_allocator *ca,
  					   struct list_head *list)
  {
  	struct rtree_node *node;
  
  	node = chain_alloc(ca, sizeof(struct rtree_node));
  	if (!node)
  		return NULL;
  
  	node->data = get_image_page(gfp_mask, safe_needed);
  	if (!node->data)
  		return NULL;
  
  	list_add_tail(&node->list, list);
  
  	return node;
  }
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
395
396
  /**
   * add_rtree_block - Add a new leave node to the radix tree.
f469f02dc   Joerg Roedel   PM / Hibernate: C...
397
   *
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
398
399
400
   * The leave nodes need to be allocated in order to keep the leaves
   * linked list in order. This is guaranteed by the zone->blocks
   * counter.
f469f02dc   Joerg Roedel   PM / Hibernate: C...
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
   */
/*
 * add_rtree_block - Add a new leave node to the radix tree.
 *
 * The leave nodes need to be allocated in order to keep the leaves
 * linked list in order. This is guaranteed by the zone->blocks
 * counter.
 *
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int add_rtree_block(struct mem_zone_bm_rtree *zone, gfp_t gfp_mask,
			   int safe_needed, struct chain_allocator *ca)
{
	struct rtree_node *node, *block, **dst;
	unsigned int levels_needed, block_nr;
	int i;

	block_nr = zone->blocks;
	levels_needed = 0;

	/* How many levels do we need for this block nr? */
	while (block_nr) {
		levels_needed += 1;
		block_nr >>= BM_RTREE_LEVEL_SHIFT;
	}

	/* Make sure the rtree has enough levels */
	for (i = zone->levels; i < levels_needed; i++) {
		node = alloc_rtree_node(gfp_mask, safe_needed, ca,
					&zone->nodes);
		if (!node)
			return -ENOMEM;

		/* The old root becomes slot 0 of the new, deeper root. */
		node->data[0] = (unsigned long)zone->rtree;
		zone->rtree = node;
		zone->levels += 1;
	}

	/* Allocate new block */
	block = alloc_rtree_node(gfp_mask, safe_needed, ca, &zone->leaves);
	if (!block)
		return -ENOMEM;

	/* Now walk the rtree to insert the block */
	node = zone->rtree;
	dst = &zone->rtree;
	block_nr = zone->blocks;
	for (i = zone->levels; i > 0; i--) {
		int index;

		/* Materialize missing inner nodes along the path. */
		if (!node) {
			node = alloc_rtree_node(gfp_mask, safe_needed, ca,
						&zone->nodes);
			if (!node)
				return -ENOMEM;
			*dst = node;
		}

		/* Slot of this level is the next group of block_nr bits. */
		index = block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT);
		index &= BM_RTREE_LEVEL_MASK;
		dst = (struct rtree_node **)&((*dst)->data[index]);
		node = *dst;
	}

	zone->blocks += 1;
	*dst = block;

	return 0;
}
  
  static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone,
  			       int clear_nosave_free);
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
464
465
  /**
   * create_zone_bm_rtree - Create a radix tree for one zone.
f469f02dc   Joerg Roedel   PM / Hibernate: C...
466
   *
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
467
468
469
   * Allocated the mem_zone_bm_rtree structure and initializes it.
   * This function also allocated and builds the radix tree for the
   * zone.
f469f02dc   Joerg Roedel   PM / Hibernate: C...
470
   */
efd5a8524   Rafael J. Wysocki   PM / hibernate: C...
471
472
473
474
475
  static struct mem_zone_bm_rtree *create_zone_bm_rtree(gfp_t gfp_mask,
  						      int safe_needed,
  						      struct chain_allocator *ca,
  						      unsigned long start,
  						      unsigned long end)
f469f02dc   Joerg Roedel   PM / Hibernate: C...
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
  {
  	struct mem_zone_bm_rtree *zone;
  	unsigned int i, nr_blocks;
  	unsigned long pages;
  
  	pages = end - start;
  	zone  = chain_alloc(ca, sizeof(struct mem_zone_bm_rtree));
  	if (!zone)
  		return NULL;
  
  	INIT_LIST_HEAD(&zone->nodes);
  	INIT_LIST_HEAD(&zone->leaves);
  	zone->start_pfn = start;
  	zone->end_pfn = end;
  	nr_blocks = DIV_ROUND_UP(pages, BM_BITS_PER_BLOCK);
  
  	for (i = 0; i < nr_blocks; i++) {
  		if (add_rtree_block(zone, gfp_mask, safe_needed, ca)) {
  			free_zone_bm_rtree(zone, PG_UNSAFE_CLEAR);
  			return NULL;
  		}
  	}
  
  	return zone;
  }
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
501
502
  /**
   * free_zone_bm_rtree - Free the memory of the radix tree.
f469f02dc   Joerg Roedel   PM / Hibernate: C...
503
   *
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
504
505
506
   * Free all node pages of the radix tree. The mem_zone_bm_rtree
   * structure itself is not freed here nor are the rtree_node
   * structs.
f469f02dc   Joerg Roedel   PM / Hibernate: C...
507
508
509
510
511
512
513
514
515
516
517
518
   */
  static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone,
  			       int clear_nosave_free)
  {
  	struct rtree_node *node;
  
  	list_for_each_entry(node, &zone->nodes, list)
  		free_image_page(node->data, clear_nosave_free);
  
  	list_for_each_entry(node, &zone->leaves, list)
  		free_image_page(node->data, clear_nosave_free);
  }
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
519
520
  static void memory_bm_position_reset(struct memory_bitmap *bm)
  {
3a20cb177   Joerg Roedel   PM / Hibernate: I...
521
522
523
524
525
526
  	bm->cur.zone = list_entry(bm->zones.next, struct mem_zone_bm_rtree,
  				  list);
  	bm->cur.node = list_entry(bm->cur.zone->leaves.next,
  				  struct rtree_node, list);
  	bm->cur.node_pfn = 0;
  	bm->cur.node_bit = 0;
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
527
528
529
  }
  
  static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free);
846705deb   Rafael J. Wysocki   Hibernate: Take o...
530
531
532
533
534
/*
 * struct mem_extent - Contiguous range of page frame numbers.
 * @hook:  Link in the list built by create_mem_extents().
 * @start: First PFN of the range.
 * @end:   End PFN of the range (set from zone_end_pfn(), i.e. exclusive).
 */
struct mem_extent {
	struct list_head hook;
	unsigned long start;
	unsigned long end;
};
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
535
  /**
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
536
537
   * free_mem_extents - Free a list of memory extents.
   * @list: List of extents to free.
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
538
   */
846705deb   Rafael J. Wysocki   Hibernate: Take o...
539
540
541
  static void free_mem_extents(struct list_head *list)
  {
  	struct mem_extent *ext, *aux;
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
542

846705deb   Rafael J. Wysocki   Hibernate: Take o...
543
544
545
546
547
548
549
  	list_for_each_entry_safe(ext, aux, list, hook) {
  		list_del(&ext->hook);
  		kfree(ext);
  	}
  }
  
  /**
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
550
551
552
553
554
   * create_mem_extents - Create a list of memory extents.
   * @list: List to put the extents into.
   * @gfp_mask: Mask to use for memory allocations.
   *
   * The extents represent contiguous ranges of PFNs.
846705deb   Rafael J. Wysocki   Hibernate: Take o...
555
556
   */
  static int create_mem_extents(struct list_head *list, gfp_t gfp_mask)
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
557
  {
846705deb   Rafael J. Wysocki   Hibernate: Take o...
558
  	struct zone *zone;
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
559

846705deb   Rafael J. Wysocki   Hibernate: Take o...
560
  	INIT_LIST_HEAD(list);
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
561

ee99c71c5   KOSAKI Motohiro   mm: introduce for...
562
  	for_each_populated_zone(zone) {
846705deb   Rafael J. Wysocki   Hibernate: Take o...
563
564
  		unsigned long zone_start, zone_end;
  		struct mem_extent *ext, *cur, *aux;
846705deb   Rafael J. Wysocki   Hibernate: Take o...
565
  		zone_start = zone->zone_start_pfn;
c33bc315f   Xishi Qiu   mm: use zone_end_...
566
  		zone_end = zone_end_pfn(zone);
846705deb   Rafael J. Wysocki   Hibernate: Take o...
567
568
569
570
  
  		list_for_each_entry(ext, list, hook)
  			if (zone_start <= ext->end)
  				break;
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
571

846705deb   Rafael J. Wysocki   Hibernate: Take o...
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
  		if (&ext->hook == list || zone_end < ext->start) {
  			/* New extent is necessary */
  			struct mem_extent *new_ext;
  
  			new_ext = kzalloc(sizeof(struct mem_extent), gfp_mask);
  			if (!new_ext) {
  				free_mem_extents(list);
  				return -ENOMEM;
  			}
  			new_ext->start = zone_start;
  			new_ext->end = zone_end;
  			list_add_tail(&new_ext->hook, &ext->hook);
  			continue;
  		}
  
  		/* Merge this zone's range of PFNs with the existing one */
  		if (zone_start < ext->start)
  			ext->start = zone_start;
  		if (zone_end > ext->end)
  			ext->end = zone_end;
  
  		/* More merging may be possible */
  		cur = ext;
  		list_for_each_entry_safe_continue(cur, aux, list, hook) {
  			if (zone_end < cur->start)
  				break;
  			if (zone_end < cur->end)
  				ext->end = cur->end;
  			list_del(&cur->hook);
  			kfree(cur);
  		}
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
603
  	}
846705deb   Rafael J. Wysocki   Hibernate: Take o...
604
605
  
  	return 0;
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
606
607
608
  }
  
  /**
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
609
610
   * memory_bm_create - Allocate memory for a memory bitmap.
   */
efd5a8524   Rafael J. Wysocki   PM / hibernate: C...
611
612
  static int memory_bm_create(struct memory_bitmap *bm, gfp_t gfp_mask,
  			    int safe_needed)
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
613
614
  {
  	struct chain_allocator ca;
846705deb   Rafael J. Wysocki   Hibernate: Take o...
615
616
617
  	struct list_head mem_extents;
  	struct mem_extent *ext;
  	int error;
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
618
619
  
  	chain_init(&ca, gfp_mask, safe_needed);
f469f02dc   Joerg Roedel   PM / Hibernate: C...
620
  	INIT_LIST_HEAD(&bm->zones);
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
621

846705deb   Rafael J. Wysocki   Hibernate: Take o...
622
623
624
  	error = create_mem_extents(&mem_extents, gfp_mask);
  	if (error)
  		return error;
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
625

846705deb   Rafael J. Wysocki   Hibernate: Take o...
626
  	list_for_each_entry(ext, &mem_extents, hook) {
f469f02dc   Joerg Roedel   PM / Hibernate: C...
627
  		struct mem_zone_bm_rtree *zone;
f469f02dc   Joerg Roedel   PM / Hibernate: C...
628
629
630
  
  		zone = create_zone_bm_rtree(gfp_mask, safe_needed, &ca,
  					    ext->start, ext->end);
9047eb629   Joerg Roedel   PM / Hibernate: R...
631
632
  		if (!zone) {
  			error = -ENOMEM;
f469f02dc   Joerg Roedel   PM / Hibernate: C...
633
  			goto Error;
9047eb629   Joerg Roedel   PM / Hibernate: R...
634
  		}
f469f02dc   Joerg Roedel   PM / Hibernate: C...
635
  		list_add_tail(&zone->list, &bm->zones);
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
636
  	}
846705deb   Rafael J. Wysocki   Hibernate: Take o...
637

b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
638
639
  	bm->p_list = ca.chain;
  	memory_bm_position_reset(bm);
846705deb   Rafael J. Wysocki   Hibernate: Take o...
640
641
642
   Exit:
  	free_mem_extents(&mem_extents);
  	return error;
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
643

846705deb   Rafael J. Wysocki   Hibernate: Take o...
644
   Error:
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
645
646
  	bm->p_list = ca.chain;
  	memory_bm_free(bm, PG_UNSAFE_CLEAR);
846705deb   Rafael J. Wysocki   Hibernate: Take o...
647
  	goto Exit;
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
648
649
650
  }
  
/**
 * memory_bm_free - Free memory occupied by the memory bitmap.
 * @bm: Memory bitmap.
 * @clear_nosave_free: Passed through to the helpers that release the pages
 *	(presumably clears a page-nosave-free flag on them -- confirm in
 *	free_zone_bm_rtree()/free_list_of_pages()).
 */
static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free)
{
	struct mem_zone_bm_rtree *zone;

	/* Release the radix tree of every zone tracked by the bitmap. */
	list_for_each_entry(zone, &bm->zones, list)
		free_zone_bm_rtree(zone, clear_nosave_free);

	/* Release the chain of pages backing the bitmap's allocations. */
	free_list_of_pages(bm->p_list, clear_nosave_free);

	/* Leave @bm in a state in which it can be set up again. */
	INIT_LIST_HEAD(&bm->zones);
}
  
/**
 * memory_bm_find_bit - Find the bit for a given PFN in a memory bitmap.
 * @bm: Memory bitmap.
 * @pfn: Page frame to locate the bit for.
 * @addr: Output; address of the bitmap leaf page containing the bit.
 * @bit_nr: Output; position of the bit within that page.
 *
 * Find the bit in memory bitmap @bm that corresponds to the given PFN.
 * The cur.zone, cur.block and cur.node_pfn members of @bm are updated.
 *
 * Walk the radix tree to find the page containing the bit that represents @pfn
 * and return the position of the bit in @addr and @bit_nr.
 *
 * Return: 0 on success, -EFAULT when @pfn is not covered by any zone of @bm.
 */
static int memory_bm_find_bit(struct memory_bitmap *bm, unsigned long pfn,
			      void **addr, unsigned int *bit_nr)
{
	struct mem_zone_bm_rtree *curr, *zone;
	struct rtree_node *node;
	int i, block_nr;

	/* Fast path: the zone used by the previous lookup often matches. */
	zone = bm->cur.zone;

	if (pfn >= zone->start_pfn && pfn < zone->end_pfn)
		goto zone_found;

	zone = NULL;

	/* Find the right zone */
	list_for_each_entry(curr, &bm->zones, list) {
		if (pfn >= curr->start_pfn && pfn < curr->end_pfn) {
			zone = curr;
			break;
		}
	}

	if (!zone)
		return -EFAULT;

zone_found:
	/*
	 * We have found the zone. Now walk the radix tree to find the leaf node
	 * for our PFN.
	 */
	/* Fast path: the cached leaf node may already cover this PFN. */
	node = bm->cur.node;
	if (((pfn - zone->start_pfn) & ~BM_BLOCK_MASK) == bm->cur.node_pfn)
		goto node_found;

	node      = zone->rtree;
	block_nr  = (pfn - zone->start_pfn) >> BM_BLOCK_SHIFT;

	/* Descend one radix-tree level per iteration, top level first. */
	for (i = zone->levels; i > 0; i--) {
		int index;

		index = block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT);
		index &= BM_RTREE_LEVEL_MASK;
		BUG_ON(node->data[index] == 0);
		node = (struct rtree_node *)node->data[index];
	}

node_found:
	/* Update last position */
	bm->cur.zone = zone;
	bm->cur.node = node;
	bm->cur.node_pfn = (pfn - zone->start_pfn) & ~BM_BLOCK_MASK;

	/* Set return values */
	*addr = node->data;
	*bit_nr = (pfn - zone->start_pfn) & BM_BLOCK_MASK;

	return 0;
}
74dfd666d   Rafael J. Wysocki   swsusp: do not us...
726
727
728
729
  static void memory_bm_set_bit(struct memory_bitmap *bm, unsigned long pfn)
  {
  	void *addr;
  	unsigned int bit;
a82f7119f   Rafael J. Wysocki   Hibernation: Fix ...
730
  	int error;
74dfd666d   Rafael J. Wysocki   swsusp: do not us...
731

a82f7119f   Rafael J. Wysocki   Hibernation: Fix ...
732
733
  	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
  	BUG_ON(error);
74dfd666d   Rafael J. Wysocki   swsusp: do not us...
734
735
  	set_bit(bit, addr);
  }
a82f7119f   Rafael J. Wysocki   Hibernation: Fix ...
736
737
738
739
740
741
742
743
744
  static int mem_bm_set_bit_check(struct memory_bitmap *bm, unsigned long pfn)
  {
  	void *addr;
  	unsigned int bit;
  	int error;
  
  	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
  	if (!error)
  		set_bit(bit, addr);
07a338236   Joerg Roedel   PM / Hibernate: A...
745

a82f7119f   Rafael J. Wysocki   Hibernation: Fix ...
746
747
  	return error;
  }
74dfd666d   Rafael J. Wysocki   swsusp: do not us...
748
749
750
751
  static void memory_bm_clear_bit(struct memory_bitmap *bm, unsigned long pfn)
  {
  	void *addr;
  	unsigned int bit;
a82f7119f   Rafael J. Wysocki   Hibernation: Fix ...
752
  	int error;
74dfd666d   Rafael J. Wysocki   swsusp: do not us...
753

a82f7119f   Rafael J. Wysocki   Hibernation: Fix ...
754
755
  	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
  	BUG_ON(error);
74dfd666d   Rafael J. Wysocki   swsusp: do not us...
756
757
  	clear_bit(bit, addr);
  }
fdd64ed54   Joerg Roedel   PM / hibernate: I...
758
759
760
761
762
763
764
/*
 * Clear the bit last returned by memory_bm_next_pfn().
 *
 * memory_bm_next_pfn() stores bit + 1 in cur.node_bit, so step back by
 * one; the max() keeps the index valid if no bit has been returned yet.
 */
static void memory_bm_clear_current(struct memory_bitmap *bm)
{
	int bit;

	bit = max(bm->cur.node_bit - 1, 0);
	clear_bit(bit, bm->cur.node->data);
}
74dfd666d   Rafael J. Wysocki   swsusp: do not us...
765
766
767
768
  static int memory_bm_test_bit(struct memory_bitmap *bm, unsigned long pfn)
  {
  	void *addr;
  	unsigned int bit;
9047eb629   Joerg Roedel   PM / Hibernate: R...
769
  	int error;
74dfd666d   Rafael J. Wysocki   swsusp: do not us...
770

a82f7119f   Rafael J. Wysocki   Hibernation: Fix ...
771
772
  	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
  	BUG_ON(error);
9047eb629   Joerg Roedel   PM / Hibernate: R...
773
  	return test_bit(bit, addr);
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
774
  }
69643279a   Rafael J. Wysocki   Hibernate: Do not...
775
776
777
778
  static bool memory_bm_pfn_present(struct memory_bitmap *bm, unsigned long pfn)
  {
  	void *addr;
  	unsigned int bit;
07a338236   Joerg Roedel   PM / Hibernate: A...
779

9047eb629   Joerg Roedel   PM / Hibernate: R...
780
  	return !memory_bm_find_bit(bm, pfn, &addr, &bit);
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
781
  }
3a20cb177   Joerg Roedel   PM / Hibernate: I...
782
/*
 * rtree_next_node - Jump to the next leaf node.
 *
 * Set the position to the beginning of the next node in the
 * memory bitmap. This is either the next node in the current
 * zone's radix tree or the first node in the radix tree of the
 * next zone.
 *
 * Return true if there is a next node, false otherwise.
 */
static bool rtree_next_node(struct memory_bitmap *bm)
{
	/* Try the next leaf node of the current zone first. */
	if (!list_is_last(&bm->cur.node->list, &bm->cur.zone->leaves)) {
		bm->cur.node = list_entry(bm->cur.node->list.next,
					  struct rtree_node, list);
		bm->cur.node_pfn += BM_BITS_PER_BLOCK;
		bm->cur.node_bit  = 0;
		/* Walking a whole bitmap can take a while; keep the
		 * soft-lockup detector quiet. */
		touch_softlockup_watchdog();
		return true;
	}

	/* No more nodes, goto next zone */
	if (!list_is_last(&bm->cur.zone->list, &bm->zones)) {
		bm->cur.zone = list_entry(bm->cur.zone->list.next,
				  struct mem_zone_bm_rtree, list);
		/* Restart at the first leaf of the new zone. */
		bm->cur.node = list_entry(bm->cur.zone->leaves.next,
					  struct rtree_node, list);
		bm->cur.node_pfn = 0;
		bm->cur.node_bit = 0;
		return true;
	}

	/* No more zones */
	return false;
}
9047eb629   Joerg Roedel   PM / Hibernate: R...
817
/**
 * memory_bm_rtree_next_pfn - Find the next set bit in a memory bitmap.
 * @bm: Memory bitmap.
 *
 * Starting from the last returned position this function searches for the next
 * set bit in @bm and returns the PFN represented by it.  If no more bits are
 * set, BM_END_OF_MAP is returned.
 *
 * It is required to run memory_bm_position_reset() before the first call to
 * this function for the given memory bitmap.
 */
static unsigned long memory_bm_next_pfn(struct memory_bitmap *bm)
{
	unsigned long bits, pfn, pages;
	int bit;

	do {
		/*
		 * Number of valid bits in the current leaf node: the last
		 * node of a zone may cover fewer than BM_BITS_PER_BLOCK
		 * page frames.
		 */
		pages	  = bm->cur.zone->end_pfn - bm->cur.zone->start_pfn;
		bits      = min(pages - bm->cur.node_pfn, BM_BITS_PER_BLOCK);
		bit	  = find_next_bit(bm->cur.node->data, bits,
					  bm->cur.node_bit);
		if (bit < bits) {
			pfn = bm->cur.zone->start_pfn + bm->cur.node_pfn + bit;
			/* Remember the position right after the found bit. */
			bm->cur.node_bit = bit + 1;
			return pfn;
		}
	} while (rtree_next_node(bm));

	return BM_END_OF_MAP;
}
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
847
848
849
/*
 * This structure represents a range of page frames the contents of which
 * should not be saved during hibernation.
 */
struct nosave_region {
	struct list_head list;		/* Link in the nosave_regions list. */
	unsigned long start_pfn;	/* First page frame of the range. */
	unsigned long end_pfn;		/* One past the last page frame. */
};

/* All registered nosave regions, in registration order. */
static LIST_HEAD(nosave_regions);
307c5971c   Rafael J. Wysocki   PM / hibernate: R...
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
/*
 * Pass every page of a zone's radix tree -- inner nodes as well as
 * leaves -- to recycle_safe_page().
 */
static void recycle_zone_bm_rtree(struct mem_zone_bm_rtree *zone)
{
	struct rtree_node *node;

	list_for_each_entry(node, &zone->nodes, list)
		recycle_safe_page(node->data);

	list_for_each_entry(node, &zone->leaves, list)
		recycle_safe_page(node->data);
}
  
  static void memory_bm_recycle(struct memory_bitmap *bm)
  {
  	struct mem_zone_bm_rtree *zone;
  	struct linked_page *p_list;
  
  	list_for_each_entry(zone, &bm->zones, list)
  		recycle_zone_bm_rtree(zone);
  
  	p_list = bm->p_list;
  	while (p_list) {
  		struct linked_page *lp = p_list;
  
  		p_list = lp->next;
  		recycle_safe_page(lp);
  	}
  }
74dfd666d   Rafael J. Wysocki   swsusp: do not us...
885
/**
 * register_nosave_region - Register a region of unsaveable memory.
 *
 * Register a range of page frames the contents of which should not be saved
 * during hibernation (to be used in the early initialization code).
 */
void __init __register_nosave_region(unsigned long start_pfn,
				     unsigned long end_pfn, int use_kmalloc)
{
	struct nosave_region *region;

	/* Empty or inverted ranges are silently ignored. */
	if (start_pfn >= end_pfn)
		return;

	if (!list_empty(&nosave_regions)) {
		/* Try to extend the previous region (they should be sorted) */
		region = list_entry(nosave_regions.prev,
					struct nosave_region, list);
		if (region->end_pfn == start_pfn) {
			region->end_pfn = end_pfn;
			goto Report;
		}
	}
	if (use_kmalloc) {
		/* During init, this shouldn't fail */
		region = kmalloc(sizeof(struct nosave_region), GFP_KERNEL);
		BUG_ON(!region);
	} else {
		/* This allocation cannot fail */
		region = memblock_virt_alloc(sizeof(struct nosave_region), 0);
	}
	region->start_pfn = start_pfn;
	region->end_pfn = end_pfn;
	list_add_tail(&region->list, &nosave_regions);
 Report:
	printk(KERN_INFO "PM: Registered nosave memory: [mem %#010llx-%#010llx]\n",
		(unsigned long long) start_pfn << PAGE_SHIFT,
		((unsigned long long) end_pfn << PAGE_SHIFT) - 1);
}
  
  /*
   * Set bits in this map correspond to the page frames the contents of which
   * should not be saved during the suspend.
   */
  static struct memory_bitmap *forbidden_pages_map;
  
  /* Set bits in this map correspond to free page frames. */
  static struct memory_bitmap *free_pages_map;
  
  /*
   * Each page frame allocated for creating the image is marked by setting the
   * corresponding bits in forbidden_pages_map and free_pages_map simultaneously
   */
  
  void swsusp_set_page_free(struct page *page)
  {
  	if (free_pages_map)
  		memory_bm_set_bit(free_pages_map, page_to_pfn(page));
  }
  
  static int swsusp_page_is_free(struct page *page)
  {
  	return free_pages_map ?
  		memory_bm_test_bit(free_pages_map, page_to_pfn(page)) : 0;
  }
  
  void swsusp_unset_page_free(struct page *page)
  {
  	if (free_pages_map)
  		memory_bm_clear_bit(free_pages_map, page_to_pfn(page));
  }
  
  static void swsusp_set_page_forbidden(struct page *page)
  {
  	if (forbidden_pages_map)
  		memory_bm_set_bit(forbidden_pages_map, page_to_pfn(page));
  }
  
  int swsusp_page_is_forbidden(struct page *page)
  {
  	return forbidden_pages_map ?
  		memory_bm_test_bit(forbidden_pages_map, page_to_pfn(page)) : 0;
  }
  
  static void swsusp_unset_page_forbidden(struct page *page)
  {
  	if (forbidden_pages_map)
  		memory_bm_clear_bit(forbidden_pages_map, page_to_pfn(page));
  }
  
/**
 * mark_nosave_pages - Mark pages that should not be saved.
 * @bm: Memory bitmap.
 *
 * Set the bits in @bm that correspond to the page frames the contents of which
 * should not be saved.
 */
static void mark_nosave_pages(struct memory_bitmap *bm)
{
	struct nosave_region *region;

	if (list_empty(&nosave_regions))
		return;

	list_for_each_entry(region, &nosave_regions, list) {
		unsigned long pfn;

		pr_debug("PM: Marking nosave pages: [mem %#010llx-%#010llx]\n",
			 (unsigned long long) region->start_pfn << PAGE_SHIFT,
			 ((unsigned long long) region->end_pfn << PAGE_SHIFT)
				- 1);

		for (pfn = region->start_pfn; pfn < region->end_pfn; pfn++)
			if (pfn_valid(pfn)) {
				/*
				 * It is safe to ignore the result of
				 * mem_bm_set_bit_check() here, since we won't
				 * touch the PFNs for which the error is
				 * returned anyway.
				 */
				mem_bm_set_bit_check(bm, pfn);
			}
	}
}
  
/**
 * create_basic_memory_bitmaps - Create bitmaps to hold basic page information.
 *
 * Create bitmaps needed for marking page frames that should not be saved and
 * free page frames.  The forbidden_pages_map and free_pages_map pointers are
 * only modified if everything goes well, because we don't want the bits to be
 * touched before both bitmaps are set up.
 *
 * Return: 0 on success (or if the bitmaps already exist), -ENOMEM on failure.
 */
int create_basic_memory_bitmaps(void)
{
	struct memory_bitmap *bm1, *bm2;
	int error = 0;

	/* Either both maps exist already or neither does. */
	if (forbidden_pages_map && free_pages_map)
		return 0;
	else
		BUG_ON(forbidden_pages_map || free_pages_map);

	bm1 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
	if (!bm1)
		return -ENOMEM;

	error = memory_bm_create(bm1, GFP_KERNEL, PG_ANY);
	if (error)
		goto Free_first_object;

	bm2 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
	if (!bm2)
		goto Free_first_bitmap;

	error = memory_bm_create(bm2, GFP_KERNEL, PG_ANY);
	if (error)
		goto Free_second_object;

	/* Publish the globals only after both bitmaps are fully set up. */
	forbidden_pages_map = bm1;
	free_pages_map = bm2;
	mark_nosave_pages(forbidden_pages_map);

	pr_debug("PM: Basic memory bitmaps created\n");

	return 0;

 Free_second_object:
	kfree(bm2);
 Free_first_bitmap:
	memory_bm_free(bm1, PG_UNSAFE_CLEAR);
 Free_first_object:
	kfree(bm1);
	return -ENOMEM;
}
  
/**
 * free_basic_memory_bitmaps - Free memory bitmaps holding basic information.
 *
 * Free memory bitmaps allocated by create_basic_memory_bitmaps().  The
 * auxiliary pointers are necessary so that the bitmaps themselves are not
 * referred to while they are being freed.
 */
void free_basic_memory_bitmaps(void)
{
	struct memory_bitmap *bm1, *bm2;

	if (WARN_ON(!(forbidden_pages_map && free_pages_map)))
		return;

	/* Detach the global pointers first, then free through the locals. */
	bm1 = forbidden_pages_map;
	bm2 = free_pages_map;
	forbidden_pages_map = NULL;
	free_pages_map = NULL;
	memory_bm_free(bm1, PG_UNSAFE_CLEAR);
	kfree(bm1);
	memory_bm_free(bm2, PG_UNSAFE_CLEAR);
	kfree(bm2);

	pr_debug("PM: Basic memory bitmaps freed\n");
}
1ad1410f6   Anisse Astier   PM / Hibernate: a...
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
  void clear_free_pages(void)
  {
  #ifdef CONFIG_PAGE_POISONING_ZERO
  	struct memory_bitmap *bm = free_pages_map;
  	unsigned long pfn;
  
  	if (WARN_ON(!(free_pages_map)))
  		return;
  
  	memory_bm_position_reset(bm);
  	pfn = memory_bm_next_pfn(bm);
  	while (pfn != BM_END_OF_MAP) {
  		if (pfn_valid(pfn))
  			clear_highpage(pfn_to_page(pfn));
  
  		pfn = memory_bm_next_pfn(bm);
  	}
  	memory_bm_position_reset(bm);
  	pr_info("PM: free pages cleared after restore
  ");
  #endif /* PAGE_POISONING_ZERO */
  }
74dfd666d   Rafael J. Wysocki   swsusp: do not us...
1104
  /**
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
1105
1106
1107
1108
1109
1110
   * snapshot_additional_pages - Estimate the number of extra pages needed.
   * @zone: Memory zone to carry out the computation for.
   *
   * Estimate the number of additional pages needed for setting up a hibernation
   * image data structures for @zone (usually, the returned value is greater than
   * the exact number).
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
1111
   */
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
1112
1113
  unsigned int snapshot_additional_pages(struct zone *zone)
  {
f469f02dc   Joerg Roedel   PM / Hibernate: C...
1114
  	unsigned int rtree, nodes;
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
1115

f469f02dc   Joerg Roedel   PM / Hibernate: C...
1116
1117
1118
1119
1120
1121
1122
  	rtree = nodes = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
  	rtree += DIV_ROUND_UP(rtree * sizeof(struct rtree_node),
  			      LINKED_PAGE_DATA_SIZE);
  	while (nodes > 1) {
  		nodes = DIV_ROUND_UP(nodes, BM_ENTRIES_PER_LEVEL);
  		rtree += nodes;
  	}
9047eb629   Joerg Roedel   PM / Hibernate: R...
1123
  	return 2 * rtree;
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
1124
  }
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
1125
1126
#ifdef CONFIG_HIGHMEM
/**
 * count_free_highmem_pages - Compute the total number of free highmem pages.
 *
 * The returned number is system-wide.
 */
static unsigned int count_free_highmem_pages(void)
{
	struct zone *zone;
	unsigned int cnt = 0;

	/* Sum the NR_FREE_PAGES counter of every populated highmem zone. */
	for_each_populated_zone(zone)
		if (is_highmem(zone))
			cnt += zone_page_state(zone, NR_FREE_PAGES);

	return cnt;
}
  
/**
 * saveable_highmem_page - Check if a highmem page is saveable.
 *
 * Determine whether a highmem page should be included in a hibernation image.
 *
 * We should save the page if it isn't Nosave or NosaveFree, or Reserved,
 * and it isn't part of a free chunk of pages.
 *
 * Return: the page if it should be saved, NULL otherwise.
 */
static struct page *saveable_highmem_page(struct zone *zone, unsigned long pfn)
{
	struct page *page;

	if (!pfn_valid(pfn))
		return NULL;

	page = pfn_to_page(pfn);
	/* Only consider pages that actually belong to @zone. */
	if (page_zone(page) != zone)
		return NULL;

	BUG_ON(!PageHighMem(page));

	if (swsusp_page_is_forbidden(page) ||  swsusp_page_is_free(page) ||
	    PageReserved(page))
		return NULL;

	/* Skip debug guard pages (see page_is_guard()). */
	if (page_is_guard(page))
		return NULL;

	return page;
}
  
/**
 * count_highmem_pages - Compute the total number of saveable highmem pages.
 */
static unsigned int count_highmem_pages(void)
{
	struct zone *zone;
	unsigned int n = 0;

	for_each_populated_zone(zone) {
		unsigned long pfn, max_zone_pfn;

		if (!is_highmem(zone))
			continue;

		/*
		 * Mark free pages first -- presumably so that
		 * saveable_highmem_page() can skip them via
		 * swsusp_page_is_free(); confirm in mark_free_pages().
		 */
		mark_free_pages(zone);
		max_zone_pfn = zone_end_pfn(zone);
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (saveable_highmem_page(zone, pfn))
				n++;
	}
	return n;
}
#else
/* !CONFIG_HIGHMEM: there are no highmem pages, so none can be saveable. */
static inline void *saveable_highmem_page(struct zone *z, unsigned long p)
{
	return NULL;
}
#endif /* CONFIG_HIGHMEM */
f6143aa60   Rafael J. Wysocki   [PATCH] swsusp: R...
1197
/**
 * saveable_page - Check if the given page is saveable.
 *
 * Determine whether a non-highmem page should be included in a hibernation
 * image.
 *
 * We should save the page if it isn't Nosave, and is not in the range
 * of pages statically defined as 'unsaveable', and it isn't part of
 * a free chunk of pages.
 *
 * Return: the page if it should be saved, NULL otherwise.
 */
static struct page *saveable_page(struct zone *zone, unsigned long pfn)
{
	struct page *page;

	if (!pfn_valid(pfn))
		return NULL;

	page = pfn_to_page(pfn);
	/* Only consider pages that actually belong to @zone. */
	if (page_zone(page) != zone)
		return NULL;

	BUG_ON(PageHighMem(page));

	if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page))
		return NULL;

	/*
	 * Reserved pages are skipped unless they are present in the kernel
	 * page tables and not in a statically defined 'nosave' range.
	 */
	if (PageReserved(page)
	    && (!kernel_page_present(page) || pfn_is_nosave(pfn)))
		return NULL;

	/* Skip debug guard pages (see page_is_guard()). */
	if (page_is_guard(page))
		return NULL;

	return page;
}
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
1230
/**
 * count_data_pages - Compute the total number of saveable non-highmem pages.
 */
static unsigned int count_data_pages(void)
{
	struct zone *zone;
	unsigned long pfn, max_zone_pfn;
	unsigned int n = 0;

	for_each_populated_zone(zone) {
		if (is_highmem(zone))
			continue;

		/* See the matching call in count_highmem_pages(). */
		mark_free_pages(zone);
		max_zone_pfn = zone_end_pfn(zone);
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (saveable_page(zone, pfn))
				n++;
	}
	return n;
}
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
1251
1252
  /*
   * This is needed, because copy_page and memcpy are not usable for copying
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
1253
1254
1255
   * task structs.
   */
  static inline void do_copy_page(long *dst, long *src)
f623f0db8   Rafael J. Wysocki   [PATCH] swsusp: F...
1256
1257
  {
  	int n;
f623f0db8   Rafael J. Wysocki   [PATCH] swsusp: F...
1258
1259
1260
  	for (n = PAGE_SIZE / sizeof(long); n; n--)
  		*dst++ = *src++;
  }
8a235efad   Rafael J. Wysocki   Hibernation: Hand...
1261
  /**
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
1262
1263
1264
1265
1266
   * safe_copy_page - Copy a page in a safe way.
   *
   * Check if the page we are going to copy is marked as present in the kernel
   * page tables (this always is the case if CONFIG_DEBUG_PAGEALLOC is not set
   * and in that case kernel_page_present() always returns 'true').
8a235efad   Rafael J. Wysocki   Hibernation: Hand...
1267
1268
1269
1270
1271
1272
1273
1274
1275
1276
1277
   */
  static void safe_copy_page(void *dst, struct page *s_page)
  {
  	if (kernel_page_present(s_page)) {
  		do_copy_page(dst, page_address(s_page));
  	} else {
  		kernel_map_pages(s_page, 1, 1);
  		do_copy_page(dst, page_address(s_page));
  		kernel_map_pages(s_page, 1, 0);
  	}
  }
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
1278
  #ifdef CONFIG_HIGHMEM
efd5a8524   Rafael J. Wysocki   PM / hibernate: C...
1279
  static inline struct page *page_is_saveable(struct zone *zone, unsigned long pfn)
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
1280
1281
  {
  	return is_highmem(zone) ?
846705deb   Rafael J. Wysocki   Hibernate: Take o...
1282
  		saveable_highmem_page(zone, pfn) : saveable_page(zone, pfn);
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
1283
  }
8a235efad   Rafael J. Wysocki   Hibernation: Hand...
1284
/*
 * Copy the page at @src_pfn to the one at @dst_pfn, mapping highmem
 * pages temporarily with kmap_atomic() where necessary.
 */
static void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
{
	struct page *s_page, *d_page;
	void *src, *dst;

	s_page = pfn_to_page(src_pfn);
	d_page = pfn_to_page(dst_pfn);
	if (PageHighMem(s_page)) {
		/* Both pages may be highmem; copy under atomic mappings. */
		src = kmap_atomic(s_page);
		dst = kmap_atomic(d_page);
		do_copy_page(dst, src);
		kunmap_atomic(dst);
		kunmap_atomic(src);
	} else {
		if (PageHighMem(d_page)) {
			/*
			 * The page pointed to by src may contain some kernel
			 * data modified by kmap_atomic()
			 */
			safe_copy_page(buffer, s_page);
			dst = kmap_atomic(d_page);
			copy_page(dst, buffer);
			kunmap_atomic(dst);
		} else {
			/* Neither page is highmem; copy directly. */
			safe_copy_page(page_address(d_page), s_page);
		}
	}
}
  #else
846705deb   Rafael J. Wysocki   Hibernate: Take o...
1313
  #define page_is_saveable(zone, pfn)	saveable_page(zone, pfn)
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
1314

8a235efad   Rafael J. Wysocki   Hibernation: Hand...
1315
  static inline void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
1316
  {
8a235efad   Rafael J. Wysocki   Hibernation: Hand...
1317
1318
  	safe_copy_page(page_address(pfn_to_page(dst_pfn)),
  				pfn_to_page(src_pfn));
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
1319
1320
  }
  #endif /* CONFIG_HIGHMEM */
efd5a8524   Rafael J. Wysocki   PM / hibernate: C...
1321
1322
/*
 * copy_data_pages - Copy every saveable page into a preallocated image page.
 * @copy_bm: Bitmap of preallocated destination page frames.
 * @orig_bm: Bitmap to be filled with the pfns of saveable pages.
 *
 * First pass: mark every saveable page in @orig_bm.  Second pass: walk both
 * bitmaps in lock step, copying each marked source page into the next
 * destination frame taken from @copy_bm.
 */
static void copy_data_pages(struct memory_bitmap *copy_bm,
			    struct memory_bitmap *orig_bm)
{
	struct zone *zone;
	unsigned long pfn;

	for_each_populated_zone(zone) {
		unsigned long max_zone_pfn;

		/* Mark free pages so they are skipped by the saveability check. */
		mark_free_pages(zone);
		max_zone_pfn = zone_end_pfn(zone);
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (page_is_saveable(zone, pfn))
				memory_bm_set_bit(orig_bm, pfn);
	}
	/* Rewind both bitmaps so the copy loop starts from the first set bit. */
	memory_bm_position_reset(orig_bm);
	memory_bm_position_reset(copy_bm);
	for(;;) {
		pfn = memory_bm_next_pfn(orig_bm);
		if (unlikely(pfn == BM_END_OF_MAP))
			break;
		copy_data_page(memory_bm_next_pfn(copy_bm), pfn);
	}
}
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
1344
1345
1346
1347
/* Total number of image pages */
static unsigned int nr_copy_pages;
/* Number of pages needed for saving the original pfns of the image pages */
static unsigned int nr_meta_pages;
/*
 * Numbers of normal and highmem page frames allocated for hibernation image
 * before suspending devices.  Not static: shared with other PM code.
 */
unsigned int alloc_normal, alloc_highmem;
/*
 * Memory bitmap used for marking saveable pages (during hibernation) or
 * hibernation image pages (during restore)
 */
static struct memory_bitmap orig_bm;
/*
 * Memory bitmap used during hibernation for marking allocated page frames that
 * will contain copies of saveable pages.  During restore it is initially used
 * for marking hibernation image pages, but then the set bits from it are
 * duplicated in @orig_bm and it is released.  On highmem systems it is next
 * used for marking "safe" highmem pages, but it has to be reinitialized for
 * this purpose.
 */
static struct memory_bitmap copy_bm;
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
1367

25761b6eb   Rafael J. Wysocki   [PATCH] swsusp: m...
1368
/**
 * swsusp_free - Free pages allocated for hibernation image.
 *
 * Image pages are allocated before snapshot creation, so they need to be
 * released after resume.  Walks the forbidden and free page bitmaps in
 * parallel and frees every page frame that is marked in both.
 */
void swsusp_free(void)
{
	unsigned long fb_pfn, fr_pfn;

	if (!forbidden_pages_map || !free_pages_map)
		goto out;

	memory_bm_position_reset(forbidden_pages_map);
	memory_bm_position_reset(free_pages_map);

loop:
	fr_pfn = memory_bm_next_pfn(free_pages_map);
	fb_pfn = memory_bm_next_pfn(forbidden_pages_map);

	/*
	 * Find the next bit set in both bitmaps. This is guaranteed to
	 * terminate when fb_pfn == fr_pfn == BM_END_OF_MAP.
	 */
	do {
		if (fb_pfn < fr_pfn)
			fb_pfn = memory_bm_next_pfn(forbidden_pages_map);
		if (fr_pfn < fb_pfn)
			fr_pfn = memory_bm_next_pfn(free_pages_map);
	} while (fb_pfn != fr_pfn);

	if (fr_pfn != BM_END_OF_MAP && pfn_valid(fr_pfn)) {
		struct page *page = pfn_to_page(fr_pfn);

		/* Clear the bit the iterators are currently positioned on. */
		memory_bm_clear_current(forbidden_pages_map);
		memory_bm_clear_current(free_pages_map);
		hibernate_restore_unprotect_page(page_address(page));
		__free_page(page);
		goto loop;
	}

out:
	/* Reset all image bookkeeping for the next hibernation attempt. */
	nr_copy_pages = 0;
	nr_meta_pages = 0;
	restore_pblist = NULL;
	buffer = NULL;
	alloc_normal = 0;
	alloc_highmem = 0;
	hibernate_restore_protection_end();
}
4bb334353   Rafael J. Wysocki   PM/Hibernate: Rew...
1418
1419
1420
  /* Helper functions used for the shrinking of memory. */
  
  #define GFP_IMAGE	(GFP_KERNEL | __GFP_NOWARN)
fe419535d   Rafael J. Wysocki   PM/Hibernate: Mov...
1421
  /**
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
1422
   * preallocate_image_pages - Allocate a number of pages for hibernation image.
4bb334353   Rafael J. Wysocki   PM/Hibernate: Rew...
1423
1424
   * @nr_pages: Number of page frames to allocate.
   * @mask: GFP flags to use for the allocation.
fe419535d   Rafael J. Wysocki   PM/Hibernate: Mov...
1425
   *
4bb334353   Rafael J. Wysocki   PM/Hibernate: Rew...
1426
1427
1428
1429
1430
1431
1432
   * Return value: Number of page frames actually allocated
   */
  static unsigned long preallocate_image_pages(unsigned long nr_pages, gfp_t mask)
  {
  	unsigned long nr_alloc = 0;
  
  	while (nr_pages > 0) {
64a473cb7   Rafael J. Wysocki   PM/Hibernate: Do ...
1433
1434
1435
1436
  		struct page *page;
  
  		page = alloc_image_page(mask);
  		if (!page)
4bb334353   Rafael J. Wysocki   PM/Hibernate: Rew...
1437
  			break;
64a473cb7   Rafael J. Wysocki   PM/Hibernate: Do ...
1438
1439
1440
1441
1442
  		memory_bm_set_bit(&copy_bm, page_to_pfn(page));
  		if (PageHighMem(page))
  			alloc_highmem++;
  		else
  			alloc_normal++;
4bb334353   Rafael J. Wysocki   PM/Hibernate: Rew...
1443
1444
1445
1446
1447
1448
  		nr_pages--;
  		nr_alloc++;
  	}
  
  	return nr_alloc;
  }
6715045dd   Rafael J. Wysocki   PM / Hibernate: A...
1449
1450
  static unsigned long preallocate_image_memory(unsigned long nr_pages,
  					      unsigned long avail_normal)
4bb334353   Rafael J. Wysocki   PM/Hibernate: Rew...
1451
  {
6715045dd   Rafael J. Wysocki   PM / Hibernate: A...
1452
1453
1454
1455
1456
1457
1458
1459
1460
1461
  	unsigned long alloc;
  
  	if (avail_normal <= alloc_normal)
  		return 0;
  
  	alloc = avail_normal - alloc_normal;
  	if (nr_pages < alloc)
  		alloc = nr_pages;
  
  	return preallocate_image_pages(alloc, GFP_IMAGE);
4bb334353   Rafael J. Wysocki   PM/Hibernate: Rew...
1462
1463
1464
1465
1466
1467
1468
1469
1470
  }
  
#ifdef CONFIG_HIGHMEM
/* Preallocate @nr_pages image pages from highmem. */
static unsigned long preallocate_image_highmem(unsigned long nr_pages)
{
	return preallocate_image_pages(nr_pages, GFP_IMAGE | __GFP_HIGHMEM);
}

/**
 *  __fraction - Compute (an approximation of) x * (multiplier / base).
 */
static unsigned long __fraction(u64 x, u64 multiplier, u64 base)
{
	x *= multiplier;
	do_div(x, base);
	return (unsigned long)x;
}

/*
 * preallocate_highmem_fraction - Preallocate the highmem share of @nr_pages,
 * proportional to highmem's fraction (@highmem / @total) of all page frames.
 */
static unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
						  unsigned long highmem,
						  unsigned long total)
{
	unsigned long alloc = __fraction(nr_pages, highmem, total);

	return preallocate_image_pages(alloc, GFP_IMAGE | __GFP_HIGHMEM);
}
#else /* CONFIG_HIGHMEM */
static inline unsigned long preallocate_image_highmem(unsigned long nr_pages)
{
	return 0;
}

static inline unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
							 unsigned long highmem,
							 unsigned long total)
{
	return 0;
}
#endif /* CONFIG_HIGHMEM */
fe419535d   Rafael J. Wysocki   PM/Hibernate: Mov...
1501

4bb334353   Rafael J. Wysocki   PM/Hibernate: Rew...
1502
/**
 * free_unnecessary_pages - Release preallocated pages not needed for the image.
 *
 * Compares the number of preallocated normal/highmem frames against the
 * current counts of saveable pages and frees the surplus, keeping the
 * alloc_normal/alloc_highmem counters and @copy_bm in sync.
 *
 * Returns the total number of pages freed.
 */
static unsigned long free_unnecessary_pages(void)
{
	unsigned long save, to_free_normal, to_free_highmem, free;

	/* 'save' first tracks saveable lowmem pages still to be covered. */
	save = count_data_pages();
	if (alloc_normal >= save) {
		to_free_normal = alloc_normal - save;
		save = 0;
	} else {
		to_free_normal = 0;
		save -= alloc_normal;
	}
	/* Remaining demand spills over into the highmem accounting. */
	save += count_highmem_pages();
	if (alloc_highmem >= save) {
		to_free_highmem = alloc_highmem - save;
	} else {
		to_free_highmem = 0;
		save -= alloc_highmem;
		/* Uncovered highmem demand must be served by normal pages. */
		if (to_free_normal > save)
			to_free_normal -= save;
		else
			to_free_normal = 0;
	}
	free = to_free_normal + to_free_highmem;

	memory_bm_position_reset(&copy_bm);
	while (to_free_normal > 0 || to_free_highmem > 0) {
		unsigned long pfn = memory_bm_next_pfn(&copy_bm);
		struct page *page = pfn_to_page(pfn);

		if (PageHighMem(page)) {
			if (!to_free_highmem)
				continue;
			to_free_highmem--;
			alloc_highmem--;
		} else {
			if (!to_free_normal)
				continue;
			to_free_normal--;
			alloc_normal--;
		}
		memory_bm_clear_bit(&copy_bm, pfn);
		swsusp_unset_page_forbidden(page);
		swsusp_unset_page_free(page);
		__free_page(page);
	}

	return free;
}
  
/**
 * minimum_image_size - Estimate the minimum acceptable size of an image.
 * @saveable: Number of saveable pages in the system.
 *
 * We want to avoid attempting to free too much memory too hard, so estimate the
 * minimum acceptable size of a hibernation image to use as the lower limit for
 * preallocating memory.
 *
 * We assume that the minimum image size should be proportional to
 *
 * [number of saveable pages] - [number of pages that can be freed in theory]
 *
 * where the second term is the sum of (1) reclaimable slab pages, (2) active
 * and (3) inactive anonymous pages, (4) active and (5) inactive file pages,
 * minus mapped file pages.
 */
static unsigned long minimum_image_size(unsigned long saveable)
{
	unsigned long size;

	size = global_page_state(NR_SLAB_RECLAIMABLE)
		+ global_node_page_state(NR_ACTIVE_ANON)
		+ global_node_page_state(NR_INACTIVE_ANON)
		+ global_node_page_state(NR_ACTIVE_FILE)
		+ global_node_page_state(NR_INACTIVE_FILE)
		- global_node_page_state(NR_FILE_MAPPED);

	/* Clamp at zero: unsigned subtraction must not wrap around. */
	return saveable <= size ? 0 : saveable - size;
}
  
/**
 * hibernate_preallocate_memory - Preallocate memory for hibernation image.
 *
 * To create a hibernation image it is necessary to make a copy of every page
 * frame in use.  We also need a number of page frames to be free during
 * hibernation for allocations made while saving the image and for device
 * drivers, in case they need to allocate memory from their hibernation
 * callbacks (these two numbers are given by PAGES_FOR_IO (which is a rough
 * estimate) and reserved_size divided by PAGE_SIZE (which is tunable through
 * /sys/power/reserved_size), respectively).  To make this happen, we compute
 * the total number of available page frames and allocate at least
 *
 * ([page frames total] + PAGES_FOR_IO + [metadata pages]) / 2
 *  + 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE)
 *
 * of them, which corresponds to the maximum size of a hibernation image.
 *
 * If image_size is set below the number following from the above formula,
 * the preallocation of memory is continued until the total number of saveable
 * pages in the system is below the requested image size or the minimum
 * acceptable image size returned by minimum_image_size(), whichever is greater.
 *
 * Returns 0 on success, -ENOMEM on allocation failure (after freeing
 * everything already preallocated via swsusp_free()).
 */
int hibernate_preallocate_memory(void)
{
	struct zone *zone;
	unsigned long saveable, size, max_size, count, highmem, pages = 0;
	unsigned long alloc, save_highmem, pages_highmem, avail_normal;
	ktime_t start, stop;
	int error;

	printk(KERN_INFO "PM: Preallocating image memory... ");
	start = ktime_get();

	error = memory_bm_create(&orig_bm, GFP_IMAGE, PG_ANY);
	if (error)
		goto err_out;

	error = memory_bm_create(&copy_bm, GFP_IMAGE, PG_ANY);
	if (error)
		goto err_out;

	alloc_normal = 0;
	alloc_highmem = 0;

	/* Count the number of saveable data pages. */
	save_highmem = count_highmem_pages();
	saveable = count_data_pages();

	/*
	 * Compute the total number of page frames we can use (count) and the
	 * number of pages needed for image metadata (size).
	 */
	count = saveable;
	saveable += save_highmem;
	highmem = save_highmem;
	size = 0;
	for_each_populated_zone(zone) {
		size += snapshot_additional_pages(zone);
		if (is_highmem(zone))
			highmem += zone_page_state(zone, NR_FREE_PAGES);
		else
			count += zone_page_state(zone, NR_FREE_PAGES);
	}
	avail_normal = count;
	count += highmem;
	count -= totalreserve_pages;

	/* Add number of pages required for page keys (s390 only). */
	size += page_key_additional_pages(saveable);

	/* Compute the maximum number of saveable pages to leave in memory. */
	max_size = (count - (size + PAGES_FOR_IO)) / 2
			- 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE);
	/* Compute the desired number of image pages specified by image_size. */
	size = DIV_ROUND_UP(image_size, PAGE_SIZE);
	if (size > max_size)
		size = max_size;
	/*
	 * If the desired number of image pages is at least as large as the
	 * current number of saveable pages in memory, allocate page frames for
	 * the image and we're done.
	 */
	if (size >= saveable) {
		pages = preallocate_image_highmem(save_highmem);
		pages += preallocate_image_memory(saveable - pages, avail_normal);
		goto out;
	}

	/* Estimate the minimum size of the image. */
	pages = minimum_image_size(saveable);
	/*
	 * To avoid excessive pressure on the normal zone, leave room in it to
	 * accommodate an image of the minimum size (unless it's already too
	 * small, in which case don't preallocate pages from it at all).
	 */
	if (avail_normal > pages)
		avail_normal -= pages;
	else
		avail_normal = 0;
	if (size < pages)
		size = min_t(unsigned long, pages, max_size);

	/*
	 * Let the memory management subsystem know that we're going to need a
	 * large number of page frames to allocate and make it free some memory.
	 * NOTE: If this is not done, performance will be hurt badly in some
	 * test cases.
	 */
	shrink_all_memory(saveable - size);

	/*
	 * The number of saveable pages in memory was too high, so apply some
	 * pressure to decrease it.  First, make room for the largest possible
	 * image and fail if that doesn't work.  Next, try to decrease the size
	 * of the image as much as indicated by 'size' using allocations from
	 * highmem and non-highmem zones separately.
	 */
	pages_highmem = preallocate_image_highmem(highmem / 2);
	alloc = count - max_size;
	if (alloc > pages_highmem)
		alloc -= pages_highmem;
	else
		alloc = 0;
	pages = preallocate_image_memory(alloc, avail_normal);
	if (pages < alloc) {
		/* We have exhausted non-highmem pages, try highmem. */
		alloc -= pages;
		pages += pages_highmem;
		pages_highmem = preallocate_image_highmem(alloc);
		if (pages_highmem < alloc)
			goto err_out;
		pages += pages_highmem;
		/*
		 * size is the desired number of saveable pages to leave in
		 * memory, so try to preallocate (all memory - size) pages.
		 */
		alloc = (count - pages) - size;
		pages += preallocate_image_highmem(alloc);
	} else {
		/*
		 * There are approximately max_size saveable pages at this point
		 * and we want to reduce this number down to size.
		 */
		alloc = max_size - size;
		size = preallocate_highmem_fraction(alloc, highmem, count);
		pages_highmem += size;
		alloc -= size;
		size = preallocate_image_memory(alloc, avail_normal);
		pages_highmem += preallocate_image_highmem(alloc - size);
		pages += pages_highmem + size;
	}

	/*
	 * We only need as many page frames for the image as there are saveable
	 * pages in memory, but we have allocated more.  Release the excessive
	 * ones now.
	 */
	pages -= free_unnecessary_pages();

 out:
	stop = ktime_get();
	printk(KERN_CONT "done (allocated %lu pages)\n", pages);
	swsusp_show_speed(start, stop, pages, "Allocated");

	return 0;

 err_out:
	printk(KERN_CONT "\n");
	swsusp_free();
	return -ENOMEM;
}
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
1754
1755
  #ifdef CONFIG_HIGHMEM
  /**
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
1756
1757
1758
1759
1760
   * count_pages_for_highmem - Count non-highmem pages needed for copying highmem.
   *
   * Compute the number of non-highmem pages that will be necessary for creating
   * copies of highmem pages.
   */
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
1761
1762
  static unsigned int count_pages_for_highmem(unsigned int nr_highmem)
  {
64a473cb7   Rafael J. Wysocki   PM/Hibernate: Do ...
1763
  	unsigned int free_highmem = count_free_highmem_pages() + alloc_highmem;
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
1764
1765
1766
1767
1768
1769
1770
1771
1772
  
  	if (free_highmem >= nr_highmem)
  		nr_highmem = 0;
  	else
  		nr_highmem -= free_highmem;
  
  	return nr_highmem;
  }
  #else
efd5a8524   Rafael J. Wysocki   PM / hibernate: C...
1773
  static unsigned int count_pages_for_highmem(unsigned int nr_highmem) { return 0; }
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
1774
  #endif /* CONFIG_HIGHMEM */
25761b6eb   Rafael J. Wysocki   [PATCH] swsusp: m...
1775
1776
  
/**
 * enough_free_mem - Check if there is enough free memory for the image.
 * @nr_pages: Number of normal data pages to be copied.
 * @nr_highmem: Number of highmem data pages to be copied.
 *
 * Returns nonzero when the free non-highmem pages (plus those already
 * preallocated) exceed the pages needed for the copies and for I/O.
 */
static int enough_free_mem(unsigned int nr_pages, unsigned int nr_highmem)
{
	struct zone *zone;
	unsigned int free = alloc_normal;

	for_each_populated_zone(zone)
		if (!is_highmem(zone))
			free += zone_page_state(zone, NR_FREE_PAGES);

	/* Highmem copies that can't stay in highmem consume normal pages too. */
	nr_pages += count_pages_for_highmem(nr_highmem);
	pr_debug("PM: Normal pages needed: %u + %u, available pages: %u\n",
		nr_pages, PAGES_FOR_IO, free);

	return free > nr_pages + PAGES_FOR_IO;
}
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
1795
1796
#ifdef CONFIG_HIGHMEM
/**
 * get_highmem_buffer - Allocate a buffer for highmem pages.
 *
 * If there are some highmem pages in the hibernation image, we may need a
 * buffer to copy them and/or load their data.
 */
static inline int get_highmem_buffer(int safe_needed)
{
	buffer = get_image_page(GFP_ATOMIC | __GFP_COLD, safe_needed);
	return buffer ? 0 : -ENOMEM;
}

/**
 * alloc_highmem_image_pages - Allocate some highmem pages for the image.
 *
 * Try to allocate as many pages as needed, but if the number of free highmem
 * pages is less than that, allocate them all.
 *
 * Returns the number of pages that could not be allocated from highmem.
 */
static inline unsigned int alloc_highmem_pages(struct memory_bitmap *bm,
					       unsigned int nr_highmem)
{
	unsigned int to_alloc = count_free_highmem_pages();

	if (to_alloc > nr_highmem)
		to_alloc = nr_highmem;

	nr_highmem -= to_alloc;
	while (to_alloc-- > 0) {
		struct page *page;

		/*
		 * NOTE(review): the return value is not checked here;
		 * presumably the count_free_highmem_pages() bound above
		 * makes failure unexpected — confirm before relying on it.
		 */
		page = alloc_image_page(__GFP_HIGHMEM|__GFP_KSWAPD_RECLAIM);
		memory_bm_set_bit(bm, page_to_pfn(page));
	}
	return nr_highmem;
}
#else
static inline int get_highmem_buffer(int safe_needed) { return 0; }

static inline unsigned int alloc_highmem_pages(struct memory_bitmap *bm,
					       unsigned int n) { return 0; }
#endif /* CONFIG_HIGHMEM */
  
/**
 * swsusp_alloc - Allocate memory for hibernation image.
 *
 * We first try to allocate as many highmem pages as there are
 * saveable highmem pages in the system.  If that fails, we allocate
 * non-highmem pages for the copies of the remaining highmem ones.
 *
 * In this approach it is likely that the copies of highmem pages will
 * also be located in the high memory, because of the way in which
 * copy_data_pages() works.
 *
 * Returns 0 on success, -ENOMEM after releasing everything on failure.
 */
static int swsusp_alloc(struct memory_bitmap *orig_bm,
			struct memory_bitmap *copy_bm,
			unsigned int nr_pages, unsigned int nr_highmem)
{
	if (nr_highmem > 0) {
		if (get_highmem_buffer(PG_ANY))
			goto err_out;
		/* Pages already preallocated (alloc_highmem) count toward the goal. */
		if (nr_highmem > alloc_highmem) {
			nr_highmem -= alloc_highmem;
			/* Highmem shortfall falls back to normal pages below. */
			nr_pages += alloc_highmem_pages(copy_bm, nr_highmem);
		}
	}
	if (nr_pages > alloc_normal) {
		nr_pages -= alloc_normal;
		while (nr_pages-- > 0) {
			struct page *page;

			page = alloc_image_page(GFP_ATOMIC | __GFP_COLD);
			if (!page)
				goto err_out;
			memory_bm_set_bit(copy_bm, page_to_pfn(page));
		}
	}

	return 0;

 err_out:
	swsusp_free();
	return -ENOMEM;
}
722a9f929   Andi Kleen   asmlinkage: Add e...
1877
  asmlinkage __visible int swsusp_save(void)
25761b6eb   Rafael J. Wysocki   [PATCH] swsusp: m...
1878
  {
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
1879
  	unsigned int nr_pages, nr_highmem;
25761b6eb   Rafael J. Wysocki   [PATCH] swsusp: m...
1880

07c3bb579   Frans Pop   PM / Hibernate: R...
1881
1882
  	printk(KERN_INFO "PM: Creating hibernation image:
  ");
25761b6eb   Rafael J. Wysocki   [PATCH] swsusp: m...
1883

9f8f21725   Christoph Lameter   Page allocator: c...
1884
  	drain_local_pages(NULL);
a0f496517   Rafael J. Wysocki   [PATCH] swsusp: r...
1885
  	nr_pages = count_data_pages();
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
1886
  	nr_highmem = count_highmem_pages();
23976728a   Rafael J. Wysocki   Hibernation: Upda...
1887
1888
  	printk(KERN_INFO "PM: Need to copy %u pages
  ", nr_pages + nr_highmem);
25761b6eb   Rafael J. Wysocki   [PATCH] swsusp: m...
1889

8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
1890
  	if (!enough_free_mem(nr_pages, nr_highmem)) {
23976728a   Rafael J. Wysocki   Hibernation: Upda...
1891
1892
  		printk(KERN_ERR "PM: Not enough free memory
  ");
25761b6eb   Rafael J. Wysocki   [PATCH] swsusp: m...
1893
1894
  		return -ENOMEM;
  	}
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
1895
  	if (swsusp_alloc(&orig_bm, &copy_bm, nr_pages, nr_highmem)) {
23976728a   Rafael J. Wysocki   Hibernation: Upda...
1896
1897
  		printk(KERN_ERR "PM: Memory allocation failed
  ");
a0f496517   Rafael J. Wysocki   [PATCH] swsusp: r...
1898
  		return -ENOMEM;
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
1899
  	}
25761b6eb   Rafael J. Wysocki   [PATCH] swsusp: m...
1900

ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
1901
1902
  	/*
  	 * During allocating of suspend pagedir, new cold pages may appear.
25761b6eb   Rafael J. Wysocki   [PATCH] swsusp: m...
1903
1904
  	 * Kill them.
  	 */
9f8f21725   Christoph Lameter   Page allocator: c...
1905
  	drain_local_pages(NULL);
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
1906
  	copy_data_pages(&copy_bm, &orig_bm);
25761b6eb   Rafael J. Wysocki   [PATCH] swsusp: m...
1907
1908
1909
1910
1911
1912
  
  	/*
  	 * End of critical section. From now on, we can write to memory,
  	 * but we should not touch disk. This specially means we must _not_
  	 * touch swap space! Except we must write out our image of course.
  	 */
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
1913
  	nr_pages += nr_highmem;
a0f496517   Rafael J. Wysocki   [PATCH] swsusp: r...
1914
  	nr_copy_pages = nr_pages;
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
1915
  	nr_meta_pages = DIV_ROUND_UP(nr_pages * sizeof(long), PAGE_SIZE);
a0f496517   Rafael J. Wysocki   [PATCH] swsusp: r...
1916

23976728a   Rafael J. Wysocki   Hibernation: Upda...
1917
1918
1919
  	printk(KERN_INFO "PM: Hibernation image created (%d pages copied)
  ",
  		nr_pages);
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
1920

25761b6eb   Rafael J. Wysocki   [PATCH] swsusp: m...
1921
1922
  	return 0;
  }
f577eb30a   Rafael J. Wysocki   [PATCH] swsusp: l...
1923

d307c4a8e   Rafael J. Wysocki   Hibernation: Arbi...
1924
1925
  #ifndef CONFIG_ARCH_HIBERNATION_HEADER
  static int init_header_complete(struct swsusp_info *info)
f577eb30a   Rafael J. Wysocki   [PATCH] swsusp: l...
1926
  {
d307c4a8e   Rafael J. Wysocki   Hibernation: Arbi...
1927
  	memcpy(&info->uts, init_utsname(), sizeof(struct new_utsname));
f577eb30a   Rafael J. Wysocki   [PATCH] swsusp: l...
1928
  	info->version_code = LINUX_VERSION_CODE;
d307c4a8e   Rafael J. Wysocki   Hibernation: Arbi...
1929
1930
1931
1932
1933
1934
1935
1936
1937
1938
1939
1940
1941
1942
1943
1944
1945
1946
  	return 0;
  }
  
  static char *check_image_kernel(struct swsusp_info *info)
  {
  	if (info->version_code != LINUX_VERSION_CODE)
  		return "kernel version";
  	if (strcmp(info->uts.sysname,init_utsname()->sysname))
  		return "system type";
  	if (strcmp(info->uts.release,init_utsname()->release))
  		return "kernel release";
  	if (strcmp(info->uts.version,init_utsname()->version))
  		return "version";
  	if (strcmp(info->uts.machine,init_utsname()->machine))
  		return "machine";
  	return NULL;
  }
  #endif /* CONFIG_ARCH_HIBERNATION_HEADER */
af508b34d   Rafael J. Wysocki   Hibernation: Intr...
1947
1948
1949
1950
  unsigned long snapshot_get_image_size(void)
  {
  	return nr_copy_pages + nr_meta_pages + 1;
  }
d307c4a8e   Rafael J. Wysocki   Hibernation: Arbi...
1951
1952
1953
  static int init_header(struct swsusp_info *info)
  {
  	memset(info, 0, sizeof(struct swsusp_info));
0ed5fd138   Jiang Liu   mm: use totalram_...
1954
  	info->num_physpages = get_num_physpages();
f577eb30a   Rafael J. Wysocki   [PATCH] swsusp: l...
1955
  	info->image_pages = nr_copy_pages;
af508b34d   Rafael J. Wysocki   Hibernation: Intr...
1956
  	info->pages = snapshot_get_image_size();
6e1819d61   Rafael J. Wysocki   [PATCH] swsusp: u...
1957
1958
  	info->size = info->pages;
  	info->size <<= PAGE_SHIFT;
d307c4a8e   Rafael J. Wysocki   Hibernation: Arbi...
1959
  	return init_header_complete(info);
f577eb30a   Rafael J. Wysocki   [PATCH] swsusp: l...
1960
1961
1962
  }
  
  /**
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
1963
1964
1965
1966
1967
1968
   * pack_pfns - Prepare PFNs for saving.
   * @bm: Memory bitmap.
   * @buf: Memory buffer to store the PFNs in.
   *
   * PFNs corresponding to set bits in @bm are stored in the area of memory
   * pointed to by @buf (1 page at a time).
f577eb30a   Rafael J. Wysocki   [PATCH] swsusp: l...
1969
   */
efd5a8524   Rafael J. Wysocki   PM / hibernate: C...
1970
  static inline void pack_pfns(unsigned long *buf, struct memory_bitmap *bm)
f577eb30a   Rafael J. Wysocki   [PATCH] swsusp: l...
1971
1972
  {
  	int j;
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
1973
  	for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
940864dda   Rafael J. Wysocki   [PATCH] swsusp: U...
1974
1975
  		buf[j] = memory_bm_next_pfn(bm);
  		if (unlikely(buf[j] == BM_END_OF_MAP))
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
1976
  			break;
85055dd80   Martin Schwidefsky   PM / Hibernate: I...
1977
1978
  		/* Save page key for data page (s390 only). */
  		page_key_read(buf + j);
f577eb30a   Rafael J. Wysocki   [PATCH] swsusp: l...
1979
  	}
f577eb30a   Rafael J. Wysocki   [PATCH] swsusp: l...
1980
1981
1982
  }
  
  /**
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
1983
1984
   * snapshot_read_next - Get the address to read the next image page from.
   * @handle: Snapshot handle to be used for the reading.
f577eb30a   Rafael J. Wysocki   [PATCH] swsusp: l...
1985
   *
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
1986
1987
1988
   * On the first call, @handle should point to a zeroed snapshot_handle
   * structure.  The structure gets populated then and a pointer to it should be
   * passed to this function every next time.
f577eb30a   Rafael J. Wysocki   [PATCH] swsusp: l...
1989
   *
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
1990
1991
1992
   * On success, the function returns a positive number.  Then, the caller
   * is allowed to read up to the returned number of bytes from the memory
   * location computed by the data_of() macro.
f577eb30a   Rafael J. Wysocki   [PATCH] swsusp: l...
1993
   *
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
1994
1995
1996
   * The function returns 0 to indicate the end of the data stream condition,
   * and negative numbers are returned on errors.  If that happens, the structure
   * pointed to by @handle is not updated and should not be used any more.
f577eb30a   Rafael J. Wysocki   [PATCH] swsusp: l...
1997
   */
d3c1b24c5   Jiri Slaby   PM / Hibernate: S...
1998
  int snapshot_read_next(struct snapshot_handle *handle)
f577eb30a   Rafael J. Wysocki   [PATCH] swsusp: l...
1999
  {
fb13a28b0   Rafael J. Wysocki   [PATCH] swsusp: s...
2000
  	if (handle->cur > nr_meta_pages + nr_copy_pages)
f577eb30a   Rafael J. Wysocki   [PATCH] swsusp: l...
2001
  		return 0;
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
2002

f577eb30a   Rafael J. Wysocki   [PATCH] swsusp: l...
2003
2004
  	if (!buffer) {
  		/* This makes the buffer be freed by swsusp_free() */
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
2005
  		buffer = get_image_page(GFP_ATOMIC, PG_ANY);
f577eb30a   Rafael J. Wysocki   [PATCH] swsusp: l...
2006
2007
2008
  		if (!buffer)
  			return -ENOMEM;
  	}
d3c1b24c5   Jiri Slaby   PM / Hibernate: S...
2009
  	if (!handle->cur) {
d307c4a8e   Rafael J. Wysocki   Hibernation: Arbi...
2010
2011
2012
2013
2014
  		int error;
  
  		error = init_header((struct swsusp_info *)buffer);
  		if (error)
  			return error;
f577eb30a   Rafael J. Wysocki   [PATCH] swsusp: l...
2015
  		handle->buffer = buffer;
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
2016
2017
  		memory_bm_position_reset(&orig_bm);
  		memory_bm_position_reset(&copy_bm);
d3c1b24c5   Jiri Slaby   PM / Hibernate: S...
2018
  	} else if (handle->cur <= nr_meta_pages) {
3ecb01df3   Jan Beulich   use clear_page()/...
2019
  		clear_page(buffer);
d3c1b24c5   Jiri Slaby   PM / Hibernate: S...
2020
2021
2022
  		pack_pfns(buffer, &orig_bm);
  	} else {
  		struct page *page;
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
2023

d3c1b24c5   Jiri Slaby   PM / Hibernate: S...
2024
2025
  		page = pfn_to_page(memory_bm_next_pfn(&copy_bm));
  		if (PageHighMem(page)) {
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
2026
2027
  			/*
  			 * Highmem pages are copied to the buffer,
d3c1b24c5   Jiri Slaby   PM / Hibernate: S...
2028
2029
2030
2031
  			 * because we can't return with a kmapped
  			 * highmem page (we may not be called again).
  			 */
  			void *kaddr;
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
2032

0de9a1e28   Cong Wang   power: remove the...
2033
  			kaddr = kmap_atomic(page);
3ecb01df3   Jan Beulich   use clear_page()/...
2034
  			copy_page(buffer, kaddr);
0de9a1e28   Cong Wang   power: remove the...
2035
  			kunmap_atomic(kaddr);
d3c1b24c5   Jiri Slaby   PM / Hibernate: S...
2036
2037
2038
  			handle->buffer = buffer;
  		} else {
  			handle->buffer = page_address(page);
f577eb30a   Rafael J. Wysocki   [PATCH] swsusp: l...
2039
  		}
f577eb30a   Rafael J. Wysocki   [PATCH] swsusp: l...
2040
  	}
d3c1b24c5   Jiri Slaby   PM / Hibernate: S...
2041
2042
  	handle->cur++;
  	return PAGE_SIZE;
f577eb30a   Rafael J. Wysocki   [PATCH] swsusp: l...
2043
  }
6dbecfd34   Rafael J. Wysocki   PM / hibernate: S...
2044
2045
2046
2047
2048
2049
2050
2051
2052
2053
2054
2055
  static void duplicate_memory_bitmap(struct memory_bitmap *dst,
  				    struct memory_bitmap *src)
  {
  	unsigned long pfn;
  
  	memory_bm_position_reset(src);
  	pfn = memory_bm_next_pfn(src);
  	while (pfn != BM_END_OF_MAP) {
  		memory_bm_set_bit(dst, pfn);
  		pfn = memory_bm_next_pfn(src);
  	}
  }
f577eb30a   Rafael J. Wysocki   [PATCH] swsusp: l...
2056
  /**
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
2057
2058
2059
2060
   * mark_unsafe_pages - Mark pages that were used before hibernation.
   *
   * Mark the pages that cannot be used for storing the image during restoration,
   * because they conflict with the pages that had been used before hibernation.
f577eb30a   Rafael J. Wysocki   [PATCH] swsusp: l...
2061
   */
6dbecfd34   Rafael J. Wysocki   PM / hibernate: S...
2062
  static void mark_unsafe_pages(struct memory_bitmap *bm)
f577eb30a   Rafael J. Wysocki   [PATCH] swsusp: l...
2063
  {
6dbecfd34   Rafael J. Wysocki   PM / hibernate: S...
2064
  	unsigned long pfn;
f577eb30a   Rafael J. Wysocki   [PATCH] swsusp: l...
2065

6dbecfd34   Rafael J. Wysocki   PM / hibernate: S...
2066
2067
2068
2069
2070
2071
  	/* Clear the "free"/"unsafe" bit for all PFNs */
  	memory_bm_position_reset(free_pages_map);
  	pfn = memory_bm_next_pfn(free_pages_map);
  	while (pfn != BM_END_OF_MAP) {
  		memory_bm_clear_current(free_pages_map);
  		pfn = memory_bm_next_pfn(free_pages_map);
f577eb30a   Rafael J. Wysocki   [PATCH] swsusp: l...
2072
  	}
6dbecfd34   Rafael J. Wysocki   PM / hibernate: S...
2073
2074
  	/* Mark pages that correspond to the "original" PFNs as "unsafe" */
  	duplicate_memory_bitmap(free_pages_map, bm);
f577eb30a   Rafael J. Wysocki   [PATCH] swsusp: l...
2075

940864dda   Rafael J. Wysocki   [PATCH] swsusp: U...
2076
  	allocated_unsafe_pages = 0;
f577eb30a   Rafael J. Wysocki   [PATCH] swsusp: l...
2077
  }
d307c4a8e   Rafael J. Wysocki   Hibernation: Arbi...
2078
  static int check_header(struct swsusp_info *info)
f577eb30a   Rafael J. Wysocki   [PATCH] swsusp: l...
2079
  {
d307c4a8e   Rafael J. Wysocki   Hibernation: Arbi...
2080
  	char *reason;
f577eb30a   Rafael J. Wysocki   [PATCH] swsusp: l...
2081

d307c4a8e   Rafael J. Wysocki   Hibernation: Arbi...
2082
  	reason = check_image_kernel(info);
0ed5fd138   Jiang Liu   mm: use totalram_...
2083
  	if (!reason && info->num_physpages != get_num_physpages())
f577eb30a   Rafael J. Wysocki   [PATCH] swsusp: l...
2084
  		reason = "memory size";
f577eb30a   Rafael J. Wysocki   [PATCH] swsusp: l...
2085
  	if (reason) {
23976728a   Rafael J. Wysocki   Hibernation: Upda...
2086
2087
  		printk(KERN_ERR "PM: Image mismatch: %s
  ", reason);
f577eb30a   Rafael J. Wysocki   [PATCH] swsusp: l...
2088
2089
2090
2091
2092
2093
  		return -EPERM;
  	}
  	return 0;
  }
  
  /**
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
2094
   * load header - Check the image header and copy the data from it.
f577eb30a   Rafael J. Wysocki   [PATCH] swsusp: l...
2095
   */
efd5a8524   Rafael J. Wysocki   PM / hibernate: C...
2096
  static int load_header(struct swsusp_info *info)
f577eb30a   Rafael J. Wysocki   [PATCH] swsusp: l...
2097
2098
  {
  	int error;
f577eb30a   Rafael J. Wysocki   [PATCH] swsusp: l...
2099

940864dda   Rafael J. Wysocki   [PATCH] swsusp: U...
2100
  	restore_pblist = NULL;
f577eb30a   Rafael J. Wysocki   [PATCH] swsusp: l...
2101
2102
  	error = check_header(info);
  	if (!error) {
f577eb30a   Rafael J. Wysocki   [PATCH] swsusp: l...
2103
2104
2105
2106
2107
2108
2109
  		nr_copy_pages = info->image_pages;
  		nr_meta_pages = info->pages - info->image_pages - 1;
  	}
  	return error;
  }
  
  /**
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
2110
2111
2112
2113
2114
2115
   * unpack_orig_pfns - Set bits corresponding to given PFNs in a memory bitmap.
   * @bm: Memory bitmap.
   * @buf: Area of memory containing the PFNs.
   *
   * For each element of the array pointed to by @buf (1 page at a time), set the
   * corresponding bit in @bm.
f577eb30a   Rafael J. Wysocki   [PATCH] swsusp: l...
2116
   */
69643279a   Rafael J. Wysocki   Hibernate: Do not...
2117
  static int unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm)
f577eb30a   Rafael J. Wysocki   [PATCH] swsusp: l...
2118
2119
  {
  	int j;
940864dda   Rafael J. Wysocki   [PATCH] swsusp: U...
2120
2121
2122
  	for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
  		if (unlikely(buf[j] == BM_END_OF_MAP))
  			break;
85055dd80   Martin Schwidefsky   PM / Hibernate: I...
2123
2124
  		/* Extract and buffer page key for data page (s390 only). */
  		page_key_memorize(buf + j);
6dbecfd34   Rafael J. Wysocki   PM / hibernate: S...
2125
  		if (pfn_valid(buf[j]) && memory_bm_pfn_present(bm, buf[j]))
69643279a   Rafael J. Wysocki   Hibernate: Do not...
2126
2127
2128
  			memory_bm_set_bit(bm, buf[j]);
  		else
  			return -EFAULT;
f577eb30a   Rafael J. Wysocki   [PATCH] swsusp: l...
2129
  	}
69643279a   Rafael J. Wysocki   Hibernate: Do not...
2130
2131
  
  	return 0;
f577eb30a   Rafael J. Wysocki   [PATCH] swsusp: l...
2132
  }
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
2133
  #ifdef CONFIG_HIGHMEM
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
2134
2135
  /*
   * struct highmem_pbe is used for creating the list of highmem pages that
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
2136
2137
2138
2139
2140
2141
2142
2143
   * should be restored atomically during the resume from disk, because the page
   * frames they have occupied before the suspend are in use.
   */
  struct highmem_pbe {
  	struct page *copy_page;	/* data is here now */
  	struct page *orig_page;	/* data was here before the suspend */
  	struct highmem_pbe *next;
  };
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
2144
2145
  /*
   * List of highmem PBEs needed for restoring the highmem pages that were
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
2146
2147
2148
2149
2150
2151
2152
   * allocated before the suspend and included in the suspend image, but have
   * also been allocated by the "resume" kernel, so their contents cannot be
   * written directly to their "original" page frames.
   */
  static struct highmem_pbe *highmem_pblist;
  
  /**
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
2153
2154
2155
2156
   * count_highmem_image_pages - Compute the number of highmem pages in the image.
   * @bm: Memory bitmap.
   *
   * The bits in @bm that correspond to image pages are assumed to be set.
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
2157
   */
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
2158
2159
2160
2161
2162
2163
2164
2165
2166
2167
2168
2169
2170
2171
2172
  static unsigned int count_highmem_image_pages(struct memory_bitmap *bm)
  {
  	unsigned long pfn;
  	unsigned int cnt = 0;
  
  	memory_bm_position_reset(bm);
  	pfn = memory_bm_next_pfn(bm);
  	while (pfn != BM_END_OF_MAP) {
  		if (PageHighMem(pfn_to_page(pfn)))
  			cnt++;
  
  		pfn = memory_bm_next_pfn(bm);
  	}
  	return cnt;
  }
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
2173
2174
2175
  static unsigned int safe_highmem_pages;
  
  static struct memory_bitmap *safe_highmem_bm;
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
2176
2177
2178
2179
2180
2181
2182
2183
2184
2185
2186
2187
2188
  /**
   * prepare_highmem_image - Allocate memory for loading highmem data from image.
   * @bm: Pointer to an uninitialized memory bitmap structure.
   * @nr_highmem_p: Pointer to the number of highmem image pages.
   *
   * Try to allocate as many highmem pages as there are highmem image pages
   * (@nr_highmem_p points to the variable containing the number of highmem image
   * pages).  The pages that are "safe" (ie. will not be overwritten when the
   * hibernation image is restored entirely) have the corresponding bits set in
   * @bm (it must be unitialized).
   *
   * NOTE: This function should not be called if there are no highmem image pages.
   */
efd5a8524   Rafael J. Wysocki   PM / hibernate: C...
2189
2190
  static int prepare_highmem_image(struct memory_bitmap *bm,
  				 unsigned int *nr_highmem_p)
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
2191
2192
2193
2194
2195
2196
2197
2198
2199
2200
2201
2202
2203
2204
2205
2206
2207
2208
2209
2210
  {
  	unsigned int to_alloc;
  
  	if (memory_bm_create(bm, GFP_ATOMIC, PG_SAFE))
  		return -ENOMEM;
  
  	if (get_highmem_buffer(PG_SAFE))
  		return -ENOMEM;
  
  	to_alloc = count_free_highmem_pages();
  	if (to_alloc > *nr_highmem_p)
  		to_alloc = *nr_highmem_p;
  	else
  		*nr_highmem_p = to_alloc;
  
  	safe_highmem_pages = 0;
  	while (to_alloc-- > 0) {
  		struct page *page;
  
  		page = alloc_page(__GFP_HIGHMEM);
7be982349   Rafael J. Wysocki   swsusp: use inlin...
2211
  		if (!swsusp_page_is_free(page)) {
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
2212
2213
2214
2215
2216
  			/* The page is "safe", set its bit the bitmap */
  			memory_bm_set_bit(bm, page_to_pfn(page));
  			safe_highmem_pages++;
  		}
  		/* Mark the page as allocated */
7be982349   Rafael J. Wysocki   swsusp: use inlin...
2217
2218
  		swsusp_set_page_forbidden(page);
  		swsusp_set_page_free(page);
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
2219
2220
2221
2222
2223
  	}
  	memory_bm_position_reset(bm);
  	safe_highmem_bm = bm;
  	return 0;
  }
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
2224
  static struct page *last_highmem_page;
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
2225
  /**
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
2226
2227
2228
2229
   * get_highmem_page_buffer - Prepare a buffer to store a highmem image page.
   *
   * For a given highmem image page get a buffer that suspend_write_next() should
   * return to its caller to write to.
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
2230
   *
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
2231
2232
2233
2234
   * If the page is to be saved to its "original" page frame or a copy of
   * the page is to be made in the highmem, @buffer is returned.  Otherwise,
   * the copy of the page is to be made in normal memory, so the address of
   * the copy is returned.
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
2235
   *
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
2236
2237
2238
2239
2240
2241
   * If @buffer is returned, the caller of suspend_write_next() will write
   * the page's contents to @buffer, so they will have to be copied to the
   * right location on the next call to suspend_write_next() and it is done
   * with the help of copy_last_highmem_page().  For this purpose, if
   * @buffer is returned, @last_highmem_page is set to the page to which
   * the data will have to be copied from @buffer.
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
2242
   */
efd5a8524   Rafael J. Wysocki   PM / hibernate: C...
2243
2244
  static void *get_highmem_page_buffer(struct page *page,
  				     struct chain_allocator *ca)
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
2245
2246
2247
  {
  	struct highmem_pbe *pbe;
  	void *kaddr;
7be982349   Rafael J. Wysocki   swsusp: use inlin...
2248
  	if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page)) {
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
2249
2250
  		/*
  		 * We have allocated the "original" page frame and we can
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
2251
2252
2253
2254
2255
  		 * use it directly to store the loaded page.
  		 */
  		last_highmem_page = page;
  		return buffer;
  	}
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
2256
2257
  	/*
  	 * The "original" page frame has not been allocated and we have to
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
2258
2259
2260
2261
2262
  	 * use a "safe" page frame to store the loaded page.
  	 */
  	pbe = chain_alloc(ca, sizeof(struct highmem_pbe));
  	if (!pbe) {
  		swsusp_free();
69643279a   Rafael J. Wysocki   Hibernate: Do not...
2263
  		return ERR_PTR(-ENOMEM);
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
2264
2265
2266
2267
2268
2269
2270
2271
2272
2273
2274
2275
2276
2277
2278
2279
2280
2281
2282
2283
2284
2285
2286
  	}
  	pbe->orig_page = page;
  	if (safe_highmem_pages > 0) {
  		struct page *tmp;
  
  		/* Copy of the page will be stored in high memory */
  		kaddr = buffer;
  		tmp = pfn_to_page(memory_bm_next_pfn(safe_highmem_bm));
  		safe_highmem_pages--;
  		last_highmem_page = tmp;
  		pbe->copy_page = tmp;
  	} else {
  		/* Copy of the page will be stored in normal memory */
  		kaddr = safe_pages_list;
  		safe_pages_list = safe_pages_list->next;
  		pbe->copy_page = virt_to_page(kaddr);
  	}
  	pbe->next = highmem_pblist;
  	highmem_pblist = pbe;
  	return kaddr;
  }
  
  /**
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
2287
2288
2289
2290
2291
   * copy_last_highmem_page - Copy most the most recent highmem image page.
   *
   * Copy the contents of a highmem image from @buffer, where the caller of
   * snapshot_write_next() has stored them, to the right location represented by
   * @last_highmem_page .
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
2292
   */
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
2293
2294
2295
2296
  static void copy_last_highmem_page(void)
  {
  	if (last_highmem_page) {
  		void *dst;
0de9a1e28   Cong Wang   power: remove the...
2297
  		dst = kmap_atomic(last_highmem_page);
3ecb01df3   Jan Beulich   use clear_page()/...
2298
  		copy_page(dst, buffer);
0de9a1e28   Cong Wang   power: remove the...
2299
  		kunmap_atomic(dst);
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
2300
2301
2302
2303
2304
2305
2306
2307
2308
2309
2310
2311
2312
2313
2314
2315
2316
2317
  		last_highmem_page = NULL;
  	}
  }
  
  static inline int last_highmem_page_copied(void)
  {
  	return !last_highmem_page;
  }
  
  static inline void free_highmem_data(void)
  {
  	if (safe_highmem_bm)
  		memory_bm_free(safe_highmem_bm, PG_UNSAFE_CLEAR);
  
  	if (buffer)
  		free_image_page(buffer, PG_UNSAFE_CLEAR);
  }
  #else
efd5a8524   Rafael J. Wysocki   PM / hibernate: C...
2318
  static unsigned int count_highmem_image_pages(struct memory_bitmap *bm) { return 0; }
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
2319

efd5a8524   Rafael J. Wysocki   PM / hibernate: C...
2320
2321
  static inline int prepare_highmem_image(struct memory_bitmap *bm,
  					unsigned int *nr_highmem_p) { return 0; }
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
2322

efd5a8524   Rafael J. Wysocki   PM / hibernate: C...
2323
2324
  static inline void *get_highmem_page_buffer(struct page *page,
  					    struct chain_allocator *ca)
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
2325
  {
69643279a   Rafael J. Wysocki   Hibernate: Do not...
2326
  	return ERR_PTR(-EINVAL);
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
2327
2328
2329
2330
2331
2332
  }
  
  static inline void copy_last_highmem_page(void) {}
  static inline int last_highmem_page_copied(void) { return 1; }
  static inline void free_highmem_data(void) {}
  #endif /* CONFIG_HIGHMEM */
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
2333
  #define PBES_PER_LINKED_PAGE	(LINKED_PAGE_DATA_SIZE / sizeof(struct pbe))
f577eb30a   Rafael J. Wysocki   [PATCH] swsusp: l...
2334
  /**
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
2335
2336
2337
2338
2339
2340
2341
   * prepare_image - Make room for loading hibernation image.
   * @new_bm: Unitialized memory bitmap structure.
   * @bm: Memory bitmap with unsafe pages marked.
   *
   * Use @bm to mark the pages that will be overwritten in the process of
   * restoring the system memory state from the suspend image ("unsafe" pages)
   * and allocate memory for the image.
968808b89   Rafael J. Wysocki   [PATCH] swsusp: u...
2342
   *
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
2343
2344
2345
2346
2347
   * The idea is to allocate a new memory bitmap first and then allocate
   * as many pages as needed for image data, but without specifying what those
   * pages will be used for just yet.  Instead, we mark them all as allocated and
   * create a lists of "safe" pages to be used later.  On systems with high
   * memory a list of "safe" highmem pages is created too.
f577eb30a   Rafael J. Wysocki   [PATCH] swsusp: l...
2348
   */
efd5a8524   Rafael J. Wysocki   PM / hibernate: C...
2349
  static int prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
f577eb30a   Rafael J. Wysocki   [PATCH] swsusp: l...
2350
  {
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
2351
  	unsigned int nr_pages, nr_highmem;
9c744481c   Rafael J. Wysocki   PM / hibernate: D...
2352
  	struct linked_page *lp;
940864dda   Rafael J. Wysocki   [PATCH] swsusp: U...
2353
  	int error;
f577eb30a   Rafael J. Wysocki   [PATCH] swsusp: l...
2354

8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
2355
2356
2357
2358
2359
  	/* If there is no highmem, the buffer will not be necessary */
  	free_image_page(buffer, PG_UNSAFE_CLEAR);
  	buffer = NULL;
  
  	nr_highmem = count_highmem_image_pages(bm);
6dbecfd34   Rafael J. Wysocki   PM / hibernate: S...
2360
  	mark_unsafe_pages(bm);
940864dda   Rafael J. Wysocki   [PATCH] swsusp: U...
2361
2362
2363
2364
2365
2366
2367
  
  	error = memory_bm_create(new_bm, GFP_ATOMIC, PG_SAFE);
  	if (error)
  		goto Free;
  
  	duplicate_memory_bitmap(new_bm, bm);
  	memory_bm_free(bm, PG_UNSAFE_KEEP);
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
2368
2369
2370
2371
2372
  	if (nr_highmem > 0) {
  		error = prepare_highmem_image(bm, &nr_highmem);
  		if (error)
  			goto Free;
  	}
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
2373
2374
  	/*
  	 * Reserve some safe pages for potential later use.
940864dda   Rafael J. Wysocki   [PATCH] swsusp: U...
2375
2376
2377
2378
  	 *
  	 * NOTE: This way we make sure there will be enough safe pages for the
  	 * chain_alloc() in get_buffer().  It is a bit wasteful, but
  	 * nr_copy_pages cannot be greater than 50% of the memory anyway.
9c744481c   Rafael J. Wysocki   PM / hibernate: D...
2379
2380
  	 *
  	 * nr_copy_pages cannot be less than allocated_unsafe_pages too.
940864dda   Rafael J. Wysocki   [PATCH] swsusp: U...
2381
  	 */
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
2382
  	nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
940864dda   Rafael J. Wysocki   [PATCH] swsusp: U...
2383
2384
  	nr_pages = DIV_ROUND_UP(nr_pages, PBES_PER_LINKED_PAGE);
  	while (nr_pages > 0) {
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
2385
  		lp = get_image_page(GFP_ATOMIC, PG_SAFE);
940864dda   Rafael J. Wysocki   [PATCH] swsusp: U...
2386
  		if (!lp) {
f577eb30a   Rafael J. Wysocki   [PATCH] swsusp: l...
2387
  			error = -ENOMEM;
940864dda   Rafael J. Wysocki   [PATCH] swsusp: U...
2388
2389
  			goto Free;
  		}
9c744481c   Rafael J. Wysocki   PM / hibernate: D...
2390
2391
  		lp->next = safe_pages_list;
  		safe_pages_list = lp;
940864dda   Rafael J. Wysocki   [PATCH] swsusp: U...
2392
  		nr_pages--;
f577eb30a   Rafael J. Wysocki   [PATCH] swsusp: l...
2393
  	}
940864dda   Rafael J. Wysocki   [PATCH] swsusp: U...
2394
  	/* Preallocate memory for the image */
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
2395
  	nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
940864dda   Rafael J. Wysocki   [PATCH] swsusp: U...
2396
2397
2398
2399
2400
2401
  	while (nr_pages > 0) {
  		lp = (struct linked_page *)get_zeroed_page(GFP_ATOMIC);
  		if (!lp) {
  			error = -ENOMEM;
  			goto Free;
  		}
7be982349   Rafael J. Wysocki   swsusp: use inlin...
2402
  		if (!swsusp_page_is_free(virt_to_page(lp))) {
940864dda   Rafael J. Wysocki   [PATCH] swsusp: U...
2403
2404
2405
  			/* The page is "safe", add it to the list */
  			lp->next = safe_pages_list;
  			safe_pages_list = lp;
968808b89   Rafael J. Wysocki   [PATCH] swsusp: u...
2406
  		}
940864dda   Rafael J. Wysocki   [PATCH] swsusp: U...
2407
  		/* Mark the page as allocated */
7be982349   Rafael J. Wysocki   swsusp: use inlin...
2408
2409
  		swsusp_set_page_forbidden(virt_to_page(lp));
  		swsusp_set_page_free(virt_to_page(lp));
940864dda   Rafael J. Wysocki   [PATCH] swsusp: U...
2410
  		nr_pages--;
968808b89   Rafael J. Wysocki   [PATCH] swsusp: u...
2411
  	}
940864dda   Rafael J. Wysocki   [PATCH] swsusp: U...
2412
  	return 0;
59a493350   Rafael J. Wysocki   [PATCH] swsusp: F...
2413
   Free:
940864dda   Rafael J. Wysocki   [PATCH] swsusp: U...
2414
  	swsusp_free();
f577eb30a   Rafael J. Wysocki   [PATCH] swsusp: l...
2415
2416
  	return error;
  }
940864dda   Rafael J. Wysocki   [PATCH] swsusp: U...
2417
  /**
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
2418
2419
2420
2421
   * get_buffer - Get the address to store the next image data page.
   *
   * Get the address that snapshot_write_next() should return to its caller to
   * write to.
940864dda   Rafael J. Wysocki   [PATCH] swsusp: U...
2422
   */
940864dda   Rafael J. Wysocki   [PATCH] swsusp: U...
2423
  static void *get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca)
968808b89   Rafael J. Wysocki   [PATCH] swsusp: u...
2424
  {
940864dda   Rafael J. Wysocki   [PATCH] swsusp: U...
2425
  	struct pbe *pbe;
69643279a   Rafael J. Wysocki   Hibernate: Do not...
2426
2427
  	struct page *page;
  	unsigned long pfn = memory_bm_next_pfn(bm);
968808b89   Rafael J. Wysocki   [PATCH] swsusp: u...
2428

69643279a   Rafael J. Wysocki   Hibernate: Do not...
2429
2430
2431
2432
  	if (pfn == BM_END_OF_MAP)
  		return ERR_PTR(-EFAULT);
  
  	page = pfn_to_page(pfn);
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
2433
2434
  	if (PageHighMem(page))
  		return get_highmem_page_buffer(page, ca);
7be982349   Rafael J. Wysocki   swsusp: use inlin...
2435
  	if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page))
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
2436
2437
  		/*
  		 * We have allocated the "original" page frame and we can
940864dda   Rafael J. Wysocki   [PATCH] swsusp: U...
2438
  		 * use it directly to store the loaded page.
968808b89   Rafael J. Wysocki   [PATCH] swsusp: u...
2439
  		 */
940864dda   Rafael J. Wysocki   [PATCH] swsusp: U...
2440
  		return page_address(page);
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
2441
2442
  	/*
  	 * The "original" page frame has not been allocated and we have to
940864dda   Rafael J. Wysocki   [PATCH] swsusp: U...
2443
  	 * use a "safe" page frame to store the loaded page.
968808b89   Rafael J. Wysocki   [PATCH] swsusp: u...
2444
  	 */
940864dda   Rafael J. Wysocki   [PATCH] swsusp: U...
2445
2446
2447
  	pbe = chain_alloc(ca, sizeof(struct pbe));
  	if (!pbe) {
  		swsusp_free();
69643279a   Rafael J. Wysocki   Hibernate: Do not...
2448
  		return ERR_PTR(-ENOMEM);
940864dda   Rafael J. Wysocki   [PATCH] swsusp: U...
2449
  	}
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
2450
2451
  	pbe->orig_address = page_address(page);
  	pbe->address = safe_pages_list;
940864dda   Rafael J. Wysocki   [PATCH] swsusp: U...
2452
2453
2454
  	safe_pages_list = safe_pages_list->next;
  	pbe->next = restore_pblist;
  	restore_pblist = pbe;
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
2455
  	return pbe->address;
968808b89   Rafael J. Wysocki   [PATCH] swsusp: u...
2456
  }
f577eb30a   Rafael J. Wysocki   [PATCH] swsusp: l...
2457
  /**
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
2458
2459
   * snapshot_write_next - Get the address to store the next image page.
   * @handle: Snapshot handle structure to guide the writing.
f577eb30a   Rafael J. Wysocki   [PATCH] swsusp: l...
2460
   *
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
2461
2462
2463
   * On the first call, @handle should point to a zeroed snapshot_handle
   * structure.  The structure gets populated then and a pointer to it should be
   * passed to this function every next time.
f577eb30a   Rafael J. Wysocki   [PATCH] swsusp: l...
2464
   *
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
2465
2466
2467
   * On success, the function returns a positive number.  Then, the caller
   * is allowed to write up to the returned number of bytes to the memory
   * location computed by the data_of() macro.
f577eb30a   Rafael J. Wysocki   [PATCH] swsusp: l...
2468
   *
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
2469
2470
2471
   * The function returns 0 to indicate the "end of file" condition.  Negative
   * numbers are returned on errors, in which cases the structure pointed to by
   * @handle is not updated and should not be used any more.
f577eb30a   Rafael J. Wysocki   [PATCH] swsusp: l...
2472
   */
d3c1b24c5   Jiri Slaby   PM / Hibernate: S...
2473
  int snapshot_write_next(struct snapshot_handle *handle)
f577eb30a   Rafael J. Wysocki   [PATCH] swsusp: l...
2474
  {
940864dda   Rafael J. Wysocki   [PATCH] swsusp: U...
2475
  	static struct chain_allocator ca;
f577eb30a   Rafael J. Wysocki   [PATCH] swsusp: l...
2476
  	int error = 0;
940864dda   Rafael J. Wysocki   [PATCH] swsusp: U...
2477
  	/* Check if we have already loaded the entire image */
d3c1b24c5   Jiri Slaby   PM / Hibernate: S...
2478
  	if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages)
f577eb30a   Rafael J. Wysocki   [PATCH] swsusp: l...
2479
  		return 0;
940864dda   Rafael J. Wysocki   [PATCH] swsusp: U...
2480

d3c1b24c5   Jiri Slaby   PM / Hibernate: S...
2481
2482
2483
  	handle->sync_read = 1;
  
  	if (!handle->cur) {
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
2484
2485
2486
  		if (!buffer)
  			/* This makes the buffer be freed by swsusp_free() */
  			buffer = get_image_page(GFP_ATOMIC, PG_ANY);
f577eb30a   Rafael J. Wysocki   [PATCH] swsusp: l...
2487
2488
  		if (!buffer)
  			return -ENOMEM;
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
2489

f577eb30a   Rafael J. Wysocki   [PATCH] swsusp: l...
2490
  		handle->buffer = buffer;
d3c1b24c5   Jiri Slaby   PM / Hibernate: S...
2491
2492
2493
2494
  	} else if (handle->cur == 1) {
  		error = load_header(buffer);
  		if (error)
  			return error;
940864dda   Rafael J. Wysocki   [PATCH] swsusp: U...
2495

9c744481c   Rafael J. Wysocki   PM / hibernate: D...
2496
  		safe_pages_list = NULL;
d3c1b24c5   Jiri Slaby   PM / Hibernate: S...
2497
2498
2499
  		error = memory_bm_create(&copy_bm, GFP_ATOMIC, PG_ANY);
  		if (error)
  			return error;
85055dd80   Martin Schwidefsky   PM / Hibernate: I...
2500
2501
2502
2503
  		/* Allocate buffer for page keys. */
  		error = page_key_alloc(nr_copy_pages);
  		if (error)
  			return error;
4c0b6c10f   Rafael J. Wysocki   PM / hibernate: I...
2504
  		hibernate_restore_protection_begin();
d3c1b24c5   Jiri Slaby   PM / Hibernate: S...
2505
2506
2507
2508
  	} else if (handle->cur <= nr_meta_pages + 1) {
  		error = unpack_orig_pfns(buffer, &copy_bm);
  		if (error)
  			return error;
940864dda   Rafael J. Wysocki   [PATCH] swsusp: U...
2509

d3c1b24c5   Jiri Slaby   PM / Hibernate: S...
2510
2511
  		if (handle->cur == nr_meta_pages + 1) {
  			error = prepare_image(&orig_bm, &copy_bm);
69643279a   Rafael J. Wysocki   Hibernate: Do not...
2512
2513
  			if (error)
  				return error;
d3c1b24c5   Jiri Slaby   PM / Hibernate: S...
2514
2515
2516
  			chain_init(&ca, GFP_ATOMIC, PG_SAFE);
  			memory_bm_position_reset(&orig_bm);
  			restore_pblist = NULL;
940864dda   Rafael J. Wysocki   [PATCH] swsusp: U...
2517
  			handle->buffer = get_buffer(&orig_bm, &ca);
d3c1b24c5   Jiri Slaby   PM / Hibernate: S...
2518
  			handle->sync_read = 0;
69643279a   Rafael J. Wysocki   Hibernate: Do not...
2519
2520
  			if (IS_ERR(handle->buffer))
  				return PTR_ERR(handle->buffer);
f577eb30a   Rafael J. Wysocki   [PATCH] swsusp: l...
2521
  		}
f577eb30a   Rafael J. Wysocki   [PATCH] swsusp: l...
2522
  	} else {
d3c1b24c5   Jiri Slaby   PM / Hibernate: S...
2523
  		copy_last_highmem_page();
85055dd80   Martin Schwidefsky   PM / Hibernate: I...
2524
2525
  		/* Restore page key for data page (s390 only). */
  		page_key_write(handle->buffer);
4c0b6c10f   Rafael J. Wysocki   PM / hibernate: I...
2526
  		hibernate_restore_protect_page(handle->buffer);
d3c1b24c5   Jiri Slaby   PM / Hibernate: S...
2527
2528
2529
2530
2531
  		handle->buffer = get_buffer(&orig_bm, &ca);
  		if (IS_ERR(handle->buffer))
  			return PTR_ERR(handle->buffer);
  		if (handle->buffer != buffer)
  			handle->sync_read = 0;
f577eb30a   Rafael J. Wysocki   [PATCH] swsusp: l...
2532
  	}
d3c1b24c5   Jiri Slaby   PM / Hibernate: S...
2533
2534
  	handle->cur++;
  	return PAGE_SIZE;
f577eb30a   Rafael J. Wysocki   [PATCH] swsusp: l...
2535
  }
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
2536
  /**
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
2537
2538
2539
2540
2541
2542
   * snapshot_write_finalize - Complete the loading of a hibernation image.
   *
   * Must be called after the last call to snapshot_write_next() in case the last
   * page in the image happens to be a highmem page and its contents should be
   * stored in highmem.  Additionally, it recycles bitmap memory that's not
   * necessary any more.
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
2543
   */
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
2544
2545
2546
  void snapshot_write_finalize(struct snapshot_handle *handle)
  {
  	copy_last_highmem_page();
85055dd80   Martin Schwidefsky   PM / Hibernate: I...
2547
2548
2549
  	/* Restore page key for data page (s390 only). */
  	page_key_write(handle->buffer);
  	page_key_free();
4c0b6c10f   Rafael J. Wysocki   PM / hibernate: I...
2550
  	hibernate_restore_protect_page(handle->buffer);
307c5971c   Rafael J. Wysocki   PM / hibernate: R...
2551
  	/* Do that only if we have loaded the image entirely */
d3c1b24c5   Jiri Slaby   PM / Hibernate: S...
2552
  	if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages) {
307c5971c   Rafael J. Wysocki   PM / hibernate: R...
2553
  		memory_bm_recycle(&orig_bm);
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
2554
2555
2556
  		free_highmem_data();
  	}
  }
f577eb30a   Rafael J. Wysocki   [PATCH] swsusp: l...
2557
2558
  int snapshot_image_loaded(struct snapshot_handle *handle)
  {
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
2559
  	return !(!nr_copy_pages || !last_highmem_page_copied() ||
940864dda   Rafael J. Wysocki   [PATCH] swsusp: U...
2560
2561
  			handle->cur <= nr_meta_pages + nr_copy_pages);
  }
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
2562
2563
  #ifdef CONFIG_HIGHMEM
  /* Assumes that @buf is ready and points to a "safe" page */
efd5a8524   Rafael J. Wysocki   PM / hibernate: C...
2564
2565
  static inline void swap_two_pages_data(struct page *p1, struct page *p2,
  				       void *buf)
940864dda   Rafael J. Wysocki   [PATCH] swsusp: U...
2566
  {
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
2567
  	void *kaddr1, *kaddr2;
0de9a1e28   Cong Wang   power: remove the...
2568
2569
  	kaddr1 = kmap_atomic(p1);
  	kaddr2 = kmap_atomic(p2);
3ecb01df3   Jan Beulich   use clear_page()/...
2570
2571
2572
  	copy_page(buf, kaddr1);
  	copy_page(kaddr1, kaddr2);
  	copy_page(kaddr2, buf);
0de9a1e28   Cong Wang   power: remove the...
2573
2574
  	kunmap_atomic(kaddr2);
  	kunmap_atomic(kaddr1);
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
2575
2576
2577
  }
  
  /**
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
2578
2579
2580
2581
2582
   * restore_highmem - Put highmem image pages into their original locations.
   *
   * For each highmem page that was in use before hibernation and is included in
   * the image, and also has been allocated by the "restore" kernel, swap its
   * current contents with the previous (ie. "before hibernation") ones.
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
2583
   *
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
2584
2585
   * If the restore eventually fails, we can call this function once again and
   * restore the highmem state as seen by the restore kernel.
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
2586
   */
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
2587
2588
2589
2590
2591
2592
2593
2594
2595
2596
2597
2598
2599
2600
2601
2602
2603
2604
  int restore_highmem(void)
  {
  	struct highmem_pbe *pbe = highmem_pblist;
  	void *buf;
  
  	if (!pbe)
  		return 0;
  
  	buf = get_image_page(GFP_ATOMIC, PG_SAFE);
  	if (!buf)
  		return -ENOMEM;
  
  	while (pbe) {
  		swap_two_pages_data(pbe->copy_page, pbe->orig_page, buf);
  		pbe = pbe->next;
  	}
  	free_image_page(buf, PG_UNSAFE_CLEAR);
  	return 0;
f577eb30a   Rafael J. Wysocki   [PATCH] swsusp: l...
2605
  }
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
2606
  #endif /* CONFIG_HIGHMEM */