Blame view

kernel/power/snapshot.c 71.5 KB
55716d264   Thomas Gleixner   treewide: Replace...
1
  // SPDX-License-Identifier: GPL-2.0-only
25761b6eb   Rafael J. Wysocki   [PATCH] swsusp: m...
2
  /*
96bc7aec2   Pavel Machek   [PATCH] swsusp: r...
3
   * linux/kernel/power/snapshot.c
25761b6eb   Rafael J. Wysocki   [PATCH] swsusp: m...
4
   *
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
5
   * This file provides system snapshot/restore functionality for swsusp.
25761b6eb   Rafael J. Wysocki   [PATCH] swsusp: m...
6
   *
a2531293d   Pavel Machek   update email address
7
   * Copyright (C) 1998-2005 Pavel Machek <pavel@ucw.cz>
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
8
   * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
25761b6eb   Rafael J. Wysocki   [PATCH] swsusp: m...
9
   */
7a7b99bf8   Luigi Semenzato   PM: hibernate: Ad...
10
  #define pr_fmt(fmt) "PM: hibernation: " fmt
64ec72a1e   Joe Perches   PM: Use a more co...
11

f577eb30a   Rafael J. Wysocki   [PATCH] swsusp: l...
12
  #include <linux/version.h>
25761b6eb   Rafael J. Wysocki   [PATCH] swsusp: m...
13
14
15
  #include <linux/module.h>
  #include <linux/mm.h>
  #include <linux/suspend.h>
25761b6eb   Rafael J. Wysocki   [PATCH] swsusp: m...
16
  #include <linux/delay.h>
25761b6eb   Rafael J. Wysocki   [PATCH] swsusp: m...
17
  #include <linux/bitops.h>
25761b6eb   Rafael J. Wysocki   [PATCH] swsusp: m...
18
  #include <linux/spinlock.h>
25761b6eb   Rafael J. Wysocki   [PATCH] swsusp: m...
19
  #include <linux/kernel.h>
25761b6eb   Rafael J. Wysocki   [PATCH] swsusp: m...
20
21
  #include <linux/pm.h>
  #include <linux/device.h>
74dfd666d   Rafael J. Wysocki   swsusp: do not us...
22
  #include <linux/init.h>
57c8a661d   Mike Rapoport   mm: remove includ...
23
  #include <linux/memblock.h>
38b8d208a   Ingo Molnar   sched/headers: Pr...
24
  #include <linux/nmi.h>
25761b6eb   Rafael J. Wysocki   [PATCH] swsusp: m...
25
26
27
  #include <linux/syscalls.h>
  #include <linux/console.h>
  #include <linux/highmem.h>
846705deb   Rafael J. Wysocki   Hibernate: Take o...
28
  #include <linux/list.h>
5a0e3ad6a   Tejun Heo   include cleanup: ...
29
  #include <linux/slab.h>
52f5684c8   Gideon Israel Dsouza   kernel: use macro...
30
  #include <linux/compiler.h>
db5976058   Tina Ruchandani   PM / Hibernate: M...
31
  #include <linux/ktime.h>
61f6d09a9   Michael Ellerman   kernel/power/snap...
32
  #include <linux/set_memory.h>
25761b6eb   Rafael J. Wysocki   [PATCH] swsusp: m...
33

7c0f6ba68   Linus Torvalds   Replace <asm/uacc...
34
  #include <linux/uaccess.h>
25761b6eb   Rafael J. Wysocki   [PATCH] swsusp: m...
35
  #include <asm/mmu_context.h>
25761b6eb   Rafael J. Wysocki   [PATCH] swsusp: m...
36
37
  #include <asm/tlbflush.h>
  #include <asm/io.h>
25761b6eb   Rafael J. Wysocki   [PATCH] swsusp: m...
38
  #include "power.h"
49368a47f   Balbir Singh   PM / hibernate: U...
39
  #if defined(CONFIG_STRICT_KERNEL_RWX) && defined(CONFIG_ARCH_HAS_SET_MEMORY)
4c0b6c10f   Rafael J. Wysocki   PM / hibernate: I...
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
/* Set via enable_restore_image_protection() before restore starts. */
static bool hibernate_restore_protection;
/* True only while a protected restore is in progress. */
static bool hibernate_restore_protection_active;

/* Request that image pages be write-protected during image restoration. */
void enable_restore_image_protection(void)
{
	hibernate_restore_protection = true;
}

/* Arm page protection for the restore phase, if it has been requested. */
static inline void hibernate_restore_protection_begin(void)
{
	hibernate_restore_protection_active = hibernate_restore_protection;
}

/* Disarm page protection once restoration is complete. */
static inline void hibernate_restore_protection_end(void)
{
	hibernate_restore_protection_active = false;
}

/* Make the page at @page_address read-only while protection is active. */
static inline void hibernate_restore_protect_page(void *page_address)
{
	if (hibernate_restore_protection_active)
		set_memory_ro((unsigned long)page_address, 1);
}

/* Make the page at @page_address writable again while protection is active. */
static inline void hibernate_restore_unprotect_page(void *page_address)
{
	if (hibernate_restore_protection_active)
		set_memory_rw((unsigned long)page_address, 1);
}
#else
static inline void hibernate_restore_protection_begin(void) {}
static inline void hibernate_restore_protection_end(void) {}
static inline void hibernate_restore_protect_page(void *page_address) {}
static inline void hibernate_restore_unprotect_page(void *page_address) {}
49368a47f   Balbir Singh   PM / hibernate: U...
74
  #endif /* CONFIG_STRICT_KERNEL_RWX  && CONFIG_ARCH_HAS_SET_MEMORY */
4c0b6c10f   Rafael J. Wysocki   PM / hibernate: I...
75

74dfd666d   Rafael J. Wysocki   swsusp: do not us...
76
77
78
  static int swsusp_page_is_free(struct page *);
  static void swsusp_set_page_forbidden(struct page *);
  static void swsusp_unset_page_forbidden(struct page *);
fe419535d   Rafael J. Wysocki   PM/Hibernate: Mov...
79
  /*
ddeb64870   Rafael J. Wysocki   PM / Hibernate: A...
80
81
82
83
84
85
86
87
88
89
90
91
   * Number of bytes to reserve for memory allocations made by device drivers
   * from their ->freeze() and ->freeze_noirq() callbacks so that they don't
   * cause image creation to fail (tunable via /sys/power/reserved_size).
   */
  unsigned long reserved_size;
  
/* Initialize reserved_size to the default of SPARE_PAGES worth of memory. */
void __init hibernate_reserved_size_init(void)
{
	reserved_size = SPARE_PAGES * PAGE_SIZE;
}
  
  /*
fe419535d   Rafael J. Wysocki   PM/Hibernate: Mov...
92
   * Preferred image size in bytes (tunable via /sys/power/image_size).
1c1be3a94   Rafael J. Wysocki   Revert "PM / Hibe...
93
94
95
   * When it is set to N, swsusp will do its best to ensure the image
   * size will not exceed N bytes, but if that is impossible, it will
   * try to create the smallest image possible.
fe419535d   Rafael J. Wysocki   PM/Hibernate: Mov...
96
   */
ac5c24ec1   Rafael J. Wysocki   PM / Hibernate: M...
97
98
99
100
  unsigned long image_size;
  
  void __init hibernate_image_size_init(void)
  {
ca79b0c21   Arun KS   mm: convert total...
101
  	image_size = ((totalram_pages() * 2) / 5) * PAGE_SIZE;
ac5c24ec1   Rafael J. Wysocki   PM / Hibernate: M...
102
  }
fe419535d   Rafael J. Wysocki   PM/Hibernate: Mov...
103

ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
104
105
  /*
   * List of PBEs needed for restoring the pages that were allocated before
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
106
107
108
109
   * the suspend and included in the suspend image, but have also been
   * allocated by the "resume" kernel, so their contents cannot be written
   * directly to their "original" page frames.
   */
75534b50c   Rafael J. Wysocki   [PATCH] Change th...
110
  struct pbe *restore_pblist;
9c744481c   Rafael J. Wysocki   PM / hibernate: D...
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
/* struct linked_page is used to build chains of pages */

/* Payload bytes left in a page after the 'next' pointer. */
#define LINKED_PAGE_DATA_SIZE	(PAGE_SIZE - sizeof(void *))

struct linked_page {
	struct linked_page *next;	/* next page in the chain */
	char data[LINKED_PAGE_DATA_SIZE];	/* usable storage in this page */
} __packed;

/*
 * List of "safe" pages (ie. pages that were not used by the image kernel
 * before hibernation) that may be used as temporary storage for image kernel
 * memory contents.
 */
static struct linked_page *safe_pages_list;
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
126
  /* Pointer to an auxiliary buffer (1 page) */
940864dda   Rafael J. Wysocki   [PATCH] swsusp: U...
127
  static void *buffer;
7088a5c00   Rafael J. Wysocki   [PATCH] swsusp: i...
128

0bcd888d6   Rafael J. Wysocki   [PATCH] swsusp: I...
129
130
131
132
  #define PG_ANY		0
  #define PG_SAFE		1
  #define PG_UNSAFE_CLEAR	1
  #define PG_UNSAFE_KEEP	0
940864dda   Rafael J. Wysocki   [PATCH] swsusp: U...
133
  static unsigned int allocated_unsafe_pages;
f6143aa60   Rafael J. Wysocki   [PATCH] swsusp: R...
134

ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
135
136
137
138
139
140
141
142
143
144
145
146
147
  /**
   * get_image_page - Allocate a page for a hibernation image.
   * @gfp_mask: GFP mask for the allocation.
   * @safe_needed: Get pages that were not used before hibernation (restore only)
   *
   * During image restoration, for storing the PBE list and the image data, we can
   * only use memory pages that do not conflict with the pages used before
   * hibernation.  The "unsafe" pages have PageNosaveFree set and we count them
   * using allocated_unsafe_pages.
   *
   * Each allocated image page is marked as PageNosave and PageNosaveFree so that
   * swsusp_free() can release it.
   */
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
148
  static void *get_image_page(gfp_t gfp_mask, int safe_needed)
f6143aa60   Rafael J. Wysocki   [PATCH] swsusp: R...
149
150
151
152
153
  {
  	void *res;
  
  	res = (void *)get_zeroed_page(gfp_mask);
  	if (safe_needed)
7be982349   Rafael J. Wysocki   swsusp: use inlin...
154
  		while (res && swsusp_page_is_free(virt_to_page(res))) {
f6143aa60   Rafael J. Wysocki   [PATCH] swsusp: R...
155
  			/* The page is unsafe, mark it for swsusp_free() */
7be982349   Rafael J. Wysocki   swsusp: use inlin...
156
  			swsusp_set_page_forbidden(virt_to_page(res));
940864dda   Rafael J. Wysocki   [PATCH] swsusp: U...
157
  			allocated_unsafe_pages++;
f6143aa60   Rafael J. Wysocki   [PATCH] swsusp: R...
158
159
160
  			res = (void *)get_zeroed_page(gfp_mask);
  		}
  	if (res) {
7be982349   Rafael J. Wysocki   swsusp: use inlin...
161
162
  		swsusp_set_page_forbidden(virt_to_page(res));
  		swsusp_set_page_free(virt_to_page(res));
f6143aa60   Rafael J. Wysocki   [PATCH] swsusp: R...
163
164
165
  	}
  	return res;
  }
9c744481c   Rafael J. Wysocki   PM / hibernate: D...
166
167
168
169
170
171
172
173
174
175
176
  static void *__get_safe_page(gfp_t gfp_mask)
  {
  	if (safe_pages_list) {
  		void *ret = safe_pages_list;
  
  		safe_pages_list = safe_pages_list->next;
  		memset(ret, 0, PAGE_SIZE);
  		return ret;
  	}
  	return get_image_page(gfp_mask, PG_SAFE);
  }
f6143aa60   Rafael J. Wysocki   [PATCH] swsusp: R...
177
178
  unsigned long get_safe_page(gfp_t gfp_mask)
  {
9c744481c   Rafael J. Wysocki   PM / hibernate: D...
179
  	return (unsigned long)__get_safe_page(gfp_mask);
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
180
  }
5b6d15de2   Rafael J. Wysocki   [PATCH] swsusp: F...
181
182
  static struct page *alloc_image_page(gfp_t gfp_mask)
  {
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
183
184
185
186
  	struct page *page;
  
  	page = alloc_page(gfp_mask);
  	if (page) {
7be982349   Rafael J. Wysocki   swsusp: use inlin...
187
188
  		swsusp_set_page_forbidden(page);
  		swsusp_set_page_free(page);
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
189
190
  	}
  	return page;
f6143aa60   Rafael J. Wysocki   [PATCH] swsusp: R...
191
  }
307c5971c   Rafael J. Wysocki   PM / hibernate: R...
192
193
194
195
196
197
198
  static void recycle_safe_page(void *page_address)
  {
  	struct linked_page *lp = page_address;
  
  	lp->next = safe_pages_list;
  	safe_pages_list = lp;
  }
f6143aa60   Rafael J. Wysocki   [PATCH] swsusp: R...
199
  /**
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
200
201
202
203
204
205
   * free_image_page - Free a page allocated for hibernation image.
   * @addr: Address of the page to free.
   * @clear_nosave_free: If set, clear the PageNosaveFree bit for the page.
   *
   * The page to free should have been allocated by get_image_page() (page flags
   * set by it are affected).
f6143aa60   Rafael J. Wysocki   [PATCH] swsusp: R...
206
   */
f6143aa60   Rafael J. Wysocki   [PATCH] swsusp: R...
207
208
  static inline void free_image_page(void *addr, int clear_nosave_free)
  {
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
209
210
211
212
213
  	struct page *page;
  
  	BUG_ON(!virt_addr_valid(addr));
  
  	page = virt_to_page(addr);
7be982349   Rafael J. Wysocki   swsusp: use inlin...
214
  	swsusp_unset_page_forbidden(page);
f6143aa60   Rafael J. Wysocki   [PATCH] swsusp: R...
215
  	if (clear_nosave_free)
7be982349   Rafael J. Wysocki   swsusp: use inlin...
216
  		swsusp_unset_page_free(page);
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
217
218
  
  	__free_page(page);
f6143aa60   Rafael J. Wysocki   [PATCH] swsusp: R...
219
  }
efd5a8524   Rafael J. Wysocki   PM / hibernate: C...
220
221
  static inline void free_list_of_pages(struct linked_page *list,
  				      int clear_page_nosave)
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
222
223
224
225
226
227
228
229
  {
  	while (list) {
  		struct linked_page *lp = list->next;
  
  		free_image_page(list, clear_page_nosave);
  		list = lp;
  	}
  }
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
230
231
232
233
234
235
236
237
238
239
240
241
  /*
   * struct chain_allocator is used for allocating small objects out of
   * a linked list of pages called 'the chain'.
   *
   * The chain grows each time when there is no room for a new object in
   * the current page.  The allocated objects cannot be freed individually.
   * It is only possible to free them all at once, by freeing the entire
   * chain.
   *
   * NOTE: The chain allocator may be inefficient if the allocated objects
   * are not much smaller than PAGE_SIZE.
   */
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
242
243
244
  struct chain_allocator {
  	struct linked_page *chain;	/* the chain */
  	unsigned int used_space;	/* total size of objects allocated out
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
245
  					   of the current page */
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
246
247
248
  	gfp_t gfp_mask;		/* mask for allocating pages */
  	int safe_needed;	/* if set, only "safe" pages are allocated */
  };
efd5a8524   Rafael J. Wysocki   PM / hibernate: C...
249
250
  static void chain_init(struct chain_allocator *ca, gfp_t gfp_mask,
  		       int safe_needed)
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
251
252
253
254
255
256
257
258
259
260
261
262
263
  {
  	ca->chain = NULL;
  	ca->used_space = LINKED_PAGE_DATA_SIZE;
  	ca->gfp_mask = gfp_mask;
  	ca->safe_needed = safe_needed;
  }
  
  static void *chain_alloc(struct chain_allocator *ca, unsigned int size)
  {
  	void *ret;
  
  	if (LINKED_PAGE_DATA_SIZE - ca->used_space < size) {
  		struct linked_page *lp;
9c744481c   Rafael J. Wysocki   PM / hibernate: D...
264
265
  		lp = ca->safe_needed ? __get_safe_page(ca->gfp_mask) :
  					get_image_page(ca->gfp_mask, PG_ANY);
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
266
267
268
269
270
271
272
273
274
275
276
  		if (!lp)
  			return NULL;
  
  		lp->next = ca->chain;
  		ca->chain = lp;
  		ca->used_space = 0;
  	}
  	ret = ca->chain->data + ca->used_space;
  	ca->used_space += size;
  	return ret;
  }
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
277
  /**
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
278
   * Data types related to memory bitmaps.
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
279
   *
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
280
281
282
283
284
 * Memory bitmap is a structure consisting of many linked lists of
 * objects.  The main list's elements are of type struct zone_bitmap
 * and each of them corresponds to one zone.  For each zone bitmap
 * object there is a list of objects of type struct bm_block that
 * represent the blocks of the bitmap in which information is stored.
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
285
   *
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
286
287
288
289
   * struct memory_bitmap contains a pointer to the main list of zone
   * bitmap objects, a struct bm_position used for browsing the bitmap,
   * and a pointer to the list of pages used for allocating all of the
   * zone bitmap objects and bitmap block objects.
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
290
   *
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
291
292
293
294
   * NOTE: It has to be possible to lay out the bitmap in memory
   * using only allocations of order 0.  Additionally, the bitmap is
   * designed to work with arbitrary number of zones (this is over the
   * top for now, but let's avoid making unnecessary assumptions ;-).
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
295
   *
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
296
297
298
299
   * struct zone_bitmap contains a pointer to a list of bitmap block
   * objects and a pointer to the bitmap block object that has been
   * most recently used for setting bits.  Additionally, it contains the
   * PFNs that correspond to the start and end of the represented zone.
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
300
   *
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
301
302
303
304
   * struct bm_block contains a pointer to the memory page in which
   * information is stored (in the form of a block of bitmap)
   * It also contains the pfns that correspond to the start and end of
   * the represented memory area.
f469f02dc   Joerg Roedel   PM / Hibernate: C...
305
   *
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
306
307
308
   * The memory bitmap is organized as a radix tree to guarantee fast random
   * access to the bits. There is one radix tree for each zone (as returned
   * from create_mem_extents).
f469f02dc   Joerg Roedel   PM / Hibernate: C...
309
   *
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
310
311
312
313
 * One radix tree is represented by one struct mem_zone_bm_rtree. There are
 * two linked lists for the nodes of the tree, one for the inner nodes and
 * one for the leaf nodes. The linked leaf nodes are used for fast linear
 * access of the memory bitmap.
f469f02dc   Joerg Roedel   PM / Hibernate: C...
314
   *
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
315
   * The struct rtree_node represents one node of the radix tree.
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
316
317
318
   */
  
  #define BM_END_OF_MAP	(~0UL)
8de030732   Wu Fengguang   PM: Trivial fixes
319
  #define BM_BITS_PER_BLOCK	(PAGE_SIZE * BITS_PER_BYTE)
f469f02dc   Joerg Roedel   PM / Hibernate: C...
320
321
  #define BM_BLOCK_SHIFT		(PAGE_SHIFT + 3)
  #define BM_BLOCK_MASK		((1UL << BM_BLOCK_SHIFT) - 1)
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
322

f469f02dc   Joerg Roedel   PM / Hibernate: C...
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
/*
 * struct rtree_node is a wrapper struct to link the nodes
 * of the rtree together for easy linear iteration over
 * bits and easy freeing
 */
struct rtree_node {
	struct list_head list;	/* entry in a zone's nodes or leaves list */
	unsigned long *data;	/* one page allocated by get_image_page() */
};

/*
 * struct mem_zone_bm_rtree represents a bitmap used for one
 * populated memory zone.
 */
struct mem_zone_bm_rtree {
	struct list_head list;		/* Link Zones together         */
	struct list_head nodes;		/* Radix Tree inner nodes      */
	struct list_head leaves;	/* Radix Tree leaves           */
	unsigned long start_pfn;	/* Zone start page frame       */
	unsigned long end_pfn;		/* Zone end page frame + 1     */
	struct rtree_node *rtree;	/* Radix Tree Root             */
	int levels;			/* Number of Radix Tree Levels */
	unsigned int blocks;		/* Number of Bitmap Blocks     */
};
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
347
348
349
/* struct bm_position is used for browsing memory bitmaps */
  
  struct bm_position {
3a20cb177   Joerg Roedel   PM / Hibernate: I...
350
351
352
353
  	struct mem_zone_bm_rtree *zone;
  	struct rtree_node *node;
  	unsigned long node_pfn;
  	int node_bit;
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
354
355
356
  };
  
  struct memory_bitmap {
f469f02dc   Joerg Roedel   PM / Hibernate: C...
357
  	struct list_head zones;
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
358
  	struct linked_page *p_list;	/* list of pages used to store zone
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
359
360
  					   bitmap objects and bitmap block
  					   objects */
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
361
362
363
364
  	struct bm_position cur;	/* most recently used bit position */
  };
  
  /* Functions that operate on memory bitmaps */
f469f02dc   Joerg Roedel   PM / Hibernate: C...
365
366
367
368
369
370
371
  #define BM_ENTRIES_PER_LEVEL	(PAGE_SIZE / sizeof(unsigned long))
  #if BITS_PER_LONG == 32
  #define BM_RTREE_LEVEL_SHIFT	(PAGE_SHIFT - 2)
  #else
  #define BM_RTREE_LEVEL_SHIFT	(PAGE_SHIFT - 3)
  #endif
  #define BM_RTREE_LEVEL_MASK	((1UL << BM_RTREE_LEVEL_SHIFT) - 1)
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
372
373
  /**
   * alloc_rtree_node - Allocate a new node and add it to the radix tree.
f469f02dc   Joerg Roedel   PM / Hibernate: C...
374
   *
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
375
376
377
 * This function is used to allocate inner nodes as well as the
 * leaf nodes of the radix tree. It also adds the node to the
 * corresponding linked list passed in by the *list parameter.
f469f02dc   Joerg Roedel   PM / Hibernate: C...
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
   */
  static struct rtree_node *alloc_rtree_node(gfp_t gfp_mask, int safe_needed,
  					   struct chain_allocator *ca,
  					   struct list_head *list)
  {
  	struct rtree_node *node;
  
  	node = chain_alloc(ca, sizeof(struct rtree_node));
  	if (!node)
  		return NULL;
  
  	node->data = get_image_page(gfp_mask, safe_needed);
  	if (!node->data)
  		return NULL;
  
  	list_add_tail(&node->list, list);
  
  	return node;
  }
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
397
398
  /**
   * add_rtree_block - Add a new leave node to the radix tree.
f469f02dc   Joerg Roedel   PM / Hibernate: C...
399
   *
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
400
401
402
 * The leaf nodes need to be allocated in order to keep the leaves
 * linked list in order. This is guaranteed by the zone->blocks
 * counter.
f469f02dc   Joerg Roedel   PM / Hibernate: C...
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
   */
static int add_rtree_block(struct mem_zone_bm_rtree *zone, gfp_t gfp_mask,
			   int safe_needed, struct chain_allocator *ca)
{
	struct rtree_node *node, *block, **dst;
	unsigned int levels_needed, block_nr;
	int i;

	/* The new leaf gets index zone->blocks (blocks added in order). */
	block_nr = zone->blocks;
	levels_needed = 0;

	/* How many levels do we need for this block nr? */
	while (block_nr) {
		levels_needed += 1;
		block_nr >>= BM_RTREE_LEVEL_SHIFT;
	}

	/* Make sure the rtree has enough levels */
	for (i = zone->levels; i < levels_needed; i++) {
		node = alloc_rtree_node(gfp_mask, safe_needed, ca,
					&zone->nodes);
		if (!node)
			return -ENOMEM;

		/* The old root becomes child 0 of the new root. */
		node->data[0] = (unsigned long)zone->rtree;
		zone->rtree = node;
		zone->levels += 1;
	}

	/* Allocate new block */
	block = alloc_rtree_node(gfp_mask, safe_needed, ca, &zone->leaves);
	if (!block)
		return -ENOMEM;

	/* Now walk the rtree to insert the block */
	node = zone->rtree;
	dst = &zone->rtree;
	block_nr = zone->blocks;
	for (i = zone->levels; i > 0; i--) {
		int index;

		if (!node) {
			/* Inner node missing on this path - create it. */
			node = alloc_rtree_node(gfp_mask, safe_needed, ca,
						&zone->nodes);
			if (!node)
				return -ENOMEM;
			/* Hook the new inner node into its parent slot. */
			*dst = node;
		}

		/* Child slot index for block_nr at this tree level. */
		index = block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT);
		index &= BM_RTREE_LEVEL_MASK;
		dst = (struct rtree_node **)&((*dst)->data[index]);
		node = *dst;
	}

	/* dst now points at the leaf slot for this block. */
	zone->blocks += 1;
	*dst = block;

	return 0;
}
  
  static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone,
  			       int clear_nosave_free);
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
466
467
  /**
   * create_zone_bm_rtree - Create a radix tree for one zone.
f469f02dc   Joerg Roedel   PM / Hibernate: C...
468
   *
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
469
470
471
 * Allocates the mem_zone_bm_rtree structure and initializes it.
 * This function also allocates and builds the radix tree for the
 * zone.
f469f02dc   Joerg Roedel   PM / Hibernate: C...
472
   */
efd5a8524   Rafael J. Wysocki   PM / hibernate: C...
473
474
475
476
477
  static struct mem_zone_bm_rtree *create_zone_bm_rtree(gfp_t gfp_mask,
  						      int safe_needed,
  						      struct chain_allocator *ca,
  						      unsigned long start,
  						      unsigned long end)
f469f02dc   Joerg Roedel   PM / Hibernate: C...
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
{
	struct mem_zone_bm_rtree *zone;
	unsigned int i, nr_blocks;
	unsigned long pages;

	/* Number of pfns covered by this zone: [start, end). */
	pages = end - start;
	zone  = chain_alloc(ca, sizeof(struct mem_zone_bm_rtree));
	if (!zone)
		return NULL;

	INIT_LIST_HEAD(&zone->nodes);
	INIT_LIST_HEAD(&zone->leaves);
	zone->start_pfn = start;
	zone->end_pfn = end;
	/* Each bitmap block holds BM_BITS_PER_BLOCK bits; round up. */
	nr_blocks = DIV_ROUND_UP(pages, BM_BITS_PER_BLOCK);

	for (i = 0; i < nr_blocks; i++) {
		if (add_rtree_block(zone, gfp_mask, safe_needed, ca)) {
			/* Release the bitmap pages attached so far. */
			free_zone_bm_rtree(zone, PG_UNSAFE_CLEAR);
			return NULL;
		}
	}

	return zone;
}
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
503
504
  /**
   * free_zone_bm_rtree - Free the memory of the radix tree.
f469f02dc   Joerg Roedel   PM / Hibernate: C...
505
   *
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
506
507
508
   * Free all node pages of the radix tree. The mem_zone_bm_rtree
   * structure itself is not freed here nor are the rtree_node
   * structs.
f469f02dc   Joerg Roedel   PM / Hibernate: C...
509
510
511
512
513
514
515
516
517
518
519
520
   */
  static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone,
  			       int clear_nosave_free)
  {
  	struct rtree_node *node;
  
  	list_for_each_entry(node, &zone->nodes, list)
  		free_image_page(node->data, clear_nosave_free);
  
  	list_for_each_entry(node, &zone->leaves, list)
  		free_image_page(node->data, clear_nosave_free);
  }
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
521
522
  static void memory_bm_position_reset(struct memory_bitmap *bm)
  {
3a20cb177   Joerg Roedel   PM / Hibernate: I...
523
524
525
526
527
528
  	bm->cur.zone = list_entry(bm->zones.next, struct mem_zone_bm_rtree,
  				  list);
  	bm->cur.node = list_entry(bm->cur.zone->leaves.next,
  				  struct rtree_node, list);
  	bm->cur.node_pfn = 0;
  	bm->cur.node_bit = 0;
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
529
530
531
  }
  
  static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free);
846705deb   Rafael J. Wysocki   Hibernate: Take o...
532
533
534
535
536
/* A contiguous range of page frames, kept in a sorted list of extents. */
struct mem_extent {
	struct list_head hook;	/* entry in the extents list */
	unsigned long start;	/* first pfn of the range */
	unsigned long end;	/* one past the last pfn of the range */
};
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
537
  /**
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
538
539
   * free_mem_extents - Free a list of memory extents.
   * @list: List of extents to free.
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
540
   */
846705deb   Rafael J. Wysocki   Hibernate: Take o...
541
542
543
  static void free_mem_extents(struct list_head *list)
  {
  	struct mem_extent *ext, *aux;
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
544

846705deb   Rafael J. Wysocki   Hibernate: Take o...
545
546
547
548
549
550
551
  	list_for_each_entry_safe(ext, aux, list, hook) {
  		list_del(&ext->hook);
  		kfree(ext);
  	}
  }
  
  /**
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
552
553
554
555
556
   * create_mem_extents - Create a list of memory extents.
   * @list: List to put the extents into.
   * @gfp_mask: Mask to use for memory allocations.
   *
   * The extents represent contiguous ranges of PFNs.
846705deb   Rafael J. Wysocki   Hibernate: Take o...
557
558
   */
  static int create_mem_extents(struct list_head *list, gfp_t gfp_mask)
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
559
  {
846705deb   Rafael J. Wysocki   Hibernate: Take o...
560
  	struct zone *zone;
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
561

846705deb   Rafael J. Wysocki   Hibernate: Take o...
562
  	INIT_LIST_HEAD(list);
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
563

ee99c71c5   KOSAKI Motohiro   mm: introduce for...
564
  	for_each_populated_zone(zone) {
846705deb   Rafael J. Wysocki   Hibernate: Take o...
565
566
  		unsigned long zone_start, zone_end;
  		struct mem_extent *ext, *cur, *aux;
846705deb   Rafael J. Wysocki   Hibernate: Take o...
567
  		zone_start = zone->zone_start_pfn;
c33bc315f   Xishi Qiu   mm: use zone_end_...
568
  		zone_end = zone_end_pfn(zone);
846705deb   Rafael J. Wysocki   Hibernate: Take o...
569
570
571
572
  
  		list_for_each_entry(ext, list, hook)
  			if (zone_start <= ext->end)
  				break;
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
573

846705deb   Rafael J. Wysocki   Hibernate: Take o...
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
  		if (&ext->hook == list || zone_end < ext->start) {
  			/* New extent is necessary */
  			struct mem_extent *new_ext;
  
  			new_ext = kzalloc(sizeof(struct mem_extent), gfp_mask);
  			if (!new_ext) {
  				free_mem_extents(list);
  				return -ENOMEM;
  			}
  			new_ext->start = zone_start;
  			new_ext->end = zone_end;
  			list_add_tail(&new_ext->hook, &ext->hook);
  			continue;
  		}
  
  		/* Merge this zone's range of PFNs with the existing one */
  		if (zone_start < ext->start)
  			ext->start = zone_start;
  		if (zone_end > ext->end)
  			ext->end = zone_end;
  
  		/* More merging may be possible */
  		cur = ext;
  		list_for_each_entry_safe_continue(cur, aux, list, hook) {
  			if (zone_end < cur->start)
  				break;
  			if (zone_end < cur->end)
  				ext->end = cur->end;
  			list_del(&cur->hook);
  			kfree(cur);
  		}
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
605
  	}
846705deb   Rafael J. Wysocki   Hibernate: Take o...
606
607
  
  	return 0;
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
608
609
610
  }
  
  /**
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
611
612
   * memory_bm_create - Allocate memory for a memory bitmap.
   */
efd5a8524   Rafael J. Wysocki   PM / hibernate: C...
613
614
  static int memory_bm_create(struct memory_bitmap *bm, gfp_t gfp_mask,
  			    int safe_needed)
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
615
616
  {
  	struct chain_allocator ca;
846705deb   Rafael J. Wysocki   Hibernate: Take o...
617
618
619
  	struct list_head mem_extents;
  	struct mem_extent *ext;
  	int error;
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
620
621
  
  	chain_init(&ca, gfp_mask, safe_needed);
f469f02dc   Joerg Roedel   PM / Hibernate: C...
622
  	INIT_LIST_HEAD(&bm->zones);
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
623

846705deb   Rafael J. Wysocki   Hibernate: Take o...
624
625
626
  	error = create_mem_extents(&mem_extents, gfp_mask);
  	if (error)
  		return error;
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
627

846705deb   Rafael J. Wysocki   Hibernate: Take o...
628
  	list_for_each_entry(ext, &mem_extents, hook) {
f469f02dc   Joerg Roedel   PM / Hibernate: C...
629
  		struct mem_zone_bm_rtree *zone;
f469f02dc   Joerg Roedel   PM / Hibernate: C...
630
631
632
  
  		zone = create_zone_bm_rtree(gfp_mask, safe_needed, &ca,
  					    ext->start, ext->end);
9047eb629   Joerg Roedel   PM / Hibernate: R...
633
634
  		if (!zone) {
  			error = -ENOMEM;
f469f02dc   Joerg Roedel   PM / Hibernate: C...
635
  			goto Error;
9047eb629   Joerg Roedel   PM / Hibernate: R...
636
  		}
f469f02dc   Joerg Roedel   PM / Hibernate: C...
637
  		list_add_tail(&zone->list, &bm->zones);
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
638
  	}
846705deb   Rafael J. Wysocki   Hibernate: Take o...
639

b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
640
641
  	bm->p_list = ca.chain;
  	memory_bm_position_reset(bm);
846705deb   Rafael J. Wysocki   Hibernate: Take o...
642
643
644
   Exit:
  	free_mem_extents(&mem_extents);
  	return error;
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
645

846705deb   Rafael J. Wysocki   Hibernate: Take o...
646
   Error:
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
647
648
  	bm->p_list = ca.chain;
  	memory_bm_free(bm, PG_UNSAFE_CLEAR);
846705deb   Rafael J. Wysocki   Hibernate: Take o...
649
  	goto Exit;
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
650
651
652
  }
  
  /**
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
653
654
655
   * memory_bm_free - Free memory occupied by the memory bitmap.
   * @bm: Memory bitmap.
   */
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
656
657
  static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free)
  {
f469f02dc   Joerg Roedel   PM / Hibernate: C...
658
  	struct mem_zone_bm_rtree *zone;
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
659

f469f02dc   Joerg Roedel   PM / Hibernate: C...
660
661
  	list_for_each_entry(zone, &bm->zones, list)
  		free_zone_bm_rtree(zone, clear_nosave_free);
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
662
  	free_list_of_pages(bm->p_list, clear_nosave_free);
846705deb   Rafael J. Wysocki   Hibernate: Take o...
663

f469f02dc   Joerg Roedel   PM / Hibernate: C...
664
  	INIT_LIST_HEAD(&bm->zones);
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
665
666
667
  }
  
  /**
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
668
   * memory_bm_find_bit - Find the bit for a given PFN in a memory bitmap.
07a338236   Joerg Roedel   PM / Hibernate: A...
669
   *
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
670
671
672
673
674
   * Find the bit in memory bitmap @bm that corresponds to the given PFN.
   * The cur.zone, cur.block and cur.node_pfn members of @bm are updated.
   *
   * Walk the radix tree to find the page containing the bit that represents @pfn
   * and return the position of the bit in @addr and @bit_nr.
07a338236   Joerg Roedel   PM / Hibernate: A...
675
   */
9047eb629   Joerg Roedel   PM / Hibernate: R...
676
677
  static int memory_bm_find_bit(struct memory_bitmap *bm, unsigned long pfn,
  			      void **addr, unsigned int *bit_nr)
07a338236   Joerg Roedel   PM / Hibernate: A...
678
679
680
681
  {
  	struct mem_zone_bm_rtree *curr, *zone;
  	struct rtree_node *node;
  	int i, block_nr;
3a20cb177   Joerg Roedel   PM / Hibernate: I...
682
683
684
685
  	zone = bm->cur.zone;
  
  	if (pfn >= zone->start_pfn && pfn < zone->end_pfn)
  		goto zone_found;
07a338236   Joerg Roedel   PM / Hibernate: A...
686
687
688
689
690
691
692
693
694
695
696
697
  	zone = NULL;
  
  	/* Find the right zone */
  	list_for_each_entry(curr, &bm->zones, list) {
  		if (pfn >= curr->start_pfn && pfn < curr->end_pfn) {
  			zone = curr;
  			break;
  		}
  	}
  
  	if (!zone)
  		return -EFAULT;
3a20cb177   Joerg Roedel   PM / Hibernate: I...
698
  zone_found:
07a338236   Joerg Roedel   PM / Hibernate: A...
699
  	/*
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
700
701
  	 * We have found the zone. Now walk the radix tree to find the leaf node
  	 * for our PFN.
07a338236   Joerg Roedel   PM / Hibernate: A...
702
  	 */
da6043fe8   Andy Whitcroft   PM / hibernate: m...
703
704
  
  	/*
7b7b8a2c9   Randy Dunlap   kernel/: fix repe...
705
  	 * If the zone we wish to scan is the current zone and the
da6043fe8   Andy Whitcroft   PM / hibernate: m...
706
707
708
  	 * pfn falls into the current node then we do not need to walk
  	 * the tree.
  	 */
3a20cb177   Joerg Roedel   PM / Hibernate: I...
709
  	node = bm->cur.node;
da6043fe8   Andy Whitcroft   PM / hibernate: m...
710
711
  	if (zone == bm->cur.zone &&
  	    ((pfn - zone->start_pfn) & ~BM_BLOCK_MASK) == bm->cur.node_pfn)
3a20cb177   Joerg Roedel   PM / Hibernate: I...
712
  		goto node_found;
07a338236   Joerg Roedel   PM / Hibernate: A...
713
714
715
716
717
718
719
720
721
722
723
  	node      = zone->rtree;
  	block_nr  = (pfn - zone->start_pfn) >> BM_BLOCK_SHIFT;
  
  	for (i = zone->levels; i > 0; i--) {
  		int index;
  
  		index = block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT);
  		index &= BM_RTREE_LEVEL_MASK;
  		BUG_ON(node->data[index] == 0);
  		node = (struct rtree_node *)node->data[index];
  	}
3a20cb177   Joerg Roedel   PM / Hibernate: I...
724
725
726
727
728
  node_found:
  	/* Update last position */
  	bm->cur.zone = zone;
  	bm->cur.node = node;
  	bm->cur.node_pfn = (pfn - zone->start_pfn) & ~BM_BLOCK_MASK;
07a338236   Joerg Roedel   PM / Hibernate: A...
729
730
731
732
733
734
  	/* Set return values */
  	*addr = node->data;
  	*bit_nr = (pfn - zone->start_pfn) & BM_BLOCK_MASK;
  
  	return 0;
  }
74dfd666d   Rafael J. Wysocki   swsusp: do not us...
735
736
737
738
  static void memory_bm_set_bit(struct memory_bitmap *bm, unsigned long pfn)
  {
  	void *addr;
  	unsigned int bit;
a82f7119f   Rafael J. Wysocki   Hibernation: Fix ...
739
  	int error;
74dfd666d   Rafael J. Wysocki   swsusp: do not us...
740

a82f7119f   Rafael J. Wysocki   Hibernation: Fix ...
741
742
  	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
  	BUG_ON(error);
74dfd666d   Rafael J. Wysocki   swsusp: do not us...
743
744
  	set_bit(bit, addr);
  }
a82f7119f   Rafael J. Wysocki   Hibernation: Fix ...
745
746
747
748
749
750
751
752
753
  static int mem_bm_set_bit_check(struct memory_bitmap *bm, unsigned long pfn)
  {
  	void *addr;
  	unsigned int bit;
  	int error;
  
  	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
  	if (!error)
  		set_bit(bit, addr);
07a338236   Joerg Roedel   PM / Hibernate: A...
754

a82f7119f   Rafael J. Wysocki   Hibernation: Fix ...
755
756
  	return error;
  }
74dfd666d   Rafael J. Wysocki   swsusp: do not us...
757
758
759
760
  static void memory_bm_clear_bit(struct memory_bitmap *bm, unsigned long pfn)
  {
  	void *addr;
  	unsigned int bit;
a82f7119f   Rafael J. Wysocki   Hibernation: Fix ...
761
  	int error;
74dfd666d   Rafael J. Wysocki   swsusp: do not us...
762

a82f7119f   Rafael J. Wysocki   Hibernation: Fix ...
763
764
  	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
  	BUG_ON(error);
74dfd666d   Rafael J. Wysocki   swsusp: do not us...
765
766
  	clear_bit(bit, addr);
  }
fdd64ed54   Joerg Roedel   PM / hibernate: I...
767
768
769
770
771
772
773
  static void memory_bm_clear_current(struct memory_bitmap *bm)
  {
  	int bit;
  
  	bit = max(bm->cur.node_bit - 1, 0);
  	clear_bit(bit, bm->cur.node->data);
  }
74dfd666d   Rafael J. Wysocki   swsusp: do not us...
774
775
776
777
  static int memory_bm_test_bit(struct memory_bitmap *bm, unsigned long pfn)
  {
  	void *addr;
  	unsigned int bit;
9047eb629   Joerg Roedel   PM / Hibernate: R...
778
  	int error;
74dfd666d   Rafael J. Wysocki   swsusp: do not us...
779

a82f7119f   Rafael J. Wysocki   Hibernation: Fix ...
780
781
  	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
  	BUG_ON(error);
9047eb629   Joerg Roedel   PM / Hibernate: R...
782
  	return test_bit(bit, addr);
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
783
  }
69643279a   Rafael J. Wysocki   Hibernate: Do not...
784
785
786
787
  static bool memory_bm_pfn_present(struct memory_bitmap *bm, unsigned long pfn)
  {
  	void *addr;
  	unsigned int bit;
07a338236   Joerg Roedel   PM / Hibernate: A...
788

9047eb629   Joerg Roedel   PM / Hibernate: R...
789
  	return !memory_bm_find_bit(bm, pfn, &addr, &bit);
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
790
  }
3a20cb177   Joerg Roedel   PM / Hibernate: I...
791
  /*
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
792
   * rtree_next_node - Jump to the next leaf node.
3a20cb177   Joerg Roedel   PM / Hibernate: I...
793
   *
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
794
795
796
797
   * Set the position to the beginning of the next node in the
   * memory bitmap. This is either the next node in the current
   * zone's radix tree or the first node in the radix tree of the
   * next zone.
3a20cb177   Joerg Roedel   PM / Hibernate: I...
798
   *
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
799
   * Return true if there is a next node, false otherwise.
3a20cb177   Joerg Roedel   PM / Hibernate: I...
800
801
802
   */
  static bool rtree_next_node(struct memory_bitmap *bm)
  {
924d86967   James Morse   PM / hibernate: F...
803
804
805
  	if (!list_is_last(&bm->cur.node->list, &bm->cur.zone->leaves)) {
  		bm->cur.node = list_entry(bm->cur.node->list.next,
  					  struct rtree_node, list);
3a20cb177   Joerg Roedel   PM / Hibernate: I...
806
807
  		bm->cur.node_pfn += BM_BITS_PER_BLOCK;
  		bm->cur.node_bit  = 0;
0f7d83e85   Joerg Roedel   PM / Hibernate: T...
808
  		touch_softlockup_watchdog();
3a20cb177   Joerg Roedel   PM / Hibernate: I...
809
810
811
812
  		return true;
  	}
  
  	/* No more nodes, goto next zone */
924d86967   James Morse   PM / hibernate: F...
813
814
  	if (!list_is_last(&bm->cur.zone->list, &bm->zones)) {
  		bm->cur.zone = list_entry(bm->cur.zone->list.next,
3a20cb177   Joerg Roedel   PM / Hibernate: I...
815
  				  struct mem_zone_bm_rtree, list);
3a20cb177   Joerg Roedel   PM / Hibernate: I...
816
817
818
819
820
821
822
823
824
825
  		bm->cur.node = list_entry(bm->cur.zone->leaves.next,
  					  struct rtree_node, list);
  		bm->cur.node_pfn = 0;
  		bm->cur.node_bit = 0;
  		return true;
  	}
  
  	/* No more zones */
  	return false;
  }
9047eb629   Joerg Roedel   PM / Hibernate: R...
826
  /**
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
827
828
   * memory_bm_rtree_next_pfn - Find the next set bit in a memory bitmap.
   * @bm: Memory bitmap.
3a20cb177   Joerg Roedel   PM / Hibernate: I...
829
   *
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
830
831
832
   * Starting from the last returned position this function searches for the next
   * set bit in @bm and returns the PFN represented by it.  If no more bits are
   * set, BM_END_OF_MAP is returned.
9047eb629   Joerg Roedel   PM / Hibernate: R...
833
   *
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
834
835
   * It is required to run memory_bm_position_reset() before the first call to
   * this function for the given memory bitmap.
3a20cb177   Joerg Roedel   PM / Hibernate: I...
836
   */
9047eb629   Joerg Roedel   PM / Hibernate: R...
837
  static unsigned long memory_bm_next_pfn(struct memory_bitmap *bm)
3a20cb177   Joerg Roedel   PM / Hibernate: I...
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
  {
  	unsigned long bits, pfn, pages;
  	int bit;
  
  	do {
  		pages	  = bm->cur.zone->end_pfn - bm->cur.zone->start_pfn;
  		bits      = min(pages - bm->cur.node_pfn, BM_BITS_PER_BLOCK);
  		bit	  = find_next_bit(bm->cur.node->data, bits,
  					  bm->cur.node_bit);
  		if (bit < bits) {
  			pfn = bm->cur.zone->start_pfn + bm->cur.node_pfn + bit;
  			bm->cur.node_bit = bit + 1;
  			return pfn;
  		}
  	} while (rtree_next_node(bm));
  
  	return BM_END_OF_MAP;
  }
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
856
857
858
  /*
   * This structure represents a range of page frames the contents of which
   * should not be saved during hibernation.
74dfd666d   Rafael J. Wysocki   swsusp: do not us...
859
   */
74dfd666d   Rafael J. Wysocki   swsusp: do not us...
860
861
862
863
864
865
866
  struct nosave_region {
  	struct list_head list;
  	unsigned long start_pfn;
  	unsigned long end_pfn;
  };
  
  static LIST_HEAD(nosave_regions);
307c5971c   Rafael J. Wysocki   PM / hibernate: R...
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
  static void recycle_zone_bm_rtree(struct mem_zone_bm_rtree *zone)
  {
  	struct rtree_node *node;
  
  	list_for_each_entry(node, &zone->nodes, list)
  		recycle_safe_page(node->data);
  
  	list_for_each_entry(node, &zone->leaves, list)
  		recycle_safe_page(node->data);
  }
  
  static void memory_bm_recycle(struct memory_bitmap *bm)
  {
  	struct mem_zone_bm_rtree *zone;
  	struct linked_page *p_list;
  
  	list_for_each_entry(zone, &bm->zones, list)
  		recycle_zone_bm_rtree(zone);
  
  	p_list = bm->p_list;
  	while (p_list) {
  		struct linked_page *lp = p_list;
  
  		p_list = lp->next;
  		recycle_safe_page(lp);
  	}
  }
74dfd666d   Rafael J. Wysocki   swsusp: do not us...
894
  /**
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
895
896
897
898
   * register_nosave_region - Register a region of unsaveable memory.
   *
   * Register a range of page frames the contents of which should not be saved
   * during hibernation (to be used in the early initialization code).
74dfd666d   Rafael J. Wysocki   swsusp: do not us...
899
   */
efd5a8524   Rafael J. Wysocki   PM / hibernate: C...
900
901
  void __init __register_nosave_region(unsigned long start_pfn,
  				     unsigned long end_pfn, int use_kmalloc)
74dfd666d   Rafael J. Wysocki   swsusp: do not us...
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
  {
  	struct nosave_region *region;
  
  	if (start_pfn >= end_pfn)
  		return;
  
  	if (!list_empty(&nosave_regions)) {
  		/* Try to extend the previous region (they should be sorted) */
  		region = list_entry(nosave_regions.prev,
  					struct nosave_region, list);
  		if (region->end_pfn == start_pfn) {
  			region->end_pfn = end_pfn;
  			goto Report;
  		}
  	}
940d67f6b   Johannes Berg   [POWERPC] swsusp:...
917
  	if (use_kmalloc) {
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
918
  		/* During init, this shouldn't fail */
940d67f6b   Johannes Berg   [POWERPC] swsusp:...
919
920
  		region = kmalloc(sizeof(struct nosave_region), GFP_KERNEL);
  		BUG_ON(!region);
d5f32af31   Rafael J. Wysocki   PM / hibernate: A...
921
  	} else {
940d67f6b   Johannes Berg   [POWERPC] swsusp:...
922
  		/* This allocation cannot fail */
7e1c4e279   Mike Rapoport   memblock: stop us...
923
924
  		region = memblock_alloc(sizeof(struct nosave_region),
  					SMP_CACHE_BYTES);
8a7f97b90   Mike Rapoport   treewide: add che...
925
926
927
928
  		if (!region)
  			panic("%s: Failed to allocate %zu bytes
  ", __func__,
  			      sizeof(struct nosave_region));
d5f32af31   Rafael J. Wysocki   PM / hibernate: A...
929
  	}
74dfd666d   Rafael J. Wysocki   swsusp: do not us...
930
931
932
933
  	region->start_pfn = start_pfn;
  	region->end_pfn = end_pfn;
  	list_add_tail(&region->list, &nosave_regions);
   Report:
64ec72a1e   Joe Perches   PM: Use a more co...
934
935
  	pr_info("Registered nosave memory: [mem %#010llx-%#010llx]
  ",
cd38ca854   Bjorn Helgaas   PM / Hibernate: p...
936
937
  		(unsigned long long) start_pfn << PAGE_SHIFT,
  		((unsigned long long) end_pfn << PAGE_SHIFT) - 1);
74dfd666d   Rafael J. Wysocki   swsusp: do not us...
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
  }
  
  /*
   * Set bits in this map correspond to the page frames the contents of which
   * should not be saved during the suspend.
   */
  static struct memory_bitmap *forbidden_pages_map;
  
  /* Set bits in this map correspond to free page frames. */
  static struct memory_bitmap *free_pages_map;
  
  /*
   * Each page frame allocated for creating the image is marked by setting the
   * corresponding bits in forbidden_pages_map and free_pages_map simultaneously
   */
  
  void swsusp_set_page_free(struct page *page)
  {
  	if (free_pages_map)
  		memory_bm_set_bit(free_pages_map, page_to_pfn(page));
  }
  
  static int swsusp_page_is_free(struct page *page)
  {
  	return free_pages_map ?
  		memory_bm_test_bit(free_pages_map, page_to_pfn(page)) : 0;
  }
  
  void swsusp_unset_page_free(struct page *page)
  {
  	if (free_pages_map)
  		memory_bm_clear_bit(free_pages_map, page_to_pfn(page));
  }
  
  static void swsusp_set_page_forbidden(struct page *page)
  {
  	if (forbidden_pages_map)
  		memory_bm_set_bit(forbidden_pages_map, page_to_pfn(page));
  }
  
  int swsusp_page_is_forbidden(struct page *page)
  {
  	return forbidden_pages_map ?
  		memory_bm_test_bit(forbidden_pages_map, page_to_pfn(page)) : 0;
  }
  
  static void swsusp_unset_page_forbidden(struct page *page)
  {
  	if (forbidden_pages_map)
  		memory_bm_clear_bit(forbidden_pages_map, page_to_pfn(page));
  }
  
  /**
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
991
992
993
994
995
   * mark_nosave_pages - Mark pages that should not be saved.
   * @bm: Memory bitmap.
   *
   * Set the bits in @bm that correspond to the page frames the contents of which
   * should not be saved.
74dfd666d   Rafael J. Wysocki   swsusp: do not us...
996
   */
74dfd666d   Rafael J. Wysocki   swsusp: do not us...
997
998
999
1000
1001
1002
1003
1004
1005
  static void mark_nosave_pages(struct memory_bitmap *bm)
  {
  	struct nosave_region *region;
  
  	if (list_empty(&nosave_regions))
  		return;
  
  	list_for_each_entry(region, &nosave_regions, list) {
  		unsigned long pfn;
64ec72a1e   Joe Perches   PM: Use a more co...
1006
1007
  		pr_debug("Marking nosave pages: [mem %#010llx-%#010llx]
  ",
69f1d475c   Bjorn Helgaas   PM / Hibernate: p...
1008
1009
1010
  			 (unsigned long long) region->start_pfn << PAGE_SHIFT,
  			 ((unsigned long long) region->end_pfn << PAGE_SHIFT)
  				- 1);
74dfd666d   Rafael J. Wysocki   swsusp: do not us...
1011
1012
  
  		for (pfn = region->start_pfn; pfn < region->end_pfn; pfn++)
a82f7119f   Rafael J. Wysocki   Hibernation: Fix ...
1013
1014
1015
1016
1017
1018
1019
1020
1021
  			if (pfn_valid(pfn)) {
  				/*
  				 * It is safe to ignore the result of
  				 * mem_bm_set_bit_check() here, since we won't
  				 * touch the PFNs for which the error is
  				 * returned anyway.
  				 */
  				mem_bm_set_bit_check(bm, pfn);
  			}
74dfd666d   Rafael J. Wysocki   swsusp: do not us...
1022
1023
1024
1025
  	}
  }
  
  /**
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
1026
1027
1028
1029
1030
1031
   * create_basic_memory_bitmaps - Create bitmaps to hold basic page information.
   *
   * Create bitmaps needed for marking page frames that should not be saved and
   * free page frames.  The forbidden_pages_map and free_pages_map pointers are
   * only modified if everything goes well, because we don't want the bits to be
   * touched before both bitmaps are set up.
74dfd666d   Rafael J. Wysocki   swsusp: do not us...
1032
   */
74dfd666d   Rafael J. Wysocki   swsusp: do not us...
1033
1034
1035
1036
  int create_basic_memory_bitmaps(void)
  {
  	struct memory_bitmap *bm1, *bm2;
  	int error = 0;
aab172891   Rafael J. Wysocki   PM / hibernate: F...
1037
1038
1039
1040
  	if (forbidden_pages_map && free_pages_map)
  		return 0;
  	else
  		BUG_ON(forbidden_pages_map || free_pages_map);
74dfd666d   Rafael J. Wysocki   swsusp: do not us...
1041

0709db607   Rafael J. Wysocki   swsusp: use GFP_K...
1042
  	bm1 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
74dfd666d   Rafael J. Wysocki   swsusp: do not us...
1043
1044
  	if (!bm1)
  		return -ENOMEM;
0709db607   Rafael J. Wysocki   swsusp: use GFP_K...
1045
  	error = memory_bm_create(bm1, GFP_KERNEL, PG_ANY);
74dfd666d   Rafael J. Wysocki   swsusp: do not us...
1046
1047
  	if (error)
  		goto Free_first_object;
0709db607   Rafael J. Wysocki   swsusp: use GFP_K...
1048
  	bm2 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
74dfd666d   Rafael J. Wysocki   swsusp: do not us...
1049
1050
  	if (!bm2)
  		goto Free_first_bitmap;
0709db607   Rafael J. Wysocki   swsusp: use GFP_K...
1051
  	error = memory_bm_create(bm2, GFP_KERNEL, PG_ANY);
74dfd666d   Rafael J. Wysocki   swsusp: do not us...
1052
1053
1054
1055
1056
1057
  	if (error)
  		goto Free_second_object;
  
  	forbidden_pages_map = bm1;
  	free_pages_map = bm2;
  	mark_nosave_pages(forbidden_pages_map);
64ec72a1e   Joe Perches   PM: Use a more co...
1058
1059
  	pr_debug("Basic memory bitmaps created
  ");
74dfd666d   Rafael J. Wysocki   swsusp: do not us...
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
  
  	return 0;
  
   Free_second_object:
  	kfree(bm2);
   Free_first_bitmap:
   	memory_bm_free(bm1, PG_UNSAFE_CLEAR);
   Free_first_object:
  	kfree(bm1);
  	return -ENOMEM;
  }
  
  /**
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
1073
1074
1075
1076
1077
   * free_basic_memory_bitmaps - Free memory bitmaps holding basic information.
   *
   * Free memory bitmaps allocated by create_basic_memory_bitmaps().  The
   * auxiliary pointers are necessary so that the bitmaps themselves are not
   * referred to while they are being freed.
74dfd666d   Rafael J. Wysocki   swsusp: do not us...
1078
   */
74dfd666d   Rafael J. Wysocki   swsusp: do not us...
1079
1080
1081
  void free_basic_memory_bitmaps(void)
  {
  	struct memory_bitmap *bm1, *bm2;
6a0c7cd33   Rafael J. Wysocki   PM / Hibernate: D...
1082
1083
  	if (WARN_ON(!(forbidden_pages_map && free_pages_map)))
  		return;
74dfd666d   Rafael J. Wysocki   swsusp: do not us...
1084
1085
1086
1087
1088
1089
1090
1091
1092
  
  	bm1 = forbidden_pages_map;
  	bm2 = free_pages_map;
  	forbidden_pages_map = NULL;
  	free_pages_map = NULL;
  	memory_bm_free(bm1, PG_UNSAFE_CLEAR);
  	kfree(bm1);
  	memory_bm_free(bm2, PG_UNSAFE_CLEAR);
  	kfree(bm2);
64ec72a1e   Joe Perches   PM: Use a more co...
1093
1094
  	pr_debug("Basic memory bitmaps freed
  ");
74dfd666d   Rafael J. Wysocki   swsusp: do not us...
1095
  }
1ad1410f6   Anisse Astier   PM / Hibernate: a...
1096
1097
  void clear_free_pages(void)
  {
1ad1410f6   Anisse Astier   PM / Hibernate: a...
1098
1099
1100
1101
1102
  	struct memory_bitmap *bm = free_pages_map;
  	unsigned long pfn;
  
  	if (WARN_ON(!(free_pages_map)))
  		return;
18451f9f9   Alexander Potapenko   PM: hibernate: fi...
1103
1104
  	if (IS_ENABLED(CONFIG_PAGE_POISONING_ZERO) || want_init_on_free()) {
  		memory_bm_position_reset(bm);
1ad1410f6   Anisse Astier   PM / Hibernate: a...
1105
  		pfn = memory_bm_next_pfn(bm);
18451f9f9   Alexander Potapenko   PM: hibernate: fi...
1106
1107
1108
1109
1110
1111
1112
1113
1114
  		while (pfn != BM_END_OF_MAP) {
  			if (pfn_valid(pfn))
  				clear_highpage(pfn_to_page(pfn));
  
  			pfn = memory_bm_next_pfn(bm);
  		}
  		memory_bm_position_reset(bm);
  		pr_info("free pages cleared after restore
  ");
1ad1410f6   Anisse Astier   PM / Hibernate: a...
1115
  	}
1ad1410f6   Anisse Astier   PM / Hibernate: a...
1116
  }
74dfd666d   Rafael J. Wysocki   swsusp: do not us...
1117
  /**
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
1118
1119
1120
1121
1122
1123
   * snapshot_additional_pages - Estimate the number of extra pages needed.
   * @zone: Memory zone to carry out the computation for.
   *
   * Estimate the number of additional pages needed for setting up a hibernation
   * image data structures for @zone (usually, the returned value is greater than
   * the exact number).
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
1124
   */
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
1125
1126
  unsigned int snapshot_additional_pages(struct zone *zone)
  {
f469f02dc   Joerg Roedel   PM / Hibernate: C...
1127
  	unsigned int rtree, nodes;
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
1128

f469f02dc   Joerg Roedel   PM / Hibernate: C...
1129
1130
1131
1132
1133
1134
1135
  	rtree = nodes = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
  	rtree += DIV_ROUND_UP(rtree * sizeof(struct rtree_node),
  			      LINKED_PAGE_DATA_SIZE);
  	while (nodes > 1) {
  		nodes = DIV_ROUND_UP(nodes, BM_ENTRIES_PER_LEVEL);
  		rtree += nodes;
  	}
9047eb629   Joerg Roedel   PM / Hibernate: R...
1136
  	return 2 * rtree;
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
1137
  }
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
1138
1139
  #ifdef CONFIG_HIGHMEM
  /**
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
1140
1141
1142
   * count_free_highmem_pages - Compute the total number of free highmem pages.
   *
   * The returned number is system-wide.
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
1143
   */
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
1144
1145
1146
1147
  static unsigned int count_free_highmem_pages(void)
  {
  	struct zone *zone;
  	unsigned int cnt = 0;
ee99c71c5   KOSAKI Motohiro   mm: introduce for...
1148
1149
  	for_each_populated_zone(zone)
  		if (is_highmem(zone))
d23ad4232   Christoph Lameter   [PATCH] Use ZVC f...
1150
  			cnt += zone_page_state(zone, NR_FREE_PAGES);
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
1151
1152
1153
1154
1155
  
  	return cnt;
  }
  
  /**
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
1156
1157
1158
   * saveable_highmem_page - Check if a highmem page is saveable.
   *
   * Determine whether a highmem page should be included in a hibernation image.
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
1159
   *
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
1160
1161
   * We should save the page if it isn't Nosave or NosaveFree, or Reserved,
   * and it isn't part of a free chunk of pages.
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
1162
   */
846705deb   Rafael J. Wysocki   Hibernate: Take o...
1163
  static struct page *saveable_highmem_page(struct zone *zone, unsigned long pfn)
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
1164
1165
1166
1167
1168
  {
  	struct page *page;
  
  	if (!pfn_valid(pfn))
  		return NULL;
5b56db372   David Hildenbrand   PM/Hibernate: use...
1169
1170
  	page = pfn_to_online_page(pfn);
  	if (!page || page_zone(page) != zone)
846705deb   Rafael J. Wysocki   Hibernate: Take o...
1171
  		return NULL;
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
1172
1173
  
  	BUG_ON(!PageHighMem(page));
abd02ac61   David Hildenbrand   PM/Hibernate: exc...
1174
1175
1176
1177
  	if (swsusp_page_is_forbidden(page) ||  swsusp_page_is_free(page))
  		return NULL;
  
  	if (PageReserved(page) || PageOffline(page))
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
1178
  		return NULL;
c6968e73b   Stanislaw Gruszka   PM/Hibernate: do ...
1179
1180
  	if (page_is_guard(page))
  		return NULL;
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
1181
1182
1183
1184
  	return page;
  }
  
  /**
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
1185
   * count_highmem_pages - Compute the total number of saveable highmem pages.
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
1186
   */
fe419535d   Rafael J. Wysocki   PM/Hibernate: Mov...
1187
  static unsigned int count_highmem_pages(void)
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
1188
1189
1190
  {
  	struct zone *zone;
  	unsigned int n = 0;
98e73dc5d   Gerald Schaefer   PM / Hibernate / ...
1191
  	for_each_populated_zone(zone) {
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
1192
1193
1194
1195
1196
1197
  		unsigned long pfn, max_zone_pfn;
  
  		if (!is_highmem(zone))
  			continue;
  
  		mark_free_pages(zone);
c33bc315f   Xishi Qiu   mm: use zone_end_...
1198
  		max_zone_pfn = zone_end_pfn(zone);
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
1199
  		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
846705deb   Rafael J. Wysocki   Hibernate: Take o...
1200
  			if (saveable_highmem_page(zone, pfn))
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
1201
1202
1203
1204
1205
  				n++;
  	}
  	return n;
  }
  #else
846705deb   Rafael J. Wysocki   Hibernate: Take o...
1206
1207
1208
1209
  static inline void *saveable_highmem_page(struct zone *z, unsigned long p)
  {
  	return NULL;
  }
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
1210
  #endif /* CONFIG_HIGHMEM */
f6143aa60   Rafael J. Wysocki   [PATCH] swsusp: R...
1211
  /**
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
1212
1213
1214
1215
   * saveable_page - Check if the given page is saveable.
   *
   * Determine whether a non-highmem page should be included in a hibernation
   * image.
25761b6eb   Rafael J. Wysocki   [PATCH] swsusp: m...
1216
   *
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
1217
1218
1219
   * We should save the page if it isn't Nosave, and is not in the range
   * of pages statically defined as 'unsaveable', and it isn't part of
   * a free chunk of pages.
25761b6eb   Rafael J. Wysocki   [PATCH] swsusp: m...
1220
   */
846705deb   Rafael J. Wysocki   Hibernate: Take o...
1221
  static struct page *saveable_page(struct zone *zone, unsigned long pfn)
25761b6eb   Rafael J. Wysocki   [PATCH] swsusp: m...
1222
  {
de491861e   Pavel Machek   [PATCH] swsusp: c...
1223
  	struct page *page;
25761b6eb   Rafael J. Wysocki   [PATCH] swsusp: m...
1224
1225
  
  	if (!pfn_valid(pfn))
ae83c5eef   Rafael J. Wysocki   [PATCH] swsusp: c...
1226
  		return NULL;
25761b6eb   Rafael J. Wysocki   [PATCH] swsusp: m...
1227

5b56db372   David Hildenbrand   PM/Hibernate: use...
1228
1229
  	page = pfn_to_online_page(pfn);
  	if (!page || page_zone(page) != zone)
846705deb   Rafael J. Wysocki   Hibernate: Take o...
1230
  		return NULL;
ae83c5eef   Rafael J. Wysocki   [PATCH] swsusp: c...
1231

8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
1232
  	BUG_ON(PageHighMem(page));
7be982349   Rafael J. Wysocki   swsusp: use inlin...
1233
  	if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page))
ae83c5eef   Rafael J. Wysocki   [PATCH] swsusp: c...
1234
  		return NULL;
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
1235

abd02ac61   David Hildenbrand   PM/Hibernate: exc...
1236
1237
  	if (PageOffline(page))
  		return NULL;
8a235efad   Rafael J. Wysocki   Hibernation: Hand...
1238
1239
  	if (PageReserved(page)
  	    && (!kernel_page_present(page) || pfn_is_nosave(pfn)))
ae83c5eef   Rafael J. Wysocki   [PATCH] swsusp: c...
1240
  		return NULL;
25761b6eb   Rafael J. Wysocki   [PATCH] swsusp: m...
1241

c6968e73b   Stanislaw Gruszka   PM/Hibernate: do ...
1242
1243
  	if (page_is_guard(page))
  		return NULL;
ae83c5eef   Rafael J. Wysocki   [PATCH] swsusp: c...
1244
  	return page;
25761b6eb   Rafael J. Wysocki   [PATCH] swsusp: m...
1245
  }
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
1246
  /**
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
1247
   * count_data_pages - Compute the total number of saveable non-highmem pages.
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
1248
   */
fe419535d   Rafael J. Wysocki   PM/Hibernate: Mov...
1249
  static unsigned int count_data_pages(void)
25761b6eb   Rafael J. Wysocki   [PATCH] swsusp: m...
1250
1251
  {
  	struct zone *zone;
ae83c5eef   Rafael J. Wysocki   [PATCH] swsusp: c...
1252
  	unsigned long pfn, max_zone_pfn;
dc19d507b   Pavel Machek   [PATCH] swsusp cl...
1253
  	unsigned int n = 0;
25761b6eb   Rafael J. Wysocki   [PATCH] swsusp: m...
1254

98e73dc5d   Gerald Schaefer   PM / Hibernate / ...
1255
  	for_each_populated_zone(zone) {
25761b6eb   Rafael J. Wysocki   [PATCH] swsusp: m...
1256
1257
  		if (is_highmem(zone))
  			continue;
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
1258

25761b6eb   Rafael J. Wysocki   [PATCH] swsusp: m...
1259
  		mark_free_pages(zone);
c33bc315f   Xishi Qiu   mm: use zone_end_...
1260
  		max_zone_pfn = zone_end_pfn(zone);
ae83c5eef   Rafael J. Wysocki   [PATCH] swsusp: c...
1261
  		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
846705deb   Rafael J. Wysocki   Hibernate: Take o...
1262
  			if (saveable_page(zone, pfn))
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
1263
  				n++;
25761b6eb   Rafael J. Wysocki   [PATCH] swsusp: m...
1264
  	}
a0f496517   Rafael J. Wysocki   [PATCH] swsusp: r...
1265
  	return n;
25761b6eb   Rafael J. Wysocki   [PATCH] swsusp: m...
1266
  }
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
1267
1268
  /*
   * This is needed, because copy_page and memcpy are not usable for copying
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
1269
1270
1271
   * task structs.
   */
  static inline void do_copy_page(long *dst, long *src)
f623f0db8   Rafael J. Wysocki   [PATCH] swsusp: F...
1272
1273
  {
  	int n;
f623f0db8   Rafael J. Wysocki   [PATCH] swsusp: F...
1274
1275
1276
  	for (n = PAGE_SIZE / sizeof(long); n; n--)
  		*dst++ = *src++;
  }
8a235efad   Rafael J. Wysocki   Hibernation: Hand...
1277
  /**
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
1278
1279
1280
   * safe_copy_page - Copy a page in a safe way.
   *
   * Check if the page we are going to copy is marked as present in the kernel
d63326928   Rick Edgecombe   mm/hibernation: M...
1281
1282
1283
   * page tables. This always is the case if CONFIG_DEBUG_PAGEALLOC or
   * CONFIG_ARCH_HAS_SET_DIRECT_MAP is not set. In that case kernel_page_present()
   * always returns 'true'.
8a235efad   Rafael J. Wysocki   Hibernation: Hand...
1284
1285
1286
1287
1288
1289
1290
1291
1292
1293
1294
   */
  static void safe_copy_page(void *dst, struct page *s_page)
  {
  	if (kernel_page_present(s_page)) {
  		do_copy_page(dst, page_address(s_page));
  	} else {
  		kernel_map_pages(s_page, 1, 1);
  		do_copy_page(dst, page_address(s_page));
  		kernel_map_pages(s_page, 1, 0);
  	}
  }
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
1295
  #ifdef CONFIG_HIGHMEM
efd5a8524   Rafael J. Wysocki   PM / hibernate: C...
1296
  static inline struct page *page_is_saveable(struct zone *zone, unsigned long pfn)
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
1297
1298
  {
  	return is_highmem(zone) ?
846705deb   Rafael J. Wysocki   Hibernate: Take o...
1299
  		saveable_highmem_page(zone, pfn) : saveable_page(zone, pfn);
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
1300
  }
8a235efad   Rafael J. Wysocki   Hibernation: Hand...
1301
  static void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
1302
1303
1304
1305
1306
1307
1308
  {
  	struct page *s_page, *d_page;
  	void *src, *dst;
  
  	s_page = pfn_to_page(src_pfn);
  	d_page = pfn_to_page(dst_pfn);
  	if (PageHighMem(s_page)) {
0de9a1e28   Cong Wang   power: remove the...
1309
1310
  		src = kmap_atomic(s_page);
  		dst = kmap_atomic(d_page);
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
1311
  		do_copy_page(dst, src);
0de9a1e28   Cong Wang   power: remove the...
1312
1313
  		kunmap_atomic(dst);
  		kunmap_atomic(src);
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
1314
  	} else {
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
1315
  		if (PageHighMem(d_page)) {
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
1316
1317
  			/*
  			 * The page pointed to by src may contain some kernel
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
1318
1319
  			 * data modified by kmap_atomic()
  			 */
8a235efad   Rafael J. Wysocki   Hibernation: Hand...
1320
  			safe_copy_page(buffer, s_page);
0de9a1e28   Cong Wang   power: remove the...
1321
  			dst = kmap_atomic(d_page);
3ecb01df3   Jan Beulich   use clear_page()/...
1322
  			copy_page(dst, buffer);
0de9a1e28   Cong Wang   power: remove the...
1323
  			kunmap_atomic(dst);
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
1324
  		} else {
8a235efad   Rafael J. Wysocki   Hibernation: Hand...
1325
  			safe_copy_page(page_address(d_page), s_page);
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
1326
1327
1328
1329
  		}
  	}
  }
  #else
846705deb   Rafael J. Wysocki   Hibernate: Take o...
1330
  #define page_is_saveable(zone, pfn)	saveable_page(zone, pfn)
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
1331

8a235efad   Rafael J. Wysocki   Hibernation: Hand...
1332
  static inline void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
1333
  {
8a235efad   Rafael J. Wysocki   Hibernation: Hand...
1334
1335
  	safe_copy_page(page_address(pfn_to_page(dst_pfn)),
  				pfn_to_page(src_pfn));
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
1336
1337
  }
  #endif /* CONFIG_HIGHMEM */
efd5a8524   Rafael J. Wysocki   PM / hibernate: C...
1338
1339
  static void copy_data_pages(struct memory_bitmap *copy_bm,
  			    struct memory_bitmap *orig_bm)
25761b6eb   Rafael J. Wysocki   [PATCH] swsusp: m...
1340
1341
  {
  	struct zone *zone;
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
1342
  	unsigned long pfn;
25761b6eb   Rafael J. Wysocki   [PATCH] swsusp: m...
1343

98e73dc5d   Gerald Schaefer   PM / Hibernate / ...
1344
  	for_each_populated_zone(zone) {
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
1345
  		unsigned long max_zone_pfn;
25761b6eb   Rafael J. Wysocki   [PATCH] swsusp: m...
1346
  		mark_free_pages(zone);
c33bc315f   Xishi Qiu   mm: use zone_end_...
1347
  		max_zone_pfn = zone_end_pfn(zone);
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
1348
  		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
1349
  			if (page_is_saveable(zone, pfn))
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
1350
  				memory_bm_set_bit(orig_bm, pfn);
25761b6eb   Rafael J. Wysocki   [PATCH] swsusp: m...
1351
  	}
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
1352
1353
  	memory_bm_position_reset(orig_bm);
  	memory_bm_position_reset(copy_bm);
df7c48725   Fengguang Wu   trivial copy_data...
1354
  	for(;;) {
b788db798   Rafael J. Wysocki   [PATCH] swsusp: I...
1355
  		pfn = memory_bm_next_pfn(orig_bm);
df7c48725   Fengguang Wu   trivial copy_data...
1356
1357
1358
1359
  		if (unlikely(pfn == BM_END_OF_MAP))
  			break;
  		copy_data_page(memory_bm_next_pfn(copy_bm), pfn);
  	}
25761b6eb   Rafael J. Wysocki   [PATCH] swsusp: m...
1360
  }
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
1361
1362
1363
1364
  /* Total number of image pages */
  static unsigned int nr_copy_pages;
  /* Number of pages needed for saving the original pfns of the image pages */
  static unsigned int nr_meta_pages;
64a473cb7   Rafael J. Wysocki   PM/Hibernate: Do ...
1365
1366
1367
1368
  /*
   * Numbers of normal and highmem page frames allocated for hibernation image
   * before suspending devices.
   */
0bae5fd33   Pushkar Jambhlekar   PM / hibernate: D...
1369
  static unsigned int alloc_normal, alloc_highmem;
64a473cb7   Rafael J. Wysocki   PM/Hibernate: Do ...
1370
1371
1372
1373
1374
1375
1376
1377
1378
1379
1380
1381
1382
1383
  /*
   * Memory bitmap used for marking saveable pages (during hibernation) or
   * hibernation image pages (during restore)
   */
  static struct memory_bitmap orig_bm;
  /*
   * Memory bitmap used during hibernation for marking allocated page frames that
   * will contain copies of saveable pages.  During restore it is initially used
   * for marking hibernation image pages, but then the set bits from it are
   * duplicated in @orig_bm and it is released.  On highmem systems it is next
   * used for marking "safe" highmem pages, but it has to be reinitialized for
   * this purpose.
   */
  static struct memory_bitmap copy_bm;
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
1384

25761b6eb   Rafael J. Wysocki   [PATCH] swsusp: m...
1385
  /**
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
1386
   * swsusp_free - Free pages allocated for hibernation image.
cd560bb2f   Rafael J. Wysocki   [PATCH] swsusp: F...
1387
   *
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
1388
1389
   * Image pages are alocated before snapshot creation, so they need to be
   * released after resume.
25761b6eb   Rafael J. Wysocki   [PATCH] swsusp: m...
1390
   */
25761b6eb   Rafael J. Wysocki   [PATCH] swsusp: m...
1391
1392
  void swsusp_free(void)
  {
fdd64ed54   Joerg Roedel   PM / hibernate: I...
1393
  	unsigned long fb_pfn, fr_pfn;
6efde38f0   Joerg Roedel   PM / Hibernate: I...
1394

fdd64ed54   Joerg Roedel   PM / hibernate: I...
1395
1396
1397
1398
1399
1400
1401
1402
1403
1404
1405
1406
1407
1408
1409
1410
1411
1412
1413
1414
1415
1416
1417
1418
1419
1420
  	if (!forbidden_pages_map || !free_pages_map)
  		goto out;
  
  	memory_bm_position_reset(forbidden_pages_map);
  	memory_bm_position_reset(free_pages_map);
  
  loop:
  	fr_pfn = memory_bm_next_pfn(free_pages_map);
  	fb_pfn = memory_bm_next_pfn(forbidden_pages_map);
  
  	/*
  	 * Find the next bit set in both bitmaps. This is guaranteed to
  	 * terminate when fb_pfn == fr_pfn == BM_END_OF_MAP.
  	 */
  	do {
  		if (fb_pfn < fr_pfn)
  			fb_pfn = memory_bm_next_pfn(forbidden_pages_map);
  		if (fr_pfn < fb_pfn)
  			fr_pfn = memory_bm_next_pfn(free_pages_map);
  	} while (fb_pfn != fr_pfn);
  
  	if (fr_pfn != BM_END_OF_MAP && pfn_valid(fr_pfn)) {
  		struct page *page = pfn_to_page(fr_pfn);
  
  		memory_bm_clear_current(forbidden_pages_map);
  		memory_bm_clear_current(free_pages_map);
4c0b6c10f   Rafael J. Wysocki   PM / hibernate: I...
1421
  		hibernate_restore_unprotect_page(page_address(page));
fdd64ed54   Joerg Roedel   PM / hibernate: I...
1422
1423
  		__free_page(page);
  		goto loop;
25761b6eb   Rafael J. Wysocki   [PATCH] swsusp: m...
1424
  	}
fdd64ed54   Joerg Roedel   PM / hibernate: I...
1425
1426
  
  out:
f577eb30a   Rafael J. Wysocki   [PATCH] swsusp: l...
1427
1428
  	nr_copy_pages = 0;
  	nr_meta_pages = 0;
75534b50c   Rafael J. Wysocki   [PATCH] Change th...
1429
  	restore_pblist = NULL;
6e1819d61   Rafael J. Wysocki   [PATCH] swsusp: u...
1430
  	buffer = NULL;
64a473cb7   Rafael J. Wysocki   PM/Hibernate: Do ...
1431
1432
  	alloc_normal = 0;
  	alloc_highmem = 0;
4c0b6c10f   Rafael J. Wysocki   PM / hibernate: I...
1433
  	hibernate_restore_protection_end();
25761b6eb   Rafael J. Wysocki   [PATCH] swsusp: m...
1434
  }
4bb334353   Rafael J. Wysocki   PM/Hibernate: Rew...
1435
1436
1437
  /* Helper functions used for the shrinking of memory. */
  
  #define GFP_IMAGE	(GFP_KERNEL | __GFP_NOWARN)
fe419535d   Rafael J. Wysocki   PM/Hibernate: Mov...
1438
  /**
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
1439
   * preallocate_image_pages - Allocate a number of pages for hibernation image.
4bb334353   Rafael J. Wysocki   PM/Hibernate: Rew...
1440
1441
   * @nr_pages: Number of page frames to allocate.
   * @mask: GFP flags to use for the allocation.
fe419535d   Rafael J. Wysocki   PM/Hibernate: Mov...
1442
   *
4bb334353   Rafael J. Wysocki   PM/Hibernate: Rew...
1443
1444
1445
1446
1447
1448
1449
   * Return value: Number of page frames actually allocated
   */
  static unsigned long preallocate_image_pages(unsigned long nr_pages, gfp_t mask)
  {
  	unsigned long nr_alloc = 0;
  
  	while (nr_pages > 0) {
64a473cb7   Rafael J. Wysocki   PM/Hibernate: Do ...
1450
1451
1452
1453
  		struct page *page;
  
  		page = alloc_image_page(mask);
  		if (!page)
4bb334353   Rafael J. Wysocki   PM/Hibernate: Rew...
1454
  			break;
64a473cb7   Rafael J. Wysocki   PM/Hibernate: Do ...
1455
1456
1457
1458
1459
  		memory_bm_set_bit(&copy_bm, page_to_pfn(page));
  		if (PageHighMem(page))
  			alloc_highmem++;
  		else
  			alloc_normal++;
4bb334353   Rafael J. Wysocki   PM/Hibernate: Rew...
1460
1461
1462
1463
1464
1465
  		nr_pages--;
  		nr_alloc++;
  	}
  
  	return nr_alloc;
  }
6715045dd   Rafael J. Wysocki   PM / Hibernate: A...
1466
1467
  static unsigned long preallocate_image_memory(unsigned long nr_pages,
  					      unsigned long avail_normal)
4bb334353   Rafael J. Wysocki   PM/Hibernate: Rew...
1468
  {
6715045dd   Rafael J. Wysocki   PM / Hibernate: A...
1469
1470
1471
1472
1473
1474
1475
1476
1477
1478
  	unsigned long alloc;
  
  	if (avail_normal <= alloc_normal)
  		return 0;
  
  	alloc = avail_normal - alloc_normal;
  	if (nr_pages < alloc)
  		alloc = nr_pages;
  
  	return preallocate_image_pages(alloc, GFP_IMAGE);
4bb334353   Rafael J. Wysocki   PM/Hibernate: Rew...
1479
1480
1481
1482
1483
1484
1485
1486
1487
  }
  
  #ifdef CONFIG_HIGHMEM
  static unsigned long preallocate_image_highmem(unsigned long nr_pages)
  {
  	return preallocate_image_pages(nr_pages, GFP_IMAGE | __GFP_HIGHMEM);
  }
  
  /**
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
1488
   *  __fraction - Compute (an approximation of) x * (multiplier / base).
fe419535d   Rafael J. Wysocki   PM/Hibernate: Mov...
1489
   */
4bb334353   Rafael J. Wysocki   PM/Hibernate: Rew...
1490
1491
  static unsigned long __fraction(u64 x, u64 multiplier, u64 base)
  {
809ed78a8   Wen Yang   PM: hibernate: im...
1492
  	return div64_u64(x * multiplier, base);
4bb334353   Rafael J. Wysocki   PM/Hibernate: Rew...
1493
  }
fe419535d   Rafael J. Wysocki   PM/Hibernate: Mov...
1494

4bb334353   Rafael J. Wysocki   PM/Hibernate: Rew...
1495
  static unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
efd5a8524   Rafael J. Wysocki   PM / hibernate: C...
1496
1497
  						  unsigned long highmem,
  						  unsigned long total)
fe419535d   Rafael J. Wysocki   PM/Hibernate: Mov...
1498
  {
4bb334353   Rafael J. Wysocki   PM/Hibernate: Rew...
1499
1500
1501
  	unsigned long alloc = __fraction(nr_pages, highmem, total);
  
  	return preallocate_image_pages(alloc, GFP_IMAGE | __GFP_HIGHMEM);
fe419535d   Rafael J. Wysocki   PM/Hibernate: Mov...
1502
  }
4bb334353   Rafael J. Wysocki   PM/Hibernate: Rew...
1503
1504
1505
1506
1507
1508
1509
  #else /* CONFIG_HIGHMEM */
  static inline unsigned long preallocate_image_highmem(unsigned long nr_pages)
  {
  	return 0;
  }
  
  static inline unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
efd5a8524   Rafael J. Wysocki   PM / hibernate: C...
1510
1511
  							 unsigned long highmem,
  							 unsigned long total)
4bb334353   Rafael J. Wysocki   PM/Hibernate: Rew...
1512
1513
1514
1515
  {
  	return 0;
  }
  #endif /* CONFIG_HIGHMEM */
fe419535d   Rafael J. Wysocki   PM/Hibernate: Mov...
1516

4bb334353   Rafael J. Wysocki   PM/Hibernate: Rew...
1517
  /**
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
1518
   * free_unnecessary_pages - Release preallocated pages not needed for the image.
64a473cb7   Rafael J. Wysocki   PM/Hibernate: Do ...
1519
   */
a64fc82c4   Wonhong Kwon   PM / hibernate: e...
1520
  static unsigned long free_unnecessary_pages(void)
64a473cb7   Rafael J. Wysocki   PM/Hibernate: Do ...
1521
  {
a64fc82c4   Wonhong Kwon   PM / hibernate: e...
1522
  	unsigned long save, to_free_normal, to_free_highmem, free;
64a473cb7   Rafael J. Wysocki   PM/Hibernate: Do ...
1523

6715045dd   Rafael J. Wysocki   PM / Hibernate: A...
1524
1525
1526
1527
1528
1529
1530
1531
1532
1533
1534
  	save = count_data_pages();
  	if (alloc_normal >= save) {
  		to_free_normal = alloc_normal - save;
  		save = 0;
  	} else {
  		to_free_normal = 0;
  		save -= alloc_normal;
  	}
  	save += count_highmem_pages();
  	if (alloc_highmem >= save) {
  		to_free_highmem = alloc_highmem - save;
64a473cb7   Rafael J. Wysocki   PM/Hibernate: Do ...
1535
1536
  	} else {
  		to_free_highmem = 0;
4d4cf23cd   Rafael J. Wysocki   PM / Hibernate: F...
1537
1538
1539
1540
1541
  		save -= alloc_highmem;
  		if (to_free_normal > save)
  			to_free_normal -= save;
  		else
  			to_free_normal = 0;
64a473cb7   Rafael J. Wysocki   PM/Hibernate: Do ...
1542
  	}
a64fc82c4   Wonhong Kwon   PM / hibernate: e...
1543
  	free = to_free_normal + to_free_highmem;
64a473cb7   Rafael J. Wysocki   PM/Hibernate: Do ...
1544
1545
  
  	memory_bm_position_reset(&copy_bm);
a9c9b4429   Rafael J. Wysocki   PM / Hibernate: F...
1546
  	while (to_free_normal > 0 || to_free_highmem > 0) {
64a473cb7   Rafael J. Wysocki   PM/Hibernate: Do ...
1547
1548
1549
1550
1551
1552
1553
1554
1555
1556
1557
1558
1559
1560
1561
1562
1563
1564
1565
  		unsigned long pfn = memory_bm_next_pfn(&copy_bm);
  		struct page *page = pfn_to_page(pfn);
  
  		if (PageHighMem(page)) {
  			if (!to_free_highmem)
  				continue;
  			to_free_highmem--;
  			alloc_highmem--;
  		} else {
  			if (!to_free_normal)
  				continue;
  			to_free_normal--;
  			alloc_normal--;
  		}
  		memory_bm_clear_bit(&copy_bm, pfn);
  		swsusp_unset_page_forbidden(page);
  		swsusp_unset_page_free(page);
  		__free_page(page);
  	}
a64fc82c4   Wonhong Kwon   PM / hibernate: e...
1566
1567
  
  	return free;
64a473cb7   Rafael J. Wysocki   PM/Hibernate: Do ...
1568
1569
1570
  }
  
  /**
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
1571
   * minimum_image_size - Estimate the minimum acceptable size of an image.
ef4aede3f   Rafael J. Wysocki   PM/Hibernate: Do ...
1572
1573
1574
1575
1576
1577
1578
1579
1580
1581
1582
   * @saveable: Number of saveable pages in the system.
   *
   * We want to avoid attempting to free too much memory too hard, so estimate the
   * minimum acceptable size of a hibernation image to use as the lower limit for
   * preallocating memory.
   *
   * We assume that the minimum image size should be proportional to
   *
   * [number of saveable pages] - [number of pages that can be freed in theory]
   *
   * where the second term is the sum of (1) reclaimable slab pages, (2) active
bdbc98abb   Rainer Fiebig   PM: hibernate: Do...
1583
   * and (3) inactive anonymous pages, (4) active and (5) inactive file pages.
ef4aede3f   Rafael J. Wysocki   PM/Hibernate: Do ...
1584
1585
1586
1587
   */
  static unsigned long minimum_image_size(unsigned long saveable)
  {
  	unsigned long size;
d42f3245c   Roman Gushchin   mm: memcg: conver...
1588
  	size = global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B)
599d0c954   Mel Gorman   mm, vmscan: move ...
1589
1590
1591
  		+ global_node_page_state(NR_ACTIVE_ANON)
  		+ global_node_page_state(NR_INACTIVE_ANON)
  		+ global_node_page_state(NR_ACTIVE_FILE)
bdbc98abb   Rainer Fiebig   PM: hibernate: Do...
1592
  		+ global_node_page_state(NR_INACTIVE_FILE);
ef4aede3f   Rafael J. Wysocki   PM/Hibernate: Do ...
1593
1594
1595
1596
1597
  
  	return saveable <= size ? 0 : saveable - size;
  }
  
  /**
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
1598
   * hibernate_preallocate_memory - Preallocate memory for hibernation image.
4bb334353   Rafael J. Wysocki   PM/Hibernate: Rew...
1599
1600
1601
1602
1603
   *
   * To create a hibernation image it is necessary to make a copy of every page
   * frame in use.  We also need a number of page frames to be free during
   * hibernation for allocations made while saving the image and for device
   * drivers, in case they need to allocate memory from their hibernation
ddeb64870   Rafael J. Wysocki   PM / Hibernate: A...
1604
   * callbacks (these two numbers are given by PAGES_FOR_IO (which is a rough
b0c609ab2   Alexandre Belloni   PM / hibernate: f...
1605
   * estimate) and reserved_size divided by PAGE_SIZE (which is tunable through
ddeb64870   Rafael J. Wysocki   PM / Hibernate: A...
1606
1607
   * /sys/power/reserved_size, respectively).  To make this happen, we compute the
   * total number of available page frames and allocate at least
4bb334353   Rafael J. Wysocki   PM/Hibernate: Rew...
1608
   *
ddeb64870   Rafael J. Wysocki   PM / Hibernate: A...
1609
1610
   * ([page frames total] + PAGES_FOR_IO + [metadata pages]) / 2
   *  + 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE)
4bb334353   Rafael J. Wysocki   PM/Hibernate: Rew...
1611
1612
1613
1614
1615
   *
   * of them, which corresponds to the maximum size of a hibernation image.
   *
   * If image_size is set below the number following from the above formula,
   * the preallocation of memory is continued until the total number of saveable
ef4aede3f   Rafael J. Wysocki   PM/Hibernate: Do ...
1616
1617
   * pages in the system is below the requested image size or the minimum
   * acceptable image size returned by minimum_image_size(), whichever is greater.
4bb334353   Rafael J. Wysocki   PM/Hibernate: Rew...
1618
   */
64a473cb7   Rafael J. Wysocki   PM/Hibernate: Do ...
1619
  int hibernate_preallocate_memory(void)
fe419535d   Rafael J. Wysocki   PM/Hibernate: Mov...
1620
  {
fe419535d   Rafael J. Wysocki   PM/Hibernate: Mov...
1621
  	struct zone *zone;
4bb334353   Rafael J. Wysocki   PM/Hibernate: Rew...
1622
  	unsigned long saveable, size, max_size, count, highmem, pages = 0;
6715045dd   Rafael J. Wysocki   PM / Hibernate: A...
1623
  	unsigned long alloc, save_highmem, pages_highmem, avail_normal;
db5976058   Tina Ruchandani   PM / Hibernate: M...
1624
  	ktime_t start, stop;
64a473cb7   Rafael J. Wysocki   PM/Hibernate: Do ...
1625
  	int error;
fe419535d   Rafael J. Wysocki   PM/Hibernate: Mov...
1626

7a7b99bf8   Luigi Semenzato   PM: hibernate: Ad...
1627
1628
  	pr_info("Preallocating image memory
  ");
db5976058   Tina Ruchandani   PM / Hibernate: M...
1629
  	start = ktime_get();
fe419535d   Rafael J. Wysocki   PM/Hibernate: Mov...
1630

64a473cb7   Rafael J. Wysocki   PM/Hibernate: Do ...
1631
  	error = memory_bm_create(&orig_bm, GFP_IMAGE, PG_ANY);
7a7b99bf8   Luigi Semenzato   PM: hibernate: Ad...
1632
1633
1634
  	if (error) {
  		pr_err("Cannot allocate original bitmap
  ");
64a473cb7   Rafael J. Wysocki   PM/Hibernate: Do ...
1635
  		goto err_out;
7a7b99bf8   Luigi Semenzato   PM: hibernate: Ad...
1636
  	}
64a473cb7   Rafael J. Wysocki   PM/Hibernate: Do ...
1637
1638
  
  	error = memory_bm_create(&copy_bm, GFP_IMAGE, PG_ANY);
7a7b99bf8   Luigi Semenzato   PM: hibernate: Ad...
1639
1640
1641
  	if (error) {
  		pr_err("Cannot allocate copy bitmap
  ");
64a473cb7   Rafael J. Wysocki   PM/Hibernate: Do ...
1642
  		goto err_out;
7a7b99bf8   Luigi Semenzato   PM: hibernate: Ad...
1643
  	}
64a473cb7   Rafael J. Wysocki   PM/Hibernate: Do ...
1644
1645
1646
  
  	alloc_normal = 0;
  	alloc_highmem = 0;
4bb334353   Rafael J. Wysocki   PM/Hibernate: Rew...
1647
  	/* Count the number of saveable data pages. */
64a473cb7   Rafael J. Wysocki   PM/Hibernate: Do ...
1648
  	save_highmem = count_highmem_pages();
4bb334353   Rafael J. Wysocki   PM/Hibernate: Rew...
1649
  	saveable = count_data_pages();
fe419535d   Rafael J. Wysocki   PM/Hibernate: Mov...
1650

4bb334353   Rafael J. Wysocki   PM/Hibernate: Rew...
1651
1652
1653
1654
1655
  	/*
  	 * Compute the total number of page frames we can use (count) and the
  	 * number of pages needed for image metadata (size).
  	 */
  	count = saveable;
64a473cb7   Rafael J. Wysocki   PM/Hibernate: Do ...
1656
1657
  	saveable += save_highmem;
  	highmem = save_highmem;
4bb334353   Rafael J. Wysocki   PM/Hibernate: Rew...
1658
1659
1660
1661
1662
1663
1664
1665
  	size = 0;
  	for_each_populated_zone(zone) {
  		size += snapshot_additional_pages(zone);
  		if (is_highmem(zone))
  			highmem += zone_page_state(zone, NR_FREE_PAGES);
  		else
  			count += zone_page_state(zone, NR_FREE_PAGES);
  	}
6715045dd   Rafael J. Wysocki   PM / Hibernate: A...
1666
  	avail_normal = count;
4bb334353   Rafael J. Wysocki   PM/Hibernate: Rew...
1667
1668
1669
1670
  	count += highmem;
  	count -= totalreserve_pages;
  
  	/* Compute the maximum number of saveable pages to leave in memory. */
ddeb64870   Rafael J. Wysocki   PM / Hibernate: A...
1671
1672
  	max_size = (count - (size + PAGES_FOR_IO)) / 2
  			- 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE);
266f1a25e   Rafael J. Wysocki   PM / Hibernate: I...
1673
  	/* Compute the desired number of image pages specified by image_size. */
4bb334353   Rafael J. Wysocki   PM/Hibernate: Rew...
1674
1675
1676
1677
  	size = DIV_ROUND_UP(image_size, PAGE_SIZE);
  	if (size > max_size)
  		size = max_size;
  	/*
266f1a25e   Rafael J. Wysocki   PM / Hibernate: I...
1678
1679
1680
  	 * If the desired number of image pages is at least as large as the
  	 * current number of saveable pages in memory, allocate page frames for
  	 * the image and we're done.
4bb334353   Rafael J. Wysocki   PM/Hibernate: Rew...
1681
  	 */
64a473cb7   Rafael J. Wysocki   PM/Hibernate: Do ...
1682
1683
  	if (size >= saveable) {
  		pages = preallocate_image_highmem(save_highmem);
6715045dd   Rafael J. Wysocki   PM / Hibernate: A...
1684
  		pages += preallocate_image_memory(saveable - pages, avail_normal);
4bb334353   Rafael J. Wysocki   PM/Hibernate: Rew...
1685
  		goto out;
64a473cb7   Rafael J. Wysocki   PM/Hibernate: Do ...
1686
  	}
4bb334353   Rafael J. Wysocki   PM/Hibernate: Rew...
1687

ef4aede3f   Rafael J. Wysocki   PM/Hibernate: Do ...
1688
1689
  	/* Estimate the minimum size of the image. */
  	pages = minimum_image_size(saveable);
6715045dd   Rafael J. Wysocki   PM / Hibernate: A...
1690
1691
1692
1693
1694
1695
1696
1697
1698
  	/*
  	 * To avoid excessive pressure on the normal zone, leave room in it to
  	 * accommodate an image of the minimum size (unless it's already too
  	 * small, in which case don't preallocate pages from it at all).
  	 */
  	if (avail_normal > pages)
  		avail_normal -= pages;
  	else
  		avail_normal = 0;
ef4aede3f   Rafael J. Wysocki   PM/Hibernate: Do ...
1699
1700
  	if (size < pages)
  		size = min_t(unsigned long, pages, max_size);
4bb334353   Rafael J. Wysocki   PM/Hibernate: Rew...
1701
1702
1703
1704
1705
1706
1707
1708
1709
1710
1711
1712
  	/*
  	 * Let the memory management subsystem know that we're going to need a
  	 * large number of page frames to allocate and make it free some memory.
  	 * NOTE: If this is not done, performance will be hurt badly in some
  	 * test cases.
  	 */
  	shrink_all_memory(saveable - size);
  
  	/*
  	 * The number of saveable pages in memory was too high, so apply some
  	 * pressure to decrease it.  First, make room for the largest possible
  	 * image and fail if that doesn't work.  Next, try to decrease the size
ef4aede3f   Rafael J. Wysocki   PM/Hibernate: Do ...
1713
1714
  	 * of the image as much as indicated by 'size' using allocations from
  	 * highmem and non-highmem zones separately.
4bb334353   Rafael J. Wysocki   PM/Hibernate: Rew...
1715
1716
  	 */
  	pages_highmem = preallocate_image_highmem(highmem / 2);
fd432b9f8   Aaron Lu   PM / hibernate: A...
1717
1718
1719
1720
1721
  	alloc = count - max_size;
  	if (alloc > pages_highmem)
  		alloc -= pages_highmem;
  	else
  		alloc = 0;
6715045dd   Rafael J. Wysocki   PM / Hibernate: A...
1722
1723
1724
1725
1726
1727
  	pages = preallocate_image_memory(alloc, avail_normal);
  	if (pages < alloc) {
  		/* We have exhausted non-highmem pages, try highmem. */
  		alloc -= pages;
  		pages += pages_highmem;
  		pages_highmem = preallocate_image_highmem(alloc);
7a7b99bf8   Luigi Semenzato   PM: hibernate: Ad...
1728
1729
1730
1731
  		if (pages_highmem < alloc) {
  			pr_err("Image allocation is %lu pages short
  ",
  				alloc - pages_highmem);
6715045dd   Rafael J. Wysocki   PM / Hibernate: A...
1732
  			goto err_out;
7a7b99bf8   Luigi Semenzato   PM: hibernate: Ad...
1733
  		}
6715045dd   Rafael J. Wysocki   PM / Hibernate: A...
1734
1735
1736
1737
1738
1739
1740
1741
1742
1743
1744
1745
1746
1747
1748
1749
1750
1751
1752
1753
  		pages += pages_highmem;
  		/*
  		 * size is the desired number of saveable pages to leave in
  		 * memory, so try to preallocate (all memory - size) pages.
  		 */
  		alloc = (count - pages) - size;
  		pages += preallocate_image_highmem(alloc);
  	} else {
  		/*
  		 * There are approximately max_size saveable pages at this point
  		 * and we want to reduce this number down to size.
  		 */
  		alloc = max_size - size;
  		size = preallocate_highmem_fraction(alloc, highmem, count);
  		pages_highmem += size;
  		alloc -= size;
  		size = preallocate_image_memory(alloc, avail_normal);
  		pages_highmem += preallocate_image_highmem(alloc - size);
  		pages += pages_highmem + size;
  	}
4bb334353   Rafael J. Wysocki   PM/Hibernate: Rew...
1754

64a473cb7   Rafael J. Wysocki   PM/Hibernate: Do ...
1755
1756
1757
1758
1759
  	/*
  	 * We only need as many page frames for the image as there are saveable
  	 * pages in memory, but we have allocated more.  Release the excessive
  	 * ones now.
  	 */
a64fc82c4   Wonhong Kwon   PM / hibernate: e...
1760
  	pages -= free_unnecessary_pages();
4bb334353   Rafael J. Wysocki   PM/Hibernate: Rew...
1761
1762
  
   out:
db5976058   Tina Ruchandani   PM / Hibernate: M...
1763
  	stop = ktime_get();
5c0e9de06   Colin Ian King   PM: hibernate: fi...
1764
1765
  	pr_info("Allocated %lu pages for snapshot
  ", pages);
db5976058   Tina Ruchandani   PM / Hibernate: M...
1766
  	swsusp_show_speed(start, stop, pages, "Allocated");
fe419535d   Rafael J. Wysocki   PM/Hibernate: Mov...
1767
1768
  
  	return 0;
64a473cb7   Rafael J. Wysocki   PM/Hibernate: Do ...
1769
1770
  
   err_out:
64a473cb7   Rafael J. Wysocki   PM/Hibernate: Do ...
1771
1772
  	swsusp_free();
  	return -ENOMEM;
fe419535d   Rafael J. Wysocki   PM/Hibernate: Mov...
1773
  }
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
1774
1775
  #ifdef CONFIG_HIGHMEM
  /**
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
1776
1777
1778
1779
1780
   * count_pages_for_highmem - Count non-highmem pages needed for copying highmem.
   *
   * Compute the number of non-highmem pages that will be necessary for creating
   * copies of highmem pages.
   */
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
1781
1782
  static unsigned int count_pages_for_highmem(unsigned int nr_highmem)
  {
64a473cb7   Rafael J. Wysocki   PM/Hibernate: Do ...
1783
  	unsigned int free_highmem = count_free_highmem_pages() + alloc_highmem;
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
1784
1785
1786
1787
1788
1789
1790
1791
1792
  
  	if (free_highmem >= nr_highmem)
  		nr_highmem = 0;
  	else
  		nr_highmem -= free_highmem;
  
  	return nr_highmem;
  }
  #else
efd5a8524   Rafael J. Wysocki   PM / hibernate: C...
1793
  static unsigned int count_pages_for_highmem(unsigned int nr_highmem) { return 0; }
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
1794
  #endif /* CONFIG_HIGHMEM */
25761b6eb   Rafael J. Wysocki   [PATCH] swsusp: m...
1795
1796
  
  /**
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
1797
   * enough_free_mem - Check if there is enough free memory for the image.
25761b6eb   Rafael J. Wysocki   [PATCH] swsusp: m...
1798
   */
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
1799
  static int enough_free_mem(unsigned int nr_pages, unsigned int nr_highmem)
25761b6eb   Rafael J. Wysocki   [PATCH] swsusp: m...
1800
  {
e5e2fa785   Rafael J. Wysocki   [PATCH] swsusp: f...
1801
  	struct zone *zone;
64a473cb7   Rafael J. Wysocki   PM/Hibernate: Do ...
1802
  	unsigned int free = alloc_normal;
e5e2fa785   Rafael J. Wysocki   [PATCH] swsusp: f...
1803

98e73dc5d   Gerald Schaefer   PM / Hibernate / ...
1804
  	for_each_populated_zone(zone)
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
1805
  		if (!is_highmem(zone))
d23ad4232   Christoph Lameter   [PATCH] Use ZVC f...
1806
  			free += zone_page_state(zone, NR_FREE_PAGES);
940864dda   Rafael J. Wysocki   [PATCH] swsusp: U...
1807

8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
1808
  	nr_pages += count_pages_for_highmem(nr_highmem);
64ec72a1e   Joe Perches   PM: Use a more co...
1809
1810
1811
  	pr_debug("Normal pages needed: %u + %u, available pages: %u
  ",
  		 nr_pages, PAGES_FOR_IO, free);
940864dda   Rafael J. Wysocki   [PATCH] swsusp: U...
1812

64a473cb7   Rafael J. Wysocki   PM/Hibernate: Do ...
1813
  	return free > nr_pages + PAGES_FOR_IO;
25761b6eb   Rafael J. Wysocki   [PATCH] swsusp: m...
1814
  }
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
1815
1816
  #ifdef CONFIG_HIGHMEM
  /**
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
1817
1818
1819
1820
   * get_highmem_buffer - Allocate a buffer for highmem pages.
   *
   * If there are some highmem pages in the hibernation image, we may need a
   * buffer to copy them and/or load their data.
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
1821
   */
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
1822
1823
  static inline int get_highmem_buffer(int safe_needed)
  {
453f85d43   Mel Gorman   mm: remove __GFP_...
1824
  	buffer = get_image_page(GFP_ATOMIC, safe_needed);
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
1825
1826
1827
1828
  	return buffer ? 0 : -ENOMEM;
  }
  
/**
 * alloc_highmem_image_pages - Allocate some highmem pages for the image.
 *
 * Try to allocate as many pages as needed, but if the number of free highmem
 * pages is less than that, allocate them all.
 *
 * Returns the number of requested pages that could not be allocated.
 */
static inline unsigned int alloc_highmem_pages(struct memory_bitmap *bm,
					       unsigned int nr_highmem)
{
	unsigned int to_alloc = count_free_highmem_pages();

	if (to_alloc > nr_highmem)
		to_alloc = nr_highmem;

	nr_highmem -= to_alloc;
	while (to_alloc-- > 0) {
		struct page *page;

		page = alloc_image_page(__GFP_HIGHMEM|__GFP_KSWAPD_RECLAIM);
		/* Record the allocated page frame in the "copy" bitmap. */
		memory_bm_set_bit(bm, page_to_pfn(page));
	}
	return nr_highmem;
}
#else
/* CONFIG_HIGHMEM=n: no highmem exists, so these are trivial no-op stubs. */
static inline int get_highmem_buffer(int safe_needed) { return 0; }

static inline unsigned int alloc_highmem_pages(struct memory_bitmap *bm,
					       unsigned int n) { return 0; }
#endif /* CONFIG_HIGHMEM */
  
/**
 * swsusp_alloc - Allocate memory for hibernation image.
 *
 * We first try to allocate as many highmem pages as there are
 * saveable highmem pages in the system.  If that fails, we allocate
 * non-highmem pages for the copies of the remaining highmem ones.
 *
 * In this approach it is likely that the copies of highmem pages will
 * also be located in the high memory, because of the way in which
 * copy_data_pages() works.
 *
 * Returns 0 on success or -ENOMEM (after releasing everything via
 * swsusp_free()) on allocation failure.
 */
static int swsusp_alloc(struct memory_bitmap *copy_bm,
			unsigned int nr_pages, unsigned int nr_highmem)
{
	if (nr_highmem > 0) {
		if (get_highmem_buffer(PG_ANY))
			goto err_out;
		/* alloc_normal/alloc_highmem count pages preallocated earlier. */
		if (nr_highmem > alloc_highmem) {
			nr_highmem -= alloc_highmem;
			nr_pages += alloc_highmem_pages(copy_bm, nr_highmem);
		}
	}
	if (nr_pages > alloc_normal) {
		nr_pages -= alloc_normal;
		while (nr_pages-- > 0) {
			struct page *page;

			page = alloc_image_page(GFP_ATOMIC);
			if (!page)
				goto err_out;
			memory_bm_set_bit(copy_bm, page_to_pfn(page));
		}
	}

	return 0;

 err_out:
	swsusp_free();
	return -ENOMEM;
}
722a9f929   Andi Kleen   asmlinkage: Add e...
1895
/**
 * swsusp_save - Create the hibernation image (copy of saveable memory pages).
 *
 * Count the saveable normal and highmem pages, check that enough free memory
 * is available, allocate pages for the copies and copy the data.  On success
 * the globals nr_copy_pages and nr_meta_pages describe the created image.
 *
 * Returns 0 on success or -ENOMEM if memory is insufficient.
 */
asmlinkage __visible int swsusp_save(void)
{
	unsigned int nr_pages, nr_highmem;

	pr_info("Creating image:\n");

	/* Flush per-CPU page lists so count_data_pages() sees a stable state. */
	drain_local_pages(NULL);
	nr_pages = count_data_pages();
	nr_highmem = count_highmem_pages();
	pr_info("Need to copy %u pages\n", nr_pages + nr_highmem);

	if (!enough_free_mem(nr_pages, nr_highmem)) {
		pr_err("Not enough free memory\n");
		return -ENOMEM;
	}

	if (swsusp_alloc(&copy_bm, nr_pages, nr_highmem)) {
		pr_err("Memory allocation failed\n");
		return -ENOMEM;
	}

	/*
	 * During allocating of suspend pagedir, new cold pages may appear.
	 * Kill them.
	 */
	drain_local_pages(NULL);
	copy_data_pages(&copy_bm, &orig_bm);

	/*
	 * End of critical section. From now on, we can write to memory,
	 * but we should not touch disk. This specially means we must _not_
	 * touch swap space! Except we must write out our image of course.
	 */

	nr_pages += nr_highmem;
	nr_copy_pages = nr_pages;
	/* One PFN (a long) of metadata is stored per copied page. */
	nr_meta_pages = DIV_ROUND_UP(nr_pages * sizeof(long), PAGE_SIZE);

	pr_info("Image created (%d pages copied)\n", nr_pages);

	return 0;
}
f577eb30a   Rafael J. Wysocki   [PATCH] swsusp: l...
1940

d307c4a8e   Rafael J. Wysocki   Hibernation: Arbi...
1941
1942
#ifndef CONFIG_ARCH_HIBERNATION_HEADER
/*
 * Generic (architecture-independent) image header completion: record the
 * running kernel's utsname and version code so the image can be matched
 * against the kernel that tries to resume it.
 */
static int init_header_complete(struct swsusp_info *info)
{
	memcpy(&info->uts, init_utsname(), sizeof(struct new_utsname));
	info->version_code = LINUX_VERSION_CODE;
	return 0;
}
02d7f4005   Alexey Dobriyan   PM: sleep: spread...
1948
/*
 * check_image_kernel - Verify that the image was created by this kernel.
 *
 * Returns NULL if every field matches, or a short string naming the first
 * mismatched field (used in the "Image mismatch" error message).
 */
static const char *check_image_kernel(struct swsusp_info *info)
{
	if (info->version_code != LINUX_VERSION_CODE)
		return "kernel version";
	if (strcmp(info->uts.sysname,init_utsname()->sysname))
		return "system type";
	if (strcmp(info->uts.release,init_utsname()->release))
		return "kernel release";
	if (strcmp(info->uts.version,init_utsname()->version))
		return "version";
	if (strcmp(info->uts.machine,init_utsname()->machine))
		return "machine";
	return NULL;
}
#endif /* CONFIG_ARCH_HIBERNATION_HEADER */
af508b34d   Rafael J. Wysocki   Hibernation: Intr...
1963
1964
1965
1966
  unsigned long snapshot_get_image_size(void)
  {
  	return nr_copy_pages + nr_meta_pages + 1;
  }
d307c4a8e   Rafael J. Wysocki   Hibernation: Arbi...
1967
1968
1969
/*
 * init_header - Fill in the hibernation image header.
 *
 * info->pages is the total image size (header + metadata + data pages) as
 * computed by snapshot_get_image_size(); info->size is the same in bytes.
 */
static int init_header(struct swsusp_info *info)
{
	memset(info, 0, sizeof(struct swsusp_info));
	info->num_physpages = get_num_physpages();
	info->image_pages = nr_copy_pages;
	info->pages = snapshot_get_image_size();
	info->size = info->pages;
	info->size <<= PAGE_SHIFT;
	/* Let the architecture (or the generic code) complete the header. */
	return init_header_complete(info);
}
  
/**
 * pack_pfns - Prepare PFNs for saving.
 * @bm: Memory bitmap.
 * @buf: Memory buffer to store the PFNs in.
 *
 * PFNs corresponding to set bits in @bm are stored in the area of memory
 * pointed to by @buf (1 page at a time).  If fewer PFNs remain than fit in
 * a page, the list is terminated by a BM_END_OF_MAP entry.
 */
static inline void pack_pfns(unsigned long *buf, struct memory_bitmap *bm)
{
	int j;

	for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
		buf[j] = memory_bm_next_pfn(bm);
		if (unlikely(buf[j] == BM_END_OF_MAP))
			break;
	}
}
  
/**
 * snapshot_read_next - Get the address to read the next image page from.
 * @handle: Snapshot handle to be used for the reading.
 *
 * On the first call, @handle should point to a zeroed snapshot_handle
 * structure.  The structure gets populated then and a pointer to it should be
 * passed to this function every next time.
 *
 * On success, the function returns a positive number.  Then, the caller
 * is allowed to read up to the returned number of bytes from the memory
 * location computed by the data_of() macro.
 *
 * The function returns 0 to indicate the end of the data stream condition,
 * and negative numbers are returned on errors.  If that happens, the structure
 * pointed to by @handle is not updated and should not be used any more.
 */
int snapshot_read_next(struct snapshot_handle *handle)
{
	/* Header + metadata + data pages have all been handed out: done. */
	if (handle->cur > nr_meta_pages + nr_copy_pages)
		return 0;

	if (!buffer) {
		/* This makes the buffer be freed by swsusp_free() */
		buffer = get_image_page(GFP_ATOMIC, PG_ANY);
		if (!buffer)
			return -ENOMEM;
	}
	if (!handle->cur) {
		/* Page 0: the image header. */
		int error;

		error = init_header((struct swsusp_info *)buffer);
		if (error)
			return error;
		handle->buffer = buffer;
		memory_bm_position_reset(&orig_bm);
		memory_bm_position_reset(&copy_bm);
	} else if (handle->cur <= nr_meta_pages) {
		/* Metadata pages: packed PFN lists taken from orig_bm. */
		clear_page(buffer);
		pack_pfns(buffer, &orig_bm);
	} else {
		/* Data pages: the copies recorded in copy_bm. */
		struct page *page;

		page = pfn_to_page(memory_bm_next_pfn(&copy_bm));
		if (PageHighMem(page)) {
			/*
			 * Highmem pages are copied to the buffer,
			 * because we can't return with a kmapped
			 * highmem page (we may not be called again).
			 */
			void *kaddr;

			kaddr = kmap_atomic(page);
			copy_page(buffer, kaddr);
			kunmap_atomic(kaddr);
			handle->buffer = buffer;
		} else {
			handle->buffer = page_address(page);
		}
	}
	handle->cur++;
	return PAGE_SIZE;
}
6dbecfd34   Rafael J. Wysocki   PM / hibernate: S...
2058
2059
2060
2061
2062
2063
2064
2065
2066
2067
2068
2069
  static void duplicate_memory_bitmap(struct memory_bitmap *dst,
  				    struct memory_bitmap *src)
  {
  	unsigned long pfn;
  
  	memory_bm_position_reset(src);
  	pfn = memory_bm_next_pfn(src);
  	while (pfn != BM_END_OF_MAP) {
  		memory_bm_set_bit(dst, pfn);
  		pfn = memory_bm_next_pfn(src);
  	}
  }
f577eb30a   Rafael J. Wysocki   [PATCH] swsusp: l...
2070
/**
 * mark_unsafe_pages - Mark pages that were used before hibernation.
 *
 * Mark the pages that cannot be used for storing the image during restoration,
 * because they conflict with the pages that had been used before hibernation.
 */
static void mark_unsafe_pages(struct memory_bitmap *bm)
{
	unsigned long pfn;

	/* Clear the "free"/"unsafe" bit for all PFNs */
	memory_bm_position_reset(free_pages_map);
	pfn = memory_bm_next_pfn(free_pages_map);
	while (pfn != BM_END_OF_MAP) {
		memory_bm_clear_current(free_pages_map);
		pfn = memory_bm_next_pfn(free_pages_map);
	}

	/* Mark pages that correspond to the "original" PFNs as "unsafe" */
	duplicate_memory_bitmap(free_pages_map, bm);

	/* Restart the count of "unsafe" pages handed out so far. */
	allocated_unsafe_pages = 0;
}
d307c4a8e   Rafael J. Wysocki   Hibernation: Arbi...
2092
/*
 * check_header - Validate the image header against the running system.
 *
 * Rejects the image (-EPERM) if it was created by a different kernel or on
 * a machine with a different amount of physical memory.
 */
static int check_header(struct swsusp_info *info)
{
	const char *reason;

	reason = check_image_kernel(info);
	if (!reason && info->num_physpages != get_num_physpages())
		reason = "memory size";
	if (reason) {
		pr_err("Image mismatch: %s\n", reason);
		return -EPERM;
	}
	return 0;
}
  
/**
 * load_header - Check the image header and copy the data from it.
 */
static int load_header(struct swsusp_info *info)
{
	int error;

	restore_pblist = NULL;
	error = check_header(info);
	if (!error) {
		/* info->pages = header (1) + metadata + data pages. */
		nr_copy_pages = info->image_pages;
		nr_meta_pages = info->pages - info->image_pages - 1;
	}
	return error;
}
  
/**
 * unpack_orig_pfns - Set bits corresponding to given PFNs in a memory bitmap.
 * @bm: Memory bitmap.
 * @buf: Area of memory containing the PFNs.
 *
 * For each element of the array pointed to by @buf (1 page at a time), set the
 * corresponding bit in @bm.
 *
 * Returns 0 on success, or -EFAULT if a PFN from the image is invalid or
 * cannot be represented in @bm.
 */
static int unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm)
{
	int j;

	for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
		if (unlikely(buf[j] == BM_END_OF_MAP))
			break;

		if (pfn_valid(buf[j]) && memory_bm_pfn_present(bm, buf[j]))
			memory_bm_set_bit(bm, buf[j]);
		else
			return -EFAULT;
	}

	return 0;
}
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
2145
#ifdef CONFIG_HIGHMEM
/*
 * struct highmem_pbe is used for creating the list of highmem pages that
 * should be restored atomically during the resume from disk, because the page
 * frames they have occupied before the suspend are in use.
 */
struct highmem_pbe {
	struct page *copy_page;	/* data is here now */
	struct page *orig_page;	/* data was here before the suspend */
	struct highmem_pbe *next;
};

/*
 * List of highmem PBEs needed for restoring the highmem pages that were
 * allocated before the suspend and included in the suspend image, but have
 * also been allocated by the "resume" kernel, so their contents cannot be
 * written directly to their "original" page frames.
 */
static struct highmem_pbe *highmem_pblist;
  
/**
 * count_highmem_image_pages - Compute the number of highmem pages in the image.
 * @bm: Memory bitmap.
 *
 * The bits in @bm that correspond to image pages are assumed to be set.
 *
 * Returns the number of set bits in @bm whose page frames are in highmem.
 */
static unsigned int count_highmem_image_pages(struct memory_bitmap *bm)
{
	unsigned long pfn;
	unsigned int cnt = 0;

	memory_bm_position_reset(bm);
	pfn = memory_bm_next_pfn(bm);
	while (pfn != BM_END_OF_MAP) {
		if (PageHighMem(pfn_to_page(pfn)))
			cnt++;

		pfn = memory_bm_next_pfn(bm);
	}
	return cnt;
}
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
2185
2186
2187
/* Number of "safe" highmem pages available for loading image data. */
static unsigned int safe_highmem_pages;

/* Bitmap of the "safe" highmem pages set up by prepare_highmem_image(). */
static struct memory_bitmap *safe_highmem_bm;

/**
 * prepare_highmem_image - Allocate memory for loading highmem data from image.
 * @bm: Pointer to an uninitialized memory bitmap structure.
 * @nr_highmem_p: Pointer to the number of highmem image pages.
 *
 * Try to allocate as many highmem pages as there are highmem image pages
 * (@nr_highmem_p points to the variable containing the number of highmem image
 * pages).  The pages that are "safe" (ie. will not be overwritten when the
 * hibernation image is restored entirely) have the corresponding bits set in
 * @bm (it must be uninitialized).
 *
 * NOTE: This function should not be called if there are no highmem image pages.
 */
static int prepare_highmem_image(struct memory_bitmap *bm,
				 unsigned int *nr_highmem_p)
{
	unsigned int to_alloc;

	if (memory_bm_create(bm, GFP_ATOMIC, PG_SAFE))
		return -ENOMEM;

	if (get_highmem_buffer(PG_SAFE))
		return -ENOMEM;

	/* Cap the request at the number of free highmem pages. */
	to_alloc = count_free_highmem_pages();
	if (to_alloc > *nr_highmem_p)
		to_alloc = *nr_highmem_p;
	else
		*nr_highmem_p = to_alloc;

	safe_highmem_pages = 0;
	while (to_alloc-- > 0) {
		struct page *page;

		page = alloc_page(__GFP_HIGHMEM);
		if (!swsusp_page_is_free(page)) {
			/* The page is "safe", set its bit in the bitmap */
			memory_bm_set_bit(bm, page_to_pfn(page));
			safe_highmem_pages++;
		}
		/* Mark the page as allocated */
		swsusp_set_page_forbidden(page);
		swsusp_set_page_free(page);
	}
	memory_bm_position_reset(bm);
	safe_highmem_bm = bm;
	return 0;
}
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
2236
/*
 * Highmem page whose contents still have to be copied from @buffer by
 * copy_last_highmem_page(); NULL when no such copy is pending.
 */
static struct page *last_highmem_page;

/**
 * get_highmem_page_buffer - Prepare a buffer to store a highmem image page.
 * @page: Highmem page from the image.
 * @ca: Chain allocator to take the highmem PBE from.
 *
 * For a given highmem image page get a buffer that suspend_write_next() should
 * return to its caller to write to.
 *
 * If the page is to be saved to its "original" page frame or a copy of
 * the page is to be made in the highmem, @buffer is returned.  Otherwise,
 * the copy of the page is to be made in normal memory, so the address of
 * the copy is returned.
 *
 * If @buffer is returned, the caller of suspend_write_next() will write
 * the page's contents to @buffer, so they will have to be copied to the
 * right location on the next call to suspend_write_next() and it is done
 * with the help of copy_last_highmem_page().  For this purpose, if
 * @buffer is returned, @last_highmem_page is set to the page to which
 * the data will have to be copied from @buffer.
 */
static void *get_highmem_page_buffer(struct page *page,
				     struct chain_allocator *ca)
{
	struct highmem_pbe *pbe;
	void *kaddr;

	if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page)) {
		/*
		 * We have allocated the "original" page frame and we can
		 * use it directly to store the loaded page.
		 */
		last_highmem_page = page;
		return buffer;
	}
	/*
	 * The "original" page frame has not been allocated and we have to
	 * use a "safe" page frame to store the loaded page.
	 */
	pbe = chain_alloc(ca, sizeof(struct highmem_pbe));
	if (!pbe) {
		swsusp_free();
		return ERR_PTR(-ENOMEM);
	}
	pbe->orig_page = page;
	if (safe_highmem_pages > 0) {
		struct page *tmp;

		/* Copy of the page will be stored in high memory */
		kaddr = buffer;
		tmp = pfn_to_page(memory_bm_next_pfn(safe_highmem_bm));
		safe_highmem_pages--;
		last_highmem_page = tmp;
		pbe->copy_page = tmp;
	} else {
		/* Copy of the page will be stored in normal memory */
		kaddr = safe_pages_list;
		safe_pages_list = safe_pages_list->next;
		pbe->copy_page = virt_to_page(kaddr);
	}
	pbe->next = highmem_pblist;
	highmem_pblist = pbe;
	return kaddr;
}
  
/**
 * copy_last_highmem_page - Copy the most recent highmem image page.
 *
 * Copy the contents of a highmem image from @buffer, where the caller of
 * snapshot_write_next() has stored them, to the right location represented by
 * @last_highmem_page .
 */
static void copy_last_highmem_page(void)
{
	if (last_highmem_page) {
		void *dst;

		dst = kmap_atomic(last_highmem_page);
		copy_page(dst, buffer);
		kunmap_atomic(dst);
		/* Mark the pending copy as done. */
		last_highmem_page = NULL;
	}
}
  
  static inline int last_highmem_page_copied(void)
  {
  	return !last_highmem_page;
  }
  
/* Release the "safe" highmem bitmap and the highmem copy buffer, if any. */
static inline void free_highmem_data(void)
{
	if (safe_highmem_bm)
		memory_bm_free(safe_highmem_bm, PG_UNSAFE_CLEAR);

	if (buffer)
		free_image_page(buffer, PG_UNSAFE_CLEAR);
}
#else
/* CONFIG_HIGHMEM=n: no highmem image pages can exist, so these are stubs. */
static unsigned int count_highmem_image_pages(struct memory_bitmap *bm) { return 0; }

static inline int prepare_highmem_image(struct memory_bitmap *bm,
					unsigned int *nr_highmem_p) { return 0; }

static inline void *get_highmem_page_buffer(struct page *page,
					    struct chain_allocator *ca)
{
	/* Must never be called without highmem support. */
	return ERR_PTR(-EINVAL);
}

static inline void copy_last_highmem_page(void) {}
static inline int last_highmem_page_copied(void) { return 1; }
static inline void free_highmem_data(void) {}
#endif /* CONFIG_HIGHMEM */
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
2345
  #define PBES_PER_LINKED_PAGE	(LINKED_PAGE_DATA_SIZE / sizeof(struct pbe))
f577eb30a   Rafael J. Wysocki   [PATCH] swsusp: l...
2346
  /**
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
2347
2348
2349
2350
2351
2352
2353
   * prepare_image - Make room for loading hibernation image.
   * @new_bm: Unitialized memory bitmap structure.
   * @bm: Memory bitmap with unsafe pages marked.
   *
   * Use @bm to mark the pages that will be overwritten in the process of
   * restoring the system memory state from the suspend image ("unsafe" pages)
   * and allocate memory for the image.
968808b89   Rafael J. Wysocki   [PATCH] swsusp: u...
2354
   *
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
2355
2356
2357
2358
2359
   * The idea is to allocate a new memory bitmap first and then allocate
   * as many pages as needed for image data, but without specifying what those
   * pages will be used for just yet.  Instead, we mark them all as allocated and
   * create a lists of "safe" pages to be used later.  On systems with high
   * memory a list of "safe" highmem pages is created too.
f577eb30a   Rafael J. Wysocki   [PATCH] swsusp: l...
2360
   */
efd5a8524   Rafael J. Wysocki   PM / hibernate: C...
2361
static int prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
{
	unsigned int nr_pages, nr_highmem;
	struct linked_page *lp;
	int error;

	/* If there is no highmem, the buffer will not be necessary */
	free_image_page(buffer, PG_UNSAFE_CLEAR);
	buffer = NULL;

	nr_highmem = count_highmem_image_pages(bm);
	/* Mark frames occupied by image data so they won't be reused as "safe" */
	mark_unsafe_pages(bm);

	/* Rebuild the bitmap out of "safe" pages only, then drop the old one */
	error = memory_bm_create(new_bm, GFP_ATOMIC, PG_SAFE);
	if (error)
		goto Free;

	duplicate_memory_bitmap(new_bm, bm);
	memory_bm_free(bm, PG_UNSAFE_KEEP);

	if (nr_highmem > 0) {
		error = prepare_highmem_image(bm, &nr_highmem);
		if (error)
			goto Free;
	}
	/*
	 * Reserve some safe pages for potential later use.
	 *
	 * NOTE: This way we make sure there will be enough safe pages for the
	 * chain_alloc() in get_buffer().  It is a bit wasteful, but
	 * nr_copy_pages cannot be greater than 50% of the memory anyway.
	 *
	 * nr_copy_pages cannot be less than allocated_unsafe_pages too.
	 */
	nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
	nr_pages = DIV_ROUND_UP(nr_pages, PBES_PER_LINKED_PAGE);
	while (nr_pages > 0) {
		lp = get_image_page(GFP_ATOMIC, PG_SAFE);
		if (!lp) {
			error = -ENOMEM;
			goto Free;
		}
		lp->next = safe_pages_list;
		safe_pages_list = lp;
		nr_pages--;
	}
	/* Preallocate memory for the image */
	nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
	while (nr_pages > 0) {
		lp = (struct linked_page *)get_zeroed_page(GFP_ATOMIC);
		if (!lp) {
			error = -ENOMEM;
			goto Free;
		}
		if (!swsusp_page_is_free(virt_to_page(lp))) {
			/* The page is "safe", add it to the list */
			lp->next = safe_pages_list;
			safe_pages_list = lp;
		}
		/* Mark the page as allocated */
		swsusp_set_page_forbidden(virt_to_page(lp));
		swsusp_set_page_free(virt_to_page(lp));
		nr_pages--;
	}
	return 0;

 Free:
	/* Releases everything allocated above, including the safe-page lists */
	swsusp_free();
	return error;
}
940864dda   Rafael J. Wysocki   [PATCH] swsusp: U...
2429
  /**
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
2430
2431
2432
2433
   * get_buffer - Get the address to store the next image data page.
   *
   * Get the address that snapshot_write_next() should return to its caller to
   * write to.
940864dda   Rafael J. Wysocki   [PATCH] swsusp: U...
2434
   */
940864dda   Rafael J. Wysocki   [PATCH] swsusp: U...
2435
/*
 * get_buffer - Get the address to store the next image data page.
 *
 * Advance @bm to the next PFN of the image.  Returns either the original
 * page frame itself (when it was successfully allocated by us) or a "safe"
 * page recorded in a struct pbe for later relocation.  Returns an ERR_PTR()
 * on exhaustion of the bitmap or on allocation failure.
 */
static void *get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca)
{
	struct pbe *pbe;
	struct page *page;
	unsigned long pfn = memory_bm_next_pfn(bm);

	if (pfn == BM_END_OF_MAP)
		return ERR_PTR(-EFAULT);

	page = pfn_to_page(pfn);
	if (PageHighMem(page))
		return get_highmem_page_buffer(page, ca);

	if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page))
		/*
		 * We have allocated the "original" page frame and we can
		 * use it directly to store the loaded page.
		 */
		return page_address(page);

	/*
	 * The "original" page frame has not been allocated and we have to
	 * use a "safe" page frame to store the loaded page.
	 */
	pbe = chain_alloc(ca, sizeof(struct pbe));
	if (!pbe) {
		swsusp_free();
		return ERR_PTR(-ENOMEM);
	}
	/* Pop a safe page off the list and remember where the data belongs */
	pbe->orig_address = page_address(page);
	pbe->address = safe_pages_list;
	safe_pages_list = safe_pages_list->next;
	pbe->next = restore_pblist;
	restore_pblist = pbe;
	return pbe->address;
}
f577eb30a   Rafael J. Wysocki   [PATCH] swsusp: l...
2469
  /**
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
2470
2471
   * snapshot_write_next - Get the address to store the next image page.
   * @handle: Snapshot handle structure to guide the writing.
f577eb30a   Rafael J. Wysocki   [PATCH] swsusp: l...
2472
   *
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
2473
2474
2475
   * On the first call, @handle should point to a zeroed snapshot_handle
   * structure.  The structure gets populated then and a pointer to it should be
   * passed to this function every next time.
f577eb30a   Rafael J. Wysocki   [PATCH] swsusp: l...
2476
   *
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
2477
2478
2479
   * On success, the function returns a positive number.  Then, the caller
   * is allowed to write up to the returned number of bytes to the memory
   * location computed by the data_of() macro.
f577eb30a   Rafael J. Wysocki   [PATCH] swsusp: l...
2480
   *
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
2481
2482
2483
   * The function returns 0 to indicate the "end of file" condition.  Negative
   * numbers are returned on errors, in which cases the structure pointed to by
   * @handle is not updated and should not be used any more.
f577eb30a   Rafael J. Wysocki   [PATCH] swsusp: l...
2484
   */
d3c1b24c5   Jiri Slaby   PM / Hibernate: S...
2485
int snapshot_write_next(struct snapshot_handle *handle)
{
	static struct chain_allocator ca;
	int error = 0;

	/* Check if we have already loaded the entire image */
	if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages)
		return 0;

	handle->sync_read = 1;

	if (!handle->cur) {
		/* Phase 0: hand out the header buffer */
		if (!buffer)
			/* This makes the buffer be freed by swsusp_free() */
			buffer = get_image_page(GFP_ATOMIC, PG_ANY);

		if (!buffer)
			return -ENOMEM;

		handle->buffer = buffer;
	} else if (handle->cur == 1) {
		/* Phase 1: parse the header written in phase 0 */
		error = load_header(buffer);
		if (error)
			return error;

		safe_pages_list = NULL;

		error = memory_bm_create(&copy_bm, GFP_ATOMIC, PG_ANY);
		if (error)
			return error;

		hibernate_restore_protection_begin();
	} else if (handle->cur <= nr_meta_pages + 1) {
		/* Phase 2: accumulate the PFN metadata pages */
		error = unpack_orig_pfns(buffer, &copy_bm);
		if (error)
			return error;

		if (handle->cur == nr_meta_pages + 1) {
			/* Last metadata page: set up for receiving data pages */
			error = prepare_image(&orig_bm, &copy_bm);
			if (error)
				return error;

			chain_init(&ca, GFP_ATOMIC, PG_SAFE);
			memory_bm_position_reset(&orig_bm);
			restore_pblist = NULL;
			handle->buffer = get_buffer(&orig_bm, &ca);
			handle->sync_read = 0;
			if (IS_ERR(handle->buffer))
				return PTR_ERR(handle->buffer);
		}
	} else {
		/* Phase 3: data pages - finish the previous one, get the next */
		copy_last_highmem_page();
		hibernate_restore_protect_page(handle->buffer);
		handle->buffer = get_buffer(&orig_bm, &ca);
		if (IS_ERR(handle->buffer))
			return PTR_ERR(handle->buffer);
		if (handle->buffer != buffer)
			handle->sync_read = 0;
	}
	handle->cur++;
	return PAGE_SIZE;
}
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
2542
  /**
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
2543
2544
2545
2546
2547
2548
   * snapshot_write_finalize - Complete the loading of a hibernation image.
   *
   * Must be called after the last call to snapshot_write_next() in case the last
   * page in the image happens to be a highmem page and its contents should be
   * stored in highmem.  Additionally, it recycles bitmap memory that's not
   * necessary any more.
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
2549
   */
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
2550
2551
2552
void snapshot_write_finalize(struct snapshot_handle *handle)
{
	/* Flush the last highmem page and re-protect the final buffer */
	copy_last_highmem_page();
	hibernate_restore_protect_page(handle->buffer);
	/* Do that only if we have loaded the image entirely */
	if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages) {
		memory_bm_recycle(&orig_bm);
		free_highmem_data();
	}
}
f577eb30a   Rafael J. Wysocki   [PATCH] swsusp: l...
2560
2561
  int snapshot_image_loaded(struct snapshot_handle *handle)
  {
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
2562
  	return !(!nr_copy_pages || !last_highmem_page_copied() ||
940864dda   Rafael J. Wysocki   [PATCH] swsusp: U...
2563
2564
  			handle->cur <= nr_meta_pages + nr_copy_pages);
  }
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
2565
2566
  #ifdef CONFIG_HIGHMEM
  /* Assumes that @buf is ready and points to a "safe" page */
efd5a8524   Rafael J. Wysocki   PM / hibernate: C...
2567
2568
  static inline void swap_two_pages_data(struct page *p1, struct page *p2,
  				       void *buf)
940864dda   Rafael J. Wysocki   [PATCH] swsusp: U...
2569
  {
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
2570
  	void *kaddr1, *kaddr2;
0de9a1e28   Cong Wang   power: remove the...
2571
2572
  	kaddr1 = kmap_atomic(p1);
  	kaddr2 = kmap_atomic(p2);
3ecb01df3   Jan Beulich   use clear_page()/...
2573
2574
2575
  	copy_page(buf, kaddr1);
  	copy_page(kaddr1, kaddr2);
  	copy_page(kaddr2, buf);
0de9a1e28   Cong Wang   power: remove the...
2576
2577
  	kunmap_atomic(kaddr2);
  	kunmap_atomic(kaddr1);
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
2578
2579
2580
  }
  
  /**
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
2581
2582
2583
2584
2585
   * restore_highmem - Put highmem image pages into their original locations.
   *
   * For each highmem page that was in use before hibernation and is included in
   * the image, and also has been allocated by the "restore" kernel, swap its
   * current contents with the previous (ie. "before hibernation") ones.
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
2586
   *
ef96f639e   Rafael J. Wysocki   PM / hibernate: C...
2587
2588
   * If the restore eventually fails, we can call this function once again and
   * restore the highmem state as seen by the restore kernel.
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
2589
   */
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
2590
2591
2592
2593
2594
2595
2596
2597
2598
2599
2600
2601
2602
2603
2604
2605
2606
2607
  int restore_highmem(void)
  {
  	struct highmem_pbe *pbe = highmem_pblist;
  	void *buf;
  
  	if (!pbe)
  		return 0;
  
  	buf = get_image_page(GFP_ATOMIC, PG_SAFE);
  	if (!buf)
  		return -ENOMEM;
  
  	while (pbe) {
  		swap_two_pages_data(pbe->copy_page, pbe->orig_page, buf);
  		pbe = pbe->next;
  	}
  	free_image_page(buf, PG_UNSAFE_CLEAR);
  	return 0;
f577eb30a   Rafael J. Wysocki   [PATCH] swsusp: l...
2608
  }
8357376d3   Rafael J. Wysocki   [PATCH] swsusp: I...
2609
  #endif /* CONFIG_HIGHMEM */