Blame view

kernel/power/swap.c 34.1 KB
61159a314   Rafael J. Wysocki   [PATCH] swsusp: s...
1
2
3
4
5
6
  /*
   * linux/kernel/power/swap.c
   *
   * This file provides functions for reading the suspend image from
   * and writing it to a swap partition.
   *
a2531293d   Pavel Machek   update email address
7
   * Copyright (C) 1998,2001-2005 Pavel Machek <pavel@ucw.cz>
61159a314   Rafael J. Wysocki   [PATCH] swsusp: s...
8
   * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
9f339caf8   Bojan Smojver   PM / Hibernate: U...
9
   * Copyright (C) 2010 Bojan Smojver <bojan@rexursive.com>
61159a314   Rafael J. Wysocki   [PATCH] swsusp: s...
10
11
12
13
14
15
   *
   * This file is released under the GPLv2.
   *
   */
  
  #include <linux/module.h>
61159a314   Rafael J. Wysocki   [PATCH] swsusp: s...
16
  #include <linux/file.h>
61159a314   Rafael J. Wysocki   [PATCH] swsusp: s...
17
18
19
20
  #include <linux/delay.h>
  #include <linux/bitops.h>
  #include <linux/genhd.h>
  #include <linux/device.h>
61159a314   Rafael J. Wysocki   [PATCH] swsusp: s...
21
  #include <linux/bio.h>
546e0d271   Andrew Morton   [PATCH] swsusp: r...
22
  #include <linux/blkdev.h>
61159a314   Rafael J. Wysocki   [PATCH] swsusp: s...
23
24
25
  #include <linux/swap.h>
  #include <linux/swapops.h>
  #include <linux/pm.h>
5a0e3ad6a   Tejun Heo   include cleanup: ...
26
  #include <linux/slab.h>
f996fc967   Bojan Smojver   PM / Hibernate: C...
27
28
  #include <linux/lzo.h>
  #include <linux/vmalloc.h>
081a9d043   Bojan Smojver   PM / Hibernate: I...
29
30
31
32
  #include <linux/cpumask.h>
  #include <linux/atomic.h>
  #include <linux/kthread.h>
  #include <linux/crc32.h>
61159a314   Rafael J. Wysocki   [PATCH] swsusp: s...
33
34
  
  #include "power.h"
be8cd644c   Rafael J. Wysocki   PM / Hibernate: R...
35
  #define HIBERNATE_SIG	"S1SUSPEND"
61159a314   Rafael J. Wysocki   [PATCH] swsusp: s...
36

51fb352b2   Jiri Slaby   PM / Hibernate: M...
37
38
39
  /*
   *	The swap map is a data structure used for keeping track of each page
   *	written to a swap partition.  It consists of many swap_map_page
901336733   Cesar Eduardo Barros   PM / Hibernate: F...
40
   *	structures that contain each an array of MAP_PAGE_ENTRIES swap entries.
51fb352b2   Jiri Slaby   PM / Hibernate: M...
41
42
43
44
45
46
47
   *	These structures are stored on the swap and linked together with the
   *	help of the .next_swap member.
   *
   *	The swap map is created during suspend.  The swap map pages are
   *	allocated and populated one at a time, so we only need one memory
   *	page to set up the entire structure.
   *
081a9d043   Bojan Smojver   PM / Hibernate: I...
48
   *	During resume we pick up all swap_map_page structures into a list.
51fb352b2   Jiri Slaby   PM / Hibernate: M...
49
50
51
52
53
54
55
56
   */
  
/* Sectors per map page; the last slot is reserved for the next-page link. */
#define MAP_PAGE_ENTRIES	(PAGE_SIZE / sizeof(sector_t) - 1)

/*
 * One page worth of sector numbers: the swap locations of saved image
 * pages, plus the swap location of the next swap_map_page in the chain.
 */
struct swap_map_page {
	sector_t entries[MAP_PAGE_ENTRIES];	/* sectors of saved image pages */
	sector_t next_swap;			/* sector of the next map page */
};
081a9d043   Bojan Smojver   PM / Hibernate: I...
57
58
59
60
  struct swap_map_page_list {
  	struct swap_map_page *map;
  	struct swap_map_page_list *next;
  };
51fb352b2   Jiri Slaby   PM / Hibernate: M...
61
62
63
64
65
66
67
  /**
   *	The swap_map_handle structure is used for handling swap in
   *	a file-alike way
   */
  
  struct swap_map_handle {
  	struct swap_map_page *cur;
081a9d043   Bojan Smojver   PM / Hibernate: I...
68
  	struct swap_map_page_list *maps;
51fb352b2   Jiri Slaby   PM / Hibernate: M...
69
70
71
  	sector_t cur_swap;
  	sector_t first_sector;
  	unsigned int k;
081a9d043   Bojan Smojver   PM / Hibernate: I...
72
73
  	unsigned long nr_free_pages, written;
  	u32 crc32;
51fb352b2   Jiri Slaby   PM / Hibernate: M...
74
  };
1b29c1643   Vivek Goyal   [PATCH] x86-64: d...
75
  struct swsusp_header {
081a9d043   Bojan Smojver   PM / Hibernate: I...
76
77
78
  	char reserved[PAGE_SIZE - 20 - sizeof(sector_t) - sizeof(int) -
  	              sizeof(u32)];
  	u32	crc32;
3aef83e0e   Rafael J. Wysocki   [PATCH] swsusp: u...
79
  	sector_t image;
a634cc101   Rafael J. Wysocki   swsusp: introduce...
80
  	unsigned int flags;	/* Flags to pass to the "boot" kernel */
61159a314   Rafael J. Wysocki   [PATCH] swsusp: s...
81
82
  	char	orig_sig[10];
  	char	sig[10];
1b29c1643   Vivek Goyal   [PATCH] x86-64: d...
83
84
85
  } __attribute__((packed));
  
  static struct swsusp_header *swsusp_header;
61159a314   Rafael J. Wysocki   [PATCH] swsusp: s...
86

0414f2ec0   Nigel Cunningham   PM / Hibernate: M...
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
/**
 *	The following functions are used for tracing the allocated
 *	swap pages, so that they can be freed in case of an error.
 */

/* A contiguous range of swap offsets allocated for the image. */
struct swsusp_extent {
	struct rb_node node;
	unsigned long start;	/* first swap offset in the range */
	unsigned long end;	/* last swap offset in the range (inclusive) */
};

/* Tree of all extents allocated so far, ordered by offset. */
static struct rb_root swsusp_extents = RB_ROOT;
  
  static int swsusp_extents_insert(unsigned long swap_offset)
  {
  	struct rb_node **new = &(swsusp_extents.rb_node);
  	struct rb_node *parent = NULL;
  	struct swsusp_extent *ext;
  
  	/* Figure out where to put the new node */
  	while (*new) {
  		ext = container_of(*new, struct swsusp_extent, node);
  		parent = *new;
  		if (swap_offset < ext->start) {
  			/* Try to merge */
  			if (swap_offset == ext->start - 1) {
  				ext->start--;
  				return 0;
  			}
  			new = &((*new)->rb_left);
  		} else if (swap_offset > ext->end) {
  			/* Try to merge */
  			if (swap_offset == ext->end + 1) {
  				ext->end++;
  				return 0;
  			}
  			new = &((*new)->rb_right);
  		} else {
  			/* It already is in the tree */
  			return -EINVAL;
  		}
  	}
  	/* Add the new node and rebalance the tree. */
  	ext = kzalloc(sizeof(struct swsusp_extent), GFP_KERNEL);
  	if (!ext)
  		return -ENOMEM;
  
  	ext->start = swap_offset;
  	ext->end = swap_offset;
  	rb_link_node(&ext->node, parent, new);
  	rb_insert_color(&ext->node, &swsusp_extents);
  	return 0;
  }
  
  /**
   *	alloc_swapdev_block - allocate a swap page and register that it has
   *	been allocated, so that it can be freed in case of an error.
   */
  
  sector_t alloc_swapdev_block(int swap)
  {
  	unsigned long offset;
910321ea8   Hugh Dickins   swap: revert spec...
149
  	offset = swp_offset(get_swap_page_of_type(swap));
0414f2ec0   Nigel Cunningham   PM / Hibernate: M...
150
151
  	if (offset) {
  		if (swsusp_extents_insert(offset))
910321ea8   Hugh Dickins   swap: revert spec...
152
  			swap_free(swp_entry(swap, offset));
0414f2ec0   Nigel Cunningham   PM / Hibernate: M...
153
154
155
156
157
158
159
160
  		else
  			return swapdev_block(swap, offset);
  	}
  	return 0;
  }
  
  /**
   *	free_all_swap_pages - free swap pages allocated for saving image data.
901336733   Cesar Eduardo Barros   PM / Hibernate: F...
161
   *	It also frees the extents used to register which swap entries had been
0414f2ec0   Nigel Cunningham   PM / Hibernate: M...
162
163
164
165
166
167
168
169
170
171
172
173
174
175
   *	allocated.
   */
  
  void free_all_swap_pages(int swap)
  {
  	struct rb_node *node;
  
  	while ((node = swsusp_extents.rb_node)) {
  		struct swsusp_extent *ext;
  		unsigned long offset;
  
  		ext = container_of(node, struct swsusp_extent, node);
  		rb_erase(node, &swsusp_extents);
  		for (offset = ext->start; offset <= ext->end; offset++)
910321ea8   Hugh Dickins   swap: revert spec...
176
  			swap_free(swp_entry(swap, offset));
0414f2ec0   Nigel Cunningham   PM / Hibernate: M...
177
178
179
180
181
182
183
184
185
  
  		kfree(ext);
  	}
  }
  
  int swsusp_swap_in_use(void)
  {
  	return (swsusp_extents.rb_node != NULL);
  }
61159a314   Rafael J. Wysocki   [PATCH] swsusp: s...
186
  /*
3fc6b34f4   Rafael J. Wysocki   [PATCH] swsusp: r...
187
   * General things
61159a314   Rafael J. Wysocki   [PATCH] swsusp: s...
188
189
190
   */
  
  static unsigned short root_swap = 0xffff;
8a0d613fa   Jiri Slaby   PM / Hibernate: S...
191
  struct block_device *hib_resume_bdev;
3fc6b34f4   Rafael J. Wysocki   [PATCH] swsusp: r...
192

3fc6b34f4   Rafael J. Wysocki   [PATCH] swsusp: r...
193
194
195
  /*
   * Saving part
   */
61159a314   Rafael J. Wysocki   [PATCH] swsusp: s...
196

51fb352b2   Jiri Slaby   PM / Hibernate: M...
197
  static int mark_swapfiles(struct swap_map_handle *handle, unsigned int flags)
61159a314   Rafael J. Wysocki   [PATCH] swsusp: s...
198
199
  {
  	int error;
8a0d613fa   Jiri Slaby   PM / Hibernate: S...
200
  	hib_bio_read_page(swsusp_resume_block, swsusp_header, NULL);
1b29c1643   Vivek Goyal   [PATCH] x86-64: d...
201
202
203
  	if (!memcmp("SWAP-SPACE",swsusp_header->sig, 10) ||
  	    !memcmp("SWAPSPACE2",swsusp_header->sig, 10)) {
  		memcpy(swsusp_header->orig_sig,swsusp_header->sig, 10);
3624eb04c   Rafael J. Wysocki   PM / Hibernate: M...
204
  		memcpy(swsusp_header->sig, HIBERNATE_SIG, 10);
51fb352b2   Jiri Slaby   PM / Hibernate: M...
205
  		swsusp_header->image = handle->first_sector;
a634cc101   Rafael J. Wysocki   swsusp: introduce...
206
  		swsusp_header->flags = flags;
081a9d043   Bojan Smojver   PM / Hibernate: I...
207
208
  		if (flags & SF_CRC32_MODE)
  			swsusp_header->crc32 = handle->crc32;
8a0d613fa   Jiri Slaby   PM / Hibernate: S...
209
  		error = hib_bio_write_page(swsusp_resume_block,
1b29c1643   Vivek Goyal   [PATCH] x86-64: d...
210
  					swsusp_header, NULL);
61159a314   Rafael J. Wysocki   [PATCH] swsusp: s...
211
  	} else {
23976728a   Rafael J. Wysocki   Hibernation: Upda...
212
213
  		printk(KERN_ERR "PM: Swap header not found!
  ");
61159a314   Rafael J. Wysocki   [PATCH] swsusp: s...
214
215
216
217
218
219
220
221
  		error = -ENODEV;
  	}
  	return error;
  }
  
  /**
   *	swsusp_swap_check - check if the resume device is a swap device
   *	and get its index (if so)
6f612af57   Jiri Slaby   PM / Hibernate: G...
222
223
   *
   *	This is called before saving image
61159a314   Rafael J. Wysocki   [PATCH] swsusp: s...
224
   */
6f612af57   Jiri Slaby   PM / Hibernate: G...
225
  static int swsusp_swap_check(void)
61159a314   Rafael J. Wysocki   [PATCH] swsusp: s...
226
  {
3aef83e0e   Rafael J. Wysocki   [PATCH] swsusp: u...
227
  	int res;
7bf236874   Rafael J. Wysocki   [PATCH] swsusp: D...
228
  	res = swap_type_of(swsusp_resume_device, swsusp_resume_block,
8a0d613fa   Jiri Slaby   PM / Hibernate: S...
229
  			&hib_resume_bdev);
3aef83e0e   Rafael J. Wysocki   [PATCH] swsusp: u...
230
231
232
233
  	if (res < 0)
  		return res;
  
  	root_swap = res;
e525fd89d   Tejun Heo   block: make blkde...
234
  	res = blkdev_get(hib_resume_bdev, FMODE_WRITE, NULL);
7bf236874   Rafael J. Wysocki   [PATCH] swsusp: D...
235
236
  	if (res)
  		return res;
3aef83e0e   Rafael J. Wysocki   [PATCH] swsusp: u...
237

8a0d613fa   Jiri Slaby   PM / Hibernate: S...
238
  	res = set_blocksize(hib_resume_bdev, PAGE_SIZE);
3aef83e0e   Rafael J. Wysocki   [PATCH] swsusp: u...
239
  	if (res < 0)
8a0d613fa   Jiri Slaby   PM / Hibernate: S...
240
  		blkdev_put(hib_resume_bdev, FMODE_WRITE);
61159a314   Rafael J. Wysocki   [PATCH] swsusp: s...
241

61159a314   Rafael J. Wysocki   [PATCH] swsusp: s...
242
243
244
245
246
247
248
  	return res;
  }
  
  /**
   *	write_page - Write one page to given swap location.
   *	@buf:		Address we're writing.
   *	@offset:	Offset of the swap page we're writing to.
ab9541603   Andrew Morton   [PATCH] swsusp: w...
249
   *	@bio_chain:	Link the next write BIO here
61159a314   Rafael J. Wysocki   [PATCH] swsusp: s...
250
   */
3aef83e0e   Rafael J. Wysocki   [PATCH] swsusp: u...
251
  static int write_page(void *buf, sector_t offset, struct bio **bio_chain)
61159a314   Rafael J. Wysocki   [PATCH] swsusp: s...
252
  {
3aef83e0e   Rafael J. Wysocki   [PATCH] swsusp: u...
253
  	void *src;
081a9d043   Bojan Smojver   PM / Hibernate: I...
254
  	int ret;
3aef83e0e   Rafael J. Wysocki   [PATCH] swsusp: u...
255
256
257
258
259
  
  	if (!offset)
  		return -ENOSPC;
  
  	if (bio_chain) {
859491218   Rafael J. Wysocki   [PATCH] swsusp: u...
260
  		src = (void *)__get_free_page(__GFP_WAIT | __GFP_HIGH);
3aef83e0e   Rafael J. Wysocki   [PATCH] swsusp: u...
261
  		if (src) {
3ecb01df3   Jan Beulich   use clear_page()/...
262
  			copy_page(src, buf);
3aef83e0e   Rafael J. Wysocki   [PATCH] swsusp: u...
263
  		} else {
081a9d043   Bojan Smojver   PM / Hibernate: I...
264
265
266
267
268
269
270
271
272
273
274
  			ret = hib_wait_on_bio_chain(bio_chain); /* Free pages */
  			if (ret)
  				return ret;
  			src = (void *)__get_free_page(__GFP_WAIT | __GFP_HIGH);
  			if (src) {
  				copy_page(src, buf);
  			} else {
  				WARN_ON_ONCE(1);
  				bio_chain = NULL;	/* Go synchronous */
  				src = buf;
  			}
ab9541603   Andrew Morton   [PATCH] swsusp: w...
275
  		}
3aef83e0e   Rafael J. Wysocki   [PATCH] swsusp: u...
276
277
  	} else {
  		src = buf;
61159a314   Rafael J. Wysocki   [PATCH] swsusp: s...
278
  	}
8a0d613fa   Jiri Slaby   PM / Hibernate: S...
279
  	return hib_bio_write_page(offset, src, bio_chain);
61159a314   Rafael J. Wysocki   [PATCH] swsusp: s...
280
  }
61159a314   Rafael J. Wysocki   [PATCH] swsusp: s...
281
282
283
284
285
  static void release_swap_writer(struct swap_map_handle *handle)
  {
  	if (handle->cur)
  		free_page((unsigned long)handle->cur);
  	handle->cur = NULL;
61159a314   Rafael J. Wysocki   [PATCH] swsusp: s...
286
287
288
289
  }
  
  static int get_swap_writer(struct swap_map_handle *handle)
  {
6f612af57   Jiri Slaby   PM / Hibernate: G...
290
291
292
293
294
295
296
297
298
299
  	int ret;
  
  	ret = swsusp_swap_check();
  	if (ret) {
  		if (ret != -ENOSPC)
  			printk(KERN_ERR "PM: Cannot find swap device, try "
  					"swapon -a.
  ");
  		return ret;
  	}
61159a314   Rafael J. Wysocki   [PATCH] swsusp: s...
300
  	handle->cur = (struct swap_map_page *)get_zeroed_page(GFP_KERNEL);
6f612af57   Jiri Slaby   PM / Hibernate: G...
301
302
303
304
  	if (!handle->cur) {
  		ret = -ENOMEM;
  		goto err_close;
  	}
d1d241cc2   Rafael J. Wysocki   swsusp: use rbtre...
305
  	handle->cur_swap = alloc_swapdev_block(root_swap);
61159a314   Rafael J. Wysocki   [PATCH] swsusp: s...
306
  	if (!handle->cur_swap) {
6f612af57   Jiri Slaby   PM / Hibernate: G...
307
308
  		ret = -ENOSPC;
  		goto err_rel;
61159a314   Rafael J. Wysocki   [PATCH] swsusp: s...
309
310
  	}
  	handle->k = 0;
081a9d043   Bojan Smojver   PM / Hibernate: I...
311
312
  	handle->nr_free_pages = nr_free_pages() >> 1;
  	handle->written = 0;
51fb352b2   Jiri Slaby   PM / Hibernate: M...
313
  	handle->first_sector = handle->cur_swap;
61159a314   Rafael J. Wysocki   [PATCH] swsusp: s...
314
  	return 0;
6f612af57   Jiri Slaby   PM / Hibernate: G...
315
316
317
318
319
  err_rel:
  	release_swap_writer(handle);
  err_close:
  	swsusp_close(FMODE_WRITE);
  	return ret;
61159a314   Rafael J. Wysocki   [PATCH] swsusp: s...
320
  }
ab9541603   Andrew Morton   [PATCH] swsusp: w...
321
322
323
324
  static int swap_write_page(struct swap_map_handle *handle, void *buf,
  				struct bio **bio_chain)
  {
  	int error = 0;
3aef83e0e   Rafael J. Wysocki   [PATCH] swsusp: u...
325
  	sector_t offset;
61159a314   Rafael J. Wysocki   [PATCH] swsusp: s...
326
327
328
  
  	if (!handle->cur)
  		return -EINVAL;
d1d241cc2   Rafael J. Wysocki   swsusp: use rbtre...
329
  	offset = alloc_swapdev_block(root_swap);
ab9541603   Andrew Morton   [PATCH] swsusp: w...
330
  	error = write_page(buf, offset, bio_chain);
61159a314   Rafael J. Wysocki   [PATCH] swsusp: s...
331
332
333
334
  	if (error)
  		return error;
  	handle->cur->entries[handle->k++] = offset;
  	if (handle->k >= MAP_PAGE_ENTRIES) {
d1d241cc2   Rafael J. Wysocki   swsusp: use rbtre...
335
  		offset = alloc_swapdev_block(root_swap);
61159a314   Rafael J. Wysocki   [PATCH] swsusp: s...
336
337
338
  		if (!offset)
  			return -ENOSPC;
  		handle->cur->next_swap = offset;
081a9d043   Bojan Smojver   PM / Hibernate: I...
339
  		error = write_page(handle->cur, handle->cur_swap, bio_chain);
61159a314   Rafael J. Wysocki   [PATCH] swsusp: s...
340
  		if (error)
ab9541603   Andrew Morton   [PATCH] swsusp: w...
341
  			goto out;
3ecb01df3   Jan Beulich   use clear_page()/...
342
  		clear_page(handle->cur);
61159a314   Rafael J. Wysocki   [PATCH] swsusp: s...
343
344
345
  		handle->cur_swap = offset;
  		handle->k = 0;
  	}
081a9d043   Bojan Smojver   PM / Hibernate: I...
346
347
348
349
350
351
  	if (bio_chain && ++handle->written > handle->nr_free_pages) {
  		error = hib_wait_on_bio_chain(bio_chain);
  		if (error)
  			goto out;
  		handle->written = 0;
  	}
59a493350   Rafael J. Wysocki   [PATCH] swsusp: F...
352
   out:
ab9541603   Andrew Morton   [PATCH] swsusp: w...
353
  	return error;
61159a314   Rafael J. Wysocki   [PATCH] swsusp: s...
354
355
356
357
358
  }
  
  static int flush_swap_writer(struct swap_map_handle *handle)
  {
  	if (handle->cur && handle->cur_swap)
ab9541603   Andrew Morton   [PATCH] swsusp: w...
359
  		return write_page(handle->cur, handle->cur_swap, NULL);
61159a314   Rafael J. Wysocki   [PATCH] swsusp: s...
360
361
362
  	else
  		return -EINVAL;
  }
6f612af57   Jiri Slaby   PM / Hibernate: G...
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
  static int swap_writer_finish(struct swap_map_handle *handle,
  		unsigned int flags, int error)
  {
  	if (!error) {
  		flush_swap_writer(handle);
  		printk(KERN_INFO "PM: S");
  		error = mark_swapfiles(handle, flags);
  		printk("|
  ");
  	}
  
  	if (error)
  		free_all_swap_pages(root_swap);
  	release_swap_writer(handle);
  	swsusp_close(FMODE_WRITE);
  
  	return error;
  }
f996fc967   Bojan Smojver   PM / Hibernate: C...
381
382
383
384
385
386
387
388
389
390
391
  /* We need to remember how much compressed data we need to read. */
  #define LZO_HEADER	sizeof(size_t)
  
  /* Number of pages/bytes we'll compress at one time. */
  #define LZO_UNC_PAGES	32
  #define LZO_UNC_SIZE	(LZO_UNC_PAGES * PAGE_SIZE)
  
  /* Number of pages/bytes we need for compressed data (worst case). */
  #define LZO_CMP_PAGES	DIV_ROUND_UP(lzo1x_worst_compress(LZO_UNC_SIZE) + \
  			             LZO_HEADER, PAGE_SIZE)
  #define LZO_CMP_SIZE	(LZO_CMP_PAGES * PAGE_SIZE)
081a9d043   Bojan Smojver   PM / Hibernate: I...
392
393
394
395
396
  /* Maximum number of threads for compression/decompression. */
  #define LZO_THREADS	3
  
  /* Maximum number of pages for read buffering. */
  #define LZO_READ_PAGES	(MAP_PAGE_ENTRIES * 8)
61159a314   Rafael J. Wysocki   [PATCH] swsusp: s...
397
398
399
400
401
402
  /**
   *	save_image - save the suspend image data
   */
  
  static int save_image(struct swap_map_handle *handle,
                        struct snapshot_handle *snapshot,
3a4f7577c   Andrew Morton   [PATCH] swsusp: a...
403
                        unsigned int nr_to_write)
61159a314   Rafael J. Wysocki   [PATCH] swsusp: s...
404
405
406
  {
  	unsigned int m;
  	int ret;
3a4f7577c   Andrew Morton   [PATCH] swsusp: a...
407
  	int nr_pages;
ab9541603   Andrew Morton   [PATCH] swsusp: w...
408
409
  	int err2;
  	struct bio *bio;
3a4f7577c   Andrew Morton   [PATCH] swsusp: a...
410
411
  	struct timeval start;
  	struct timeval stop;
61159a314   Rafael J. Wysocki   [PATCH] swsusp: s...
412

23976728a   Rafael J. Wysocki   Hibernation: Upda...
413
414
  	printk(KERN_INFO "PM: Saving image data pages (%u pages) ...     ",
  		nr_to_write);
3a4f7577c   Andrew Morton   [PATCH] swsusp: a...
415
  	m = nr_to_write / 100;
61159a314   Rafael J. Wysocki   [PATCH] swsusp: s...
416
417
418
  	if (!m)
  		m = 1;
  	nr_pages = 0;
ab9541603   Andrew Morton   [PATCH] swsusp: w...
419
  	bio = NULL;
3a4f7577c   Andrew Morton   [PATCH] swsusp: a...
420
  	do_gettimeofday(&start);
4ff277f9e   Jiri Slaby   PM / Hibernate: F...
421
  	while (1) {
d3c1b24c5   Jiri Slaby   PM / Hibernate: S...
422
  		ret = snapshot_read_next(snapshot);
4ff277f9e   Jiri Slaby   PM / Hibernate: F...
423
424
425
426
427
428
  		if (ret <= 0)
  			break;
  		ret = swap_write_page(handle, data_of(*snapshot), &bio);
  		if (ret)
  			break;
  		if (!(nr_pages % m))
66d0ae4d6   Jiri Slaby   PM / Hibernate: S...
429
  			printk(KERN_CONT "\b\b\b\b%3d%%", nr_pages / m);
4ff277f9e   Jiri Slaby   PM / Hibernate: F...
430
431
  		nr_pages++;
  	}
8a0d613fa   Jiri Slaby   PM / Hibernate: S...
432
  	err2 = hib_wait_on_bio_chain(&bio);
3a4f7577c   Andrew Morton   [PATCH] swsusp: a...
433
  	do_gettimeofday(&stop);
4ff277f9e   Jiri Slaby   PM / Hibernate: F...
434
435
436
  	if (!ret)
  		ret = err2;
  	if (!ret)
66d0ae4d6   Jiri Slaby   PM / Hibernate: S...
437
438
  		printk(KERN_CONT "\b\b\b\bdone
  ");
4ff277f9e   Jiri Slaby   PM / Hibernate: F...
439
  	else
66d0ae4d6   Jiri Slaby   PM / Hibernate: S...
440
441
  		printk(KERN_CONT "
  ");
0d3a9abe8   Rafael J. Wysocki   [PATCH] swsusp: M...
442
  	swsusp_show_speed(&start, &stop, nr_to_write, "Wrote");
4ff277f9e   Jiri Slaby   PM / Hibernate: F...
443
  	return ret;
61159a314   Rafael J. Wysocki   [PATCH] swsusp: s...
444
  }
081a9d043   Bojan Smojver   PM / Hibernate: I...
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
/**
 * Structure used for CRC32.
 *
 * Shared with crc32_threadfn: the ready/stop flags together with the
 * go/done wait queues form a simple producer/consumer handshake, while
 * unc[] and unc_len[] point into the per-thread compression buffers.
 */
struct crc_data {
	struct task_struct *thr;                  /* thread */
	atomic_t ready;                           /* ready to start flag */
	atomic_t stop;                            /* ready to stop flag */
	unsigned run_threads;                     /* nr current threads */
	wait_queue_head_t go;                     /* start crc update */
	wait_queue_head_t done;                   /* crc update done */
	u32 *crc32;                               /* points to handle's crc32 */
	size_t *unc_len[LZO_THREADS];             /* uncompressed lengths */
	unsigned char *unc[LZO_THREADS];          /* uncompressed data */
};
  
/**
 * CRC32 update function that runs in its own thread.
 *
 * Folds the uncompressed buffers of all currently running compression
 * threads into *d->crc32, one batch per ready/stop handshake cycle.
 */
static int crc32_threadfn(void *data)
{
	struct crc_data *d = data;
	unsigned i;

	while (1) {
		/* Sleep until work is posted or the thread is asked to stop. */
		wait_event(d->go, atomic_read(&d->ready) ||
		                  kthread_should_stop());
		if (kthread_should_stop()) {
			d->thr = NULL;
			/* Signal completion so a waiter is not left hanging. */
			atomic_set(&d->stop, 1);
			wake_up(&d->done);
			break;
		}
		atomic_set(&d->ready, 0);

		/* Fold each worker's uncompressed buffer into the CRC. */
		for (i = 0; i < d->run_threads; i++)
			*d->crc32 = crc32_le(*d->crc32,
			                     d->unc[i], *d->unc_len[i]);
		/* Publish the result before waking the consumer. */
		atomic_set(&d->stop, 1);
		wake_up(&d->done);
	}
	return 0;
}
/**
 * Structure used for LZO data compression.
 *
 * One instance per compression thread; lzo_compress_threadfn compresses
 * unc[0..unc_len) into cmp[] (leaving LZO_HEADER bytes for the length),
 * using wrk[] as the LZO scratch workspace.
 */
struct cmp_data {
	struct task_struct *thr;                  /* thread */
	atomic_t ready;                           /* ready to start flag */
	atomic_t stop;                            /* ready to stop flag */
	int ret;                                  /* return code */
	wait_queue_head_t go;                     /* start compression */
	wait_queue_head_t done;                   /* compression done */
	size_t unc_len;                           /* uncompressed length */
	size_t cmp_len;                           /* compressed length */
	unsigned char unc[LZO_UNC_SIZE];          /* uncompressed buffer */
	unsigned char cmp[LZO_CMP_SIZE];          /* compressed buffer */
	unsigned char wrk[LZO1X_1_MEM_COMPRESS];  /* compression workspace */
};
  
/**
 * Compression function that runs in its own thread.
 *
 * Compresses one unc[] buffer per ready/stop handshake cycle, storing
 * the LZO return code in d->ret and the output length in d->cmp_len.
 */
static int lzo_compress_threadfn(void *data)
{
	struct cmp_data *d = data;

	while (1) {
		/* Sleep until work is posted or the thread is asked to stop. */
		wait_event(d->go, atomic_read(&d->ready) ||
		                  kthread_should_stop());
		if (kthread_should_stop()) {
			d->thr = NULL;
			d->ret = -1;
			/* Signal completion so a waiter is not left hanging. */
			atomic_set(&d->stop, 1);
			wake_up(&d->done);
			break;
		}
		atomic_set(&d->ready, 0);

		/* Compress past the header slot that will hold cmp_len. */
		d->ret = lzo1x_1_compress(d->unc, d->unc_len,
		                          d->cmp + LZO_HEADER, &d->cmp_len,
		                          d->wrk);
		/* Publish the result before waking the consumer. */
		atomic_set(&d->stop, 1);
		wake_up(&d->done);
	}
	return 0;
}
f996fc967   Bojan Smojver   PM / Hibernate: C...
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
  
  /**
   * save_image_lzo - Save the suspend image data compressed with LZO.
   * @handle: Swap mam handle to use for saving the image.
   * @snapshot: Image to read data from.
   * @nr_to_write: Number of pages to save.
   */
  static int save_image_lzo(struct swap_map_handle *handle,
                            struct snapshot_handle *snapshot,
                            unsigned int nr_to_write)
  {
  	unsigned int m;
  	int ret = 0;
  	int nr_pages;
  	int err2;
  	struct bio *bio;
  	struct timeval start;
  	struct timeval stop;
081a9d043   Bojan Smojver   PM / Hibernate: I...
549
550
551
552
553
554
555
556
557
558
559
560
  	size_t off;
  	unsigned thr, run_threads, nr_threads;
  	unsigned char *page = NULL;
  	struct cmp_data *data = NULL;
  	struct crc_data *crc = NULL;
  
  	/*
  	 * We'll limit the number of threads for compression to limit memory
  	 * footprint.
  	 */
  	nr_threads = num_online_cpus() - 1;
  	nr_threads = clamp_val(nr_threads, 1, LZO_THREADS);
f996fc967   Bojan Smojver   PM / Hibernate: C...
561
562
563
564
565
  
  	page = (void *)__get_free_page(__GFP_WAIT | __GFP_HIGH);
  	if (!page) {
  		printk(KERN_ERR "PM: Failed to allocate LZO page
  ");
081a9d043   Bojan Smojver   PM / Hibernate: I...
566
567
  		ret = -ENOMEM;
  		goto out_clean;
f996fc967   Bojan Smojver   PM / Hibernate: C...
568
  	}
081a9d043   Bojan Smojver   PM / Hibernate: I...
569
570
571
572
573
574
  	data = vmalloc(sizeof(*data) * nr_threads);
  	if (!data) {
  		printk(KERN_ERR "PM: Failed to allocate LZO data
  ");
  		ret = -ENOMEM;
  		goto out_clean;
f996fc967   Bojan Smojver   PM / Hibernate: C...
575
  	}
081a9d043   Bojan Smojver   PM / Hibernate: I...
576
577
  	for (thr = 0; thr < nr_threads; thr++)
  		memset(&data[thr], 0, offsetof(struct cmp_data, go));
f996fc967   Bojan Smojver   PM / Hibernate: C...
578

081a9d043   Bojan Smojver   PM / Hibernate: I...
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
  	crc = kmalloc(sizeof(*crc), GFP_KERNEL);
  	if (!crc) {
  		printk(KERN_ERR "PM: Failed to allocate crc
  ");
  		ret = -ENOMEM;
  		goto out_clean;
  	}
  	memset(crc, 0, offsetof(struct crc_data, go));
  
  	/*
  	 * Start the compression threads.
  	 */
  	for (thr = 0; thr < nr_threads; thr++) {
  		init_waitqueue_head(&data[thr].go);
  		init_waitqueue_head(&data[thr].done);
  
  		data[thr].thr = kthread_run(lzo_compress_threadfn,
  		                            &data[thr],
  		                            "image_compress/%u", thr);
  		if (IS_ERR(data[thr].thr)) {
  			data[thr].thr = NULL;
  			printk(KERN_ERR
  			       "PM: Cannot start compression threads
  ");
  			ret = -ENOMEM;
  			goto out_clean;
  		}
f996fc967   Bojan Smojver   PM / Hibernate: C...
606
  	}
081a9d043   Bojan Smojver   PM / Hibernate: I...
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
  	/*
  	 * Adjust number of free pages after all allocations have been done.
  	 * We don't want to run out of pages when writing.
  	 */
  	handle->nr_free_pages = nr_free_pages() >> 1;
  
  	/*
  	 * Start the CRC32 thread.
  	 */
  	init_waitqueue_head(&crc->go);
  	init_waitqueue_head(&crc->done);
  
  	handle->crc32 = 0;
  	crc->crc32 = &handle->crc32;
  	for (thr = 0; thr < nr_threads; thr++) {
  		crc->unc[thr] = data[thr].unc;
  		crc->unc_len[thr] = &data[thr].unc_len;
  	}
  
  	crc->thr = kthread_run(crc32_threadfn, crc, "image_crc32");
  	if (IS_ERR(crc->thr)) {
  		crc->thr = NULL;
  		printk(KERN_ERR "PM: Cannot start CRC32 thread
  ");
  		ret = -ENOMEM;
  		goto out_clean;
f996fc967   Bojan Smojver   PM / Hibernate: C...
633
634
635
  	}
  
  	printk(KERN_INFO
081a9d043   Bojan Smojver   PM / Hibernate: I...
636
637
  		"PM: Using %u thread(s) for compression.
  "
f996fc967   Bojan Smojver   PM / Hibernate: C...
638
  		"PM: Compressing and saving image data (%u pages) ...     ",
081a9d043   Bojan Smojver   PM / Hibernate: I...
639
  		nr_threads, nr_to_write);
f996fc967   Bojan Smojver   PM / Hibernate: C...
640
641
642
643
644
645
646
  	m = nr_to_write / 100;
  	if (!m)
  		m = 1;
  	nr_pages = 0;
  	bio = NULL;
  	do_gettimeofday(&start);
  	for (;;) {
081a9d043   Bojan Smojver   PM / Hibernate: I...
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
  		for (thr = 0; thr < nr_threads; thr++) {
  			for (off = 0; off < LZO_UNC_SIZE; off += PAGE_SIZE) {
  				ret = snapshot_read_next(snapshot);
  				if (ret < 0)
  					goto out_finish;
  
  				if (!ret)
  					break;
  
  				memcpy(data[thr].unc + off,
  				       data_of(*snapshot), PAGE_SIZE);
  
  				if (!(nr_pages % m))
  					printk(KERN_CONT "\b\b\b\b%3d%%",
  				               nr_pages / m);
  				nr_pages++;
  			}
  			if (!off)
f996fc967   Bojan Smojver   PM / Hibernate: C...
665
  				break;
081a9d043   Bojan Smojver   PM / Hibernate: I...
666
  			data[thr].unc_len = off;
f996fc967   Bojan Smojver   PM / Hibernate: C...
667

081a9d043   Bojan Smojver   PM / Hibernate: I...
668
669
  			atomic_set(&data[thr].ready, 1);
  			wake_up(&data[thr].go);
f996fc967   Bojan Smojver   PM / Hibernate: C...
670
  		}
081a9d043   Bojan Smojver   PM / Hibernate: I...
671
  		if (!thr)
f996fc967   Bojan Smojver   PM / Hibernate: C...
672
  			break;
081a9d043   Bojan Smojver   PM / Hibernate: I...
673
674
675
  		crc->run_threads = thr;
  		atomic_set(&crc->ready, 1);
  		wake_up(&crc->go);
f996fc967   Bojan Smojver   PM / Hibernate: C...
676

081a9d043   Bojan Smojver   PM / Hibernate: I...
677
678
679
680
  		for (run_threads = thr, thr = 0; thr < run_threads; thr++) {
  			wait_event(data[thr].done,
  			           atomic_read(&data[thr].stop));
  			atomic_set(&data[thr].stop, 0);
f996fc967   Bojan Smojver   PM / Hibernate: C...
681

081a9d043   Bojan Smojver   PM / Hibernate: I...
682
  			ret = data[thr].ret;
f996fc967   Bojan Smojver   PM / Hibernate: C...
683

081a9d043   Bojan Smojver   PM / Hibernate: I...
684
685
686
687
688
  			if (ret < 0) {
  				printk(KERN_ERR "PM: LZO compression failed
  ");
  				goto out_finish;
  			}
f996fc967   Bojan Smojver   PM / Hibernate: C...
689

081a9d043   Bojan Smojver   PM / Hibernate: I...
690
691
692
693
694
695
696
  			if (unlikely(!data[thr].cmp_len ||
  			             data[thr].cmp_len >
  			             lzo1x_worst_compress(data[thr].unc_len))) {
  				printk(KERN_ERR
  				       "PM: Invalid LZO compressed length
  ");
  				ret = -1;
f996fc967   Bojan Smojver   PM / Hibernate: C...
697
  				goto out_finish;
081a9d043   Bojan Smojver   PM / Hibernate: I...
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
  			}
  
  			*(size_t *)data[thr].cmp = data[thr].cmp_len;
  
  			/*
  			 * Given we are writing one page at a time to disk, we
  			 * copy that much from the buffer, although the last
  			 * bit will likely be smaller than full page. This is
  			 * OK - we saved the length of the compressed data, so
  			 * any garbage at the end will be discarded when we
  			 * read it.
  			 */
  			for (off = 0;
  			     off < LZO_HEADER + data[thr].cmp_len;
  			     off += PAGE_SIZE) {
  				memcpy(page, data[thr].cmp + off, PAGE_SIZE);
  
  				ret = swap_write_page(handle, page, &bio);
  				if (ret)
  					goto out_finish;
  			}
f996fc967   Bojan Smojver   PM / Hibernate: C...
719
  		}
081a9d043   Bojan Smojver   PM / Hibernate: I...
720
721
722
  
  		wait_event(crc->done, atomic_read(&crc->stop));
  		atomic_set(&crc->stop, 0);
f996fc967   Bojan Smojver   PM / Hibernate: C...
723
724
725
726
727
728
729
  	}
  
  out_finish:
  	err2 = hib_wait_on_bio_chain(&bio);
  	do_gettimeofday(&stop);
  	if (!ret)
  		ret = err2;
081a9d043   Bojan Smojver   PM / Hibernate: I...
730
  	if (!ret) {
f996fc967   Bojan Smojver   PM / Hibernate: C...
731
732
  		printk(KERN_CONT "\b\b\b\bdone
  ");
081a9d043   Bojan Smojver   PM / Hibernate: I...
733
  	} else {
f996fc967   Bojan Smojver   PM / Hibernate: C...
734
735
  		printk(KERN_CONT "
  ");
081a9d043   Bojan Smojver   PM / Hibernate: I...
736
  	}
f996fc967   Bojan Smojver   PM / Hibernate: C...
737
  	swsusp_show_speed(&start, &stop, nr_to_write, "Wrote");
081a9d043   Bojan Smojver   PM / Hibernate: I...
738
739
740
741
742
743
744
745
746
747
748
749
750
  out_clean:
  	if (crc) {
  		if (crc->thr)
  			kthread_stop(crc->thr);
  		kfree(crc);
  	}
  	if (data) {
  		for (thr = 0; thr < nr_threads; thr++)
  			if (data[thr].thr)
  				kthread_stop(data[thr].thr);
  		vfree(data);
  	}
  	if (page) free_page((unsigned long)page);
f996fc967   Bojan Smojver   PM / Hibernate: C...
751
752
753
  
  	return ret;
  }
61159a314   Rafael J. Wysocki   [PATCH] swsusp: s...
754
755
756
757
758
759
/**
 *	enough_swap - Make sure we have enough swap to save the image.
 *
 *	Returns TRUE or FALSE after checking the total amount of swap
 *	space available from the resume partition.
 *
 *	Note: @flags is currently unused here; the caller only invokes this
 *	check for uncompressed images (SF_NOCOMPRESS_MODE).
 */
static int enough_swap(unsigned int nr_pages, unsigned int flags)
{
	unsigned int free_swap = count_swap_pages(root_swap, 1);
	unsigned int required;

	pr_debug("PM: Free swap pages: %u\n", free_swap);

	/* Reserve room for the I/O metadata pages on top of the image. */
	required = PAGES_FOR_IO + nr_pages;
	return free_swap > required;
}
  
/**
 *	swsusp_write - Write entire image and metadata.
 *	@flags: flags to pass to the "boot" kernel in the image header
 *
 *	It is important _NOT_ to umount filesystems at this point. We want
 *	them synced (in case something goes wrong) but we DO not want to mark
 *	filesystem clean: it is not. (And it does not matter, if we resume
 *	correctly, we'll mark system clean, anyway.)
 */
int swsusp_write(unsigned int flags)
{
	struct swap_map_handle handle;
	struct snapshot_handle snapshot;
	struct swsusp_info *header;
	unsigned long pages;
	int error;

	pages = snapshot_get_image_size();
	error = get_swap_writer(&handle);
	if (error) {
		printk(KERN_ERR "PM: Cannot get swap writer\n");
		return error;
	}
	/*
	 * The swap-space check is only done for uncompressed images; for
	 * compressed ones the final size is not known in advance.
	 */
	if (flags & SF_NOCOMPRESS_MODE) {
		if (!enough_swap(pages, flags)) {
			printk(KERN_ERR "PM: Not enough free swap\n");
			error = -ENOSPC;
			goto out_finish;
		}
	}
	memset(&snapshot, 0, sizeof(struct snapshot_handle));
	/* The first page produced by the snapshot is the image header. */
	error = snapshot_read_next(&snapshot);
	if (error < PAGE_SIZE) {
		if (error >= 0)
			error = -EFAULT;
		goto out_finish;
	}
	header = (struct swsusp_info *)data_of(snapshot);
	error = swap_write_page(&handle, header, NULL);
	if (!error) {
		error = (flags & SF_NOCOMPRESS_MODE) ?
			save_image(&handle, &snapshot, pages - 1) :
			save_image_lzo(&handle, &snapshot, pages - 1);
	}
out_finish:
	/* Always flush/close the writer; it folds @error into its result. */
	error = swap_writer_finish(&handle, flags, error);
	return error;
}
61159a314   Rafael J. Wysocki   [PATCH] swsusp: s...
821
822
823
824
825
826
827
/**
 *	The following functions allow us to read data using a swap map
 *	in a file-like way
 */
  
  static void release_swap_reader(struct swap_map_handle *handle)
  {
081a9d043   Bojan Smojver   PM / Hibernate: I...
828
829
830
831
832
833
834
835
836
  	struct swap_map_page_list *tmp;
  
  	while (handle->maps) {
  		if (handle->maps->map)
  			free_page((unsigned long)handle->maps->map);
  		tmp = handle->maps;
  		handle->maps = handle->maps->next;
  		kfree(tmp);
  	}
61159a314   Rafael J. Wysocki   [PATCH] swsusp: s...
837
838
  	handle->cur = NULL;
  }
6f612af57   Jiri Slaby   PM / Hibernate: G...
839
840
  static int get_swap_reader(struct swap_map_handle *handle,
  		unsigned int *flags_p)
61159a314   Rafael J. Wysocki   [PATCH] swsusp: s...
841
842
  {
  	int error;
081a9d043   Bojan Smojver   PM / Hibernate: I...
843
844
  	struct swap_map_page_list *tmp, *last;
  	sector_t offset;
61159a314   Rafael J. Wysocki   [PATCH] swsusp: s...
845

6f612af57   Jiri Slaby   PM / Hibernate: G...
846
847
848
  	*flags_p = swsusp_header->flags;
  
  	if (!swsusp_header->image) /* how can this happen? */
61159a314   Rafael J. Wysocki   [PATCH] swsusp: s...
849
  		return -EINVAL;
3aef83e0e   Rafael J. Wysocki   [PATCH] swsusp: u...
850

081a9d043   Bojan Smojver   PM / Hibernate: I...
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
  	handle->cur = NULL;
  	last = handle->maps = NULL;
  	offset = swsusp_header->image;
  	while (offset) {
  		tmp = kmalloc(sizeof(*handle->maps), GFP_KERNEL);
  		if (!tmp) {
  			release_swap_reader(handle);
  			return -ENOMEM;
  		}
  		memset(tmp, 0, sizeof(*tmp));
  		if (!handle->maps)
  			handle->maps = tmp;
  		if (last)
  			last->next = tmp;
  		last = tmp;
  
  		tmp->map = (struct swap_map_page *)
  		           __get_free_page(__GFP_WAIT | __GFP_HIGH);
  		if (!tmp->map) {
  			release_swap_reader(handle);
  			return -ENOMEM;
  		}
3aef83e0e   Rafael J. Wysocki   [PATCH] swsusp: u...
873

081a9d043   Bojan Smojver   PM / Hibernate: I...
874
875
876
877
878
879
  		error = hib_bio_read_page(offset, tmp->map, NULL);
  		if (error) {
  			release_swap_reader(handle);
  			return error;
  		}
  		offset = tmp->map->next_swap;
61159a314   Rafael J. Wysocki   [PATCH] swsusp: s...
880
881
  	}
  	handle->k = 0;
081a9d043   Bojan Smojver   PM / Hibernate: I...
882
  	handle->cur = handle->maps->map;
61159a314   Rafael J. Wysocki   [PATCH] swsusp: s...
883
884
  	return 0;
  }
546e0d271   Andrew Morton   [PATCH] swsusp: r...
885
886
/*
 * swap_read_page - read the next image data page from swap.
 * @handle:    Swap reader state (current map page and entry index).
 * @buf:       Destination buffer (one page).
 * @bio_chain: If non-NULL, the read is queued asynchronously on this
 *             chain; if NULL, the read is synchronous.
 *
 * Returns 0 on success, -EINVAL if the reader was not initialized,
 * -EFAULT on a zero (end-of-map) entry, or the bio error.
 */
static int swap_read_page(struct swap_map_handle *handle, void *buf,
				struct bio **bio_chain)
{
	sector_t offset;
	int error;
	struct swap_map_page_list *tmp;

	if (!handle->cur)
		return -EINVAL;
	offset = handle->cur->entries[handle->k];
	if (!offset)
		return -EFAULT;
	error = hib_bio_read_page(offset, buf, bio_chain);
	if (error)
		return error;
	/* Exhausted the current map page: free it and advance to the next. */
	if (++handle->k >= MAP_PAGE_ENTRIES) {
		handle->k = 0;
		free_page((unsigned long)handle->maps->map);
		tmp = handle->maps;
		handle->maps = handle->maps->next;
		kfree(tmp);
		if (!handle->maps)
			/* No more map pages: tear everything down. */
			release_swap_reader(handle);
		else
			handle->cur = handle->maps->map;
	}
	return error;
}
6f612af57   Jiri Slaby   PM / Hibernate: G...
913
914
915
916
917
918
  static int swap_reader_finish(struct swap_map_handle *handle)
  {
  	release_swap_reader(handle);
  
  	return 0;
  }
61159a314   Rafael J. Wysocki   [PATCH] swsusp: s...
919
920
921
922
923
924
925
926
/**
 *	load_image - load the image using the swap map handle
 *	@handle and the snapshot handle @snapshot
 *	(assume there are @nr_pages pages to load)
 *
 *	Reads pages asynchronously via a bio chain, waiting for completion
 *	only when the snapshot layer requests a synchronous read
 *	(snapshot->sync_read) and once at the end.  Prints a textual
 *	percentage progress indicator updated every 1% of @nr_to_read.
 */

static int load_image(struct swap_map_handle *handle,
                      struct snapshot_handle *snapshot,
                      unsigned int nr_to_read)
{
	unsigned int m;
	int ret = 0;
	struct timeval start;
	struct timeval stop;
	struct bio *bio;
	int err2;
	unsigned nr_pages;

	printk(KERN_INFO "PM: Loading image data pages (%u pages) ...     ",
		nr_to_read);
	/* m = pages per 1% of progress; at least 1 to avoid div-by-zero. */
	m = nr_to_read / 100;
	if (!m)
		m = 1;
	nr_pages = 0;
	bio = NULL;
	do_gettimeofday(&start);
	for ( ; ; ) {
		ret = snapshot_write_next(snapshot);
		if (ret <= 0)
			break;
		ret = swap_read_page(handle, data_of(*snapshot), &bio);
		if (ret)
			break;
		/* Some snapshot pages must be complete before continuing. */
		if (snapshot->sync_read)
			ret = hib_wait_on_bio_chain(&bio);
		if (ret)
			break;
		if (!(nr_pages % m))
			printk("\b\b\b\b%3d%%", nr_pages / m);
		nr_pages++;
	}
	/* Drain any still-outstanding reads before finalizing. */
	err2 = hib_wait_on_bio_chain(&bio);
	do_gettimeofday(&stop);
	if (!ret)
		ret = err2;
	if (!ret) {
		printk("\b\b\b\bdone\n");
		snapshot_write_finalize(snapshot);
		if (!snapshot_image_loaded(snapshot))
			ret = -ENODATA;
	} else
		printk("\n");
	swsusp_show_speed(&start, &stop, nr_to_read, "Read");
	return ret;
}
  
/**
 * Structure used for LZO data decompression.
 *
 * NOTE: users bulk-clear only the fields before @go with
 * memset(..., offsetof(struct dec_data, go)) — keep the field order
 * (wait queues and buffers last) when modifying this struct.
 */
struct dec_data {
	struct task_struct *thr;                  /* thread */
	atomic_t ready;                           /* ready to start flag */
	atomic_t stop;                            /* ready to stop flag */
	int ret;                                  /* return code */
	wait_queue_head_t go;                     /* start decompression */
	wait_queue_head_t done;                   /* decompression done */
	size_t unc_len;                           /* uncompressed length */
	size_t cmp_len;                           /* compressed length */
	unsigned char unc[LZO_UNC_SIZE];          /* uncompressed buffer */
	unsigned char cmp[LZO_CMP_SIZE];          /* compressed buffer */
};
  
/**
 * Decompression function that runs in its own thread.
 *
 * Protocol: the feeder sets d->cmp/d->cmp_len, sets d->ready and wakes
 * d->go; this thread decompresses into d->unc, then sets d->stop and
 * wakes d->done.  kthread_stop() makes it report -1 and exit.
 */
static int lzo_decompress_threadfn(void *data)
{
	struct dec_data *d = data;

	while (1) {
		wait_event(d->go, atomic_read(&d->ready) ||
		                  kthread_should_stop());
		if (kthread_should_stop()) {
			d->thr = NULL;
			d->ret = -1;
			atomic_set(&d->stop, 1);
			wake_up(&d->done);
			break;
		}
		atomic_set(&d->ready, 0);

		/* unc_len is in/out: capacity in, actual output size out. */
		d->unc_len = LZO_UNC_SIZE;
		d->ret = lzo1x_decompress_safe(d->cmp + LZO_HEADER, d->cmp_len,
		                               d->unc, &d->unc_len);
		atomic_set(&d->stop, 1);
		wake_up(&d->done);
	}
	return 0;
}
a634cc101   Rafael J. Wysocki   swsusp: introduce...
1020
/**
 * load_image_lzo - Load compressed image data and decompress them with LZO.
 * @handle: Swap map handle to use for loading data.
 * @snapshot: Image to copy uncompressed data into.
 * @nr_to_read: Number of pages to load.
 *
 * Pipeline: pages are read asynchronously from swap into a ring buffer
 * (page[]), handed out in LZO_HEADER+cmp_len sized chunks to up to
 * nr_threads decompression threads, then copied into the snapshot while
 * a separate thread folds the uncompressed data into a CRC32 that is
 * checked against the on-disk header at the end.
 *
 * eof states: 0 = more data expected, 1 = reader hit end of data,
 * 2 = end of data AND all outstanding reads have completed.
 */
static int load_image_lzo(struct swap_map_handle *handle,
                          struct snapshot_handle *snapshot,
                          unsigned int nr_to_read)
{
	unsigned int m;
	int ret = 0;
	int eof = 0;
	struct bio *bio;
	struct timeval start;
	struct timeval stop;
	unsigned nr_pages;
	size_t off;
	unsigned i, thr, run_threads, nr_threads;
	/* ring/pg: producer/consumer indices into the page[] ring;
	 * have/want/asked: pages buffered / still wanted / reads in flight. */
	unsigned ring = 0, pg = 0, ring_size = 0,
	         have = 0, want, need, asked = 0;
	unsigned long read_pages;
	unsigned char **page = NULL;
	struct dec_data *data = NULL;
	struct crc_data *crc = NULL;

	/*
	 * We'll limit the number of threads for decompression to limit memory
	 * footprint.
	 */
	nr_threads = num_online_cpus() - 1;
	nr_threads = clamp_val(nr_threads, 1, LZO_THREADS);

	page = vmalloc(sizeof(*page) * LZO_READ_PAGES);
	if (!page) {
		printk(KERN_ERR "PM: Failed to allocate LZO page\n");
		ret = -ENOMEM;
		goto out_clean;
	}

	data = vmalloc(sizeof(*data) * nr_threads);
	if (!data) {
		printk(KERN_ERR "PM: Failed to allocate LZO data\n");
		ret = -ENOMEM;
		goto out_clean;
	}
	/* Clear only the plain fields; wait queues are initialized below. */
	for (thr = 0; thr < nr_threads; thr++)
		memset(&data[thr], 0, offsetof(struct dec_data, go));

	crc = kmalloc(sizeof(*crc), GFP_KERNEL);
	if (!crc) {
		printk(KERN_ERR "PM: Failed to allocate crc\n");
		ret = -ENOMEM;
		goto out_clean;
	}
	memset(crc, 0, offsetof(struct crc_data, go));

	/*
	 * Start the decompression threads.
	 */
	for (thr = 0; thr < nr_threads; thr++) {
		init_waitqueue_head(&data[thr].go);
		init_waitqueue_head(&data[thr].done);

		data[thr].thr = kthread_run(lzo_decompress_threadfn,
		                            &data[thr],
		                            "image_decompress/%u", thr);
		if (IS_ERR(data[thr].thr)) {
			data[thr].thr = NULL;
			printk(KERN_ERR
			       "PM: Cannot start decompression threads\n");
			ret = -ENOMEM;
			goto out_clean;
		}
	}

	/*
	 * Start the CRC32 thread.
	 */
	init_waitqueue_head(&crc->go);
	init_waitqueue_head(&crc->done);

	handle->crc32 = 0;
	crc->crc32 = &handle->crc32;
	/* The CRC thread reads the decompressed buffers in place. */
	for (thr = 0; thr < nr_threads; thr++) {
		crc->unc[thr] = data[thr].unc;
		crc->unc_len[thr] = &data[thr].unc_len;
	}

	crc->thr = kthread_run(crc32_threadfn, crc, "image_crc32");
	if (IS_ERR(crc->thr)) {
		crc->thr = NULL;
		printk(KERN_ERR "PM: Cannot start CRC32 thread\n");
		ret = -ENOMEM;
		goto out_clean;
	}

	/*
	 * Adjust number of pages for read buffering, in case we are short.
	 */
	read_pages = (nr_free_pages() - snapshot_get_image_size()) >> 1;
	read_pages = clamp_val(read_pages, LZO_CMP_PAGES, LZO_READ_PAGES);

	for (i = 0; i < read_pages; i++) {
		/* The first LZO_CMP_PAGES pages are mandatory; the rest are
		 * opportunistic read-ahead and may fail silently. */
		page[i] = (void *)__get_free_page(i < LZO_CMP_PAGES ?
		                                  __GFP_WAIT | __GFP_HIGH :
		                                  __GFP_WAIT);
		if (!page[i]) {
			if (i < LZO_CMP_PAGES) {
				ring_size = i;
				printk(KERN_ERR
				       "PM: Failed to allocate LZO pages\n");
				ret = -ENOMEM;
				goto out_clean;
			} else {
				break;
			}
		}
	}
	want = ring_size = i;

	printk(KERN_INFO
		"PM: Using %u thread(s) for decompression.\n"
		"PM: Loading and decompressing image data (%u pages) ...     ",
		nr_threads, nr_to_read);
	m = nr_to_read / 100;
	if (!m)
		m = 1;
	nr_pages = 0;
	bio = NULL;
	do_gettimeofday(&start);

	ret = snapshot_write_next(snapshot);
	if (ret <= 0)
		goto out_finish;

	for(;;) {
		/* Issue asynchronous reads to refill the ring buffer. */
		for (i = 0; !eof && i < want; i++) {
			ret = swap_read_page(handle, page[ring], &bio);
			if (ret) {
				/*
				 * On real read error, finish. On end of data,
				 * set EOF flag and just exit the read loop.
				 */
				if (handle->cur &&
				    handle->cur->entries[handle->k]) {
					goto out_finish;
				} else {
					eof = 1;
					break;
				}
			}
			if (++ring >= ring_size)
				ring = 0;
		}
		asked += i;
		want -= i;

		/*
		 * We are out of data, wait for some more.
		 */
		if (!have) {
			if (!asked)
				break;

			ret = hib_wait_on_bio_chain(&bio);
			if (ret)
				goto out_finish;
			have += asked;
			asked = 0;
			if (eof)
				eof = 2;
		}

		/* Wait for the previous round's CRC work to finish. */
		if (crc->run_threads) {
			wait_event(crc->done, atomic_read(&crc->stop));
			atomic_set(&crc->stop, 0);
			crc->run_threads = 0;
		}

		/* Dispatch one compressed chunk to each worker thread. */
		for (thr = 0; have && thr < nr_threads; thr++) {
			data[thr].cmp_len = *(size_t *)page[pg];
			if (unlikely(!data[thr].cmp_len ||
			             data[thr].cmp_len >
			             lzo1x_worst_compress(LZO_UNC_SIZE))) {
				printk(KERN_ERR
				       "PM: Invalid LZO compressed length\n");
				ret = -1;
				goto out_finish;
			}

			need = DIV_ROUND_UP(data[thr].cmp_len + LZO_HEADER,
			                    PAGE_SIZE);
			if (need > have) {
				/* Chunk incomplete: fatal only if no more
				 * data can possibly arrive (eof == 2). */
				if (eof > 1) {
					ret = -1;
					goto out_finish;
				}
				break;
			}

			for (off = 0;
			     off < LZO_HEADER + data[thr].cmp_len;
			     off += PAGE_SIZE) {
				memcpy(data[thr].cmp + off,
				       page[pg], PAGE_SIZE);
				have--;
				want++;
				if (++pg >= ring_size)
					pg = 0;
			}

			atomic_set(&data[thr].ready, 1);
			wake_up(&data[thr].go);
		}

		/*
		 * Wait for more data while we are decompressing.
		 */
		if (have < LZO_CMP_PAGES && asked) {
			ret = hib_wait_on_bio_chain(&bio);
			if (ret)
				goto out_finish;
			have += asked;
			asked = 0;
			if (eof)
				eof = 2;
		}

		/* Collect results from the threads started above, in order. */
		for (run_threads = thr, thr = 0; thr < run_threads; thr++) {
			wait_event(data[thr].done,
			           atomic_read(&data[thr].stop));
			atomic_set(&data[thr].stop, 0);

			ret = data[thr].ret;

			if (ret < 0) {
				printk(KERN_ERR
				       "PM: LZO decompression failed\n");
				goto out_finish;
			}

			if (unlikely(!data[thr].unc_len ||
			             data[thr].unc_len > LZO_UNC_SIZE ||
			             data[thr].unc_len & (PAGE_SIZE - 1))) {
				printk(KERN_ERR
				       "PM: Invalid LZO uncompressed length\n");
				ret = -1;
				goto out_finish;
			}

			for (off = 0;
			     off < data[thr].unc_len; off += PAGE_SIZE) {
				memcpy(data_of(*snapshot),
				       data[thr].unc + off, PAGE_SIZE);

				if (!(nr_pages % m))
					printk("\b\b\b\b%3d%%", nr_pages / m);
				nr_pages++;

				ret = snapshot_write_next(snapshot);
				if (ret <= 0) {
					/* Image complete: CRC what we have
					 * consumed so far, then finish. */
					crc->run_threads = thr + 1;
					atomic_set(&crc->ready, 1);
					wake_up(&crc->go);
					goto out_finish;
				}
			}
		}

		/* Kick the CRC thread over this round's output buffers. */
		crc->run_threads = thr;
		atomic_set(&crc->ready, 1);
		wake_up(&crc->go);
	}

out_finish:
	if (crc->run_threads) {
		wait_event(crc->done, atomic_read(&crc->stop));
		atomic_set(&crc->stop, 0);
	}
	do_gettimeofday(&stop);
	if (!ret) {
		printk("\b\b\b\bdone\n");
		snapshot_write_finalize(snapshot);
		if (!snapshot_image_loaded(snapshot))
			ret = -ENODATA;
		if (!ret) {
			/* Verify data integrity if the image was written
			 * with CRC32 support. */
			if (swsusp_header->flags & SF_CRC32_MODE) {
				if(handle->crc32 != swsusp_header->crc32) {
					printk(KERN_ERR
					       "PM: Invalid image CRC32!\n");
					ret = -ENODATA;
				}
			}
		}
	} else
		printk("\n");
	swsusp_show_speed(&start, &stop, nr_to_read, "Read");
out_clean:
	for (i = 0; i < ring_size; i++)
		free_page((unsigned long)page[i]);
	if (crc) {
		if (crc->thr)
			kthread_stop(crc->thr);
		kfree(crc);
	}
	if (data) {
		for (thr = 0; thr < nr_threads; thr++)
			if (data[thr].thr)
				kthread_stop(data[thr].thr);
		vfree(data);
	}
	if (page) vfree(page);

	return ret;
}
  
  /**
a634cc101   Rafael J. Wysocki   swsusp: introduce...
1344
1345
   *	swsusp_read - read the hibernation image.
   *	@flags_p: flags passed by the "frozen" kernel in the image header should
b595076a1   Uwe Kleine-König   tree-wide: fix co...
1346
   *		  be written into this memory location
a634cc101   Rafael J. Wysocki   swsusp: introduce...
1347
1348
1349
   */
  
  int swsusp_read(unsigned int *flags_p)
61159a314   Rafael J. Wysocki   [PATCH] swsusp: s...
1350
1351
1352
1353
1354
  {
  	int error;
  	struct swap_map_handle handle;
  	struct snapshot_handle snapshot;
  	struct swsusp_info *header;
61159a314   Rafael J. Wysocki   [PATCH] swsusp: s...
1355
  	memset(&snapshot, 0, sizeof(struct snapshot_handle));
d3c1b24c5   Jiri Slaby   PM / Hibernate: S...
1356
  	error = snapshot_write_next(&snapshot);
61159a314   Rafael J. Wysocki   [PATCH] swsusp: s...
1357
1358
1359
  	if (error < PAGE_SIZE)
  		return error < 0 ? error : -EFAULT;
  	header = (struct swsusp_info *)data_of(snapshot);
6f612af57   Jiri Slaby   PM / Hibernate: G...
1360
1361
1362
  	error = get_swap_reader(&handle, flags_p);
  	if (error)
  		goto end;
61159a314   Rafael J. Wysocki   [PATCH] swsusp: s...
1363
  	if (!error)
546e0d271   Andrew Morton   [PATCH] swsusp: r...
1364
  		error = swap_read_page(&handle, header, NULL);
f996fc967   Bojan Smojver   PM / Hibernate: C...
1365
1366
1367
1368
1369
  	if (!error) {
  		error = (*flags_p & SF_NOCOMPRESS_MODE) ?
  			load_image(&handle, &snapshot, header->pages - 1) :
  			load_image_lzo(&handle, &snapshot, header->pages - 1);
  	}
6f612af57   Jiri Slaby   PM / Hibernate: G...
1370
1371
  	swap_reader_finish(&handle);
  end:
61159a314   Rafael J. Wysocki   [PATCH] swsusp: s...
1372
  	if (!error)
23976728a   Rafael J. Wysocki   Hibernation: Upda...
1373
1374
  		pr_debug("PM: Image successfully loaded
  ");
61159a314   Rafael J. Wysocki   [PATCH] swsusp: s...
1375
  	else
23976728a   Rafael J. Wysocki   Hibernation: Upda...
1376
1377
  		pr_debug("PM: Error %d resuming
  ", error);
61159a314   Rafael J. Wysocki   [PATCH] swsusp: s...
1378
1379
1380
1381
1382
1383
1384
1385
1386
1387
  	return error;
  }
  
/**
 *      swsusp_check - Check for swsusp signature in the resume device
 *
 *      On success the resume block device is left open (read mode) and
 *      0 is returned; the caller is expected to release it later via
 *      swsusp_close().  On any failure the device is put and a negative
 *      error code is returned.
 */

int swsusp_check(void)
{
	int error;

	hib_resume_bdev = blkdev_get_by_dev(swsusp_resume_device,
					    FMODE_READ, NULL);
	if (!IS_ERR(hib_resume_bdev)) {
		set_blocksize(hib_resume_bdev, PAGE_SIZE);
		clear_page(swsusp_header);
		error = hib_bio_read_page(swsusp_resume_block,
					swsusp_header, NULL);
		if (error)
			goto put;

		/* 10 == sizeof(HIBERNATE_SIG) ("S1SUSPEND" + NUL). */
		if (!memcmp(HIBERNATE_SIG, swsusp_header->sig, 10)) {
			memcpy(swsusp_header->sig, swsusp_header->orig_sig, 10);
			/* Reset swap signature now */
			error = hib_bio_write_page(swsusp_resume_block,
						swsusp_header, NULL);
		} else {
			error = -EINVAL;
		}

put:
		if (error)
			blkdev_put(hib_resume_bdev, FMODE_READ);
		else
			pr_debug("PM: Image signature found, resuming\n");
	} else {
		error = PTR_ERR(hib_resume_bdev);
	}

	if (error)
		pr_debug("PM: Image not found (code %d)\n", error);

	return error;
}
  
/**
 *	swsusp_close - close swap device.
 *	@mode: mode the device was opened with (passed to blkdev_put()).
 *
 *	No-op (with a debug message) if the resume device was never
 *	successfully opened.
 */
void swsusp_close(fmode_t mode)
{
	if (IS_ERR(hib_resume_bdev)) {
		pr_debug("PM: Image device not initialised\n");
		return;
	}
	blkdev_put(hib_resume_bdev, mode);
}
1b29c1643   Vivek Goyal   [PATCH] x86-64: d...
1436
1437
1438
1439
1440
1441
1442
1443
1444
1445
1446
  
  static int swsusp_header_init(void)
  {
  	swsusp_header = (struct swsusp_header*) __get_free_page(GFP_KERNEL);
  	if (!swsusp_header)
  		panic("Could not allocate memory for swsusp_header
  ");
  	return 0;
  }
  
  core_initcall(swsusp_header_init);