Commit ddbe8db147cd0954da93dc6986f4793bcabb3889

Authored by Rafael J. Wysocki

Merge branch 'pm-sleep'

* pm-sleep:
  PM / Hibernate: Touch Soft Lockup Watchdog in rtree_next_node
  PM / Hibernate: Remove the old memory-bitmap implementation
  PM / Hibernate: Iterate over set bits instead of PFNs in swsusp_free()
  PM / Hibernate: Implement position keeping in radix tree
  PM / Hibernate: Add memory_rtree_find_bit function
  PM / Hibernate: Create a Radix-Tree to store memory bitmap
  PM / sleep: fix kernel-doc warnings in drivers/base/power/main.c

Showing 2 changed files

drivers/base/power/main.c
... ... @@ -465,6 +465,7 @@
465 465 * device_resume_noirq - Execute an "early resume" callback for given device.
466 466 * @dev: Device to handle.
467 467 * @state: PM transition of the system being carried out.
  468 + * @async: If true, the device is being resumed asynchronously.
468 469 *
469 470 * The driver of @dev will not receive interrupts while this function is being
470 471 * executed.
... ... @@ -594,6 +595,7 @@
594 595 * device_resume_early - Execute an "early resume" callback for given device.
595 596 * @dev: Device to handle.
596 597 * @state: PM transition of the system being carried out.
  598 + * @async: If true, the device is being resumed asynchronously.
597 599 *
598 600 * Runtime PM is disabled for @dev while this function is being executed.
599 601 */
... ... @@ -1004,6 +1006,7 @@
1004 1006 * device_suspend_noirq - Execute a "late suspend" callback for given device.
1005 1007 * @dev: Device to handle.
1006 1008 * @state: PM transition of the system being carried out.
  1009 + * @async: If true, the device is being suspended asynchronously.
1007 1010 *
1008 1011 * The driver of @dev will not receive interrupts while this function is being
1009 1012 * executed.
... ... @@ -1144,6 +1147,7 @@
1144 1147 * device_suspend_late - Execute a "late suspend" callback for given device.
1145 1148 * @dev: Device to handle.
1146 1149 * @state: PM transition of the system being carried out.
  1150 + * @async: If true, the device is being suspended asynchronously.
1147 1151 *
1148 1152 * Runtime PM is disabled for @dev while this function is being executed.
1149 1153 */
... ... @@ -1298,6 +1302,7 @@
1298 1302 * @dev: Device to suspend.
1299 1303 * @state: PM transition of the system being carried out.
1300 1304 * @cb: Suspend callback to execute.
  1305 + * @info: string description of caller.
1301 1306 */
1302 1307 static int legacy_suspend(struct device *dev, pm_message_t state,
1303 1308 int (*cb)(struct device *dev, pm_message_t state),
kernel/power/snapshot.c
... ... @@ -248,33 +248,61 @@
248 248 * information is stored (in the form of a block of bitmap)
249 249 * It also contains the pfns that correspond to the start and end of
250 250 * the represented memory area.
  251 + *
  252 + * The memory bitmap is organized as a radix tree to guarantee fast random
  253 + * access to the bits. There is one radix tree for each zone (as returned
  254 + * from create_mem_extents).
  255 + *
  256 + * One radix tree is represented by one struct mem_zone_bm_rtree. There are
  257 + * two linked lists for the nodes of the tree, one for the inner nodes and
  258 + * one for the leaf nodes. The linked leaf nodes are used for fast linear
  259 + * access of the memory bitmap.
  260 + *
  261 + * The struct rtree_node represents one node of the radix tree.
251 262 */
252 263  
253 264 #define BM_END_OF_MAP (~0UL)
254 265  
255 266 #define BM_BITS_PER_BLOCK (PAGE_SIZE * BITS_PER_BYTE)
  267 +#define BM_BLOCK_SHIFT (PAGE_SHIFT + 3)
  268 +#define BM_BLOCK_MASK ((1UL << BM_BLOCK_SHIFT) - 1)
256 269  
257   -struct bm_block {
258   - struct list_head hook; /* hook into a list of bitmap blocks */
259   - unsigned long start_pfn; /* pfn represented by the first bit */
260   - unsigned long end_pfn; /* pfn represented by the last bit plus 1 */
261   - unsigned long *data; /* bitmap representing pages */
  270 +/*
  271 + * struct rtree_node is a wrapper struct to link the nodes
  272 + * of the rtree together for easy linear iteration over
  273 + * bits and easy freeing
  274 + */
  275 +struct rtree_node {
  276 + struct list_head list;
  277 + unsigned long *data;
262 278 };
263 279  
264   -static inline unsigned long bm_block_bits(struct bm_block *bb)
265   -{
266   - return bb->end_pfn - bb->start_pfn;
267   -}
  280 +/*
  281 + * struct mem_zone_bm_rtree represents a bitmap used for one
  282 + * populated memory zone.
  283 + */
  284 +struct mem_zone_bm_rtree {
  285 + struct list_head list; /* Link Zones together */
  286 + struct list_head nodes; /* Radix Tree inner nodes */
  287 + struct list_head leaves; /* Radix Tree leaves */
  288 + unsigned long start_pfn; /* Zone start page frame */
  289 + unsigned long end_pfn; /* Zone end page frame + 1 */
  290 + struct rtree_node *rtree; /* Radix Tree Root */
  291 + int levels; /* Number of Radix Tree Levels */
  292 + unsigned int blocks; /* Number of Bitmap Blocks */
  293 +};
268 294  
269 295 /* struct bm_position is used for browsing memory bitmaps */
270 296  
271 297 struct bm_position {
272   - struct bm_block *block;
273   - int bit;
  298 + struct mem_zone_bm_rtree *zone;
  299 + struct rtree_node *node;
  300 + unsigned long node_pfn;
  301 + int node_bit;
274 302 };
275 303  
276 304 struct memory_bitmap {
277   - struct list_head blocks; /* list of bitmap blocks */
  305 + struct list_head zones;
278 306 struct linked_page *p_list; /* list of pages used to store zone
279 307 * bitmap objects and bitmap block
280 308 * objects
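
For orientation, here is a minimal user-space sketch (not part of the patch; PAGE_SHIFT, the zone start and the pfn are made-up values) of how BM_BLOCK_SHIFT and BM_BLOCK_MASK map a pfn offset onto a leaf block and a bit within that block:

#include <stdio.h>

#define PAGE_SHIFT      12                      /* assumed 4 KiB pages */
#define BM_BLOCK_SHIFT  (PAGE_SHIFT + 3)        /* 8 bits per bitmap byte */
#define BM_BLOCK_MASK   ((1UL << BM_BLOCK_SHIFT) - 1)

int main(void)
{
        unsigned long zone_start_pfn = 0x1000;  /* made-up zone start */
        unsigned long pfn = 0x9abc;             /* made-up pfn inside the zone */
        unsigned long off = pfn - zone_start_pfn;

        /* Each leaf node holds one page of bitmap = 32768 bits here. */
        printf("bits per block:  %lu\n", 1UL << BM_BLOCK_SHIFT);
        printf("leaf block nr:   %lu\n", off >> BM_BLOCK_SHIFT);
        printf("bit within leaf: %lu\n", off & BM_BLOCK_MASK);
        return 0;
}
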
... ... @@ -284,38 +312,178 @@
284 312  
285 313 /* Functions that operate on memory bitmaps */
286 314  
287   -static void memory_bm_position_reset(struct memory_bitmap *bm)
  315 +#define BM_ENTRIES_PER_LEVEL (PAGE_SIZE / sizeof(unsigned long))
  316 +#if BITS_PER_LONG == 32
  317 +#define BM_RTREE_LEVEL_SHIFT (PAGE_SHIFT - 2)
  318 +#else
  319 +#define BM_RTREE_LEVEL_SHIFT (PAGE_SHIFT - 3)
  320 +#endif
  321 +#define BM_RTREE_LEVEL_MASK ((1UL << BM_RTREE_LEVEL_SHIFT) - 1)
  322 +
  323 +/*
  324 + * alloc_rtree_node - Allocate a new node and add it to the radix tree.
  325 + *
  326 + * This function is used to allocate inner nodes as well as the
  327 + * leaf nodes of the radix tree. It also adds the node to the
  328 + * corresponding linked list passed in by the *list parameter.
  329 + */
  330 +static struct rtree_node *alloc_rtree_node(gfp_t gfp_mask, int safe_needed,
  331 + struct chain_allocator *ca,
  332 + struct list_head *list)
288 333 {
289   - bm->cur.block = list_entry(bm->blocks.next, struct bm_block, hook);
290   - bm->cur.bit = 0;
291   -}
  334 + struct rtree_node *node;
292 335  
293   -static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free);
  336 + node = chain_alloc(ca, sizeof(struct rtree_node));
  337 + if (!node)
  338 + return NULL;
294 339  
295   -/**
296   - * create_bm_block_list - create a list of block bitmap objects
297   - * @pages - number of pages to track
298   - * @list - list to put the allocated blocks into
299   - * @ca - chain allocator to be used for allocating memory
  340 + node->data = get_image_page(gfp_mask, safe_needed);
  341 + if (!node->data)
  342 + return NULL;
  343 +
  344 + list_add_tail(&node->list, list);
  345 +
  346 + return node;
  347 +}
  348 +
  349 +/*
  350 + * add_rtree_block - Add a new leaf node to the radix tree
  351 + *
  352 + * The leaf nodes need to be allocated in order to keep the leaves
  353 + * linked list in order. This is guaranteed by the zone->blocks
  354 + * counter.
300 355 */
301   -static int create_bm_block_list(unsigned long pages,
302   - struct list_head *list,
303   - struct chain_allocator *ca)
  356 +static int add_rtree_block(struct mem_zone_bm_rtree *zone, gfp_t gfp_mask,
  357 + int safe_needed, struct chain_allocator *ca)
304 358 {
305   - unsigned int nr_blocks = DIV_ROUND_UP(pages, BM_BITS_PER_BLOCK);
  359 + struct rtree_node *node, *block, **dst;
  360 + unsigned int levels_needed, block_nr;
  361 + int i;
306 362  
307   - while (nr_blocks-- > 0) {
308   - struct bm_block *bb;
  363 + block_nr = zone->blocks;
  364 + levels_needed = 0;
309 365  
310   - bb = chain_alloc(ca, sizeof(struct bm_block));
311   - if (!bb)
  366 + /* How many levels do we need for this block nr? */
  367 + while (block_nr) {
  368 + levels_needed += 1;
  369 + block_nr >>= BM_RTREE_LEVEL_SHIFT;
  370 + }
  371 +
  372 + /* Make sure the rtree has enough levels */
  373 + for (i = zone->levels; i < levels_needed; i++) {
  374 + node = alloc_rtree_node(gfp_mask, safe_needed, ca,
  375 + &zone->nodes);
  376 + if (!node)
312 377 return -ENOMEM;
313   - list_add(&bb->hook, list);
  378 +
  379 + node->data[0] = (unsigned long)zone->rtree;
  380 + zone->rtree = node;
  381 + zone->levels += 1;
314 382 }
315 383  
  384 + /* Allocate new block */
  385 + block = alloc_rtree_node(gfp_mask, safe_needed, ca, &zone->leaves);
  386 + if (!block)
  387 + return -ENOMEM;
  388 +
  389 + /* Now walk the rtree to insert the block */
  390 + node = zone->rtree;
  391 + dst = &zone->rtree;
  392 + block_nr = zone->blocks;
  393 + for (i = zone->levels; i > 0; i--) {
  394 + int index;
  395 +
  396 + if (!node) {
  397 + node = alloc_rtree_node(gfp_mask, safe_needed, ca,
  398 + &zone->nodes);
  399 + if (!node)
  400 + return -ENOMEM;
  401 + *dst = node;
  402 + }
  403 +
  404 + index = block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT);
  405 + index &= BM_RTREE_LEVEL_MASK;
  406 + dst = (struct rtree_node **)&((*dst)->data[index]);
  407 + node = *dst;
  408 + }
  409 +
  410 + zone->blocks += 1;
  411 + *dst = block;
  412 +
316 413 return 0;
317 414 }
318 415  
  416 +static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone,
  417 + int clear_nosave_free);
  418 +
  419 +/*
  420 + * create_zone_bm_rtree - create a radix tree for one zone
  421 + *
  422 + * Allocates the mem_zone_bm_rtree structure and initializes it.
  423 + * This function also allocates and builds the radix tree for the
  424 + * zone.
  425 + */
  426 +static struct mem_zone_bm_rtree *
  427 +create_zone_bm_rtree(gfp_t gfp_mask, int safe_needed,
  428 + struct chain_allocator *ca,
  429 + unsigned long start, unsigned long end)
  430 +{
  431 + struct mem_zone_bm_rtree *zone;
  432 + unsigned int i, nr_blocks;
  433 + unsigned long pages;
  434 +
  435 + pages = end - start;
  436 + zone = chain_alloc(ca, sizeof(struct mem_zone_bm_rtree));
  437 + if (!zone)
  438 + return NULL;
  439 +
  440 + INIT_LIST_HEAD(&zone->nodes);
  441 + INIT_LIST_HEAD(&zone->leaves);
  442 + zone->start_pfn = start;
  443 + zone->end_pfn = end;
  444 + nr_blocks = DIV_ROUND_UP(pages, BM_BITS_PER_BLOCK);
  445 +
  446 + for (i = 0; i < nr_blocks; i++) {
  447 + if (add_rtree_block(zone, gfp_mask, safe_needed, ca)) {
  448 + free_zone_bm_rtree(zone, PG_UNSAFE_CLEAR);
  449 + return NULL;
  450 + }
  451 + }
  452 +
  453 + return zone;
  454 +}
  455 +
  456 +/*
  457 + * free_zone_bm_rtree - Free the memory of the radix tree
  458 + *
  459 + * Free all node pages of the radix tree. The mem_zone_bm_rtree
  460 + * structure itself is not freed here nor are the rtree_node
  461 + * structs.
  462 + */
  463 +static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone,
  464 + int clear_nosave_free)
  465 +{
  466 + struct rtree_node *node;
  467 +
  468 + list_for_each_entry(node, &zone->nodes, list)
  469 + free_image_page(node->data, clear_nosave_free);
  470 +
  471 + list_for_each_entry(node, &zone->leaves, list)
  472 + free_image_page(node->data, clear_nosave_free);
  473 +}
  474 +
  475 +static void memory_bm_position_reset(struct memory_bitmap *bm)
  476 +{
  477 + bm->cur.zone = list_entry(bm->zones.next, struct mem_zone_bm_rtree,
  478 + list);
  479 + bm->cur.node = list_entry(bm->cur.zone->leaves.next,
  480 + struct rtree_node, list);
  481 + bm->cur.node_pfn = 0;
  482 + bm->cur.node_bit = 0;
  483 +}
  484 +
  485 +static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free);
  486 +
319 487 struct mem_extent {
320 488 struct list_head hook;
321 489 unsigned long start;
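
To make the insertion walk in add_rtree_block() concrete, here is a hedged user-space sketch of the level arithmetic only (it assumes 64-bit longs and 4 KiB pages, so BM_RTREE_LEVEL_SHIFT is 9 and each inner node has 512 slots; the block number is invented):

#include <stdio.h>

#define PAGE_SHIFT            12                        /* assumed 4 KiB pages */
#define BM_RTREE_LEVEL_SHIFT  (PAGE_SHIFT - 3)          /* 9 -> 512 slots/node */
#define BM_RTREE_LEVEL_MASK   ((1UL << BM_RTREE_LEVEL_SHIFT) - 1)

int main(void)
{
        unsigned long block_nr = 700;   /* made-up leaf index */
        unsigned long tmp = block_nr;
        int levels = 0, i;

        /* Same loop as add_rtree_block(): how many levels does this need? */
        while (tmp) {
                levels++;
                tmp >>= BM_RTREE_LEVEL_SHIFT;
        }
        printf("block %lu needs %d level(s)\n", block_nr, levels);

        /* Top-down walk: peel BM_RTREE_LEVEL_SHIFT bits off per level. */
        for (i = levels; i > 0; i--) {
                unsigned long index;

                index = block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT);
                index &= BM_RTREE_LEVEL_MASK;
                printf("level %d: slot %lu\n", i, index);
        }
        return 0;
}
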
... ... @@ -407,40 +575,22 @@
407 575 int error;
408 576  
409 577 chain_init(&ca, gfp_mask, safe_needed);
410   - INIT_LIST_HEAD(&bm->blocks);
  578 + INIT_LIST_HEAD(&bm->zones);
411 579  
412 580 error = create_mem_extents(&mem_extents, gfp_mask);
413 581 if (error)
414 582 return error;
415 583  
416 584 list_for_each_entry(ext, &mem_extents, hook) {
417   - struct bm_block *bb;
418   - unsigned long pfn = ext->start;
419   - unsigned long pages = ext->end - ext->start;
  585 + struct mem_zone_bm_rtree *zone;
420 586  
421   - bb = list_entry(bm->blocks.prev, struct bm_block, hook);
422   -
423   - error = create_bm_block_list(pages, bm->blocks.prev, &ca);
424   - if (error)
  587 + zone = create_zone_bm_rtree(gfp_mask, safe_needed, &ca,
  588 + ext->start, ext->end);
  589 + if (!zone) {
  590 + error = -ENOMEM;
425 591 goto Error;
426   -
427   - list_for_each_entry_continue(bb, &bm->blocks, hook) {
428   - bb->data = get_image_page(gfp_mask, safe_needed);
429   - if (!bb->data) {
430   - error = -ENOMEM;
431   - goto Error;
432   - }
433   -
434   - bb->start_pfn = pfn;
435   - if (pages >= BM_BITS_PER_BLOCK) {
436   - pfn += BM_BITS_PER_BLOCK;
437   - pages -= BM_BITS_PER_BLOCK;
438   - } else {
439   - /* This is executed only once in the loop */
440   - pfn += pages;
441   - }
442   - bb->end_pfn = pfn;
443 592 }
  593 + list_add_tail(&zone->list, &bm->zones);
444 594 }
445 595  
446 596 bm->p_list = ca.chain;
... ... @@ -460,51 +610,83 @@
460 610 */
461 611 static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free)
462 612 {
463   - struct bm_block *bb;
  613 + struct mem_zone_bm_rtree *zone;
464 614  
465   - list_for_each_entry(bb, &bm->blocks, hook)
466   - if (bb->data)
467   - free_image_page(bb->data, clear_nosave_free);
  615 + list_for_each_entry(zone, &bm->zones, list)
  616 + free_zone_bm_rtree(zone, clear_nosave_free);
468 617  
469 618 free_list_of_pages(bm->p_list, clear_nosave_free);
470 619  
471   - INIT_LIST_HEAD(&bm->blocks);
  620 + INIT_LIST_HEAD(&bm->zones);
472 621 }
473 622  
474 623 /**
475   - * memory_bm_find_bit - find the bit in the bitmap @bm that corresponds
476   - * to given pfn. The cur_zone_bm member of @bm and the cur_block member
477   - * of @bm->cur_zone_bm are updated.
  624 + * memory_bm_find_bit - Find the bit for pfn in the memory
  625 + * bitmap
  626 + *
  627 + * Find the bit in the bitmap @bm that corresponds to given pfn.
  628 + * The cur.zone, cur.node and cur.node_pfn members of @bm are
  629 + * updated.
  630 + * It walks the radix tree to find the page which contains the bit for
  631 + * pfn and returns the bit position in **addr and *bit_nr.
478 632 */
479 633 static int memory_bm_find_bit(struct memory_bitmap *bm, unsigned long pfn,
480   - void **addr, unsigned int *bit_nr)
  634 + void **addr, unsigned int *bit_nr)
481 635 {
482   - struct bm_block *bb;
  636 + struct mem_zone_bm_rtree *curr, *zone;
  637 + struct rtree_node *node;
  638 + int i, block_nr;
483 639  
  640 + zone = bm->cur.zone;
  641 +
  642 + if (pfn >= zone->start_pfn && pfn < zone->end_pfn)
  643 + goto zone_found;
  644 +
  645 + zone = NULL;
  646 +
  647 + /* Find the right zone */
  648 + list_for_each_entry(curr, &bm->zones, list) {
  649 + if (pfn >= curr->start_pfn && pfn < curr->end_pfn) {
  650 + zone = curr;
  651 + break;
  652 + }
  653 + }
  654 +
  655 + if (!zone)
  656 + return -EFAULT;
  657 +
  658 +zone_found:
484 659 /*
485   - * Check if the pfn corresponds to the current bitmap block and find
486   - * the block where it fits if this is not the case.
  660 + * We have a zone. Now walk the radix tree to find the leaf
  661 + * node for our pfn.
487 662 */
488   - bb = bm->cur.block;
489   - if (pfn < bb->start_pfn)
490   - list_for_each_entry_continue_reverse(bb, &bm->blocks, hook)
491   - if (pfn >= bb->start_pfn)
492   - break;
493 663  
494   - if (pfn >= bb->end_pfn)
495   - list_for_each_entry_continue(bb, &bm->blocks, hook)
496   - if (pfn >= bb->start_pfn && pfn < bb->end_pfn)
497   - break;
  664 + node = bm->cur.node;
  665 + if (((pfn - zone->start_pfn) & ~BM_BLOCK_MASK) == bm->cur.node_pfn)
  666 + goto node_found;
498 667  
499   - if (&bb->hook == &bm->blocks)
500   - return -EFAULT;
  668 + node = zone->rtree;
  669 + block_nr = (pfn - zone->start_pfn) >> BM_BLOCK_SHIFT;
501 670  
502   - /* The block has been found */
503   - bm->cur.block = bb;
504   - pfn -= bb->start_pfn;
505   - bm->cur.bit = pfn + 1;
506   - *bit_nr = pfn;
507   - *addr = bb->data;
  671 + for (i = zone->levels; i > 0; i--) {
  672 + int index;
  673 +
  674 + index = block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT);
  675 + index &= BM_RTREE_LEVEL_MASK;
  676 + BUG_ON(node->data[index] == 0);
  677 + node = (struct rtree_node *)node->data[index];
  678 + }
  679 +
  680 +node_found:
  681 + /* Update last position */
  682 + bm->cur.zone = zone;
  683 + bm->cur.node = node;
  684 + bm->cur.node_pfn = (pfn - zone->start_pfn) & ~BM_BLOCK_MASK;
  685 +
  686 + /* Set return values */
  687 + *addr = node->data;
  688 + *bit_nr = (pfn - zone->start_pfn) & BM_BLOCK_MASK;
  689 +
508 690 return 0;
509 691 }
510 692  
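
The fast path in memory_bm_find_bit() relies on cur.node_pfn caching the block-aligned pfn offset of the last leaf that was touched, so one mask compare decides whether the tree walk can be skipped. A small sketch of that check and of the final (addr, bit) computation, with made-up values (PAGE_SHIFT assumed to be 12):

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT      12                      /* assumed 4 KiB pages */
#define BM_BLOCK_SHIFT  (PAGE_SHIFT + 3)
#define BM_BLOCK_MASK   ((1UL << BM_BLOCK_SHIFT) - 1)

int main(void)
{
        unsigned long zone_start_pfn = 0x1000;  /* made-up zone start */
        unsigned long cur_node_pfn = 0x8000;    /* cached leaf: offsets 0x8000-0xffff */
        unsigned long pfn = 0x9abc;             /* made-up pfn being looked up */
        unsigned long off = pfn - zone_start_pfn;

        /* Same test as memory_bm_find_bit(): block-aligned offset vs. cache. */
        bool cached = (off & ~BM_BLOCK_MASK) == cur_node_pfn;

        printf("reuse cached leaf: %s\n", cached ? "yes" : "no");
        printf("bit within leaf:   %lu\n", off & BM_BLOCK_MASK);
        return 0;
}
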
... ... @@ -528,6 +710,7 @@
528 710 error = memory_bm_find_bit(bm, pfn, &addr, &bit);
529 711 if (!error)
530 712 set_bit(bit, addr);
  713 +
531 714 return error;
532 715 }
533 716  
... ... @@ -542,6 +725,14 @@
542 725 clear_bit(bit, addr);
543 726 }
544 727  
  728 +static void memory_bm_clear_current(struct memory_bitmap *bm)
  729 +{
  730 + int bit;
  731 +
  732 + bit = max(bm->cur.node_bit - 1, 0);
  733 + clear_bit(bit, bm->cur.node->data);
  734 +}
  735 +
545 736 static int memory_bm_test_bit(struct memory_bitmap *bm, unsigned long pfn)
546 737 {
547 738 void *addr;
... ... @@ -561,38 +752,70 @@
561 752 return !memory_bm_find_bit(bm, pfn, &addr, &bit);
562 753 }
563 754  
564   -/**
565   - * memory_bm_next_pfn - find the pfn that corresponds to the next set bit
566   - * in the bitmap @bm. If the pfn cannot be found, BM_END_OF_MAP is
567   - * returned.
  755 +/*
  756 + * rtree_next_node - Jumps to the next leaf node
568 757 *
569   - * It is required to run memory_bm_position_reset() before the first call to
570   - * this function.
  758 + * Sets the position to the beginning of the next node in the
  759 + * memory bitmap. This is either the next node in the current
  760 + * zone's radix tree or the first node in the radix tree of the
  761 + * next zone.
  762 + *
  763 + * Returns true if there is a next node, false otherwise.
571 764 */
  765 +static bool rtree_next_node(struct memory_bitmap *bm)
  766 +{
  767 + bm->cur.node = list_entry(bm->cur.node->list.next,
  768 + struct rtree_node, list);
  769 + if (&bm->cur.node->list != &bm->cur.zone->leaves) {
  770 + bm->cur.node_pfn += BM_BITS_PER_BLOCK;
  771 + bm->cur.node_bit = 0;
  772 + touch_softlockup_watchdog();
  773 + return true;
  774 + }
572 775  
  776 + /* No more nodes, goto next zone */
  777 + bm->cur.zone = list_entry(bm->cur.zone->list.next,
  778 + struct mem_zone_bm_rtree, list);
  779 + if (&bm->cur.zone->list != &bm->zones) {
  780 + bm->cur.node = list_entry(bm->cur.zone->leaves.next,
  781 + struct rtree_node, list);
  782 + bm->cur.node_pfn = 0;
  783 + bm->cur.node_bit = 0;
  784 + return true;
  785 + }
  786 +
  787 + /* No more zones */
  788 + return false;
  789 +}
  790 +
  791 +/**
  792 + * memory_bm_next_pfn - Find the next set bit in the bitmap @bm
  793 + *
  794 + * Starting from the last returned position this function searches
  795 + * for the next set bit in the memory bitmap and returns its
  796 + * number. If no more bits are set, BM_END_OF_MAP is returned.
  797 + *
  798 + * It is required to run memory_bm_position_reset() before the
  799 + * first call to this function.
  800 + */
573 801 static unsigned long memory_bm_next_pfn(struct memory_bitmap *bm)
574 802 {
575   - struct bm_block *bb;
  803 + unsigned long bits, pfn, pages;
576 804 int bit;
577 805  
578   - bb = bm->cur.block;
579 806 do {
580   - bit = bm->cur.bit;
581   - bit = find_next_bit(bb->data, bm_block_bits(bb), bit);
582   - if (bit < bm_block_bits(bb))
583   - goto Return_pfn;
  807 + pages = bm->cur.zone->end_pfn - bm->cur.zone->start_pfn;
  808 + bits = min(pages - bm->cur.node_pfn, BM_BITS_PER_BLOCK);
  809 + bit = find_next_bit(bm->cur.node->data, bits,
  810 + bm->cur.node_bit);
  811 + if (bit < bits) {
  812 + pfn = bm->cur.zone->start_pfn + bm->cur.node_pfn + bit;
  813 + bm->cur.node_bit = bit + 1;
  814 + return pfn;
  815 + }
  816 + } while (rtree_next_node(bm));
584 817  
585   - bb = list_entry(bb->hook.next, struct bm_block, hook);
586   - bm->cur.block = bb;
587   - bm->cur.bit = 0;
588   - } while (&bb->hook != &bm->blocks);
589   -
590   - memory_bm_position_reset(bm);
591 818 return BM_END_OF_MAP;
592   -
593   - Return_pfn:
594   - bm->cur.bit = bit + 1;
595   - return bb->start_pfn + bit;
596 819 }
597 820  
598 821 /**
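
The consumer pattern for this iterator is unchanged from the old implementation: reset the position once, then call memory_bm_next_pfn() until BM_END_OF_MAP. A toy user-space sketch of that pattern (the single-word bitmap and the find-next-bit helper below are stand-ins, not the kernel ones):

#include <stdio.h>

#define TOY_BITS 32UL

/* Toy single-word stand-in for the kernel's find_next_bit(). */
static unsigned long toy_find_next_bit(unsigned long word, unsigned long size,
                                       unsigned long start)
{
        for (; start < size; start++)
                if (word & (1UL << start))
                        return start;
        return size;
}

int main(void)
{
        unsigned long bitmap = 0, cur, bit;

        /* Pretend pfns 3, 17 and 29 are marked. */
        bitmap |= 1UL << 3;
        bitmap |= 1UL << 17;
        bitmap |= 1UL << 29;

        cur = 0;        /* memory_bm_position_reset() analogue */

        /* memory_bm_next_pfn() analogue: return a bit, remember bit + 1. */
        while ((bit = toy_find_next_bit(bitmap, TOY_BITS, cur)) < TOY_BITS) {
                printf("set bit (pfn) %lu\n", bit);
                cur = bit + 1;
        }
        return 0;
}
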
... ... @@ -816,12 +1039,17 @@
816 1039  
817 1040 unsigned int snapshot_additional_pages(struct zone *zone)
818 1041 {
819   - unsigned int res;
  1042 + unsigned int rtree, nodes;
820 1043  
821   - res = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
822   - res += DIV_ROUND_UP(res * sizeof(struct bm_block),
823   - LINKED_PAGE_DATA_SIZE);
824   - return 2 * res;
  1044 + rtree = nodes = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
  1045 + rtree += DIV_ROUND_UP(rtree * sizeof(struct rtree_node),
  1046 + LINKED_PAGE_DATA_SIZE);
  1047 + while (nodes > 1) {
  1048 + nodes = DIV_ROUND_UP(nodes, BM_ENTRIES_PER_LEVEL);
  1049 + rtree += nodes;
  1050 + }
  1051 +
  1052 + return 2 * rtree;
825 1053 }
826 1054  
827 1055 #ifdef CONFIG_HIGHMEM
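
A worked example of the new estimate, as a user-space sketch with an assumed 1 GiB zone of 4 KiB pages on a 64-bit machine (the constants and the rtree_node stand-in mirror the kernel definitions but are restated here as assumptions):

#include <stdio.h>

#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))
#define PAGE_SIZE               4096UL
#define BM_BITS_PER_BLOCK       (PAGE_SIZE * 8)
#define BM_ENTRIES_PER_LEVEL    (PAGE_SIZE / sizeof(unsigned long))
#define LINKED_PAGE_DATA_SIZE   (PAGE_SIZE - sizeof(void *))

/* Stand-in with the same size as the kernel struct on 64-bit (24 bytes). */
struct rtree_node {
        void *next, *prev;
        unsigned long *data;
};

int main(void)
{
        unsigned long spanned_pages = 262144;   /* assumed: 1 GiB of 4 KiB pages */
        unsigned long rtree, nodes;

        rtree = nodes = DIV_ROUND_UP(spanned_pages, BM_BITS_PER_BLOCK);
        rtree += DIV_ROUND_UP(rtree * sizeof(struct rtree_node),
                              LINKED_PAGE_DATA_SIZE);
        while (nodes > 1) {
                nodes = DIV_ROUND_UP(nodes, BM_ENTRIES_PER_LEVEL);
                rtree += nodes;
        }

        /* Two bitmaps are kept, hence the factor of two: 20 pages here. */
        printf("snapshot_additional_pages estimate: %lu pages\n", 2 * rtree);
        return 0;
}
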
... ... @@ -1094,23 +1322,35 @@
1094 1322  
1095 1323 void swsusp_free(void)
1096 1324 {
1097   - struct zone *zone;
1098   - unsigned long pfn, max_zone_pfn;
  1325 + unsigned long fb_pfn, fr_pfn;
1099 1326  
1100   - for_each_populated_zone(zone) {
1101   - max_zone_pfn = zone_end_pfn(zone);
1102   - for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
1103   - if (pfn_valid(pfn)) {
1104   - struct page *page = pfn_to_page(pfn);
  1327 + memory_bm_position_reset(forbidden_pages_map);
  1328 + memory_bm_position_reset(free_pages_map);
1105 1329  
1106   - if (swsusp_page_is_forbidden(page) &&
1107   - swsusp_page_is_free(page)) {
1108   - swsusp_unset_page_forbidden(page);
1109   - swsusp_unset_page_free(page);
1110   - __free_page(page);
1111   - }
1112   - }
  1330 +loop:
  1331 + fr_pfn = memory_bm_next_pfn(free_pages_map);
  1332 + fb_pfn = memory_bm_next_pfn(forbidden_pages_map);
  1333 +
  1334 + /*
  1335 + * Find the next bit set in both bitmaps. This is guaranteed to
  1336 + * terminate when fb_pfn == fr_pfn == BM_END_OF_MAP.
  1337 + */
  1338 + do {
  1339 + if (fb_pfn < fr_pfn)
  1340 + fb_pfn = memory_bm_next_pfn(forbidden_pages_map);
  1341 + if (fr_pfn < fb_pfn)
  1342 + fr_pfn = memory_bm_next_pfn(free_pages_map);
  1343 + } while (fb_pfn != fr_pfn);
  1344 +
  1345 + if (fr_pfn != BM_END_OF_MAP && pfn_valid(fr_pfn)) {
  1346 + struct page *page = pfn_to_page(fr_pfn);
  1347 +
  1348 + memory_bm_clear_current(forbidden_pages_map);
  1349 + memory_bm_clear_current(free_pages_map);
  1350 + __free_page(page);
  1351 + goto loop;
1113 1352 }
  1353 +
1114 1354 nr_copy_pages = 0;
1115 1355 nr_meta_pages = 0;
1116 1356 restore_pblist = NULL;
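
The swsusp_free() loop above walks both bitmaps as two ascending pfn streams and always advances the one that is behind, so the streams can only meet on a pfn set in both maps or on BM_END_OF_MAP, which guarantees termination. A simplified user-space model of that merge (sorted arrays stand in for the bitmaps; pfn_valid() and the bit clearing via memory_bm_clear_current() are omitted):

#include <stdio.h>

#define END_OF_MAP (~0UL)

/* next_pfn() stand-in: pops the next value from a sorted, END-terminated set. */
static unsigned long next_pfn(const unsigned long *set, int *pos)
{
        return set[(*pos)++];
}

int main(void)
{
        /* Hypothetical "forbidden" and "free" pfn sets. */
        unsigned long forbidden[] = { 3, 7, 10, 15, END_OF_MAP };
        unsigned long freemap[]   = { 5, 7, 9, 15, 20, END_OF_MAP };
        int fb_pos = 0, fr_pos = 0;
        unsigned long fb_pfn, fr_pfn;

        for (;;) {
                fr_pfn = next_pfn(freemap, &fr_pos);
                fb_pfn = next_pfn(forbidden, &fb_pos);

                /* Advance whichever stream is behind until they meet. */
                while (fb_pfn != fr_pfn) {
                        if (fb_pfn < fr_pfn)
                                fb_pfn = next_pfn(forbidden, &fb_pos);
                        else
                                fr_pfn = next_pfn(freemap, &fr_pos);
                }
                if (fr_pfn == END_OF_MAP)
                        break;
                printf("pfn %lu is in both maps -> would be freed\n", fr_pfn);
        }
        return 0;
}
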