Commit 76395d37611e8758dd8bd6c6f5bfcb31e1dc48f9

Authored by Andrew Morton
Committed by Linus Torvalds
Parent: d63b70902b

[PATCH] shrink_all_memory(): fix lru_pages handling

At the end of shrink_all_memory() we forget to recalculate lru_pages: it can
be zero.

Fix that up, and add a helper function for this operation too.

Also, recalculate lru_pages each time around the inner loop to get the
balancing correct.

Cc: "Rafael J. Wysocki" <rjw@sisk.pl>
Cc: Pavel Machek <pavel@ucw.cz>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
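
Why a stale lru_pages of zero matters: shrink_slab() scans slab objects in proportion to the ratio of LRU pages scanned to total LRU pages, so the total acts as the denominator of the balancing calculation. Below is a minimal user-space sketch of that proportionality; slab_scan_target() is a hypothetical stand-in, not the kernel function (which also weights by shrinker->seeks), and the numbers are invented.

    #include <stdio.h>

    /*
     * Hypothetical stand-in for the balancing step in shrink_slab():
     * the slab scan target grows with the number of scanned LRU pages
     * and shrinks with the total LRU page count; the "+ 1" guards
     * against a zero divisor.  (Simplified: the real kernel code also
     * divides by shrinker->seeks.)
     */
    static unsigned long slab_scan_target(unsigned long scanned,
                                          unsigned long lru_pages,
                                          unsigned long nr_objects)
    {
            unsigned long long delta = 4ULL * scanned * nr_objects;

            return (unsigned long)(delta / (lru_pages + 1));
    }

    int main(void)
    {
            /* Realistic LRU population: modest slab pressure. */
            printf("%lu\n", slab_scan_target(1024, 100000, 50000)); /* 2047 */
            /* Stale lru_pages == 0: the divisor collapses to 1. */
            printf("%lu\n", slab_scan_target(1024, 0, 50000)); /* 204800000 */
            return 0;
    }

The second call reports a scan target some 100,000 times larger than the first, which is why the patch recomputes the LRU count immediately before each shrink_slab() call instead of reusing a cached value that may have gone stale or been zeroed.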

Showing 1 changed file (mm/vmscan.c) with 16 additions and 17 deletions:

--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1406,6 +1406,16 @@
 	return ret;
 }
 
+static unsigned long count_lru_pages(void)
+{
+	struct zone *zone;
+	unsigned long ret = 0;
+
+	for_each_zone(zone)
+		ret += zone->nr_active + zone->nr_inactive;
+	return ret;
+}
+
 /*
  * Try to free `nr_pages' of memory, system-wide, and return the number of
  * freed pages.
@@ -1420,7 +1430,6 @@
 	unsigned long ret = 0;
 	int pass;
 	struct reclaim_state reclaim_state;
-	struct zone *zone;
 	struct scan_control sc = {
 		.gfp_mask = GFP_KERNEL,
 		.may_swap = 0,
@@ -1431,10 +1440,7 @@
 
 	current->reclaim_state = &reclaim_state;
 
-	lru_pages = 0;
-	for_each_zone(zone)
-		lru_pages += zone->nr_active + zone->nr_inactive;
-
+	lru_pages = count_lru_pages();
 	nr_slab = global_page_state(NR_SLAB_RECLAIMABLE);
 	/* If slab caches are huge, it's better to hit them first */
 	while (nr_slab >= lru_pages) {
@@ -1461,13 +1467,6 @@
 	for (pass = 0; pass < 5; pass++) {
 		int prio;
 
-		/* Needed for shrinking slab caches later on */
-		if (!lru_pages)
-			for_each_zone(zone) {
-				lru_pages += zone->nr_active;
-				lru_pages += zone->nr_inactive;
-			}
-
 		/* Force reclaiming mapped pages in the passes #3 and #4 */
 		if (pass > 2) {
 			sc.may_swap = 1;
@@ -1483,7 +1482,8 @@
 				goto out;
 
 			reclaim_state.reclaimed_slab = 0;
-			shrink_slab(sc.nr_scanned, sc.gfp_mask, lru_pages);
+			shrink_slab(sc.nr_scanned, sc.gfp_mask,
+					count_lru_pages());
 			ret += reclaim_state.reclaimed_slab;
 			if (ret >= nr_pages)
 				goto out;
@@ -1491,20 +1491,19 @@
 			if (sc.nr_scanned && prio < DEF_PRIORITY - 2)
 				congestion_wait(WRITE, HZ / 10);
 		}
-
-		lru_pages = 0;
 	}
 
 	/*
 	 * If ret = 0, we could not shrink LRUs, but there may be something
 	 * in slab caches
	 */
-	if (!ret)
+	if (!ret) {
 		do {
 			reclaim_state.reclaimed_slab = 0;
-			shrink_slab(nr_pages, sc.gfp_mask, lru_pages);
+			shrink_slab(nr_pages, sc.gfp_mask, count_lru_pages());
 			ret += reclaim_state.reclaimed_slab;
 		} while (ret < nr_pages && reclaim_state.reclaimed_slab > 0);
+	}
 
 out:
 	current->reclaim_state = NULL;
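
For completeness, the new helper's arithmetic can be exercised in user space. The zones[] array and its values below are invented stand-ins for the kernel's for_each_zone() iteration over struct zone; only the summation mirrors the patch.

    #include <stdio.h>

    /*
     * Toy stand-ins for the kernel's struct zone and for_each_zone();
     * the field names mirror the patch, but the values are invented.
     */
    struct zone {
            unsigned long nr_active;
            unsigned long nr_inactive;
    };

    static struct zone zones[] = {
            { .nr_active = 1200, .nr_inactive = 800 },
            { .nr_active = 300,  .nr_inactive = 150 },
    };

    static unsigned long count_lru_pages(void)
    {
            unsigned long ret = 0;
            size_t i;

            for (i = 0; i < sizeof(zones) / sizeof(zones[0]); i++)
                    ret += zones[i].nr_active + zones[i].nr_inactive;
            return ret;
    }

    int main(void)
    {
            printf("%lu\n", count_lru_pages()); /* 2450 */
            return 0;
    }

Recounting via count_lru_pages() at every shrink_slab() call site, rather than caching the total across loop iterations, trades a cheap walk over the zones for correct slab/LRU balancing; that is the design choice this patch makes.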