Commit 1c5e9c27cbd966c7f0038698d5dcd5ada3574f47

Authored by Mel Gorman
Committed by Linus Torvalds
1 parent 1c30e0177e

mm: numa: limit scope of lock for NUMA migrate rate limiting

NUMA migrate rate limiting protects a migration counter and window using
a lock, but in some cases this lock can be contended.  It is not
critical that the page count be exact; lost updates are acceptable.
Narrow the scope of the lock so that only the window reset is serialized
and the counter is updated without the lock.

Signed-off-by: Mel Gorman <mgorman@suse.de>
Reviewed-by: Rik van Riel <riel@redhat.com>
Cc: Alex Thorlton <athorlton@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Showing 2 changed files with 13 additions and 13 deletions

include/linux/mmzone.h
@@ -764,10 +764,7 @@
 	int kswapd_max_order;
 	enum zone_type classzone_idx;
 #ifdef CONFIG_NUMA_BALANCING
-	/*
-	 * Lock serializing the per destination node AutoNUMA memory
-	 * migration rate limiting data.
-	 */
+	/* Lock serializing the migrate rate limiting window */
 	spinlock_t numabalancing_migrate_lock;
 
 	/* Rate limiting time interval */
mm/migrate.c

@@ -1602,26 +1602,29 @@
 static bool numamigrate_update_ratelimit(pg_data_t *pgdat,
 					unsigned long nr_pages)
 {
-	bool rate_limited = false;
-
 	/*
 	 * Rate-limit the amount of data that is being migrated to a node.
 	 * Optimal placement is no good if the memory bus is saturated and
 	 * all the time is being spent migrating!
 	 */
-	spin_lock(&pgdat->numabalancing_migrate_lock);
 	if (time_after(jiffies, pgdat->numabalancing_migrate_next_window)) {
+		spin_lock(&pgdat->numabalancing_migrate_lock);
 		pgdat->numabalancing_migrate_nr_pages = 0;
 		pgdat->numabalancing_migrate_next_window = jiffies +
 			msecs_to_jiffies(migrate_interval_millisecs);
+		spin_unlock(&pgdat->numabalancing_migrate_lock);
 	}
 	if (pgdat->numabalancing_migrate_nr_pages > ratelimit_pages)
-		rate_limited = true;
-	else
-		pgdat->numabalancing_migrate_nr_pages += nr_pages;
-	spin_unlock(&pgdat->numabalancing_migrate_lock);
-
-	return rate_limited;
+		return true;
+
+	/*
+	 * This is an unlocked non-atomic update so errors are possible.
+	 * The consequences are failing to migrate when we potentially should
+	 * have which is not severe enough to warrant locking. If it is ever
+	 * a problem, it can be converted to a per-cpu counter.
+	 */
+	pgdat->numabalancing_migrate_nr_pages += nr_pages;
+	return false;
 }
 
 static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
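
For readers who want to experiment with the pattern outside the kernel, below
is a minimal user-space sketch of the same idea in plain C: the mutex is taken
only to reset the window, while the page counter is checked and incremented
without it.  The struct ratelimit type, the window/limit constants and the
pthread mutex are illustrative stand-ins chosen for this sketch, not the
kernel's data structures.

/* User-space sketch of the narrowed-lock rate limiter (not kernel code). */
#include <pthread.h>
#include <stdbool.h>
#include <time.h>

#define RATELIMIT_PAGES  1024UL   /* stand-in for ratelimit_pages */
#define WINDOW_SECONDS   1        /* stand-in for migrate_interval_millisecs */

struct ratelimit {
	pthread_mutex_t lock;     /* serializes only the window reset */
	time_t next_window;       /* end of the current rate window */
	unsigned long nr_pages;   /* lossy counter, updated without the lock */
};

static bool update_ratelimit(struct ratelimit *rl, unsigned long nr_pages)
{
	time_t now = time(NULL);

	/* Only the window rollover takes the lock. */
	if (now >= rl->next_window) {
		pthread_mutex_lock(&rl->lock);
		rl->nr_pages = 0;
		rl->next_window = now + WINDOW_SECONDS;
		pthread_mutex_unlock(&rl->lock);
	}

	if (rl->nr_pages > RATELIMIT_PAGES)
		return true;      /* rate limited for this window */

	/*
	 * Unlocked, non-atomic update: concurrent callers can lose
	 * increments, so the count is approximate.  The limiter is a
	 * heuristic, not exact accounting, which is the trade-off the
	 * commit message accepts.
	 */
	rl->nr_pages += nr_pages;
	return false;
}

int main(void)
{
	struct ratelimit rl = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.next_window = 0,
		.nr_pages = 0,
	};

	/* First call opens a fresh window and admits the request. */
	return update_ratelimit(&rl, 512) ? 1 : 0;
}

As in the new comment in the diff, a lost increment only skews one window's
accounting slightly; if exact counts ever mattered, the counter could be made
per-CPU (per-thread in this sketch) and summed when checked, as the comment
suggests.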