Commit ee0ea59cf9ea95369d686bdc4b3d8c027e2b99cd

Authored by Hugh Dickins
Committed by Linus Torvalds
1 parent e850dcf530

ksm: reorganize ksm_check_stable_tree

Memory hotremove's ksm_check_stable_tree() is pitifully inefficient
(restarting whenever it finds a stale node to remove), but rearrange so
that at least it does not needlessly restart from nid 0 each time.  And
add a couple of comments: here is why we keep pfn instead of page.

Signed-off-by: Hugh Dickins <hughd@google.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Petr Holasek <pholasek@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Izik Eidus <izik.eidus@ravellosystems.com>
Cc: Gerald Schaefer <gerald.schaefer@de.ibm.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Showing 1 changed file with 22 additions and 16 deletions (side-by-side diff)

... ... @@ -1830,31 +1830,36 @@
1830 1830 #endif /* CONFIG_MIGRATION */
1831 1831  
1832 1832 #ifdef CONFIG_MEMORY_HOTREMOVE
1833   -static struct stable_node *ksm_check_stable_tree(unsigned long start_pfn,
1834   - unsigned long end_pfn)
  1833 +static void ksm_check_stable_tree(unsigned long start_pfn,
  1834 + unsigned long end_pfn)
1835 1835 {
  1836 + struct stable_node *stable_node;
1836 1837 struct rb_node *node;
1837 1838 int nid;
1838 1839  
1839   - for (nid = 0; nid < nr_node_ids; nid++)
1840   - for (node = rb_first(&root_stable_tree[nid]); node;
1841   - node = rb_next(node)) {
1842   - struct stable_node *stable_node;
1843   -
  1840 + for (nid = 0; nid < nr_node_ids; nid++) {
  1841 + node = rb_first(&root_stable_tree[nid]);
  1842 + while (node) {
1844 1843 stable_node = rb_entry(node, struct stable_node, node);
1845 1844 if (stable_node->kpfn >= start_pfn &&
1846   - stable_node->kpfn < end_pfn)
1847   - return stable_node;
  1845 + stable_node->kpfn < end_pfn) {
  1846 + /*
  1847 + * Don't get_ksm_page, page has already gone:
  1848 + * which is why we keep kpfn instead of page*
  1849 + */
  1850 + remove_node_from_stable_tree(stable_node);
  1851 + node = rb_first(&root_stable_tree[nid]);
  1852 + } else
  1853 + node = rb_next(node);
  1854 + cond_resched();
1848 1855 }
1849   -
1850   - return NULL;
  1856 + }
1851 1857 }
1852 1858  
1853 1859 static int ksm_memory_callback(struct notifier_block *self,
1854 1860 unsigned long action, void *arg)
1855 1861 {
1856 1862 struct memory_notify *mn = arg;
1857   - struct stable_node *stable_node;
1858 1863  
1859 1864 switch (action) {
1860 1865 case MEM_GOING_OFFLINE:
1861 1866  
... ... @@ -1874,11 +1879,12 @@
1874 1879 /*
1875 1880 * Most of the work is done by page migration; but there might
1876 1881 * be a few stable_nodes left over, still pointing to struct
1877   - * pages which have been offlined: prune those from the tree.
  1882 + * pages which have been offlined: prune those from the tree,
  1883 + * otherwise get_ksm_page() might later try to access a
  1884 + * non-existent struct page.
1878 1885 */
1879   - while ((stable_node = ksm_check_stable_tree(mn->start_pfn,
1880   - mn->start_pfn + mn->nr_pages)) != NULL)
1881   - remove_node_from_stable_tree(stable_node);
  1886 + ksm_check_stable_tree(mn->start_pfn,
  1887 + mn->start_pfn + mn->nr_pages);
1882 1888 /* fallthrough */
1883 1889  
1884 1890 case MEM_CANCEL_OFFLINE: