Commit 4b02108ac1b3354a22b0d83c684797692efdc395

Authored by KOSAKI Motohiro
Committed by Linus Torvalds
1 parent c6a7f5728a

mm: oom analysis: add shmem vmstat

Recently we encountered OOM problems caused by the memory usage of the
GEM cache. In general, a large amount of shmem/tmpfs pages tends to
create a memory shortage problem.

We often use the following calculation to determine the number of shmem
pages:

shmem = NR_ACTIVE_ANON + NR_INACTIVE_ANON - NR_ANON_PAGES

However, this expression does not account for isolated and mlocked pages.
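
For illustration, here is a minimal userspace sketch of that legacy
estimate. It assumes only that the kernel exports nr_active_anon,
nr_inactive_anon and nr_anon_pages via /proc/vmstat; vmstat_read() is a
hypothetical helper written for this example, not a kernel API:

#include <stdio.h>
#include <string.h>

/* Hypothetical helper: return the value of one /proc/vmstat field. */
static unsigned long vmstat_read(const char *key)
{
        char name[64];
        unsigned long val;
        FILE *f = fopen("/proc/vmstat", "r");

        if (!f)
                return 0;
        while (fscanf(f, "%63s %lu", name, &val) == 2) {
                if (!strcmp(name, key)) {
                        fclose(f);
                        return val;
                }
        }
        fclose(f);
        return 0;
}

int main(void)
{
        /* The pre-patch estimate; misses isolated and mlocked pages. */
        unsigned long shmem = vmstat_read("nr_active_anon")
                            + vmstat_read("nr_inactive_anon")
                            - vmstat_read("nr_anon_pages");

        printf("estimated shmem: %lu pages\n", shmem);
        return 0;
}

With this patch applied, the same figure can instead be read directly
from the new Shmem: field in /proc/meminfo or the nr_shmem field in
/proc/vmstat.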

This patch adds explicit accounting for pages used by shmem and tmpfs.

Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Acked-by: Rik van Riel <riel@redhat.com>
Reviewed-by: Christoph Lameter <cl@linux-foundation.org>
Acked-by: Wu Fengguang <fengguang.wu@intel.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Hugh Dickins <hugh.dickins@tiscali.co.uk>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Showing 7 changed files with 18 additions and 3 deletions

drivers/base/node.c
... ... @@ -85,6 +85,7 @@
85 85 "Node %d FilePages: %8lu kB\n"
86 86 "Node %d Mapped: %8lu kB\n"
87 87 "Node %d AnonPages: %8lu kB\n"
  88 + "Node %d Shmem: %8lu kB\n"
88 89 "Node %d KernelStack: %8lu kB\n"
89 90 "Node %d PageTables: %8lu kB\n"
90 91 "Node %d NFS_Unstable: %8lu kB\n"
... ... @@ -117,6 +118,7 @@
117 118 nid, K(node_page_state(nid, NR_FILE_PAGES)),
118 119 nid, K(node_page_state(nid, NR_FILE_MAPPED)),
119 120 nid, K(node_page_state(nid, NR_ANON_PAGES)),
  121 + nid, K(node_page_state(nid, NR_SHMEM)),
120 122 nid, node_page_state(nid, NR_KERNEL_STACK) *
121 123 THREAD_SIZE / 1024,
122 124 nid, K(node_page_state(nid, NR_PAGETABLE)),
fs/proc/meminfo.c
... ... @@ -81,6 +81,7 @@
81 81 "Writeback: %8lu kB\n"
82 82 "AnonPages: %8lu kB\n"
83 83 "Mapped: %8lu kB\n"
  84 + "Shmem: %8lu kB\n"
84 85 "Slab: %8lu kB\n"
85 86 "SReclaimable: %8lu kB\n"
86 87 "SUnreclaim: %8lu kB\n"
... ... @@ -125,6 +126,7 @@
125 126 K(global_page_state(NR_WRITEBACK)),
126 127 K(global_page_state(NR_ANON_PAGES)),
127 128 K(global_page_state(NR_FILE_MAPPED)),
  129 + K(global_page_state(NR_SHMEM)),
128 130 K(global_page_state(NR_SLAB_RECLAIMABLE) +
129 131 global_page_state(NR_SLAB_UNRECLAIMABLE)),
130 132 K(global_page_state(NR_SLAB_RECLAIMABLE)),
include/linux/mmzone.h
... ... @@ -100,6 +100,7 @@
100 100 NR_BOUNCE,
101 101 NR_VMSCAN_WRITE,
102 102 NR_WRITEBACK_TEMP, /* Writeback using temporary buffers */
  103 + NR_SHMEM, /* shmem pages (includes tmpfs/GEM pages) */
103 104 #ifdef CONFIG_NUMA
104 105 NUMA_HIT, /* allocated in intended node */
105 106 NUMA_MISS, /* allocated in non intended node */
mm/filemap.c
... ... @@ -119,6 +119,8 @@
119 119 page->mapping = NULL;
120 120 mapping->nrpages--;
121 121 __dec_zone_page_state(page, NR_FILE_PAGES);
  122 + if (PageSwapBacked(page))
  123 + __dec_zone_page_state(page, NR_SHMEM);
122 124 BUG_ON(page_mapped(page));
123 125  
124 126 /*
... ... @@ -431,6 +433,8 @@
431 433 if (likely(!error)) {
432 434 mapping->nrpages++;
433 435 __inc_zone_page_state(page, NR_FILE_PAGES);
  436 + if (PageSwapBacked(page))
  437 + __inc_zone_page_state(page, NR_SHMEM);
434 438 spin_unlock_irq(&mapping->tree_lock);
435 439 } else {
436 440 page->mapping = NULL;
mm/migrate.c
... ... @@ -312,7 +312,10 @@
312 312 */
313 313 __dec_zone_page_state(page, NR_FILE_PAGES);
314 314 __inc_zone_page_state(newpage, NR_FILE_PAGES);
315   -
  315 + if (PageSwapBacked(page)) {
  316 + __dec_zone_page_state(page, NR_SHMEM);
  317 + __inc_zone_page_state(newpage, NR_SHMEM);
  318 + }
316 319 spin_unlock_irq(&mapping->tree_lock);
317 320  
318 321 return 0;
mm/page_alloc.c
... ... @@ -2139,7 +2139,7 @@
2139 2139 " unevictable:%lu"
2140 2140 " dirty:%lu writeback:%lu unstable:%lu buffer:%lu\n"
2141 2141 " free:%lu slab_reclaimable:%lu slab_unreclaimable:%lu\n"
2142   - " mapped:%lu pagetables:%lu bounce:%lu\n",
  2142 + " mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n",
2143 2143 global_page_state(NR_ACTIVE_ANON),
2144 2144 global_page_state(NR_ACTIVE_FILE),
2145 2145 global_page_state(NR_INACTIVE_ANON),
... ... @@ -2153,6 +2153,7 @@
2153 2153 global_page_state(NR_SLAB_RECLAIMABLE),
2154 2154 global_page_state(NR_SLAB_UNRECLAIMABLE),
2155 2155 global_page_state(NR_FILE_MAPPED),
  2156 + global_page_state(NR_SHMEM),
2156 2157 global_page_state(NR_PAGETABLE),
2157 2158 global_page_state(NR_BOUNCE));
2158 2159  
... ... @@ -2175,6 +2176,7 @@
2175 2176 " dirty:%lukB"
2176 2177 " writeback:%lukB"
2177 2178 " mapped:%lukB"
  2179 + " shmem:%lukB"
2178 2180 " slab_reclaimable:%lukB"
2179 2181 " slab_unreclaimable:%lukB"
2180 2182 " kernel_stack:%lukB"
... ... @@ -2200,6 +2202,7 @@
2200 2202 K(zone_page_state(zone, NR_FILE_DIRTY)),
2201 2203 K(zone_page_state(zone, NR_WRITEBACK)),
2202 2204 K(zone_page_state(zone, NR_FILE_MAPPED)),
  2205 + K(zone_page_state(zone, NR_SHMEM)),
2203 2206 K(zone_page_state(zone, NR_SLAB_RECLAIMABLE)),
2204 2207 K(zone_page_state(zone, NR_SLAB_UNRECLAIMABLE)),
2205 2208 zone_page_state(zone, NR_KERNEL_STACK) *
mm/vmstat.c
... ... @@ -644,7 +644,7 @@
644 644 "nr_bounce",
645 645 "nr_vmscan_write",
646 646 "nr_writeback_temp",
647   -
  647 + "nr_shmem",
648 648 #ifdef CONFIG_NUMA
649 649 "numa_hit",
650 650 "numa_miss",