Commit 2c888cfbc1b45508a44763d85ba2e8ac43faff5f

Authored by Rik van Riel
Committed by Linus Torvalds
1 parent 97562cd243

thp: fix anon memory statistics with transparent hugepages

Count each transparent hugepage as HPAGE_PMD_NR pages in the LRU
statistics, so the Active(anon) and Inactive(anon) statistics in
/proc/meminfo are correct.
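In effect, every LRU counter update now goes through a helper that
returns the number of base pages a page represents: HPAGE_PMD_NR for a
transparent hugepage, 1 otherwise. What follows is a minimal userspace
sketch of that accounting idea, not the kernel code itself; the struct
page below is a toy stand-in, and HPAGE_PMD_NR is hard-coded to 512,
the x86-64 value for 4 KiB base pages and 2 MiB huge pages.

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

#define HPAGE_PMD_NR 512        /* 2 MiB huge page / 4 KiB base page */

struct page {
        bool trans_huge;        /* toy stand-in for PageTransHuge() */
};

/* mirrors the new helper: a THP counts as HPAGE_PMD_NR base pages */
static int hpage_nr_pages(const struct page *page)
{
        return page->trans_huge ? HPAGE_PMD_NR : 1;
}

int main(void)
{
        long nr_inactive_anon = 0;      /* toy stand-in for an LRU counter */
        struct page base = { .trans_huge = false };
        struct page huge = { .trans_huge = true };

        /* add_page_to_lru_list() now adds hpage_nr_pages(), not 1 */
        nr_inactive_anon += hpage_nr_pages(&base);
        nr_inactive_anon += hpage_nr_pages(&huge);
        assert(nr_inactive_anon == 1 + HPAGE_PMD_NR);

        /* del_page_from_lru_list() subtracts the same amount */
        nr_inactive_anon -= hpage_nr_pages(&huge);
        nr_inactive_anon -= hpage_nr_pages(&base);
        assert(nr_inactive_anon == 0);

        printf("counters balance at %ld\n", nr_inactive_anon);
        return 0;
}

Because additions and removals use the same helper, the counters
balance whether or not a page is huge; the one asymmetric case,
splitting a hugepage, is handled explicitly in
__split_huge_page_refcount() below.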

Signed-off-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Showing 5 changed files with 30 additions and 9 deletions

include/linux/huge_mm.h
... ... @@ -117,10 +117,18 @@
117 117 return;
118 118 __vma_adjust_trans_huge(vma, start, end, adjust_next);
119 119 }
  120 +static inline int hpage_nr_pages(struct page *page)
  121 +{
  122 + if (unlikely(PageTransHuge(page)))
  123 + return HPAGE_PMD_NR;
  124 + return 1;
  125 +}
120 126 #else /* CONFIG_TRANSPARENT_HUGEPAGE */
121 127 #define HPAGE_PMD_SHIFT ({ BUG(); 0; })
122 128 #define HPAGE_PMD_MASK ({ BUG(); 0; })
123 129 #define HPAGE_PMD_SIZE ({ BUG(); 0; })
  130 +
  131 +#define hpage_nr_pages(x) 1
124 132  
125 133 #define transparent_hugepage_enabled(__vma) 0
126 134  
include/linux/mm_inline.h
1 1 #ifndef LINUX_MM_INLINE_H
2 2 #define LINUX_MM_INLINE_H
3 3  
  4 +#include <linux/huge_mm.h>
  5 +
4 6 /**
5 7 * page_is_file_cache - should the page be on a file LRU or anon LRU?
6 8 * @page: the page to test
... ... @@ -24,7 +26,7 @@
24 26 struct list_head *head)
25 27 {
26 28 list_add(&page->lru, head);
27   - __inc_zone_state(zone, NR_LRU_BASE + l);
  29 + __mod_zone_page_state(zone, NR_LRU_BASE + l, hpage_nr_pages(page));
28 30 mem_cgroup_add_lru_list(page, l);
29 31 }
30 32  
... ... @@ -38,7 +40,7 @@
38 40 del_page_from_lru_list(struct zone *zone, struct page *page, enum lru_list l)
39 41 {
40 42 list_del(&page->lru);
41   - __dec_zone_state(zone, NR_LRU_BASE + l);
  43 + __mod_zone_page_state(zone, NR_LRU_BASE + l, -hpage_nr_pages(page));
42 44 mem_cgroup_del_lru_list(page, l);
43 45 }
44 46  
... ... @@ -73,7 +75,7 @@
73 75 l += LRU_ACTIVE;
74 76 }
75 77 }
76   - __dec_zone_state(zone, NR_LRU_BASE + l);
  78 + __mod_zone_page_state(zone, NR_LRU_BASE + l, -hpage_nr_pages(page));
77 79 mem_cgroup_del_lru_list(page, l);
78 80 }
79 81  
mm/huge_memory.c
... ... @@ -1143,6 +1143,7 @@
1143 1143 int i;
1144 1144 unsigned long head_index = page->index;
1145 1145 struct zone *zone = page_zone(page);
  1146 + int zonestat;
1146 1147  
1147 1148 /* prevent PageLRU to go away from under us, and freeze lru stats */
1148 1149 spin_lock_irq(&zone->lru_lock);
... ... @@ -1206,6 +1207,15 @@
1206 1207  
1207 1208 __dec_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES);
1208 1209 __mod_zone_page_state(zone, NR_ANON_PAGES, HPAGE_PMD_NR);
  1210 +
  1211 + /*
  1212 + * A hugepage counts for HPAGE_PMD_NR pages on the LRU statistics,
  1213 + * so adjust those appropriately if this page is on the LRU.
  1214 + */
  1215 + if (PageLRU(page)) {
  1216 + zonestat = NR_LRU_BASE + page_lru(page);
  1217 + __mod_zone_page_state(zone, zonestat, -(HPAGE_PMD_NR-1));
  1218 + }
1209 1219  
1210 1220 ClearPageCompound(page);
1211 1221 compound_unlock(page);
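The -(HPAGE_PMD_NR-1) above is the subtle part: the head page's
original arrival on the LRU contributed HPAGE_PMD_NR to the counter,
and the split then puts each tail page back on the LRU individually at
+1 apiece, so without a correction the counter would end up
HPAGE_PMD_NR-1 too high. A sketch of the arithmetic, assuming (as
lru_add_page_tail() does in this kernel) that the 511 tails are
re-counted one at a time:

#include <assert.h>

#define HPAGE_PMD_NR 512        /* assumes 4 KiB base / 2 MiB huge pages */

int main(void)
{
        long lru_stat = 0;

        lru_stat += HPAGE_PMD_NR;       /* THP head joined the LRU: +512 */
        lru_stat += HPAGE_PMD_NR - 1;   /* split re-adds each tail page: +511 */
        lru_stat -= HPAGE_PMD_NR - 1;   /* the adjustment above: -511 */

        /* 512 order-0 pages remain on the LRU, and the counter agrees */
        assert(lru_stat == HPAGE_PMD_NR);
        return 0;
}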
mm/memcontrol.c
... ... @@ -1091,7 +1091,7 @@
1091 1091 case 0:
1092 1092 list_move(&page->lru, dst);
1093 1093 mem_cgroup_del_lru(page);
1094   - nr_taken++;
  1094 + nr_taken += hpage_nr_pages(page);
1095 1095 break;
1096 1096 case -EBUSY:
1097 1097 /* we don't affect global LRU but rotate in our LRU */
mm/vmscan.c
... ... @@ -1045,7 +1045,7 @@
1045 1045 case 0:
1046 1046 list_move(&page->lru, dst);
1047 1047 mem_cgroup_del_lru(page);
1048   - nr_taken++;
  1048 + nr_taken += hpage_nr_pages(page);
1049 1049 break;
1050 1050  
1051 1051 case -EBUSY:
... ... @@ -1103,7 +1103,7 @@
1103 1103 if (__isolate_lru_page(cursor_page, mode, file) == 0) {
1104 1104 list_move(&cursor_page->lru, dst);
1105 1105 mem_cgroup_del_lru(cursor_page);
1106   - nr_taken++;
  1106 + nr_taken += hpage_nr_pages(cursor_page);
1107 1107 nr_lumpy_taken++;
1108 1108 if (PageDirty(cursor_page))
1109 1109 nr_lumpy_dirty++;
... ... @@ -1158,14 +1158,15 @@
1158 1158 struct page *page;
1159 1159  
1160 1160 list_for_each_entry(page, page_list, lru) {
  1161 + int numpages = hpage_nr_pages(page);
1161 1162 lru = page_lru_base_type(page);
1162 1163 if (PageActive(page)) {
1163 1164 lru += LRU_ACTIVE;
1164 1165 ClearPageActive(page);
1165   - nr_active++;
  1166 + nr_active += numpages;
1166 1167 }
1167 1168 if (count)
1168   - count[lru]++;
  1169 + count[lru] += numpages;
1169 1170 }
1170 1171  
1171 1172 return nr_active;
... ... @@ -1483,7 +1484,7 @@
1483 1484  
1484 1485 list_move(&page->lru, &zone->lru[lru].list);
1485 1486 mem_cgroup_add_lru_list(page, lru);
1486   - pgmoved++;
  1487 + pgmoved += hpage_nr_pages(page);
1487 1488  
1488 1489 if (!pagevec_add(&pvec, page) || list_empty(list)) {
1489 1490 spin_unlock_irq(&zone->lru_lock);