Commit 17766dde364813568e4f876517c72bab70838646
Committed by: Linus Torvalds
1 parent: c02925540c
Exists in: smarc-imx_3.14.28_1.0.0_ga and in 1 other branch
mm, thp: count thp_fault_fallback anytime thp fault fails
Currently, thp_fault_fallback in vmstat only gets incremented if a hugepage allocation fails. If current's memcg hits its limit or the page fault handler returns an error, it is incorrectly accounted as a successful thp_fault_alloc. Count thp_fault_fallback anytime the page fault handler falls back to using regular pages and only count thp_fault_alloc when a hugepage has actually been faulted. Signed-off-by: David Rientjes <rientjes@google.com> Cc: Mel Gorman <mgorman@suse.de> Cc: Andrea Arcangeli <aarcange@redhat.com> Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Showing 1 changed file with 7 additions and 3 deletions (side-by-side diff)
mm/huge_memory.c
... | ... | @@ -820,17 +820,19 @@ |
820 | 820 | count_vm_event(THP_FAULT_FALLBACK); |
821 | 821 | return VM_FAULT_FALLBACK; |
822 | 822 | } |
823 | - count_vm_event(THP_FAULT_ALLOC); | |
824 | 823 | if (unlikely(mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))) { |
825 | 824 | put_page(page); |
825 | + count_vm_event(THP_FAULT_FALLBACK); | |
826 | 826 | return VM_FAULT_FALLBACK; |
827 | 827 | } |
828 | 828 | if (unlikely(__do_huge_pmd_anonymous_page(mm, vma, haddr, pmd, page))) { |
829 | 829 | mem_cgroup_uncharge_page(page); |
830 | 830 | put_page(page); |
831 | + count_vm_event(THP_FAULT_FALLBACK); | |
831 | 832 | return VM_FAULT_FALLBACK; |
832 | 833 | } |
833 | 834 | |
835 | + count_vm_event(THP_FAULT_ALLOC); | |
834 | 836 | return 0; |
835 | 837 | } |
836 | 838 | |
... | ... | @@ -1143,7 +1145,6 @@ |
1143 | 1145 | new_page = NULL; |
1144 | 1146 | |
1145 | 1147 | if (unlikely(!new_page)) { |
1146 | - count_vm_event(THP_FAULT_FALLBACK); | |
1147 | 1148 | if (is_huge_zero_pmd(orig_pmd)) { |
1148 | 1149 | ret = do_huge_pmd_wp_zero_page_fallback(mm, vma, |
1149 | 1150 | address, pmd, orig_pmd, haddr); |
1150 | 1151 | |
... | ... | @@ -1154,9 +1155,9 @@ |
1154 | 1155 | split_huge_page(page); |
1155 | 1156 | put_page(page); |
1156 | 1157 | } |
1158 | + count_vm_event(THP_FAULT_FALLBACK); | |
1157 | 1159 | goto out; |
1158 | 1160 | } |
1159 | - count_vm_event(THP_FAULT_ALLOC); | |
1160 | 1161 | |
1161 | 1162 | if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) { |
1162 | 1163 | put_page(new_page); |
1163 | 1164 | |
... | ... | @@ -1164,9 +1165,12 @@ |
1164 | 1165 | split_huge_page(page); |
1165 | 1166 | put_page(page); |
1166 | 1167 | } |
1168 | + count_vm_event(THP_FAULT_FALLBACK); | |
1167 | 1169 | ret |= VM_FAULT_OOM; |
1168 | 1170 | goto out; |
1169 | 1171 | } |
1172 | + | |
1173 | + count_vm_event(THP_FAULT_ALLOC); | |
1170 | 1174 | |
1171 | 1175 | if (is_huge_zero_pmd(orig_pmd)) |
1172 | 1176 | clear_huge_page(new_page, haddr, HPAGE_PMD_NR); |