Commit 347ce434d57da80fd5809c0c836f206a50999c26
Committed by: Linus Torvalds
Parent: 65ba55f500
[PATCH] zoned vm counters: conversion of nr_pagecache to per zone counter
Currently a single atomic variable is used to establish the size of the page cache in the whole machine. The zoned VM counters have the same method of implementation as the nr_pagecache code but also allow the determination of the pagecache size per zone.

Remove the special implementation for nr_pagecache and make it a zoned counter named NR_FILE_PAGES.

Updates of the page cache counters are always performed with interrupts off. We can therefore use the __ variant here.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Cc: Trond Myklebust <trond.myklebust@fys.uio.no>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
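In code, the shape of the conversion is roughly the following. This is a minimal sketch of the pattern visible in the hunks below, not an excerpt from the patch:

	/* Old scheme: one machine-wide atomic, hand-batched per CPU
	 * via pagecache_acct() (see the include/linux/pagemap.h hunk). */
	pagecache_acct(1);                      /* page enters the page cache */
	pagecache_acct(-1);                     /* page leaves it */
	total = get_page_cache_size();          /* machine-wide total only */

	/* New scheme: a generic zoned counter. The __ (non-irq-safe)
	 * variants suffice because every call site already runs with
	 * interrupts off under write_lock_irq(&mapping->tree_lock). */
	__inc_zone_page_state(page, NR_FILE_PAGES);
	__dec_zone_page_state(page, NR_FILE_PAGES);
	total = global_page_state(NR_FILE_PAGES);      /* whole machine */
	node = node_page_state(nid, NR_FILE_PAGES);    /* one NUMA node */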
Showing 13 changed files with 16 additions and 67 deletions
arch/s390/appldata/appldata_mem.c
@@ -130,7 +130,8 @@
 	mem_data->totalhigh = P2K(val.totalhigh);
 	mem_data->freehigh = P2K(val.freehigh);
 	mem_data->bufferram = P2K(val.bufferram);
-	mem_data->cached = P2K(atomic_read(&nr_pagecache) - val.bufferram);
+	mem_data->cached = P2K(global_page_state(NR_FILE_PAGES)
+				- val.bufferram);
 
 	si_swapinfo(&val);
 	mem_data->totalswap = P2K(val.totalswap);
arch/sparc/kernel/sys_sunos.c
@@ -196,7 +196,7 @@
 	 * simple, it hopefully works in most obvious cases.. Easy to
 	 * fool it, but this should catch most mistakes.
 	 */
-	freepages = get_page_cache_size();
+	freepages = global_page_state(NR_FILE_PAGES);
 	freepages >>= 1;
 	freepages += nr_free_pages();
 	freepages += nr_swap_pages;
arch/sparc64/kernel/sys_sunos32.c
@@ -155,7 +155,7 @@
 	 * simple, it hopefully works in most obvious cases.. Easy to
 	 * fool it, but this should catch most mistakes.
 	 */
-	freepages = get_page_cache_size();
+	freepages = global_page_state(NR_FILE_PAGES);
 	freepages >>= 1;
 	freepages += nr_free_pages();
 	freepages += nr_swap_pages;
drivers/base/node.c
@@ -69,6 +69,7 @@
 		"Node %d LowFree: %8lu kB\n"
 		"Node %d Dirty: %8lu kB\n"
 		"Node %d Writeback: %8lu kB\n"
+		"Node %d FilePages: %8lu kB\n"
 		"Node %d Mapped: %8lu kB\n"
 		"Node %d Slab: %8lu kB\n",
 		nid, K(i.totalram),
@@ -82,6 +83,7 @@
 		nid, K(i.freeram - i.freehigh),
 		nid, K(ps.nr_dirty),
 		nid, K(ps.nr_writeback),
+		nid, K(node_page_state(nid, NR_FILE_PAGES)),
 		nid, K(node_page_state(nid, NR_FILE_MAPPED)),
 		nid, K(ps.nr_slab));
 	n += hugetlb_report_node_meminfo(nid, buf + n);
fs/proc/proc_misc.c
@@ -142,7 +142,8 @@
 	allowed = ((totalram_pages - hugetlb_total_pages())
 		* sysctl_overcommit_ratio / 100) + total_swap_pages;
 
-	cached = get_page_cache_size() - total_swapcache_pages - i.bufferram;
+	cached = global_page_state(NR_FILE_PAGES) -
+		total_swapcache_pages - i.bufferram;
 	if (cached < 0)
 		cached = 0;
 
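A subtlety worth noting: after this patch NR_FILE_PAGES also counts swap cache pages (see the mm/swap_state.c hunks below), which is why the "Cached" figure here subtracts total_swapcache_pages as well as the buffer pages. And because zoned counters are folded per CPU, the global value is approximate and can transiently undershoot, hence the clamp. Sketch of the resulting arithmetic:

	long cached = global_page_state(NR_FILE_PAGES)
			- total_swapcache_pages - i.bufferram;
	if (cached < 0)		/* approximate counter may briefly undershoot */
		cached = 0;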
include/linux/mmzone.h
include/linux/pagemap.h
@@ -113,51 +113,6 @@
 extern void remove_from_page_cache(struct page *page);
 extern void __remove_from_page_cache(struct page *page);
 
-extern atomic_t nr_pagecache;
-
-#ifdef CONFIG_SMP
-
-#define PAGECACHE_ACCT_THRESHOLD max(16, NR_CPUS * 2)
-DECLARE_PER_CPU(long, nr_pagecache_local);
-
-/*
- * pagecache_acct implements approximate accounting for pagecache.
- * vm_enough_memory() do not need high accuracy. Writers will keep
- * an offset in their per-cpu arena and will spill that into the
- * global count whenever the absolute value of the local count
- * exceeds the counter's threshold.
- *
- * MUST be protected from preemption.
- * current protection is mapping->page_lock.
- */
-static inline void pagecache_acct(int count)
-{
-	long *local;
-
-	local = &__get_cpu_var(nr_pagecache_local);
-	*local += count;
-	if (*local > PAGECACHE_ACCT_THRESHOLD || *local < -PAGECACHE_ACCT_THRESHOLD) {
-		atomic_add(*local, &nr_pagecache);
-		*local = 0;
-	}
-}
-
-#else
-
-static inline void pagecache_acct(int count)
-{
-	atomic_add(count, &nr_pagecache);
-}
-#endif
-
-static inline unsigned long get_page_cache_size(void)
-{
-	int ret = atomic_read(&nr_pagecache);
-	if (unlikely(ret < 0))
-		ret = 0;
-	return ret;
-}
-
 /*
  * Return byte-offset into filesystem object for page.
  */
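The hand-rolled per-CPU batching deleted here is not lost: the generic zoned-counter machinery from the parent commit does the same spill-over trick for every counter. A simplified, illustrative sketch of that idea -- not the literal mm/vmstat.c code, and the threshold value is invented for the example:

	#define STAT_THRESHOLD 32	/* illustrative value only */

	static inline void __mod_zone_page_state_sketch(struct zone *zone,
				enum zone_stat_item item, int delta)
	{
		/* Each CPU accumulates a small signed differential and folds
		 * it into the shared per-zone total only when it crosses the
		 * threshold -- the same idea as pagecache_acct() above, but
		 * generic and per zone. */
		s8 *diff = &zone_pcp(zone, smp_processor_id())->vm_stat_diff[item];
		long x = *diff + delta;

		if (x > STAT_THRESHOLD || x < -STAT_THRESHOLD) {
			atomic_long_add(x, &zone->vm_stat[item]);
			x = 0;
		}
		*diff = x;
	}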
mm/filemap.c
@@ -120,7 +120,7 @@
 	radix_tree_delete(&mapping->page_tree, page->index);
 	page->mapping = NULL;
 	mapping->nrpages--;
-	pagecache_acct(-1);
+	__dec_zone_page_state(page, NR_FILE_PAGES);
 }
 
 void remove_from_page_cache(struct page *page)
@@ -449,7 +449,7 @@
 		page->mapping = mapping;
 		page->index = offset;
 		mapping->nrpages++;
-		pagecache_acct(1);
+		__inc_zone_page_state(page, NR_FILE_PAGES);
 	}
 	write_unlock_irq(&mapping->tree_lock);
 	radix_tree_preload_end();
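To make the changelog's "interrupts off" point concrete: both call sites above sit inside a write_lock_irq() section on mapping->tree_lock, roughly as follows (paraphrased context, not a verbatim excerpt):

	write_lock_irq(&mapping->tree_lock);	/* interrupts now disabled */
	/* ... insert into mapping->page_tree, set page->mapping ... */
	mapping->nrpages++;
	__inc_zone_page_state(page, NR_FILE_PAGES);	/* no irq-save needed */
	write_unlock_irq(&mapping->tree_lock);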
mm/mmap.c
mm/nommu.c
mm/page_alloc.c
@@ -2124,16 +2124,11 @@
 			unsigned long action, void *hcpu)
 {
 	int cpu = (unsigned long)hcpu;
-	long *count;
 	unsigned long *src, *dest;
 
 	if (action == CPU_DEAD) {
 		int i;
 
-		/* Drain local pagecache count. */
-		count = &per_cpu(nr_pagecache_local, cpu);
-		atomic_add(*count, &nr_pagecache);
-		*count = 0;
 		local_irq_disable();
 		__drain_pages(cpu);
 
mm/swap_state.c
@@ -87,7 +87,7 @@
 		SetPageSwapCache(page);
 		set_page_private(page, entry.val);
 		total_swapcache_pages++;
-		pagecache_acct(1);
+		__inc_zone_page_state(page, NR_FILE_PAGES);
 	}
 	write_unlock_irq(&swapper_space.tree_lock);
 	radix_tree_preload_end();
@@ -132,7 +132,7 @@
 	set_page_private(page, 0);
 	ClearPageSwapCache(page);
 	total_swapcache_pages--;
-	pagecache_acct(-1);
+	__dec_zone_page_state(page, NR_FILE_PAGES);
 	INC_CACHE_INFO(del_total);
 }
 
mm/vmstat.c
@@ -20,12 +20,6 @@
  */
 DEFINE_PER_CPU(struct page_state, page_states) = {0};
 
-atomic_t nr_pagecache = ATOMIC_INIT(0);
-EXPORT_SYMBOL(nr_pagecache);
-#ifdef CONFIG_SMP
-DEFINE_PER_CPU(long, nr_pagecache_local) = 0;
-#endif
-
 static void __get_page_state(struct page_state *ret, int nr, cpumask_t *cpumask)
 {
 	unsigned cpu;
@@ -402,6 +396,7 @@
 static char *vmstat_text[] = {
 	/* Zoned VM counters */
 	"nr_mapped",
+	"nr_file_pages",
 
 	/* Page state */
 	"nr_dirty",
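The order of vmstat_text[] must track enum zone_stat_item in include/linux/mmzone.h, whose one-line hunk is collapsed above. The following is therefore an inference from the string table -- an assumption, not a quote from the patch:

	/* Presumed shape after this patch: NR_FILE_MAPPED came from the
	 * parent commit, NR_FILE_PAGES is added here; order must match
	 * vmstat_text[]. */
	enum zone_stat_item {
		NR_FILE_MAPPED,		/* "nr_mapped" */
		NR_FILE_PAGES,		/* "nr_file_pages" */
		NR_VM_ZONE_STAT_ITEMS
	};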