Commit ad596925eaf9a48ed61bc9210088828f1f8e0552
Committed by: Tejun Heo
1 parent: 99dcc3e5a9
Exists in master and in 7 other branches
this_cpu: Remove pageset_notifier
Remove the pageset notifier since it only marks that a processor exists on a specific node. Move that code into the vmstat notifier.

Signed-off-by: Christoph Lameter <cl@linux-foundation.org>
Signed-off-by: Tejun Heo <tj@kernel.org>
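The moved code amounts to a single call in the vmstat CPU hotplug callback; a minimal sketch of the resulting CPU_ONLINE handling (reconstructed from the diff below):

	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		start_cpu_timer(cpu);
		/* formerly done by the pageset notifier */
		node_set_state(cpu_to_node(cpu), N_CPU);
		break;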
Showing 1 changed file with 1 addition and 0 deletions (the single added line is marked with "+" in the listing below)
mm/vmstat.c
/*
 * linux/mm/vmstat.c
 *
 * Manages VM statistics
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * zoned VM statistics
 * Copyright (C) 2006 Silicon Graphics, Inc.,
 * Christoph Lameter <christoph@lameter.com>
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/vmstat.h>
#include <linux/sched.h>

#ifdef CONFIG_VM_EVENT_COUNTERS
DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}};
EXPORT_PER_CPU_SYMBOL(vm_event_states);

static void sum_vm_events(unsigned long *ret, const struct cpumask *cpumask)
{
        int cpu;
        int i;

        memset(ret, 0, NR_VM_EVENT_ITEMS * sizeof(unsigned long));

        for_each_cpu(cpu, cpumask) {
                struct vm_event_state *this = &per_cpu(vm_event_states, cpu);

                for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
                        ret[i] += this->event[i];
        }
}

/*
 * Accumulate the vm event counters across all CPUs.
 * The result is unavoidably approximate - it can change
 * during and after execution of this function.
 */
void all_vm_events(unsigned long *ret)
{
        get_online_cpus();
        sum_vm_events(ret, cpu_online_mask);
        put_online_cpus();
}
EXPORT_SYMBOL_GPL(all_vm_events);

#ifdef CONFIG_HOTPLUG
/*
 * Fold the foreign cpu events into our own.
 *
 * This is adding to the events on one processor
 * but keeps the global counts constant.
 */
void vm_events_fold_cpu(int cpu)
{
        struct vm_event_state *fold_state = &per_cpu(vm_event_states, cpu);
        int i;

        for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
                count_vm_events(i, fold_state->event[i]);
                fold_state->event[i] = 0;
        }
}
#endif /* CONFIG_HOTPLUG */

#endif /* CONFIG_VM_EVENT_COUNTERS */

/*
 * Manage combined zone based / global counters
 *
 * vm_stat contains the global counters
 */
atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
EXPORT_SYMBOL(vm_stat);

#ifdef CONFIG_SMP

static int calculate_threshold(struct zone *zone)
{
        int threshold;
        int mem;        /* memory in 128 MB units */

        /*
         * The threshold scales with the number of processors and the amount
         * of memory per zone. More memory means that we can defer updates for
         * longer, more processors could lead to more contention.
         * fls() is used to have a cheap way of logarithmic scaling.
         *
         * Some sample thresholds:
         *
         * Threshold    Processors      (fls)   Zonesize        fls(mem+1)
         * ------------------------------------------------------------------
         * 8            1               1       0.9-1 GB        4
         * 16           2               2       0.9-1 GB        4
         * 20           2               2       1-2 GB          5
         * 24           2               2       2-4 GB          6
         * 28           2               2       4-8 GB          7
         * 32           2               2       8-16 GB         8
         * 4            2               2       <128M           1
         * 30           4               3       2-4 GB          5
         * 48           4               3       8-16 GB         8
         * 32           8               4       1-2 GB          4
         * 32           8               4       0.9-1GB         4
         * 10           16              5       <128M           1
         * 40           16              5       900M            4
         * 70           64              7       2-4 GB          5
         * 84           64              7       4-8 GB          6
         * 108          512             9       4-8 GB          6
         * 125          1024            10      8-16 GB         8
         * 125          1024            10      16-32 GB        9
         */

        mem = zone->present_pages >> (27 - PAGE_SHIFT);

        threshold = 2 * fls(num_online_cpus()) * (1 + fls(mem));

        /*
         * Maximum threshold is 125
         */
        threshold = min(125, threshold);

        return threshold;
}
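
/*
 * Worked example (illustrative numbers, added for clarity): a machine with
 * 2 online CPUs and a 1.5 GB zone has mem = 1536 MB / 128 MB = 12, so
 * threshold = 2 * fls(2) * (1 + fls(12)) = 2 * 2 * (1 + 4) = 20, matching
 * the 1-2 GB row in the table above; the cap at 125 does not apply.
 */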

/*
 * Refresh the thresholds for each zone.
 */
static void refresh_zone_stat_thresholds(void)
{
        struct zone *zone;
        int cpu;
        int threshold;

        for_each_populated_zone(zone) {
                threshold = calculate_threshold(zone);

                for_each_online_cpu(cpu)
                        per_cpu_ptr(zone->pageset, cpu)->stat_threshold
                                                        = threshold;
        }
}

/*
 * For use when we know that interrupts are disabled.
 */
void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
                                int delta)
{
        struct per_cpu_pageset *pcp = this_cpu_ptr(zone->pageset);

        s8 *p = pcp->vm_stat_diff + item;
        long x;

        x = delta + *p;

        if (unlikely(x > pcp->stat_threshold || x < -pcp->stat_threshold)) {
                zone_page_state_add(x, zone, item);
                x = 0;
        }
        *p = x;
}
EXPORT_SYMBOL(__mod_zone_page_state);
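
/*
 * Illustration (assumes a stat_threshold of 32): four successive
 * __mod_zone_page_state(zone, item, 10) calls on one CPU leave the per-cpu
 * diff at 10, 20 and 30; only the fourth call, at 40, exceeds the threshold,
 * folds the whole 40 into the zone and global counters via
 * zone_page_state_add() and resets the diff to 0.
 */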

/*
 * For an unknown interrupt state
 */
void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
                                        int delta)
{
        unsigned long flags;

        local_irq_save(flags);
        __mod_zone_page_state(zone, item, delta);
        local_irq_restore(flags);
}
EXPORT_SYMBOL(mod_zone_page_state);

/*
 * Optimized increment and decrement functions.
 *
 * These are only for a single page and therefore can take a struct page *
 * argument instead of struct zone *. This allows the inclusion of the code
 * generated for page_zone(page) into the optimized functions.
 *
 * No overflow check is necessary and therefore the differential can be
 * incremented or decremented in place which may allow the compilers to
 * generate better code.
 * The increment or decrement is known and therefore one boundary check can
 * be omitted.
 *
 * NOTE: These functions are very performance sensitive. Change only
 * with care.
 *
 * Some processors have inc/dec instructions that are atomic vs an interrupt.
 * However, the code must first determine the differential location in a zone
 * based on the processor number and then inc/dec the counter. There is no
 * guarantee without disabling preemption that the processor will not change
 * in between and therefore the atomicity vs. interrupt cannot be exploited
 * in a useful way here.
 */
void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
        struct per_cpu_pageset *pcp = this_cpu_ptr(zone->pageset);
        s8 *p = pcp->vm_stat_diff + item;

        (*p)++;

        if (unlikely(*p > pcp->stat_threshold)) {
                int overstep = pcp->stat_threshold / 2;

                zone_page_state_add(*p + overstep, zone, item);
                *p = -overstep;
        }
}
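
/*
 * Illustration of the overstep (assumes a stat_threshold of 32): when the
 * diff reaches 33, 33 + 16 = 49 is folded into the counters and the diff is
 * set to -16, so another 49 increments pass before the next fold. The
 * half-threshold bias keeps a mostly-incrementing counter from folding on
 * every threshold crossing.
 */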

void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
{
        __inc_zone_state(page_zone(page), item);
}
EXPORT_SYMBOL(__inc_zone_page_state);

void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
        struct per_cpu_pageset *pcp = this_cpu_ptr(zone->pageset);
        s8 *p = pcp->vm_stat_diff + item;

        (*p)--;

        if (unlikely(*p < - pcp->stat_threshold)) {
                int overstep = pcp->stat_threshold / 2;

                zone_page_state_add(*p - overstep, zone, item);
                *p = overstep;
        }
}

void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
{
        __dec_zone_state(page_zone(page), item);
}
EXPORT_SYMBOL(__dec_zone_page_state);

void inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
        unsigned long flags;

        local_irq_save(flags);
        __inc_zone_state(zone, item);
        local_irq_restore(flags);
}

void inc_zone_page_state(struct page *page, enum zone_stat_item item)
{
        unsigned long flags;
        struct zone *zone;

        zone = page_zone(page);
        local_irq_save(flags);
        __inc_zone_state(zone, item);
        local_irq_restore(flags);
}
EXPORT_SYMBOL(inc_zone_page_state);

void dec_zone_page_state(struct page *page, enum zone_stat_item item)
{
        unsigned long flags;

        local_irq_save(flags);
        __dec_zone_page_state(page, item);
        local_irq_restore(flags);
}
EXPORT_SYMBOL(dec_zone_page_state);

/*
 * Update the zone counters for one cpu.
 *
 * The cpu specified must be either the current cpu or a processor that
 * is not online. If it is the current cpu then the execution thread must
 * be pinned to the current cpu.
 *
 * Note that refresh_cpu_vm_stats strives to only access
 * node local memory. The per cpu pagesets on remote zones are placed
 * in the memory local to the processor using that pageset. So the
 * loop over all zones will access a series of cachelines local to
 * the processor.
 *
 * The call to zone_page_state_add updates the cachelines with the
 * statistics in the remote zone struct as well as the global cachelines
 * with the global counters. These could cause remote node cache line
 * bouncing and will have to be only done when necessary.
 */
void refresh_cpu_vm_stats(int cpu)
{
        struct zone *zone;
        int i;
        int global_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };

        for_each_populated_zone(zone) {
                struct per_cpu_pageset *p;

                p = per_cpu_ptr(zone->pageset, cpu);

                for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
                        if (p->vm_stat_diff[i]) {
                                unsigned long flags;
                                int v;

                                local_irq_save(flags);
                                v = p->vm_stat_diff[i];
                                p->vm_stat_diff[i] = 0;
                                local_irq_restore(flags);
                                atomic_long_add(v, &zone->vm_stat[i]);
                                global_diff[i] += v;
#ifdef CONFIG_NUMA
                                /* 3 seconds idle till flush */
                                p->expire = 3;
#endif
                        }
                cond_resched();
#ifdef CONFIG_NUMA
                /*
                 * Deal with draining the remote pageset of this
                 * processor
                 *
                 * Check if there are pages remaining in this pageset
                 * if not then there is nothing to expire.
                 */
                if (!p->expire || !p->pcp.count)
                        continue;

                /*
                 * We never drain zones local to this processor.
                 */
                if (zone_to_nid(zone) == numa_node_id()) {
                        p->expire = 0;
                        continue;
                }

                p->expire--;
                if (p->expire)
                        continue;

                if (p->pcp.count)
                        drain_zone_pages(zone, &p->pcp);
#endif
        }

        for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
                if (global_diff[i])
                        atomic_long_add(global_diff[i], &vm_stat[i]);
}
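
/*
 * Timing note (assumes the default sysctl_stat_interval of HZ): vmstat_update
 * runs this roughly once per second, so p->expire = 3 means a remote pageset
 * that sees no further counter updates is drained about three seconds later,
 * matching the "3 seconds idle till flush" comment above.
 */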

#endif

#ifdef CONFIG_NUMA
/*
 * zonelist = the list of zones passed to the allocator
 * z        = the zone from which the allocation occurred.
 *
 * Must be called with interrupts disabled.
 */
void zone_statistics(struct zone *preferred_zone, struct zone *z)
{
        if (z->zone_pgdat == preferred_zone->zone_pgdat) {
                __inc_zone_state(z, NUMA_HIT);
        } else {
                __inc_zone_state(z, NUMA_MISS);
                __inc_zone_state(preferred_zone, NUMA_FOREIGN);
        }
        if (z->node == numa_node_id())
                __inc_zone_state(z, NUMA_LOCAL);
        else
                __inc_zone_state(z, NUMA_OTHER);
}
#endif

#ifdef CONFIG_PROC_FS
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static char * const migratetype_names[MIGRATE_TYPES] = {
        "Unmovable",
        "Reclaimable",
        "Movable",
        "Reserve",
        "Isolate",
};

static void *frag_start(struct seq_file *m, loff_t *pos)
{
        pg_data_t *pgdat;
        loff_t node = *pos;
        for (pgdat = first_online_pgdat();
             pgdat && node;
             pgdat = next_online_pgdat(pgdat))
                --node;

        return pgdat;
}

static void *frag_next(struct seq_file *m, void *arg, loff_t *pos)
{
        pg_data_t *pgdat = (pg_data_t *)arg;

        (*pos)++;
        return next_online_pgdat(pgdat);
}

static void frag_stop(struct seq_file *m, void *arg)
{
}

/* Walk all the zones in a node and print using a callback */
static void walk_zones_in_node(struct seq_file *m, pg_data_t *pgdat,
                void (*print)(struct seq_file *m, pg_data_t *, struct zone *))
{
        struct zone *zone;
        struct zone *node_zones = pgdat->node_zones;
        unsigned long flags;

        for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
                if (!populated_zone(zone))
                        continue;

                spin_lock_irqsave(&zone->lock, flags);
                print(m, pgdat, zone);
                spin_unlock_irqrestore(&zone->lock, flags);
        }
}

static void frag_show_print(struct seq_file *m, pg_data_t *pgdat,
                                                struct zone *zone)
{
        int order;

        seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
        for (order = 0; order < MAX_ORDER; ++order)
                seq_printf(m, "%6lu ", zone->free_area[order].nr_free);
        seq_putc(m, '\n');
}

/*
 * This walks the free areas for each zone.
 */
static int frag_show(struct seq_file *m, void *arg)
{
        pg_data_t *pgdat = (pg_data_t *)arg;
        walk_zones_in_node(m, pgdat, frag_show_print);
        return 0;
}

static void pagetypeinfo_showfree_print(struct seq_file *m,
                                        pg_data_t *pgdat, struct zone *zone)
{
        int order, mtype;

        for (mtype = 0; mtype < MIGRATE_TYPES; mtype++) {
                seq_printf(m, "Node %4d, zone %8s, type %12s ",
                                        pgdat->node_id,
                                        zone->name,
                                        migratetype_names[mtype]);
                for (order = 0; order < MAX_ORDER; ++order) {
                        unsigned long freecount = 0;
                        struct free_area *area;
                        struct list_head *curr;

                        area = &(zone->free_area[order]);

                        list_for_each(curr, &area->free_list[mtype])
                                freecount++;
                        seq_printf(m, "%6lu ", freecount);
                }
                seq_putc(m, '\n');
        }
}

/* Print out the free pages at each order for each migratetype */
static int pagetypeinfo_showfree(struct seq_file *m, void *arg)
{
        int order;
        pg_data_t *pgdat = (pg_data_t *)arg;

        /* Print header */
        seq_printf(m, "%-43s ", "Free pages count per migrate type at order");
        for (order = 0; order < MAX_ORDER; ++order)
                seq_printf(m, "%6d ", order);
        seq_putc(m, '\n');

        walk_zones_in_node(m, pgdat, pagetypeinfo_showfree_print);

        return 0;
}

static void pagetypeinfo_showblockcount_print(struct seq_file *m,
                                        pg_data_t *pgdat, struct zone *zone)
{
        int mtype;
        unsigned long pfn;
        unsigned long start_pfn = zone->zone_start_pfn;
        unsigned long end_pfn = start_pfn + zone->spanned_pages;
        unsigned long count[MIGRATE_TYPES] = { 0, };

        for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
                struct page *page;

                if (!pfn_valid(pfn))
                        continue;

                page = pfn_to_page(pfn);

                /* Watch for unexpected holes punched in the memmap */
                if (!memmap_valid_within(pfn, page, zone))
                        continue;

                mtype = get_pageblock_migratetype(page);

                if (mtype < MIGRATE_TYPES)
                        count[mtype]++;
        }

        /* Print counts */
        seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
        for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
                seq_printf(m, "%12lu ", count[mtype]);
        seq_putc(m, '\n');
}

/* Print out the free pages at each order for each migratetype */
static int pagetypeinfo_showblockcount(struct seq_file *m, void *arg)
{
        int mtype;
        pg_data_t *pgdat = (pg_data_t *)arg;

        seq_printf(m, "\n%-23s", "Number of blocks type ");
        for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
                seq_printf(m, "%12s ", migratetype_names[mtype]);
        seq_putc(m, '\n');
        walk_zones_in_node(m, pgdat, pagetypeinfo_showblockcount_print);

        return 0;
}

/*
 * This prints out statistics in relation to grouping pages by mobility.
 * It is expensive to collect so do not constantly read the file.
 */
static int pagetypeinfo_show(struct seq_file *m, void *arg)
{
        pg_data_t *pgdat = (pg_data_t *)arg;

        /* check memoryless node */
        if (!node_state(pgdat->node_id, N_HIGH_MEMORY))
                return 0;

        seq_printf(m, "Page block order: %d\n", pageblock_order);
        seq_printf(m, "Pages per block:  %lu\n", pageblock_nr_pages);
        seq_putc(m, '\n');
        pagetypeinfo_showfree(m, pgdat);
        pagetypeinfo_showblockcount(m, pgdat);

        return 0;
}

static const struct seq_operations fragmentation_op = {
        .start = frag_start,
        .next = frag_next,
        .stop = frag_stop,
        .show = frag_show,
};

static int fragmentation_open(struct inode *inode, struct file *file)
{
        return seq_open(file, &fragmentation_op);
}

static const struct file_operations fragmentation_file_operations = {
        .open = fragmentation_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = seq_release,
};

static const struct seq_operations pagetypeinfo_op = {
        .start = frag_start,
        .next = frag_next,
        .stop = frag_stop,
        .show = pagetypeinfo_show,
};

static int pagetypeinfo_open(struct inode *inode, struct file *file)
{
        return seq_open(file, &pagetypeinfo_op);
}

static const struct file_operations pagetypeinfo_file_ops = {
        .open = pagetypeinfo_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = seq_release,
};

#ifdef CONFIG_ZONE_DMA
#define TEXT_FOR_DMA(xx) xx "_dma",
#else
#define TEXT_FOR_DMA(xx)
#endif

#ifdef CONFIG_ZONE_DMA32
#define TEXT_FOR_DMA32(xx) xx "_dma32",
#else
#define TEXT_FOR_DMA32(xx)
#endif

#ifdef CONFIG_HIGHMEM
#define TEXT_FOR_HIGHMEM(xx) xx "_high",
#else
#define TEXT_FOR_HIGHMEM(xx)
#endif

#define TEXTS_FOR_ZONES(xx) TEXT_FOR_DMA(xx) TEXT_FOR_DMA32(xx) xx "_normal", \
                                        TEXT_FOR_HIGHMEM(xx) xx "_movable",

static const char * const vmstat_text[] = {
        /* Zoned VM counters */
        "nr_free_pages",
        "nr_inactive_anon",
        "nr_active_anon",
        "nr_inactive_file",
        "nr_active_file",
        "nr_unevictable",
        "nr_mlock",
        "nr_anon_pages",
        "nr_mapped",
        "nr_file_pages",
        "nr_dirty",
        "nr_writeback",
        "nr_slab_reclaimable",
        "nr_slab_unreclaimable",
        "nr_page_table_pages",
        "nr_kernel_stack",
        "nr_unstable",
        "nr_bounce",
        "nr_vmscan_write",
        "nr_writeback_temp",
        "nr_isolated_anon",
        "nr_isolated_file",
        "nr_shmem",
#ifdef CONFIG_NUMA
        "numa_hit",
        "numa_miss",
        "numa_foreign",
        "numa_interleave",
        "numa_local",
        "numa_other",
#endif

#ifdef CONFIG_VM_EVENT_COUNTERS
        "pgpgin",
        "pgpgout",
        "pswpin",
        "pswpout",

        TEXTS_FOR_ZONES("pgalloc")

        "pgfree",
        "pgactivate",
        "pgdeactivate",

        "pgfault",
        "pgmajfault",

        TEXTS_FOR_ZONES("pgrefill")
        TEXTS_FOR_ZONES("pgsteal")
        TEXTS_FOR_ZONES("pgscan_kswapd")
        TEXTS_FOR_ZONES("pgscan_direct")

#ifdef CONFIG_NUMA
        "zone_reclaim_failed",
#endif
        "pginodesteal",
        "slabs_scanned",
        "kswapd_steal",
        "kswapd_inodesteal",
        "kswapd_low_wmark_hit_quickly",
        "kswapd_high_wmark_hit_quickly",
        "kswapd_skip_congestion_wait",
        "pageoutrun",
        "allocstall",

        "pgrotated",
#ifdef CONFIG_HUGETLB_PAGE
        "htlb_buddy_alloc_success",
        "htlb_buddy_alloc_fail",
#endif
        "unevictable_pgs_culled",
        "unevictable_pgs_scanned",
        "unevictable_pgs_rescued",
        "unevictable_pgs_mlocked",
        "unevictable_pgs_munlocked",
        "unevictable_pgs_cleared",
        "unevictable_pgs_stranded",
        "unevictable_pgs_mlockfreed",
#endif
};
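
/*
 * Ordering note (descriptive comment, added for clarity): the first
 * NR_VM_ZONE_STAT_ITEMS strings must stay in the same order as
 * enum zone_stat_item, and the remainder must mirror enum vm_event_item,
 * because vmstat_show() indexes this array with a flat offset into the
 * snapshot built by vmstat_start() below.
 */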

static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
                                                        struct zone *zone)
{
        int i;
        seq_printf(m, "Node %d, zone %8s", pgdat->node_id, zone->name);
        seq_printf(m,
                   "\n pages free %lu"
                   "\n min %lu"
                   "\n low %lu"
                   "\n high %lu"
                   "\n scanned %lu"
                   "\n spanned %lu"
                   "\n present %lu",
                   zone_page_state(zone, NR_FREE_PAGES),
                   min_wmark_pages(zone),
                   low_wmark_pages(zone),
                   high_wmark_pages(zone),
                   zone->pages_scanned,
                   zone->spanned_pages,
                   zone->present_pages);

        for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
                seq_printf(m, "\n %-12s %lu", vmstat_text[i],
                                zone_page_state(zone, i));

        seq_printf(m,
                   "\n protection: (%lu",
                   zone->lowmem_reserve[0]);
        for (i = 1; i < ARRAY_SIZE(zone->lowmem_reserve); i++)
                seq_printf(m, ", %lu", zone->lowmem_reserve[i]);
        seq_printf(m,
                   ")"
                   "\n pagesets");
        for_each_online_cpu(i) {
                struct per_cpu_pageset *pageset;

                pageset = per_cpu_ptr(zone->pageset, i);
                seq_printf(m,
                           "\n cpu: %i"
                           "\n count: %i"
                           "\n high: %i"
                           "\n batch: %i",
                           i,
                           pageset->pcp.count,
                           pageset->pcp.high,
                           pageset->pcp.batch);
#ifdef CONFIG_SMP
                seq_printf(m, "\n vm stats threshold: %d",
                                pageset->stat_threshold);
#endif
        }
        seq_printf(m,
                   "\n all_unreclaimable: %u"
                   "\n prev_priority: %i"
                   "\n start_pfn: %lu"
                   "\n inactive_ratio: %u",
                   zone_is_all_unreclaimable(zone),
                   zone->prev_priority,
                   zone->zone_start_pfn,
                   zone->inactive_ratio);
        seq_putc(m, '\n');
}

/*
 * Output information about zones in @pgdat.
 */
static int zoneinfo_show(struct seq_file *m, void *arg)
{
        pg_data_t *pgdat = (pg_data_t *)arg;
        walk_zones_in_node(m, pgdat, zoneinfo_show_print);
        return 0;
}

static const struct seq_operations zoneinfo_op = {
        .start = frag_start, /* iterate over all zones. The same as in
                              * fragmentation. */
        .next = frag_next,
        .stop = frag_stop,
        .show = zoneinfo_show,
};

static int zoneinfo_open(struct inode *inode, struct file *file)
{
        return seq_open(file, &zoneinfo_op);
}

static const struct file_operations proc_zoneinfo_file_operations = {
        .open = zoneinfo_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = seq_release,
};

static void *vmstat_start(struct seq_file *m, loff_t *pos)
{
        unsigned long *v;
#ifdef CONFIG_VM_EVENT_COUNTERS
        unsigned long *e;
#endif
        int i;

        if (*pos >= ARRAY_SIZE(vmstat_text))
                return NULL;

#ifdef CONFIG_VM_EVENT_COUNTERS
        v = kmalloc(NR_VM_ZONE_STAT_ITEMS * sizeof(unsigned long)
                        + sizeof(struct vm_event_state), GFP_KERNEL);
#else
        v = kmalloc(NR_VM_ZONE_STAT_ITEMS * sizeof(unsigned long),
                        GFP_KERNEL);
#endif
        m->private = v;
        if (!v)
                return ERR_PTR(-ENOMEM);
        for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
                v[i] = global_page_state(i);
#ifdef CONFIG_VM_EVENT_COUNTERS
        e = v + NR_VM_ZONE_STAT_ITEMS;
        all_vm_events(e);
        e[PGPGIN] /= 2;         /* sectors -> kbytes */
        e[PGPGOUT] /= 2;
#endif
        return v + *pos;
}

static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos)
{
        (*pos)++;
        if (*pos >= ARRAY_SIZE(vmstat_text))
                return NULL;
        return (unsigned long *)m->private + *pos;
}

static int vmstat_show(struct seq_file *m, void *arg)
{
        unsigned long *l = arg;
        unsigned long off = l - (unsigned long *)m->private;

        seq_printf(m, "%s %lu\n", vmstat_text[off], *l);
        return 0;
}

static void vmstat_stop(struct seq_file *m, void *arg)
{
        kfree(m->private);
        m->private = NULL;
}

static const struct seq_operations vmstat_op = {
        .start = vmstat_start,
        .next = vmstat_next,
        .stop = vmstat_stop,
        .show = vmstat_show,
};

static int vmstat_open(struct inode *inode, struct file *file)
{
        return seq_open(file, &vmstat_op);
}

static const struct file_operations proc_vmstat_file_operations = {
        .open = vmstat_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = seq_release,
};
#endif /* CONFIG_PROC_FS */

#ifdef CONFIG_SMP
static DEFINE_PER_CPU(struct delayed_work, vmstat_work);
int sysctl_stat_interval __read_mostly = HZ;

static void vmstat_update(struct work_struct *w)
{
        refresh_cpu_vm_stats(smp_processor_id());
        schedule_delayed_work(&__get_cpu_var(vmstat_work),
                round_jiffies_relative(sysctl_stat_interval));
}

static void __cpuinit start_cpu_timer(int cpu)
{
        struct delayed_work *work = &per_cpu(vmstat_work, cpu);

        INIT_DELAYED_WORK_DEFERRABLE(work, vmstat_update);
        schedule_delayed_work_on(cpu, work, __round_jiffies_relative(HZ, cpu));
}
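
/*
 * Note (descriptive comment, added for clarity): the work item is deferrable,
 * so an otherwise idle CPU is not woken just to fold counters that have not
 * changed, and the cpu argument to __round_jiffies_relative() staggers the
 * per-cpu timers so they do not all fire on the same jiffy.
 */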

/*
 * Use the cpu notifier to ensure that the thresholds are recalculated
 * when necessary.
 */
static int __cpuinit vmstat_cpuup_callback(struct notifier_block *nfb,
                unsigned long action,
                void *hcpu)
{
        long cpu = (long)hcpu;

        switch (action) {
        case CPU_ONLINE:
        case CPU_ONLINE_FROZEN:
                start_cpu_timer(cpu);
+               node_set_state(cpu_to_node(cpu), N_CPU);
                break;
        case CPU_DOWN_PREPARE:
        case CPU_DOWN_PREPARE_FROZEN:
                cancel_rearming_delayed_work(&per_cpu(vmstat_work, cpu));
                per_cpu(vmstat_work, cpu).work.func = NULL;
                break;
        case CPU_DOWN_FAILED:
        case CPU_DOWN_FAILED_FROZEN:
                start_cpu_timer(cpu);
                break;
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                refresh_zone_stat_thresholds();
                break;
        default:
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata vmstat_notifier =
        { &vmstat_cpuup_callback, NULL, 0 };
#endif

static int __init setup_vmstat(void)
{
#ifdef CONFIG_SMP
        int cpu;

        refresh_zone_stat_thresholds();
        register_cpu_notifier(&vmstat_notifier);

        for_each_online_cpu(cpu)
                start_cpu_timer(cpu);
#endif
#ifdef CONFIG_PROC_FS
        proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
        proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
        proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
        proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
#endif
        return 0;
}
module_init(setup_vmstat)