  /*
   *  linux/mm/vmstat.c
   *
   *  Manages VM statistics
   *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
   *
   *  zoned VM statistics
   *  Copyright (C) 2006 Silicon Graphics, Inc.,
   *		Christoph Lameter <christoph@lameter.com>
   */
  #include <linux/fs.h>
  #include <linux/mm.h>
  #include <linux/err.h>
  #include <linux/module.h>
  #include <linux/slab.h>
  #include <linux/cpu.h>
  #include <linux/vmstat.h>
  #include <linux/sched.h>
  #include <linux/math64.h>
  #include <linux/writeback.h>
  #include <linux/compaction.h>

  #ifdef CONFIG_VM_EVENT_COUNTERS
  DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}};
  EXPORT_PER_CPU_SYMBOL(vm_event_states);
  static void sum_vm_events(unsigned long *ret)
  {
  	int cpu;
  	int i;
  
  	memset(ret, 0, NR_VM_EVENT_ITEMS * sizeof(unsigned long));
  	for_each_online_cpu(cpu) {
  		struct vm_event_state *this = &per_cpu(vm_event_states, cpu);
  		for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
  			ret[i] += this->event[i];
  	}
  }
  
  /*
   * Accumulate the vm event counters across all CPUs.
   * The result is unavoidably approximate - it can change
   * during and after execution of this function.
   */
  void all_vm_events(unsigned long *ret)
  {
  	get_online_cpus();
  	sum_vm_events(ret);
  	put_online_cpus();
  }
  EXPORT_SYMBOL_GPL(all_vm_events);
  
  #ifdef CONFIG_HOTPLUG
  /*
   * Fold the foreign cpu events into our own.
   *
   * This is adding to the events on one processor
   * but keeps the global counts constant.
   */
  void vm_events_fold_cpu(int cpu)
  {
  	struct vm_event_state *fold_state = &per_cpu(vm_event_states, cpu);
  	int i;
  
  	for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
  		count_vm_events(i, fold_state->event[i]);
  		fold_state->event[i] = 0;
  	}
  }
  #endif /* CONFIG_HOTPLUG */
  
  #endif /* CONFIG_VM_EVENT_COUNTERS */
  /*
   * Manage combined zone based / global counters
   *
   * vm_stat contains the global counters
   */
  atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
  EXPORT_SYMBOL(vm_stat);
  
  #ifdef CONFIG_SMP
  int calculate_pressure_threshold(struct zone *zone)
  {
  	int threshold;
  	int watermark_distance;
  
  	/*
  	 * As vmstats are not up to date, there is drift between the estimated
  	 * and real values. For high thresholds and a high number of CPUs, it
  	 * is possible for the min watermark to be breached while the estimated
  	 * value looks fine. The pressure threshold is a reduced value such
  	 * that even the maximum amount of drift will not accidentally breach
  	 * the min watermark
  	 */
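  	/*
  	 * Worked example (hypothetical numbers): a 512-page gap between
  	 * the low and min watermarks on a machine with 8 online CPUs
  	 * yields max(1, 512 / 8) = 64, comfortably under the cap below.
  	 */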
  	watermark_distance = low_wmark_pages(zone) - min_wmark_pages(zone);
  	threshold = max(1, (int)(watermark_distance / num_online_cpus()));
  
  	/*
  	 * Maximum threshold is 125
  	 */
  	threshold = min(125, threshold);
  
  	return threshold;
  }
  int calculate_normal_threshold(struct zone *zone)
  {
  	int threshold;
  	int mem;	/* memory in 128 MB units */
  
  	/*
  	 * The threshold scales with the number of processors and the amount
  	 * of memory per zone. More memory means that we can defer updates for
  	 * longer, more processors could lead to more contention.
  	 * fls() is used to have a cheap way of logarithmic scaling.
  	 *
  	 * Some sample thresholds:
  	 *
  	 * Threshold	Processors	(fls)	Zonesize	fls(mem+1)
  	 * ------------------------------------------------------------------
  	 * 8		1		1	0.9-1 GB	4
  	 * 16		2		2	0.9-1 GB	4
  	 * 20 		2		2	1-2 GB		5
  	 * 24		2		2	2-4 GB		6
  	 * 28		2		2	4-8 GB		7
  	 * 32		2		2	8-16 GB		8
  	 * 4		2		2	<128M		1
  	 * 30		4		3	2-4 GB		5
  	 * 48		4		3	8-16 GB		8
  	 * 32		8		4	1-2 GB		4
  	 * 32		8		4	0.9-1GB		4
  	 * 10		16		5	<128M		1
  	 * 40		16		5	900M		4
  	 * 70		64		7	2-4 GB		5
  	 * 84		64		7	4-8 GB		6
  	 * 108		512		9	4-8 GB		6
  	 * 125		1024		10	8-16 GB		8
  	 * 125		1024		10	16-32 GB	9
  	 */
  
  	mem = zone->present_pages >> (27 - PAGE_SHIFT);
  
  	threshold = 2 * fls(num_online_cpus()) * (1 + fls(mem));
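
  	/*
  	 * Cross-check against the table above: 64 processors give
  	 * fls(64) = 7, and a 4 GB zone gives mem = 32 units of 128 MB,
  	 * so fls(mem + 1) = 6 and 2 * 7 * 6 = 84, matching the
  	 * 64-processor, 4-8 GB row.
  	 */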
  
  	/*
  	 * Maximum threshold is 125
  	 */
  	threshold = min(125, threshold);
  
  	return threshold;
  }
  
  /*
   * Refresh the thresholds for each zone.
   */
  void refresh_zone_stat_thresholds(void)
  {
  	struct zone *zone;
  	int cpu;
  	int threshold;
  	for_each_populated_zone(zone) {
  		unsigned long max_drift, tolerate_drift;
  		threshold = calculate_normal_threshold(zone);
  
  		for_each_online_cpu(cpu)
  			per_cpu_ptr(zone->pageset, cpu)->stat_threshold
  							= threshold;
  
  		/*
  		 * Only set percpu_drift_mark if there is a danger that
  		 * NR_FREE_PAGES reports the low watermark is ok when in fact
  		 * the min watermark could be breached by an allocation
  		 */
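  		/*
  		 * Illustration (hypothetical numbers): 4 online CPUs with a
  		 * threshold of 32 can keep up to 4 * 32 = 128 free pages
  		 * unaccounted for, so the drift mark is needed whenever the
  		 * low-min watermark gap is smaller than that.
  		 */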
  		tolerate_drift = low_wmark_pages(zone) - min_wmark_pages(zone);
  		max_drift = num_online_cpus() * threshold;
  		if (max_drift > tolerate_drift)
  			zone->percpu_drift_mark = high_wmark_pages(zone) +
  					max_drift;
  	}
  }
  void set_pgdat_percpu_threshold(pg_data_t *pgdat,
  				int (*calculate_pressure)(struct zone *))
  {
  	struct zone *zone;
  	int cpu;
  	int threshold;
  	int i;
  	for (i = 0; i < pgdat->nr_zones; i++) {
  		zone = &pgdat->node_zones[i];
  		if (!zone->percpu_drift_mark)
  			continue;
  		threshold = (*calculate_pressure)(zone);
  		for_each_possible_cpu(cpu)
  			per_cpu_ptr(zone->pageset, cpu)->stat_threshold
  							= threshold;
  	}
  }
  /*
   * For use when we know that interrupts are disabled.
   */
  void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
  				int delta)
  {
  	struct per_cpu_pageset __percpu *pcp = zone->pageset;
  	s8 __percpu *p = pcp->vm_stat_diff + item;
  	long x;
  	long t;
  
  	x = delta + __this_cpu_read(*p);

  	t = __this_cpu_read(pcp->stat_threshold);

  	if (unlikely(x > t || x < -t)) {
  		zone_page_state_add(x, zone, item);
  		x = 0;
  	}
  	__this_cpu_write(*p, x);
  }
  EXPORT_SYMBOL(__mod_zone_page_state);
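
  /*
   * Illustrative use (a sketch, not a caller in this file): code that
   * already runs with interrupts off, such as the page allocator when it
   * moves pages in or out of the buddy lists, can batch counter updates
   * cheaply with
   *
   *	__mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
   *
   * and only the rare threshold overflow touches the shared atomics.
   */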
  
  /*
   * Optimized increment and decrement functions.
   *
   * These are only for a single page and therefore can take a struct page *
   * argument instead of struct zone *. This allows the inclusion of the code
   * generated for page_zone(page) into the optimized functions.
   *
   * No overflow check is necessary and therefore the differential can be
   * incremented or decremented in place which may allow the compilers to
   * generate better code.
   * The increment or decrement is known and therefore one boundary check can
   * be omitted.
   *
   * NOTE: These functions are very performance sensitive. Change only
   * with care.
   *
   * Some processors have inc/dec instructions that are atomic vs an interrupt.
   * However, the code must first determine the differential location in a zone
   * based on the processor number and then inc/dec the counter. There is no
   * guarantee without disabling preemption that the processor will not change
   * in between and therefore the atomicity vs. interrupt cannot be exploited
   * in a useful way here.
   */
  void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
  {
  	struct per_cpu_pageset __percpu *pcp = zone->pageset;
  	s8 __percpu *p = pcp->vm_stat_diff + item;
  	s8 v, t;

  	v = __this_cpu_inc_return(*p);
  	t = __this_cpu_read(pcp->stat_threshold);
  	if (unlikely(v > t)) {
  		s8 overstep = t >> 1;

  		zone_page_state_add(v + overstep, zone, item);
  		__this_cpu_write(*p, -overstep);
  	}
  }
  
  void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
  {
  	__inc_zone_state(page_zone(page), item);
  }
  EXPORT_SYMBOL(__inc_zone_page_state);
  void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
  {
  	struct per_cpu_pageset __percpu *pcp = zone->pageset;
  	s8 __percpu *p = pcp->vm_stat_diff + item;
  	s8 v, t;

  	v = __this_cpu_dec_return(*p);
  	t = __this_cpu_read(pcp->stat_threshold);
  	if (unlikely(v < -t)) {
  		s8 overstep = t >> 1;

  		zone_page_state_add(v - overstep, zone, item);
  		__this_cpu_write(*p, overstep);
  	}
  }
  
  void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
  {
  	__dec_zone_state(page_zone(page), item);
  }
  EXPORT_SYMBOL(__dec_zone_page_state);
  #ifdef CONFIG_HAVE_CMPXCHG_LOCAL
  /*
   * If we have cmpxchg_local support then we do not need to incur the overhead
   * that comes with local_irq_save/restore if we use this_cpu_cmpxchg.
   *
   * mod_state() modifies the zone counter state through atomic per cpu
   * operations.
   *
   * Overstep mode specifies how overstep should be handled:
   *     0       No overstepping
   *     1       Overstepping half of threshold
   *     -1      Overstepping minus half of threshold
   */
  static inline void mod_state(struct zone *zone,
         enum zone_stat_item item, int delta, int overstep_mode)
  {
  	struct per_cpu_pageset __percpu *pcp = zone->pageset;
  	s8 __percpu *p = pcp->vm_stat_diff + item;
  	long o, n, t, z;
  
  	do {
  		z = 0;  /* overflow to zone counters */
  
  		/*
  		 * The fetching of the stat_threshold is racy. We may apply
  		 * a counter threshold to the wrong cpu if we get
  		 * rescheduled while executing here. However, the next
  		 * counter update will apply the threshold again and
  		 * therefore bring the counter under the threshold again.
  		 *
  		 * Most of the time the thresholds are the same anyways
  		 * for all cpus in a zone.
  		 */
  		t = this_cpu_read(pcp->stat_threshold);
  
  		o = this_cpu_read(*p);
  		n = delta + o;
  
  		if (n > t || n < -t) {
  			int os = overstep_mode * (t >> 1);
  
  			/* Overflow must be added to zone counters */
  			z = n + os;
  			n = -os;
  		}
  	} while (this_cpu_cmpxchg(*p, o, n) != o);
  
  	if (z)
  		zone_page_state_add(z, zone, item);
  }
  
  void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
  					int delta)
  {
  	mod_state(zone, item, delta, 0);
  }
  EXPORT_SYMBOL(mod_zone_page_state);
  
  void inc_zone_state(struct zone *zone, enum zone_stat_item item)
  {
  	mod_state(zone, item, 1, 1);
  }
  
  void inc_zone_page_state(struct page *page, enum zone_stat_item item)
  {
  	mod_state(page_zone(page), item, 1, 1);
  }
  EXPORT_SYMBOL(inc_zone_page_state);
  
  void dec_zone_page_state(struct page *page, enum zone_stat_item item)
  {
  	mod_state(page_zone(page), item, -1, -1);
  }
  EXPORT_SYMBOL(dec_zone_page_state);
  #else
  /*
   * Use interrupt disable to serialize counter updates
   */
  void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
  					int delta)
  {
  	unsigned long flags;
  
  	local_irq_save(flags);
  	__mod_zone_page_state(zone, item, delta);
  	local_irq_restore(flags);
  }
  EXPORT_SYMBOL(mod_zone_page_state);
  void inc_zone_state(struct zone *zone, enum zone_stat_item item)
  {
  	unsigned long flags;
  
  	local_irq_save(flags);
  	__inc_zone_state(zone, item);
  	local_irq_restore(flags);
  }
  void inc_zone_page_state(struct page *page, enum zone_stat_item item)
  {
  	unsigned long flags;
  	struct zone *zone;
  
  	zone = page_zone(page);
  	local_irq_save(flags);
  	__inc_zone_state(zone, item);
  	local_irq_restore(flags);
  }
  EXPORT_SYMBOL(inc_zone_page_state);
  
  void dec_zone_page_state(struct page *page, enum zone_stat_item item)
  {
  	unsigned long flags;

  	local_irq_save(flags);
  	__dec_zone_page_state(page, item);
  	local_irq_restore(flags);
  }
  EXPORT_SYMBOL(dec_zone_page_state);
  #endif
  
  /*
   * Update the zone counters for one cpu.
   *
   * The cpu specified must be either the current cpu or a processor that
   * is not online. If it is the current cpu then the execution thread must
   * be pinned to the current cpu.
   *
   * Note that refresh_cpu_vm_stats strives to only access
   * node local memory. The per cpu pagesets on remote zones are placed
   * in the memory local to the processor using that pageset. So the
   * loop over all zones will access a series of cachelines local to
   * the processor.
   *
   * The call to zone_page_state_add updates the cachelines with the
   * statistics in the remote zone struct as well as the global cachelines
   * with the global counters. These could cause remote node cache line
   * bouncing and will have to be only done when necessary.
   */
  void refresh_cpu_vm_stats(int cpu)
  {
  	struct zone *zone;
  	int i;
  	int global_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };

  	for_each_populated_zone(zone) {
  		struct per_cpu_pageset *p;

  		p = per_cpu_ptr(zone->pageset, cpu);
  
  		for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
  			if (p->vm_stat_diff[i]) {
  				unsigned long flags;
  				int v;
  				local_irq_save(flags);
  				v = p->vm_stat_diff[i];
  				p->vm_stat_diff[i] = 0;
  				local_irq_restore(flags);
  				atomic_long_add(v, &zone->vm_stat[i]);
  				global_diff[i] += v;
  #ifdef CONFIG_NUMA
  				/* 3 seconds idle till flush */
  				p->expire = 3;
  #endif
  			}
  		cond_resched();
  #ifdef CONFIG_NUMA
  		/*
  		 * Deal with draining the remote pageset of this
  		 * processor
  		 *
  		 * Check if there are pages remaining in this pageset
  		 * if not then there is nothing to expire.
  		 */
  		if (!p->expire || !p->pcp.count)
  			continue;
  
  		/*
  		 * We never drain zones local to this processor.
  		 */
  		if (zone_to_nid(zone) == numa_node_id()) {
  			p->expire = 0;
  			continue;
  		}
  
  		p->expire--;
  		if (p->expire)
  			continue;
  		if (p->pcp.count)
  			drain_zone_pages(zone, &p->pcp);
  #endif
  	}
  
  	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
  		if (global_diff[i])
  			atomic_long_add(global_diff[i], &vm_stat[i]);
  }
  #endif
  #ifdef CONFIG_NUMA
  /*
   * zonelist = the list of zones passed to the allocator
   * z 	    = the zone from which the allocation occurred.
   *
   * Must be called with interrupts disabled.
   *
   * When __GFP_OTHER_NODE is set assume the node of the preferred
   * zone is the local node. This is useful for daemons who allocate
   * memory on behalf of other processes.
   */
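  /*
   * For example, an allocation that preferred node 0 but was satisfied
   * from node 1 counts as NUMA_MISS on the node 1 zone and NUMA_FOREIGN
   * on the preferred node 0 zone.
   */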
  void zone_statistics(struct zone *preferred_zone, struct zone *z, gfp_t flags)
  {
  	if (z->zone_pgdat == preferred_zone->zone_pgdat) {
  		__inc_zone_state(z, NUMA_HIT);
  	} else {
  		__inc_zone_state(z, NUMA_MISS);
  		__inc_zone_state(preferred_zone, NUMA_FOREIGN);
  	}
  	if (z->node == ((flags & __GFP_OTHER_NODE) ?
  			preferred_zone->node : numa_node_id()))
  		__inc_zone_state(z, NUMA_LOCAL);
  	else
  		__inc_zone_state(z, NUMA_OTHER);
  }
  #endif
  #ifdef CONFIG_COMPACTION

  struct contig_page_info {
  	unsigned long free_pages;
  	unsigned long free_blocks_total;
  	unsigned long free_blocks_suitable;
  };
  
  /*
   * Calculate the number of free pages in a zone, how many contiguous
   * pages are free and how many are large enough to satisfy an allocation of
   * the target size. Note that this function makes no attempt to estimate
   * how many suitable free blocks there *might* be if MOVABLE pages were
   * migrated. Calculating that is possible, but expensive and can be
   * figured out from userspace
   */
  static void fill_contig_page_info(struct zone *zone,
  				unsigned int suitable_order,
  				struct contig_page_info *info)
  {
  	unsigned int order;
  
  	info->free_pages = 0;
  	info->free_blocks_total = 0;
  	info->free_blocks_suitable = 0;
  
  	for (order = 0; order < MAX_ORDER; order++) {
  		unsigned long blocks;
  
  		/* Count number of free blocks */
  		blocks = zone->free_area[order].nr_free;
  		info->free_blocks_total += blocks;
  
  		/* Count free base pages */
  		info->free_pages += blocks << order;
  
  		/* Count the suitable free blocks */
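  		/* (each order-n block yields 2^(n - suitable_order) of them) */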
  		if (order >= suitable_order)
  			info->free_blocks_suitable += blocks <<
  						(order - suitable_order);
  	}
  }
  
  /*
   * A fragmentation index only makes sense if an allocation of a requested
   * size would fail. If that is true, the fragmentation index indicates
   * whether external fragmentation or a lack of memory was the problem.
   * The value can be used to determine if page reclaim or compaction
   * should be used
   */
  static int __fragmentation_index(unsigned int order, struct contig_page_info *info)
  {
  	unsigned long requested = 1UL << order;
  
  	if (!info->free_blocks_total)
  		return 0;
  
  	/* Fragmentation index only makes sense when a request would fail */
  	if (info->free_blocks_suitable)
  		return -1000;
  
  	/*
  	 * Index is between 0 and 1 so return within 3 decimal places
  	 *
  	 * 0 => allocation would fail due to lack of memory
  	 * 1 => allocation would fail due to fragmentation
  	 */
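  	/*
  	 * Worked example (hypothetical numbers): an order-2 request
  	 * (requested = 4) against 1000 free pages spread over 500 free
  	 * blocks gives 1000 - (1000 + 1000 * 1000 / 4) / 500 = 498,
  	 * i.e. 0.498, blaming fragmentation and lack of memory roughly
  	 * equally.
  	 */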
  	return 1000 - div_u64((1000 + div_u64(info->free_pages * 1000ULL, requested)), info->free_blocks_total);
  }
  
  /* Same as __fragmentation index but allocs contig_page_info on stack */
  int fragmentation_index(struct zone *zone, unsigned int order)
  {
  	struct contig_page_info info;
  
  	fill_contig_page_info(zone, order, &info);
  	return __fragmentation_index(order, &info);
  }
  #endif
  
  #if defined(CONFIG_PROC_FS) || defined(CONFIG_COMPACTION)
  #include <linux/proc_fs.h>
  #include <linux/seq_file.h>
  static char * const migratetype_names[MIGRATE_TYPES] = {
  	"Unmovable",
  	"Reclaimable",
  	"Movable",
  	"Reserve",
  	"Isolate",
  };
  static void *frag_start(struct seq_file *m, loff_t *pos)
  {
  	pg_data_t *pgdat;
  	loff_t node = *pos;
  	for (pgdat = first_online_pgdat();
  	     pgdat && node;
  	     pgdat = next_online_pgdat(pgdat))
  		--node;
  
  	return pgdat;
  }
  
  static void *frag_next(struct seq_file *m, void *arg, loff_t *pos)
  {
  	pg_data_t *pgdat = (pg_data_t *)arg;
  
  	(*pos)++;
  	return next_online_pgdat(pgdat);
  }
  
  static void frag_stop(struct seq_file *m, void *arg)
  {
  }
  /* Walk all the zones in a node and print using a callback */
  static void walk_zones_in_node(struct seq_file *m, pg_data_t *pgdat,
  		void (*print)(struct seq_file *m, pg_data_t *, struct zone *))
  {
  	struct zone *zone;
  	struct zone *node_zones = pgdat->node_zones;
  	unsigned long flags;
  
  	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
  		if (!populated_zone(zone))
  			continue;
  
  		spin_lock_irqsave(&zone->lock, flags);
  		print(m, pgdat, zone);
  		spin_unlock_irqrestore(&zone->lock, flags);
  	}
  }
  #endif

  #if defined(CONFIG_PROC_FS) || defined(CONFIG_SYSFS) || defined(CONFIG_NUMA)
  #ifdef CONFIG_ZONE_DMA
  #define TEXT_FOR_DMA(xx) xx "_dma",
  #else
  #define TEXT_FOR_DMA(xx)
  #endif
  
  #ifdef CONFIG_ZONE_DMA32
  #define TEXT_FOR_DMA32(xx) xx "_dma32",
  #else
  #define TEXT_FOR_DMA32(xx)
  #endif
  
  #ifdef CONFIG_HIGHMEM
  #define TEXT_FOR_HIGHMEM(xx) xx "_high",
  #else
  #define TEXT_FOR_HIGHMEM(xx)
  #endif
  
  #define TEXTS_FOR_ZONES(xx) TEXT_FOR_DMA(xx) TEXT_FOR_DMA32(xx) xx "_normal", \
  					TEXT_FOR_HIGHMEM(xx) xx "_movable",
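
  /*
   * For example, with all optional zones configured in,
   * TEXTS_FOR_ZONES("pgalloc") expands to the strings "pgalloc_dma",
   * "pgalloc_dma32", "pgalloc_normal", "pgalloc_high" and
   * "pgalloc_movable".
   */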
  
  const char * const vmstat_text[] = {
  	/* Zoned VM counters */
  	"nr_free_pages",
  	"nr_inactive_anon",
  	"nr_active_anon",
  	"nr_inactive_file",
  	"nr_active_file",
  	"nr_unevictable",
  	"nr_mlock",
  	"nr_anon_pages",
  	"nr_mapped",
  	"nr_file_pages",
  	"nr_dirty",
  	"nr_writeback",
  	"nr_slab_reclaimable",
  	"nr_slab_unreclaimable",
  	"nr_page_table_pages",
  	"nr_kernel_stack",
  	"nr_unstable",
  	"nr_bounce",
  	"nr_vmscan_write",
  	"nr_vmscan_immediate_reclaim",
  	"nr_writeback_temp",
  	"nr_isolated_anon",
  	"nr_isolated_file",
  	"nr_shmem",
  	"nr_dirtied",
  	"nr_written",
  
  #ifdef CONFIG_NUMA
  	"numa_hit",
  	"numa_miss",
  	"numa_foreign",
  	"numa_interleave",
  	"numa_local",
  	"numa_other",
  #endif
  	"nr_anon_transparent_hugepages",
  	"nr_dirty_threshold",
  	"nr_dirty_background_threshold",
  
  #ifdef CONFIG_VM_EVENT_COUNTERS
  	"pgpgin",
  	"pgpgout",
  	"pswpin",
  	"pswpout",
  
  	TEXTS_FOR_ZONES("pgalloc")
  
  	"pgfree",
  	"pgactivate",
  	"pgdeactivate",
  
  	"pgfault",
  	"pgmajfault",
  
  	TEXTS_FOR_ZONES("pgrefill")
  	TEXTS_FOR_ZONES("pgsteal")
  	TEXTS_FOR_ZONES("pgscan_kswapd")
  	TEXTS_FOR_ZONES("pgscan_direct")
  
  #ifdef CONFIG_NUMA
  	"zone_reclaim_failed",
  #endif
  	"pginodesteal",
  	"slabs_scanned",
  	"kswapd_steal",
  	"kswapd_inodesteal",
  	"kswapd_low_wmark_hit_quickly",
  	"kswapd_high_wmark_hit_quickly",
  	"kswapd_skip_congestion_wait",
  	"pageoutrun",
  	"allocstall",
  
  	"pgrotated",
  
  #ifdef CONFIG_COMPACTION
  	"compact_blocks_moved",
  	"compact_pages_moved",
  	"compact_pagemigrate_failed",
  	"compact_stall",
  	"compact_fail",
  	"compact_success",
  #endif
  
  #ifdef CONFIG_HUGETLB_PAGE
  	"htlb_buddy_alloc_success",
  	"htlb_buddy_alloc_fail",
  #endif
  	"unevictable_pgs_culled",
  	"unevictable_pgs_scanned",
  	"unevictable_pgs_rescued",
  	"unevictable_pgs_mlocked",
  	"unevictable_pgs_munlocked",
  	"unevictable_pgs_cleared",
  	"unevictable_pgs_stranded",
  	"unevictable_pgs_mlockfreed",
  
  #ifdef CONFIG_TRANSPARENT_HUGEPAGE
  	"thp_fault_alloc",
  	"thp_fault_fallback",
  	"thp_collapse_alloc",
  	"thp_collapse_alloc_failed",
  	"thp_split",
  #endif
  
  #endif /* CONFIG_VM_EVENT_COUNTERS */
  };
  #endif /* CONFIG_PROC_FS || CONFIG_SYSFS || CONFIG_NUMA */

  #ifdef CONFIG_PROC_FS
  static void frag_show_print(struct seq_file *m, pg_data_t *pgdat,
  						struct zone *zone)
  {
  	int order;
  
  	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
  	for (order = 0; order < MAX_ORDER; ++order)
  		seq_printf(m, "%6lu ", zone->free_area[order].nr_free);
  	seq_putc(m, '\n');
  }
  
  /*
   * This walks the free areas for each zone.
   */
  static int frag_show(struct seq_file *m, void *arg)
  {
  	pg_data_t *pgdat = (pg_data_t *)arg;
  	walk_zones_in_node(m, pgdat, frag_show_print);
  	return 0;
  }
  
  static void pagetypeinfo_showfree_print(struct seq_file *m,
  					pg_data_t *pgdat, struct zone *zone)
  {
  	int order, mtype;
  
  	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++) {
  		seq_printf(m, "Node %4d, zone %8s, type %12s ",
  					pgdat->node_id,
  					zone->name,
  					migratetype_names[mtype]);
  		for (order = 0; order < MAX_ORDER; ++order) {
  			unsigned long freecount = 0;
  			struct free_area *area;
  			struct list_head *curr;
  
  			area = &(zone->free_area[order]);
  
  			list_for_each(curr, &area->free_list[mtype])
  				freecount++;
  			seq_printf(m, "%6lu ", freecount);
  		}
  		seq_putc(m, '\n');
  	}
  }
  
  /* Print out the free pages at each order for each migratetype */
  static int pagetypeinfo_showfree(struct seq_file *m, void *arg)
  {
  	int order;
  	pg_data_t *pgdat = (pg_data_t *)arg;
  
  	/* Print header */
  	seq_printf(m, "%-43s ", "Free pages count per migrate type at order");
  	for (order = 0; order < MAX_ORDER; ++order)
  		seq_printf(m, "%6d ", order);
  	seq_putc(m, '\n');
  
  	walk_zones_in_node(m, pgdat, pagetypeinfo_showfree_print);
  
  	return 0;
  }
  
  static void pagetypeinfo_showblockcount_print(struct seq_file *m,
  					pg_data_t *pgdat, struct zone *zone)
  {
  	int mtype;
  	unsigned long pfn;
  	unsigned long start_pfn = zone->zone_start_pfn;
  	unsigned long end_pfn = start_pfn + zone->spanned_pages;
  	unsigned long count[MIGRATE_TYPES] = { 0, };
  
  	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
  		struct page *page;
  
  		if (!pfn_valid(pfn))
  			continue;
  
  		page = pfn_to_page(pfn);
  
  		/* Watch for unexpected holes punched in the memmap */
  		if (!memmap_valid_within(pfn, page, zone))
  			continue;

  		mtype = get_pageblock_migratetype(page);
  		if (mtype < MIGRATE_TYPES)
  			count[mtype]++;
  	}
  
  	/* Print counts */
  	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
  	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
  		seq_printf(m, "%12lu ", count[mtype]);
  	seq_putc(m, '\n');
  }
  
  /* Print out the number of pageblocks for each migratetype */
  static int pagetypeinfo_showblockcount(struct seq_file *m, void *arg)
  {
  	int mtype;
  	pg_data_t *pgdat = (pg_data_t *)arg;
  
  	seq_printf(m, "\n%-23s", "Number of blocks type ");
  	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
  		seq_printf(m, "%12s ", migratetype_names[mtype]);
  	seq_putc(m, '\n');
  	walk_zones_in_node(m, pgdat, pagetypeinfo_showblockcount_print);
  
  	return 0;
  }
  
  /*
   * This prints out statistics in relation to grouping pages by mobility.
   * It is expensive to collect so do not constantly read the file.
   */
  static int pagetypeinfo_show(struct seq_file *m, void *arg)
  {
  	pg_data_t *pgdat = (pg_data_t *)arg;
  	/* check memoryless node */
  	if (!node_state(pgdat->node_id, N_HIGH_MEMORY))
  		return 0;
  	seq_printf(m, "Page block order: %d\n", pageblock_order);
  	seq_printf(m, "Pages per block:  %lu\n", pageblock_nr_pages);
  	seq_putc(m, '\n');
  	pagetypeinfo_showfree(m, pgdat);
  	pagetypeinfo_showblockcount(m, pgdat);
  	return 0;
  }
  static const struct seq_operations fragmentation_op = {
  	.start	= frag_start,
  	.next	= frag_next,
  	.stop	= frag_stop,
  	.show	= frag_show,
  };
  static int fragmentation_open(struct inode *inode, struct file *file)
  {
  	return seq_open(file, &fragmentation_op);
  }
  
  static const struct file_operations fragmentation_file_operations = {
  	.open		= fragmentation_open,
  	.read		= seq_read,
  	.llseek		= seq_lseek,
  	.release	= seq_release,
  };
  static const struct seq_operations pagetypeinfo_op = {
  	.start	= frag_start,
  	.next	= frag_next,
  	.stop	= frag_stop,
  	.show	= pagetypeinfo_show,
  };
  static int pagetypeinfo_open(struct inode *inode, struct file *file)
  {
  	return seq_open(file, &pagetypeinfo_op);
  }
  
  static const struct file_operations pagetypeinfo_file_ops = {
  	.open		= pagetypeinfo_open,
  	.read		= seq_read,
  	.llseek		= seq_lseek,
  	.release	= seq_release,
  };
  static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
  							struct zone *zone)
  {
  	int i;
  	seq_printf(m, "Node %d, zone %8s", pgdat->node_id, zone->name);
  	seq_printf(m,
  		   "
    pages free     %lu"
  		   "
          min      %lu"
  		   "
          low      %lu"
  		   "
          high     %lu"
  		   "
          scanned  %lu"
  		   "
          spanned  %lu"
  		   "
          present  %lu",
  		   zone_page_state(zone, NR_FREE_PAGES),
  		   min_wmark_pages(zone),
  		   low_wmark_pages(zone),
  		   high_wmark_pages(zone),
  		   zone->pages_scanned,
  		   zone->spanned_pages,
  		   zone->present_pages);
  
  	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
  		seq_printf(m, "\n    %-12s %lu", vmstat_text[i],
  				zone_page_state(zone, i));
  
  	seq_printf(m,
  		   "
          protection: (%lu",
  		   zone->lowmem_reserve[0]);
  	for (i = 1; i < ARRAY_SIZE(zone->lowmem_reserve); i++)
  		seq_printf(m, ", %lu", zone->lowmem_reserve[i]);
  	seq_printf(m,
  		   ")"
  		   "
    pagesets");
  	for_each_online_cpu(i) {
  		struct per_cpu_pageset *pageset;

  		pageset = per_cpu_ptr(zone->pageset, i);
  		seq_printf(m,
  			   "
      cpu: %i"
  			   "
                count: %i"
  			   "
                high:  %i"
  			   "
                batch: %i",
  			   i,
  			   pageset->pcp.count,
  			   pageset->pcp.high,
  			   pageset->pcp.batch);
  #ifdef CONFIG_SMP
  		seq_printf(m, "\n  vm stats threshold: %d",
  				pageset->stat_threshold);
  #endif
  	}
  	seq_printf(m,
  		   "
    all_unreclaimable: %u"
  		   "
    start_pfn:         %lu"
  		   "
    inactive_ratio:    %u",
  		   zone->all_unreclaimable,
  		   zone->zone_start_pfn,
  		   zone->inactive_ratio);
  	seq_putc(m, '\n');
  }
  
  /*
   * Output information about zones in @pgdat.
   */
  static int zoneinfo_show(struct seq_file *m, void *arg)
  {
  	pg_data_t *pgdat = (pg_data_t *)arg;
  	walk_zones_in_node(m, pgdat, zoneinfo_show_print);
  	return 0;
  }
  static const struct seq_operations zoneinfo_op = {
  	.start	= frag_start, /* iterate over all zones. The same as in
  			       * fragmentation. */
  	.next	= frag_next,
  	.stop	= frag_stop,
  	.show	= zoneinfo_show,
  };
  static int zoneinfo_open(struct inode *inode, struct file *file)
  {
  	return seq_open(file, &zoneinfo_op);
  }
  
  static const struct file_operations proc_zoneinfo_file_operations = {
  	.open		= zoneinfo_open,
  	.read		= seq_read,
  	.llseek		= seq_lseek,
  	.release	= seq_release,
  };
  enum writeback_stat_item {
  	NR_DIRTY_THRESHOLD,
  	NR_DIRTY_BG_THRESHOLD,
  	NR_VM_WRITEBACK_STAT_ITEMS,
  };
  static void *vmstat_start(struct seq_file *m, loff_t *pos)
  {
  	unsigned long *v;
  	int i, stat_items_size;
  
  	if (*pos >= ARRAY_SIZE(vmstat_text))
  		return NULL;
  	stat_items_size = NR_VM_ZONE_STAT_ITEMS * sizeof(unsigned long) +
  			  NR_VM_WRITEBACK_STAT_ITEMS * sizeof(unsigned long);
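
  	/*
  	 * The buffer is laid out to match vmstat_text: the zone counters
  	 * come first, then the two writeback thresholds, then (if
  	 * configured) the vm event counters.
  	 */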

  #ifdef CONFIG_VM_EVENT_COUNTERS
  	stat_items_size += sizeof(struct vm_event_state);
  #endif
  
  	v = kmalloc(stat_items_size, GFP_KERNEL);
  	m->private = v;
  	if (!v)
  		return ERR_PTR(-ENOMEM);
  	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
  		v[i] = global_page_state(i);
  	v += NR_VM_ZONE_STAT_ITEMS;
  
  	global_dirty_limits(v + NR_DIRTY_BG_THRESHOLD,
  			    v + NR_DIRTY_THRESHOLD);
  	v += NR_VM_WRITEBACK_STAT_ITEMS;
  #ifdef CONFIG_VM_EVENT_COUNTERS
  	all_vm_events(v);
  	v[PGPGIN] /= 2;		/* sectors -> kbytes */
  	v[PGPGOUT] /= 2;
  #endif
  	return (unsigned long *)m->private + *pos;
  }
  
  static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos)
  {
  	(*pos)++;
  	if (*pos >= ARRAY_SIZE(vmstat_text))
  		return NULL;
  	return (unsigned long *)m->private + *pos;
  }
  
  static int vmstat_show(struct seq_file *m, void *arg)
  {
  	unsigned long *l = arg;
  	unsigned long off = l - (unsigned long *)m->private;
  
  	seq_printf(m, "%s %lu\n", vmstat_text[off], *l);
  	return 0;
  }
  
  static void vmstat_stop(struct seq_file *m, void *arg)
  {
  	kfree(m->private);
  	m->private = NULL;
  }
  static const struct seq_operations vmstat_op = {
  	.start	= vmstat_start,
  	.next	= vmstat_next,
  	.stop	= vmstat_stop,
  	.show	= vmstat_show,
  };
  static int vmstat_open(struct inode *inode, struct file *file)
  {
  	return seq_open(file, &vmstat_op);
  }
  
  static const struct file_operations proc_vmstat_file_operations = {
  	.open		= vmstat_open,
  	.read		= seq_read,
  	.llseek		= seq_lseek,
  	.release	= seq_release,
  };
  #endif /* CONFIG_PROC_FS */
  #ifdef CONFIG_SMP
  static DEFINE_PER_CPU(struct delayed_work, vmstat_work);
  int sysctl_stat_interval __read_mostly = HZ;
  
  static void vmstat_update(struct work_struct *w)
  {
  	refresh_cpu_vm_stats(smp_processor_id());
  	schedule_delayed_work(&__get_cpu_var(vmstat_work),
  		round_jiffies_relative(sysctl_stat_interval));
  }
  static void __cpuinit start_cpu_timer(int cpu)
  {
  	struct delayed_work *work = &per_cpu(vmstat_work, cpu);

  	INIT_DELAYED_WORK_DEFERRABLE(work, vmstat_update);
  	schedule_delayed_work_on(cpu, work, __round_jiffies_relative(HZ, cpu));
  }
  /*
   * Use the cpu notifier to ensure that the thresholds are recalculated
   * when necessary.
   */
  static int __cpuinit vmstat_cpuup_callback(struct notifier_block *nfb,
  		unsigned long action,
  		void *hcpu)
  {
  	long cpu = (long)hcpu;
  	switch (action) {
  	case CPU_ONLINE:
  	case CPU_ONLINE_FROZEN:
  		refresh_zone_stat_thresholds();
  		start_cpu_timer(cpu);
  		node_set_state(cpu_to_node(cpu), N_CPU);
  		break;
  	case CPU_DOWN_PREPARE:
  	case CPU_DOWN_PREPARE_FROZEN:
  		cancel_delayed_work_sync(&per_cpu(vmstat_work, cpu));
  		per_cpu(vmstat_work, cpu).work.func = NULL;
  		break;
  	case CPU_DOWN_FAILED:
  	case CPU_DOWN_FAILED_FROZEN:
  		start_cpu_timer(cpu);
  		break;
  	case CPU_DEAD:
  	case CPU_DEAD_FROZEN:
  		refresh_zone_stat_thresholds();
  		break;
  	default:
  		break;
  	}
  	return NOTIFY_OK;
  }
  
  static struct notifier_block __cpuinitdata vmstat_notifier =
  	{ &vmstat_cpuup_callback, NULL, 0 };
  #endif

  static int __init setup_vmstat(void)
  {
  #ifdef CONFIG_SMP
  	int cpu;
  	register_cpu_notifier(&vmstat_notifier);
  
  	for_each_online_cpu(cpu)
  		start_cpu_timer(cpu);
  #endif
  #ifdef CONFIG_PROC_FS
  	proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
  	proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
  	proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
  	proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
  #endif
  	return 0;
  }
  module_init(setup_vmstat)
  
  #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION)
  #include <linux/debugfs.h>
  
  static struct dentry *extfrag_debug_root;
  
  /*
   * Return an index indicating how much of the available free memory is
   * unusable for an allocation of the requested size.
   */
  static int unusable_free_index(unsigned int order,
  				struct contig_page_info *info)
  {
  	/* No free memory is interpreted as all free memory is unusable */
  	if (info->free_pages == 0)
  		return 1000;
  
  	/*
  	 * Index should be a value between 0 and 1. Return a value to 3
  	 * decimal places.
  	 *
  	 * 0 => no fragmentation
  	 * 1 => high fragmentation
  	 */
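  	/*
  	 * Worked example (hypothetical numbers): 1000 free pages of which
  	 * 200 blocks are suitable for an order-2 request (800 usable
  	 * pages) gives (1000 - 800) * 1000 / 1000 = 200, i.e. 0.200.
  	 */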
  	return div_u64((info->free_pages - (info->free_blocks_suitable << order)) * 1000ULL, info->free_pages);
  
  }
  
  static void unusable_show_print(struct seq_file *m,
  					pg_data_t *pgdat, struct zone *zone)
  {
  	unsigned int order;
  	int index;
  	struct contig_page_info info;
  
  	seq_printf(m, "Node %d, zone %8s ",
  				pgdat->node_id,
  				zone->name);
  	for (order = 0; order < MAX_ORDER; ++order) {
  		fill_contig_page_info(zone, order, &info);
  		index = unusable_free_index(order, &info);
  		seq_printf(m, "%d.%03d ", index / 1000, index % 1000);
  	}
  
  	seq_putc(m, '\n');
  }
  
  /*
   * Display unusable free space index
   *
   * The unusable free space index measures how much of the available free
   * memory cannot be used to satisfy an allocation of a given size and is a
   * value between 0 and 1. The higher the value, the more of free memory is
   * unusable and by implication, the worse the external fragmentation is. This
   * can be expressed as a percentage by multiplying by 100.
   */
  static int unusable_show(struct seq_file *m, void *arg)
  {
  	pg_data_t *pgdat = (pg_data_t *)arg;
  
  	/* check memoryless node */
  	if (!node_state(pgdat->node_id, N_HIGH_MEMORY))
  		return 0;
  
  	walk_zones_in_node(m, pgdat, unusable_show_print);
  
  	return 0;
  }
  
  static const struct seq_operations unusable_op = {
  	.start	= frag_start,
  	.next	= frag_next,
  	.stop	= frag_stop,
  	.show	= unusable_show,
  };
  
  static int unusable_open(struct inode *inode, struct file *file)
  {
  	return seq_open(file, &unusable_op);
  }
  
  static const struct file_operations unusable_file_ops = {
  	.open		= unusable_open,
  	.read		= seq_read,
  	.llseek		= seq_lseek,
  	.release	= seq_release,
  };
  static void extfrag_show_print(struct seq_file *m,
  					pg_data_t *pgdat, struct zone *zone)
  {
  	unsigned int order;
  	int index;
  
  	/* Alloc on stack as interrupts are disabled for zone walk */
  	struct contig_page_info info;
  
  	seq_printf(m, "Node %d, zone %8s ",
  				pgdat->node_id,
  				zone->name);
  	for (order = 0; order < MAX_ORDER; ++order) {
  		fill_contig_page_info(zone, order, &info);
  		index = __fragmentation_index(order, &info);
  		seq_printf(m, "%d.%03d ", index / 1000, index % 1000);
  	}
  
  	seq_putc(m, '\n');
  }
  
  /*
   * Display fragmentation index for orders that allocations would fail for
   */
  static int extfrag_show(struct seq_file *m, void *arg)
  {
  	pg_data_t *pgdat = (pg_data_t *)arg;
  
  	walk_zones_in_node(m, pgdat, extfrag_show_print);
  
  	return 0;
  }
  
  static const struct seq_operations extfrag_op = {
  	.start	= frag_start,
  	.next	= frag_next,
  	.stop	= frag_stop,
  	.show	= extfrag_show,
  };
  
  static int extfrag_open(struct inode *inode, struct file *file)
  {
  	return seq_open(file, &extfrag_op);
  }
  
  static const struct file_operations extfrag_file_ops = {
  	.open		= extfrag_open,
  	.read		= seq_read,
  	.llseek		= seq_lseek,
  	.release	= seq_release,
  };
  static int __init extfrag_debug_init(void)
  {
  	extfrag_debug_root = debugfs_create_dir("extfrag", NULL);
  	if (!extfrag_debug_root)
  		return -ENOMEM;
  
  	if (!debugfs_create_file("unusable_index", 0444,
  			extfrag_debug_root, NULL, &unusable_file_ops))
  		return -ENOMEM;
  	if (!debugfs_create_file("extfrag_index", 0444,
  			extfrag_debug_root, NULL, &extfrag_file_ops))
  		return -ENOMEM;
  	return 0;
  }
  
  module_init(extfrag_debug_init);
  #endif