include/linux/vmstat.h
  #ifndef _LINUX_VMSTAT_H
  #define _LINUX_VMSTAT_H
  
  #include <linux/types.h>
  #include <linux/percpu.h>
  #include <linux/mm.h>
  #include <linux/mmzone.h>
  #include <linux/vm_event_item.h>
  #include <linux/atomic.h>

  extern int sysctl_stat_interval;
  #ifdef CONFIG_VM_EVENT_COUNTERS
  /*
   * Light weight per cpu counter implementation.
   *
   * Counters should only be incremented and no critical kernel component
   * should rely on the counter values.
   *
   * Counters are handled completely inline. On many platforms the code
   * generated will simply be the increment of a global address.
   */
  struct vm_event_state {
  	unsigned long event[NR_VM_EVENT_ITEMS];
  };
  DECLARE_PER_CPU(struct vm_event_state, vm_event_states);
  /*
   * vm counters are allowed to be racy. Use raw_cpu_ops to avoid the
   * local_irq_disable overhead.
   */
  static inline void __count_vm_event(enum vm_event_item item)
  {
  	raw_cpu_inc(vm_event_states.event[item]);
  }
  
  static inline void count_vm_event(enum vm_event_item item)
  {
  	this_cpu_inc(vm_event_states.event[item]);
  }
  
  static inline void __count_vm_events(enum vm_event_item item, long delta)
  {
  	raw_cpu_add(vm_event_states.event[item], delta);
  }
  
  static inline void count_vm_events(enum vm_event_item item, long delta)
  {
  	this_cpu_add(vm_event_states.event[item], delta);
  }
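
/*
 * Illustrative usage, not part of the original header: callers bump event
 * counters from paths where an occasionally lost increment is acceptable,
 * for example the fault path does something like
 *
 *	count_vm_event(PGFAULT);
 *
 * and batched callers pass a delta, e.g.
 *
 *	count_vm_events(PGDEACTIVATE, nr_deactivated);
 *
 * (nr_deactivated is a hypothetical local). The __count_* variants use the
 * cheaper raw_cpu ops and are typically used where interrupts are already
 * disabled or where the small race is acceptable.
 */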
  
  extern void all_vm_events(unsigned long *);

  extern void vm_events_fold_cpu(int cpu);
  
  #else
  
  /* Disable counters */
  static inline void count_vm_event(enum vm_event_item item)
  {
  }
  static inline void count_vm_events(enum vm_event_item item, long delta)
  {
  }
  static inline void __count_vm_event(enum vm_event_item item)
  {
  }
  static inline void __count_vm_events(enum vm_event_item item, long delta)
  {
  }
  static inline void all_vm_events(unsigned long *ret)
  {
  }
  static inline void vm_events_fold_cpu(int cpu)
  {
  }
  
  #endif /* CONFIG_VM_EVENT_COUNTERS */
  #ifdef CONFIG_NUMA_BALANCING
  #define count_vm_numa_event(x)     count_vm_event(x)
  #define count_vm_numa_events(x, y) count_vm_events(x, y)
  #else
  #define count_vm_numa_event(x) do {} while (0)
  #define count_vm_numa_events(x, y) do { (void)(y); } while (0)
  #endif /* CONFIG_NUMA_BALANCING */
  #ifdef CONFIG_DEBUG_TLBFLUSH
  #define count_vm_tlb_event(x)	   count_vm_event(x)
  #define count_vm_tlb_events(x, y)  count_vm_events(x, y)
  #else
  #define count_vm_tlb_event(x)     do {} while (0)
  #define count_vm_tlb_events(x, y) do { (void)(y); } while (0)
  #endif
  #ifdef CONFIG_DEBUG_VM_VMACACHE
  #define count_vm_vmacache_event(x) count_vm_event(x)
  #else
  #define count_vm_vmacache_event(x) do {} while (0)
  #endif
  #define __count_zid_vm_events(item, zid, delta) \
  	__count_vm_events(item##_NORMAL - ZONE_NORMAL + zid, delta)
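
/*
 * Illustrative expansion, not part of the original header: the page
 * allocator is expected to use this roughly as
 *
 *	__count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
 *
 * For a ZONE_DMA page this selects PGALLOC_DMA, for a ZONE_NORMAL page
 * PGALLOC_NORMAL, and so on; it relies on the per-zone PGALLOC_* items
 * being declared in zone order (FOR_ALL_ZONES) in linux/vm_event_item.h.
 */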

  /*
   * Zone and node-based page accounting with per cpu differentials.
   */
  extern atomic_long_t vm_zone_stat[NR_VM_ZONE_STAT_ITEMS];
  extern atomic_long_t vm_node_stat[NR_VM_NODE_STAT_ITEMS];
  
  static inline void zone_page_state_add(long x, struct zone *zone,
  				 enum zone_stat_item item)
  {
  	atomic_long_add(x, &zone->vm_stat[item]);
  	atomic_long_add(x, &vm_zone_stat[item]);
  }
  
  static inline void node_page_state_add(long x, struct pglist_data *pgdat,
  				 enum node_stat_item item)
  {
  	atomic_long_add(x, &pgdat->vm_stat[item]);
  	atomic_long_add(x, &vm_node_stat[item]);
  }
  
  static inline unsigned long global_page_state(enum zone_stat_item item)
  {
  	long x = atomic_long_read(&vm_zone_stat[item]);
  #ifdef CONFIG_SMP
  	if (x < 0)
  		x = 0;
  #endif
  	return x;
  }
  
  static inline unsigned long global_node_page_state(enum node_stat_item item)
  {
  	long x = atomic_long_read(&vm_node_stat[item]);
  #ifdef CONFIG_SMP
  	if (x < 0)
  		x = 0;
  #endif
  	return x;
  }
  
  static inline unsigned long zone_page_state(struct zone *zone,
  					enum zone_stat_item item)
  {
  	long x = atomic_long_read(&zone->vm_stat[item]);
  #ifdef CONFIG_SMP
  	if (x < 0)
  		x = 0;
  #endif
  	return x;
  }
  /*
   * More accurate version that also considers the currently pending
   * deltas. For that we need to loop over all cpus to find the current
   * deltas. There is no synchronization so the result cannot be
   * exactly accurate either.
   */
  static inline unsigned long zone_page_state_snapshot(struct zone *zone,
  					enum zone_stat_item item)
  {
  	long x = atomic_long_read(&zone->vm_stat[item]);
  
  #ifdef CONFIG_SMP
  	int cpu;
  	for_each_online_cpu(cpu)
  		x += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[item];
  
  	if (x < 0)
  		x = 0;
  #endif
  	return x;
  }
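
/*
 * Illustrative comparison, not part of the original header: fast paths read
 * the possibly stale counter,
 *
 *	free = zone_page_state(zone, NR_FREE_PAGES);
 *
 * while accuracy-sensitive slow paths (e.g. safe watermark checks) pay for
 * the snapshot,
 *
 *	free = zone_page_state_snapshot(zone, NR_FREE_PAGES);
 *
 * which folds in each CPU's pending vm_stat_diff at the cost of a
 * for_each_online_cpu() walk.
 */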
  static inline unsigned long node_page_state_snapshot(pg_data_t *pgdat,
  					enum node_stat_item item)
  {
  	long x = atomic_long_read(&pgdat->vm_stat[item]);
  
  #ifdef CONFIG_SMP
  	int cpu;
  	for_each_online_cpu(cpu)
  		x += per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->vm_node_stat_diff[item];
  
  	if (x < 0)
  		x = 0;
  #endif
  	return x;
  }
  #ifdef CONFIG_NUMA
  extern unsigned long sum_zone_node_page_state(int node,
  						enum zone_stat_item item);
  extern unsigned long node_page_state(struct pglist_data *pgdat,
  						enum node_stat_item item);
  #else
  #define sum_zone_node_page_state(node, item) global_page_state(item)
  #define node_page_state(node, item) global_node_page_state(item)
  #endif /* CONFIG_NUMA */

  #define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
  #define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))
  #define add_node_page_state(__p, __i, __d) mod_node_page_state(__p, __i, __d)
  #define sub_node_page_state(__p, __i, __d) mod_node_page_state(__p, __i, -(__d))

  #ifdef CONFIG_SMP
  void __mod_zone_page_state(struct zone *, enum zone_stat_item item, long);
  void __inc_zone_page_state(struct page *, enum zone_stat_item);
  void __dec_zone_page_state(struct page *, enum zone_stat_item);

  void __mod_node_page_state(struct pglist_data *, enum node_stat_item item, long);
  void __inc_node_page_state(struct page *, enum node_stat_item);
  void __dec_node_page_state(struct page *, enum node_stat_item);
  void mod_zone_page_state(struct zone *, enum zone_stat_item, long);
  void inc_zone_page_state(struct page *, enum zone_stat_item);
  void dec_zone_page_state(struct page *, enum zone_stat_item);
  void mod_node_page_state(struct pglist_data *, enum node_stat_item, long);
  void inc_node_page_state(struct page *, enum node_stat_item);
  void dec_node_page_state(struct page *, enum node_stat_item);
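
/*
 * Illustrative usage, not part of the original header: page cache insertion
 * is expected to account the page against its node with something like
 *
 *	__inc_node_page_state(page, NR_FILE_PAGES);
 *
 * and removal undoes it with __dec_node_page_state(). The non-underscored
 * variants are the interrupt-safe versions for callers that cannot
 * guarantee interrupts are disabled.
 */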
  extern void inc_node_state(struct pglist_data *, enum node_stat_item);
  extern void __inc_zone_state(struct zone *, enum zone_stat_item);
  extern void __inc_node_state(struct pglist_data *, enum node_stat_item);
  extern void dec_zone_state(struct zone *, enum zone_stat_item);
  extern void __dec_zone_state(struct zone *, enum zone_stat_item);
  extern void __dec_node_state(struct pglist_data *, enum node_stat_item);

  void quiet_vmstat(void);
  void cpu_vm_stats_fold(int cpu);
  void refresh_zone_stat_thresholds(void);

  struct ctl_table;
  int vmstat_refresh(struct ctl_table *, int write,
  		   void __user *buffer, size_t *lenp, loff_t *ppos);
  void drain_zonestat(struct zone *zone, struct per_cpu_pageset *);
  int calculate_pressure_threshold(struct zone *zone);
  int calculate_normal_threshold(struct zone *zone);
  void set_pgdat_percpu_threshold(pg_data_t *pgdat,
  				int (*calculate_pressure)(struct zone *));
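
/*
 * Illustrative usage, not part of the original header: kswapd is expected to
 * tighten the per-cpu thresholds while it is awake and restore them before
 * sleeping, roughly
 *
 *	set_pgdat_percpu_threshold(pgdat, calculate_pressure_threshold);
 *	... reclaim ...
 *	set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold);
 *
 * so counters such as NR_FREE_PAGES deviate less from their true value
 * while watermarks are checked under pressure.
 */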
  #else /* CONFIG_SMP */
  
  /*
   * We do not maintain differentials in a single processor configuration.
   * The functions directly modify the zone and global counters.
   */
  static inline void __mod_zone_page_state(struct zone *zone,
  			enum zone_stat_item item, long delta)
  {
  	zone_page_state_add(delta, zone, item);
  }
  static inline void __mod_node_page_state(struct pglist_data *pgdat,
  			enum node_stat_item item, int delta)
  {
  	node_page_state_add(delta, pgdat, item);
  }
  static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
  {
  	atomic_long_inc(&zone->vm_stat[item]);
  	atomic_long_inc(&vm_zone_stat[item]);
  }
  
  static inline void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
  {
  	atomic_long_inc(&pgdat->vm_stat[item]);
  	atomic_long_inc(&vm_node_stat[item]);
  }
  static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
  {
  	atomic_long_dec(&zone->vm_stat[item]);
  	atomic_long_dec(&vm_zone_stat[item]);
  }
  
  static inline void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
  {
  	atomic_long_dec(&pgdat->vm_stat[item]);
  	atomic_long_dec(&vm_node_stat[item]);
  }
  static inline void __inc_zone_page_state(struct page *page,
  			enum zone_stat_item item)
  {
  	__inc_zone_state(page_zone(page), item);
  }
  static inline void __inc_node_page_state(struct page *page,
  			enum node_stat_item item)
  {
  	__inc_node_state(page_pgdat(page), item);
  }
  static inline void __dec_zone_page_state(struct page *page,
  			enum zone_stat_item item)
  {
  	__dec_zone_state(page_zone(page), item);
  }
  static inline void __dec_node_page_state(struct page *page,
  			enum node_stat_item item)
  {
  	__dec_node_state(page_pgdat(page), item);
  }
  /*
   * We only use atomic operations to update counters. So there is no need to
   * disable interrupts.
   */
  #define inc_zone_page_state __inc_zone_page_state
  #define dec_zone_page_state __dec_zone_page_state
  #define mod_zone_page_state __mod_zone_page_state
  #define inc_node_page_state __inc_node_page_state
  #define dec_node_page_state __dec_node_page_state
  #define mod_node_page_state __mod_node_page_state
  #define inc_zone_state __inc_zone_state
  #define inc_node_state __inc_node_state
  #define dec_zone_state __dec_zone_state
  #define set_pgdat_percpu_threshold(pgdat, callback) { }

  static inline void refresh_zone_stat_thresholds(void) { }
  static inline void cpu_vm_stats_fold(int cpu) { }
  static inline void quiet_vmstat(void) { }

  static inline void drain_zonestat(struct zone *zone,
  			struct per_cpu_pageset *pset) { }
  #endif		/* CONFIG_SMP */
  static inline void __mod_zone_freepage_state(struct zone *zone, int nr_pages,
  					     int migratetype)
  {
  	__mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages);
  	if (is_migrate_cma(migratetype))
  		__mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages);
  }
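
/*
 * Illustrative usage, not part of the original header: the buddy allocator
 * is expected to call this when pages move on or off the free lists, e.g.
 * when isolating a page of the given order:
 *
 *	__mod_zone_freepage_state(zone, -(1 << order), migratetype);
 *
 * so that NR_FREE_CMA_PAGES tracks the CMA portion of NR_FREE_PAGES.
 */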
  extern const char * const vmstat_text[];
  
  #endif /* _LINUX_VMSTAT_H */