include/linux/vmstat.h

  #ifndef _LINUX_VMSTAT_H
  #define _LINUX_VMSTAT_H
  
  #include <linux/types.h>
  #include <linux/percpu.h>
  #include <linux/mm.h>
  #include <linux/mmzone.h>
  #include <linux/vm_event_item.h>
  #include <linux/atomic.h>

  extern int sysctl_stat_interval;
  #ifdef CONFIG_VM_EVENT_COUNTERS
  /*
   * Lightweight per-cpu counter implementation.
   *
   * Counters should only be incremented and no critical kernel component
   * should rely on the counter values.
   *
   * Counters are handled completely inline. On many platforms the code
   * generated will simply be the increment of a global address.
   */
  struct vm_event_state {
  	unsigned long event[NR_VM_EVENT_ITEMS];
  };
  DECLARE_PER_CPU(struct vm_event_state, vm_event_states);
  
  static inline void __count_vm_event(enum vm_event_item item)
  {
  	__this_cpu_inc(vm_event_states.event[item]);
  }
  
  static inline void count_vm_event(enum vm_event_item item)
  {
  	this_cpu_inc(vm_event_states.event[item]);
  }
  
  static inline void __count_vm_events(enum vm_event_item item, long delta)
  {
  	__this_cpu_add(vm_event_states.event[item], delta);
  }
  
  static inline void count_vm_events(enum vm_event_item item, long delta)
  {
  	this_cpu_add(vm_event_states.event[item], delta);
  }
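
  /*
   * Illustrative sketch, not part of the original header: the plain
   * count_vm_event()/count_vm_events() forms do not require the caller
   * to pin itself to a cpu, while the __count_vm_event()/__count_vm_events()
   * forms assume the caller already runs non-preemptibly (interrupts or
   * preemption disabled, or a spinlock held). PGFAULT and PSWPIN are
   * existing vm_event_item values; the helper names are hypothetical.
   */
  static inline void vmstat_example_count_fault(void)
  {
  	count_vm_event(PGFAULT);
  }

  static inline void vmstat_example_count_swapins(long nr)
  {
  	/* Caller is expected to be non-preemptible here. */
  	__count_vm_events(PSWPIN, nr);
  }
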
  
  extern void all_vm_events(unsigned long *);
  #ifdef CONFIG_HOTPLUG
  extern void vm_events_fold_cpu(int cpu);
  #else
  static inline void vm_events_fold_cpu(int cpu)
  {
  }
  #endif
  
  #else
  
  /* Disable counters */
  static inline void count_vm_event(enum vm_event_item item)
  {
  }
  static inline void count_vm_events(enum vm_event_item item, long delta)
  {
  }
  static inline void __count_vm_event(enum vm_event_item item)
  {
  }
  static inline void __count_vm_events(enum vm_event_item item, long delta)
  {
  }
  static inline void all_vm_events(unsigned long *ret)
  {
  }
  static inline void vm_events_fold_cpu(int cpu)
  {
  }
  
  #endif /* CONFIG_VM_EVENT_COUNTERS */
  
  #define __count_zone_vm_events(item, zone, delta) \
  		__count_vm_events(item##_NORMAL - ZONE_NORMAL + \
  		zone_idx(zone), delta)
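
  /*
   * Illustrative sketch, not part of the original header: the macro above
   * relies on the per-zone event items (e.g. PGALLOC_DMA, PGALLOC_NORMAL,
   * ...) being declared in enum vm_event_item in the same order as the
   * zones, so item##_NORMAL - ZONE_NORMAL + zone_idx(zone) selects the
   * item that matches the given zone. A hypothetical caller counting
   * page allocations of a given order might look like this:
   */
  static inline void vmstat_example_count_alloc(struct zone *zone,
  					unsigned int order)
  {
  	__count_zone_vm_events(PGALLOC, zone, 1L << order);
  }
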

  /*
   * Zone based page accounting with per cpu differentials.
   */
  extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
  
  static inline void zone_page_state_add(long x, struct zone *zone,
  				 enum zone_stat_item item)
  {
  	atomic_long_add(x, &zone->vm_stat[item]);
  	atomic_long_add(x, &vm_stat[item]);
  }
  
  static inline unsigned long global_page_state(enum zone_stat_item item)
  {
  	long x = atomic_long_read(&vm_stat[item]);
  #ifdef CONFIG_SMP
  	if (x < 0)
  		x = 0;
  #endif
  	return x;
  }
  
  static inline unsigned long zone_page_state(struct zone *zone,
  					enum zone_stat_item item)
  {
  	long x = atomic_long_read(&zone->vm_stat[item]);
  #ifdef CONFIG_SMP
  	if (x < 0)
  		x = 0;
  #endif
  	return x;
  }
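
  /*
   * Illustrative sketch, not part of the original header: both accessors
   * above are cheap reads of the global/per-zone atomics and may lag
   * behind updates still sitting in per-cpu deltas; on SMP a transiently
   * negative sum is clamped to zero. NR_FREE_PAGES is an existing
   * zone_stat_item; the helper name is hypothetical.
   */
  static inline int vmstat_example_zone_has_free_pages(struct zone *zone)
  {
  	return global_page_state(NR_FREE_PAGES) > 0 &&
  	       zone_page_state(zone, NR_FREE_PAGES) > 0;
  }
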
  /*
   * More accurate version that also considers the currently pending
   * deltas. For that we need to loop over all cpus to find the current
   * deltas. There is no synchronization so the result cannot be
   * exactly accurate either.
   */
  static inline unsigned long zone_page_state_snapshot(struct zone *zone,
  					enum zone_stat_item item)
  {
  	long x = atomic_long_read(&zone->vm_stat[item]);
  
  #ifdef CONFIG_SMP
  	int cpu;
  	for_each_online_cpu(cpu)
  		x += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[item];
  
  	if (x < 0)
  		x = 0;
  #endif
  	return x;
  }
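
  /*
   * Illustrative sketch, not part of the original header: a caller making
   * a decision near a threshold can use the cheap read first and only pay
   * for the per-cpu walk of the snapshot variant when the value is
   * marginal. The helper name and the slack parameter are hypothetical.
   */
  static inline bool vmstat_example_enough_free(struct zone *zone,
  			unsigned long min_pages, unsigned long slack)
  {
  	unsigned long free = zone_page_state(zone, NR_FREE_PAGES);

  	if (free < min_pages + slack)
  		free = zone_page_state_snapshot(zone, NR_FREE_PAGES);

  	return free >= min_pages;
  }
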
  extern unsigned long global_reclaimable_pages(void);
  extern unsigned long zone_reclaimable_pages(struct zone *zone);

  #ifdef CONFIG_NUMA
  /*
   * Determine the per node value of a stat item. This function
   * is called frequently in a NUMA machine, so try to be as
   * frugal as possible.
   */
  static inline unsigned long node_page_state(int node,
  				 enum zone_stat_item item)
  {
  	struct zone *zones = NODE_DATA(node)->node_zones;
  
  	return
  #ifdef CONFIG_ZONE_DMA
  		zone_page_state(&zones[ZONE_DMA], item) +
  #endif
  #ifdef CONFIG_ZONE_DMA32
  		zone_page_state(&zones[ZONE_DMA32], item) +
  #endif
  #ifdef CONFIG_HIGHMEM
  		zone_page_state(&zones[ZONE_HIGHMEM], item) +
  #endif
  		zone_page_state(&zones[ZONE_NORMAL], item) +
  		zone_page_state(&zones[ZONE_MOVABLE], item);
  }

  extern void zone_statistics(struct zone *, struct zone *, gfp_t gfp);

  #else

  #define node_page_state(node, item) global_page_state(item)
  #define zone_statistics(_zl, _z, gfp) do { } while (0)
  
  #endif /* CONFIG_NUMA */
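
  /*
   * Illustrative sketch, not part of the original header: per-node reads
   * work in both configurations; without CONFIG_NUMA the node argument is
   * ignored and the global value is returned. NR_FILE_PAGES is an
   * existing zone_stat_item; the helper name is hypothetical.
   */
  static inline unsigned long vmstat_example_node_file_pages(int node)
  {
  	return node_page_state(node, NR_FILE_PAGES);
  }
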

  #define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
  #define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))
  
  static inline void zap_zone_vm_stats(struct zone *zone)
  {
  	memset(zone->vm_stat, 0, sizeof(zone->vm_stat));
  }
  extern void inc_zone_state(struct zone *, enum zone_stat_item);
  #ifdef CONFIG_SMP
  void __mod_zone_page_state(struct zone *, enum zone_stat_item item, int);
  void __inc_zone_page_state(struct page *, enum zone_stat_item);
  void __dec_zone_page_state(struct page *, enum zone_stat_item);

  void mod_zone_page_state(struct zone *, enum zone_stat_item, int);
  void inc_zone_page_state(struct page *, enum zone_stat_item);
  void dec_zone_page_state(struct page *, enum zone_stat_item);
  
  extern void inc_zone_state(struct zone *, enum zone_stat_item);
  extern void __inc_zone_state(struct zone *, enum zone_stat_item);
  extern void dec_zone_state(struct zone *, enum zone_stat_item);
  extern void __dec_zone_state(struct zone *, enum zone_stat_item);
  
  void refresh_cpu_vm_stats(int);
  void refresh_zone_stat_thresholds(void);
  
  int calculate_pressure_threshold(struct zone *zone);
  int calculate_normal_threshold(struct zone *zone);
  void set_pgdat_percpu_threshold(pg_data_t *pgdat,
  				int (*calculate_pressure)(struct zone *));
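
  /*
   * Illustrative sketch, not part of the original header (CONFIG_SMP
   * only): a caller about to make watermark-style decisions for a node
   * can temporarily switch its zones to the tighter pressure thresholds
   * so per-cpu deltas stay small, then restore the normal thresholds.
   * The helper names below are hypothetical.
   */
  static inline void vmstat_example_begin_accurate_stats(pg_data_t *pgdat)
  {
  	set_pgdat_percpu_threshold(pgdat, calculate_pressure_threshold);
  }

  static inline void vmstat_example_end_accurate_stats(pg_data_t *pgdat)
  {
  	set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold);
  }
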
  #else /* CONFIG_SMP */
  
  /*
   * We do not maintain differentials in a single processor configuration.
   * The functions directly modify the zone and global counters.
   */
  static inline void __mod_zone_page_state(struct zone *zone,
  			enum zone_stat_item item, int delta)
  {
  	zone_page_state_add(delta, zone, item);
  }
  static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
  {
  	atomic_long_inc(&zone->vm_stat[item]);
  	atomic_long_inc(&vm_stat[item]);
  }
  static inline void __inc_zone_page_state(struct page *page,
  			enum zone_stat_item item)
  {
  	__inc_zone_state(page_zone(page), item);
  }
  static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
  {
  	atomic_long_dec(&zone->vm_stat[item]);
  	atomic_long_dec(&vm_stat[item]);
  }
  static inline void __dec_zone_page_state(struct page *page,
  			enum zone_stat_item item)
  {
  	__dec_zone_state(page_zone(page), item);
  }
  
  /*
   * We only use atomic operations to update counters. So there is no need to
   * disable interrupts.
   */
  #define inc_zone_page_state __inc_zone_page_state
  #define dec_zone_page_state __dec_zone_page_state
  #define mod_zone_page_state __mod_zone_page_state
  #define set_pgdat_percpu_threshold(pgdat, callback) { }

  static inline void refresh_cpu_vm_stats(int cpu) { }
  static inline void refresh_zone_stat_thresholds(void) { }
  #endif		/* CONFIG_SMP */
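
  /*
   * Illustrative sketch, not part of the original header: regardless of
   * CONFIG_SMP, callers use the same names; on SMP they resolve to the
   * out-of-line, differential-based implementations in mm/vmstat.c, on UP
   * they alias the inline atomic versions above. NR_MLOCK is an existing
   * zone_stat_item; the helper name is hypothetical.
   */
  static inline void vmstat_example_account_mlocked(struct page *page)
  {
  	inc_zone_page_state(page, NR_MLOCK);
  }
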
  
  extern const char * const vmstat_text[];
  
  #endif /* _LINUX_VMSTAT_H */