include/linux/vmstat.h

  #ifndef _LINUX_VMSTAT_H
  #define _LINUX_VMSTAT_H
  
  #include <linux/types.h>
  #include <linux/percpu.h>
  #include <linux/mm.h>
  #include <linux/mmzone.h>
  #include <linux/vm_event_item.h>
  #include <linux/atomic.h>

  extern int sysctl_stat_interval;
  #ifdef CONFIG_VM_EVENT_COUNTERS
  /*
   * Light weight per cpu counter implementation.
   *
   * Counters should only be incremented and no critical kernel component
   * should rely on the counter values.
   *
   * Counters are handled completely inline. On many platforms the code
   * generated will simply be the increment of a global address.
   */
  struct vm_event_state {
  	unsigned long event[NR_VM_EVENT_ITEMS];
  };
  DECLARE_PER_CPU(struct vm_event_state, vm_event_states);
  
  static inline void __count_vm_event(enum vm_event_item item)
  {
  	__this_cpu_inc(vm_event_states.event[item]);
  }
  
  static inline void count_vm_event(enum vm_event_item item)
  {
  	this_cpu_inc(vm_event_states.event[item]);
  }
  
  static inline void __count_vm_events(enum vm_event_item item, long delta)
  {
  	__this_cpu_add(vm_event_states.event[item], delta);
  }
  
  static inline void count_vm_events(enum vm_event_item item, long delta)
  {
  	this_cpu_add(vm_event_states.event[item], delta);
  }
  
  extern void all_vm_events(unsigned long *);

  extern void vm_events_fold_cpu(int cpu);
  
  #else
  
  /* Disable counters */
  static inline void count_vm_event(enum vm_event_item item)
  {
  }
  static inline void count_vm_events(enum vm_event_item item, long delta)
  {
  }
  static inline void __count_vm_event(enum vm_event_item item)
  {
  }
  static inline void __count_vm_events(enum vm_event_item item, long delta)
  {
  }
  static inline void all_vm_events(unsigned long *ret)
  {
  }
  static inline void vm_events_fold_cpu(int cpu)
  {
  }
  
  #endif /* CONFIG_VM_EVENT_COUNTERS */
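
As a rough usage sketch (not part of vmstat.h): callers simply bump a counter from whatever path they happen to be in, relying on the per-cpu increment being cheap and on nobody needing an exact value. PGFAULT and PGMAJFAULT are existing vm_event_item values; the wrapper functions below are hypothetical.

/* Hypothetical callers in a fault path, kernel context assumed. */
static void example_account_fault(bool major)
{
	count_vm_event(PGFAULT);		/* safe in any context */
	if (major)
		count_vm_event(PGMAJFAULT);
}

static void example_account_faults(unsigned long nr)
{
	count_vm_events(PGFAULT, nr);		/* batched form with a delta */
}
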
  #ifdef CONFIG_NUMA_BALANCING
  #define count_vm_numa_event(x)     count_vm_event(x)
  #define count_vm_numa_events(x, y) count_vm_events(x, y)
  #else
  #define count_vm_numa_event(x) do {} while (0)
  #define count_vm_numa_events(x, y) do { (void)(y); } while (0)
  #endif /* CONFIG_NUMA_BALANCING */
  #define __count_zone_vm_events(item, zone, delta) \
  		__count_vm_events(item##_NORMAL - ZONE_NORMAL + \
  		zone_idx(zone), delta)
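
This macro relies on the per-zone event items (e.g. PGALLOC_DMA ... PGALLOC_MOVABLE) being declared in the same order as the zone indexes, so that item##_NORMAL - ZONE_NORMAL is a base offset and adding zone_idx(zone) lands on the item for that zone. A minimal sketch, assuming interrupts are already disabled as on the allocator's hot path (the function name is hypothetical):

/*
 * On a config where both enums carry the same set of zones, a ZONE_MOVABLE
 * zone makes this expand to
 * __count_vm_events(PGALLOC_NORMAL - ZONE_NORMAL + ZONE_MOVABLE, 1 << order),
 * i.e. PGALLOC_MOVABLE is bumped by the number of pages allocated.
 */
static void example_count_zone_alloc(struct zone *zone, unsigned int order)
{
	__count_zone_vm_events(PGALLOC, zone, 1 << order);
}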

  /*
   * Zone based page accounting with per cpu differentials.
   */
  extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
  
  static inline void zone_page_state_add(long x, struct zone *zone,
  				 enum zone_stat_item item)
  {
  	atomic_long_add(x, &zone->vm_stat[item]);
  	atomic_long_add(x, &vm_stat[item]);
  }
  
  static inline unsigned long global_page_state(enum zone_stat_item item)
  {
  	long x = atomic_long_read(&vm_stat[item]);
  #ifdef CONFIG_SMP
  	if (x < 0)
  		x = 0;
  #endif
  	return x;
  }
  
  static inline unsigned long zone_page_state(struct zone *zone,
  					enum zone_stat_item item)
  {
  	long x = atomic_long_read(&zone->vm_stat[item]);
  #ifdef CONFIG_SMP
  	if (x < 0)
  		x = 0;
  #endif
  	return x;
  }
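
These readers are deliberately cheap and approximate: pending per-cpu diffs are not folded in, and on SMP a transiently negative sum is clamped to zero. A hedged sketch of a consumer (the function is hypothetical; NR_FREE_PAGES is a real zone_stat_item):

/* Hypothetical diagnostic built on the approximate readers above. */
static void example_report_free_pages(struct zone *zone)
{
	pr_info("%s: %lu free pages in zone, %lu free pages globally\n",
		zone->name,
		zone_page_state(zone, NR_FREE_PAGES),
		global_page_state(NR_FREE_PAGES));
}
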
  /*
   * More accurate version that also considers the currently pending
   * deltas. For that we need to loop over all cpus to find the current
   * deltas. There is no synchronization so the result cannot be
   * exactly accurate either.
   */
  static inline unsigned long zone_page_state_snapshot(struct zone *zone,
  					enum zone_stat_item item)
  {
  	long x = atomic_long_read(&zone->vm_stat[item]);
  
  #ifdef CONFIG_SMP
  	int cpu;
  	for_each_online_cpu(cpu)
  		x += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[item];
  
  	if (x < 0)
  		x = 0;
  #endif
  	return x;
  }
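
Compared with zone_page_state(), the snapshot walks every online CPU's pending delta, so it is noticeably more expensive; it suits slow paths where a stale fast read could mislead. A sketch under that assumption, with a hypothetical function name:

static bool example_zone_truly_below_min(struct zone *zone)
{
	/* Cheap read first; only pay for the per-cpu walk when it looks tight. */
	if (zone_page_state(zone, NR_FREE_PAGES) > min_wmark_pages(zone))
		return false;

	return zone_page_state_snapshot(zone, NR_FREE_PAGES) <=
						min_wmark_pages(zone);
}
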
  extern unsigned long global_reclaimable_pages(void);
  extern unsigned long zone_reclaimable_pages(struct zone *zone);

  #ifdef CONFIG_NUMA
  /*
   * Determine the per node value of a stat item. This function
   * is called frequently in a NUMA machine, so try to be as
   * frugal as possible.
   */
  static inline unsigned long node_page_state(int node,
  				 enum zone_stat_item item)
  {
  	struct zone *zones = NODE_DATA(node)->node_zones;
  
  	return
  #ifdef CONFIG_ZONE_DMA
  		zone_page_state(&zones[ZONE_DMA], item) +
  #endif
  #ifdef CONFIG_ZONE_DMA32
  		zone_page_state(&zones[ZONE_DMA32], item) +
  #endif
  #ifdef CONFIG_HIGHMEM
  		zone_page_state(&zones[ZONE_HIGHMEM], item) +
  #endif
  		zone_page_state(&zones[ZONE_NORMAL], item) +
  		zone_page_state(&zones[ZONE_MOVABLE], item);
  }

  extern void zone_statistics(struct zone *, struct zone *, gfp_t gfp);

  #else

  #define node_page_state(node, item) global_page_state(item)
  #define zone_statistics(_zl, _z, gfp) do { } while (0)
  
  #endif /* CONFIG_NUMA */
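
A small sketch of the per-node accessor in use; on !CONFIG_NUMA builds the macro fallback above turns the same call into global_page_state(). numa_node_id() comes from the topology headers, and the wrapper itself is hypothetical.

/* Hypothetical: how many file-backed pages sit on the local node? */
static unsigned long example_local_node_file_pages(void)
{
	return node_page_state(numa_node_id(), NR_FILE_PAGES);
}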

  #define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
  #define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))
  extern void inc_zone_state(struct zone *, enum zone_stat_item);
  #ifdef CONFIG_SMP
  void __mod_zone_page_state(struct zone *, enum zone_stat_item item, int);
  void __inc_zone_page_state(struct page *, enum zone_stat_item);
  void __dec_zone_page_state(struct page *, enum zone_stat_item);

  void mod_zone_page_state(struct zone *, enum zone_stat_item, int);
  void inc_zone_page_state(struct page *, enum zone_stat_item);
  void dec_zone_page_state(struct page *, enum zone_stat_item);
  
  extern void inc_zone_state(struct zone *, enum zone_stat_item);
  extern void __inc_zone_state(struct zone *, enum zone_stat_item);
  extern void dec_zone_state(struct zone *, enum zone_stat_item);
  extern void __dec_zone_state(struct zone *, enum zone_stat_item);
  
  void refresh_cpu_vm_stats(int);
  void refresh_zone_stat_thresholds(void);

  void drain_zonestat(struct zone *zone, struct per_cpu_pageset *);
  int calculate_pressure_threshold(struct zone *zone);
  int calculate_normal_threshold(struct zone *zone);
  void set_pgdat_percpu_threshold(pg_data_t *pgdat,
  				int (*calculate_pressure)(struct zone *));
  #else /* CONFIG_SMP */
  
  /*
   * We do not maintain differentials in a single processor configuration.
   * The functions directly modify the zone and global counters.
   */
  static inline void __mod_zone_page_state(struct zone *zone,
  			enum zone_stat_item item, int delta)
  {
  	zone_page_state_add(delta, zone, item);
  }
  static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
  {
  	atomic_long_inc(&zone->vm_stat[item]);
  	atomic_long_inc(&vm_stat[item]);
  }
  static inline void __inc_zone_page_state(struct page *page,
  			enum zone_stat_item item)
  {
  	__inc_zone_state(page_zone(page), item);
  }
  static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
  {
  	atomic_long_dec(&zone->vm_stat[item]);
  	atomic_long_dec(&vm_stat[item]);
  }
  static inline void __dec_zone_page_state(struct page *page,
  			enum zone_stat_item item)
  {
  	__dec_zone_state(page_zone(page), item);
  }
  
  /*
   * We only use atomic operations to update counters. So there is no need to
   * disable interrupts.
   */
  #define inc_zone_page_state __inc_zone_page_state
  #define dec_zone_page_state __dec_zone_page_state
  #define mod_zone_page_state __mod_zone_page_state
  #define set_pgdat_percpu_threshold(pgdat, callback) { }

  static inline void refresh_cpu_vm_stats(int cpu) { }
  static inline void refresh_zone_stat_thresholds(void) { }
  static inline void drain_zonestat(struct zone *zone,
  			struct per_cpu_pageset *pset) { }
  #endif		/* CONFIG_SMP */
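
The distinction drawn by this #ifdef matters to callers: on SMP the double-underscore updaters manipulate a per-cpu differential and expect interrupts (or at least preemption) to be off already, while the plain versions take care of that themselves; on UP both collapse to direct atomic updates. A hedged sketch of the two calling conventions (hypothetical functions; NR_FILE_MAPPED is a real zone_stat_item):

static void example_account_new_mapping(struct page *page)
{
	/* Safe from any context: the non-__ helper serializes as needed. */
	inc_zone_page_state(page, NR_FILE_MAPPED);
}

static void example_account_removed_mapping(struct page *page)
{
	unsigned long flags;

	/* The __ variant expects the caller to hold interrupts off on SMP. */
	local_irq_save(flags);
	__dec_zone_page_state(page, NR_FILE_MAPPED);
	local_irq_restore(flags);
}
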
  static inline void __mod_zone_freepage_state(struct zone *zone, int nr_pages,
  					     int migratetype)
  {
  	__mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages);
  	if (is_migrate_cma(migratetype))
  		__mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages);
  }
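
A sketch of where this helper fits: the buddy allocator adjusts NR_FREE_PAGES (and NR_FREE_CMA_PAGES for CMA pageblocks) by the number of pages entering or leaving the free lists, with interrupts already disabled on that path. The wrappers below are hypothetical.

static void example_account_freed_block(struct zone *zone, unsigned int order,
					int migratetype)
{
	/* 2^order pages go back onto the free lists. */
	__mod_zone_freepage_state(zone, 1 << order, migratetype);
}

static void example_account_allocated_block(struct zone *zone,
					    unsigned int order, int migratetype)
{
	/* ...and the mirror image when the block is handed out. */
	__mod_zone_freepage_state(zone, -(1 << order), migratetype);
}
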
  extern const char * const vmstat_text[];
  
  #endif /* _LINUX_VMSTAT_H */