Commit f042e707ee671e4beb5389abeb9a1819a2cf5532
Committed by: Linus Torvalds
Parent: a77aea9201
Exists in: master and 20 other branches
mm: move enum vm_event_item into a standalone header file
enums are problematic because they cannot be forward-declared:

	akpm2:/home/akpm> cat t.c

	enum foo;

	static inline void bar(enum foo f)
	{
	}

	akpm2:/home/akpm> gcc -c t.c
	t.c:4: error: parameter 1 ('f') has incomplete type

So move the enum's definition into a standalone header file which can be
used wherever its definition is needed.

Cc: Ying Han <yinghan@google.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Minchan Kim <minchan.kim@gmail.com>
Cc: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Cc: Balbir Singh <balbir@linux.vnet.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
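An editorial aside, not part of the commit message: the payoff is that a header
which only needs the enum in a prototype no longer has to include all of
vmstat.h. A minimal sketch (the consumer header below is hypothetical):

	/* hypothetical consumer header */
	#include <linux/vm_event_item.h>	/* small, self-contained */

	static inline void record_event(enum vm_event_item item)
	{
		/* 'item' now has a complete type without including vmstat.h */
	}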
Showing 2 changed files with 65 additions and 61 deletions
include/linux/vm_event_item.h
File was created:

#ifndef VM_EVENT_ITEM_H_INCLUDED
#define VM_EVENT_ITEM_H_INCLUDED

#ifdef CONFIG_ZONE_DMA
#define DMA_ZONE(xx) xx##_DMA,
#else
#define DMA_ZONE(xx)
#endif

#ifdef CONFIG_ZONE_DMA32
#define DMA32_ZONE(xx) xx##_DMA32,
#else
#define DMA32_ZONE(xx)
#endif

#ifdef CONFIG_HIGHMEM
#define HIGHMEM_ZONE(xx) , xx##_HIGH
#else
#define HIGHMEM_ZONE(xx)
#endif

#define FOR_ALL_ZONES(xx) DMA_ZONE(xx) DMA32_ZONE(xx) xx##_NORMAL HIGHMEM_ZONE(xx) , xx##_MOVABLE

enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
		FOR_ALL_ZONES(PGALLOC),
		PGFREE, PGACTIVATE, PGDEACTIVATE,
		PGFAULT, PGMAJFAULT,
		FOR_ALL_ZONES(PGREFILL),
		FOR_ALL_ZONES(PGSTEAL),
		FOR_ALL_ZONES(PGSCAN_KSWAPD),
		FOR_ALL_ZONES(PGSCAN_DIRECT),
#ifdef CONFIG_NUMA
		PGSCAN_ZONE_RECLAIM_FAILED,
#endif
		PGINODESTEAL, SLABS_SCANNED, KSWAPD_STEAL, KSWAPD_INODESTEAL,
		KSWAPD_LOW_WMARK_HIT_QUICKLY, KSWAPD_HIGH_WMARK_HIT_QUICKLY,
		KSWAPD_SKIP_CONGESTION_WAIT,
		PAGEOUTRUN, ALLOCSTALL, PGROTATED,
#ifdef CONFIG_COMPACTION
		COMPACTBLOCKS, COMPACTPAGES, COMPACTPAGEFAILED,
		COMPACTSTALL, COMPACTFAIL, COMPACTSUCCESS,
#endif
#ifdef CONFIG_HUGETLB_PAGE
		HTLB_BUDDY_PGALLOC, HTLB_BUDDY_PGALLOC_FAIL,
#endif
		UNEVICTABLE_PGCULLED,	/* culled to noreclaim list */
		UNEVICTABLE_PGSCANNED,	/* scanned for reclaimability */
		UNEVICTABLE_PGRESCUED,	/* rescued from noreclaim list */
		UNEVICTABLE_PGMLOCKED,
		UNEVICTABLE_PGMUNLOCKED,
		UNEVICTABLE_PGCLEARED,	/* on COW, page truncate */
		UNEVICTABLE_PGSTRANDED,	/* unable to isolate on unlock */
		UNEVICTABLE_MLOCKFREED,
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
		THP_FAULT_ALLOC,
		THP_FAULT_FALLBACK,
		THP_COLLAPSE_ALLOC,
		THP_COLLAPSE_ALLOC_FAILED,
		THP_SPLIT,
#endif
		NR_VM_EVENT_ITEMS
};

#endif		/* VM_EVENT_ITEM_H_INCLUDED */
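An editorial aside, not part of the new file: a worked expansion of
FOR_ALL_ZONES(), assuming a config with CONFIG_ZONE_DMA, CONFIG_ZONE_DMA32
and CONFIG_HIGHMEM all enabled:

	FOR_ALL_ZONES(PGALLOC)
	/* => PGALLOC_DMA, PGALLOC_DMA32, PGALLOC_NORMAL , PGALLOC_HIGH , PGALLOC_MOVABLE */

HIGHMEM_ZONE() supplies its own leading comma, which is why xx##_NORMAL has no
trailing comma of its own. The items come out in the same order as enum
zone_type; the __count_zone_vm_events() macro in vmstat.h depends on that
ordering.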
include/linux/vmstat.h
 #ifndef _LINUX_VMSTAT_H
 #define _LINUX_VMSTAT_H
 
 #include <linux/types.h>
 #include <linux/percpu.h>
 #include <linux/mm.h>
 #include <linux/mmzone.h>
+#include <linux/vm_event_item.h>
 #include <asm/atomic.h>
-
-#ifdef CONFIG_ZONE_DMA
-#define DMA_ZONE(xx) xx##_DMA,
-#else
-#define DMA_ZONE(xx)
-#endif
-
-#ifdef CONFIG_ZONE_DMA32
-#define DMA32_ZONE(xx) xx##_DMA32,
-#else
-#define DMA32_ZONE(xx)
-#endif
-
-#ifdef CONFIG_HIGHMEM
-#define HIGHMEM_ZONE(xx) , xx##_HIGH
-#else
-#define HIGHMEM_ZONE(xx)
-#endif
-
-
-#define FOR_ALL_ZONES(xx) DMA_ZONE(xx) DMA32_ZONE(xx) xx##_NORMAL HIGHMEM_ZONE(xx) , xx##_MOVABLE
-
-enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
-		FOR_ALL_ZONES(PGALLOC),
-		PGFREE, PGACTIVATE, PGDEACTIVATE,
-		PGFAULT, PGMAJFAULT,
-		FOR_ALL_ZONES(PGREFILL),
-		FOR_ALL_ZONES(PGSTEAL),
-		FOR_ALL_ZONES(PGSCAN_KSWAPD),
-		FOR_ALL_ZONES(PGSCAN_DIRECT),
-#ifdef CONFIG_NUMA
-		PGSCAN_ZONE_RECLAIM_FAILED,
-#endif
-		PGINODESTEAL, SLABS_SCANNED, KSWAPD_STEAL, KSWAPD_INODESTEAL,
-		KSWAPD_LOW_WMARK_HIT_QUICKLY, KSWAPD_HIGH_WMARK_HIT_QUICKLY,
-		KSWAPD_SKIP_CONGESTION_WAIT,
-		PAGEOUTRUN, ALLOCSTALL, PGROTATED,
-#ifdef CONFIG_COMPACTION
-		COMPACTBLOCKS, COMPACTPAGES, COMPACTPAGEFAILED,
-		COMPACTSTALL, COMPACTFAIL, COMPACTSUCCESS,
-#endif
-#ifdef CONFIG_HUGETLB_PAGE
-		HTLB_BUDDY_PGALLOC, HTLB_BUDDY_PGALLOC_FAIL,
-#endif
-		UNEVICTABLE_PGCULLED,	/* culled to noreclaim list */
-		UNEVICTABLE_PGSCANNED,	/* scanned for reclaimability */
-		UNEVICTABLE_PGRESCUED,	/* rescued from noreclaim list */
-		UNEVICTABLE_PGMLOCKED,
-		UNEVICTABLE_PGMUNLOCKED,
-		UNEVICTABLE_PGCLEARED,	/* on COW, page truncate */
-		UNEVICTABLE_PGSTRANDED,	/* unable to isolate on unlock */
-		UNEVICTABLE_MLOCKFREED,
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-		THP_FAULT_ALLOC,
-		THP_FAULT_FALLBACK,
-		THP_COLLAPSE_ALLOC,
-		THP_COLLAPSE_ALLOC_FAILED,
-		THP_SPLIT,
-#endif
-		NR_VM_EVENT_ITEMS
-};
 
 extern int sysctl_stat_interval;
 
 #ifdef CONFIG_VM_EVENT_COUNTERS
 /*
  * Light weight per cpu counter implementation.
  *
  * Counters should only be incremented and no critical kernel component
  * should rely on the counter values.
  *
  * Counters are handled completely inline. On many platforms the code
  * generated will simply be the increment of a global address.
  */
 
 struct vm_event_state {
 	unsigned long event[NR_VM_EVENT_ITEMS];
 };
 
 DECLARE_PER_CPU(struct vm_event_state, vm_event_states);
 
 static inline void __count_vm_event(enum vm_event_item item)
 {
 	__this_cpu_inc(vm_event_states.event[item]);
 }
 
 static inline void count_vm_event(enum vm_event_item item)
 {
 	this_cpu_inc(vm_event_states.event[item]);
 }
 
 static inline void __count_vm_events(enum vm_event_item item, long delta)
 {
 	__this_cpu_add(vm_event_states.event[item], delta);
 }
 
 static inline void count_vm_events(enum vm_event_item item, long delta)
 {
 	this_cpu_add(vm_event_states.event[item], delta);
 }
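An editorial aside, not part of the diff: a minimal sketch of a call site.
The wrapper function below is hypothetical, but count_vm_event() and PGFAULT
are real names from this header:

	/* hypothetical caller somewhere in a fault path */
	static inline void account_fault_example(void)
	{
		count_vm_event(PGFAULT);	/* one per-cpu increment, no locking */
	}

The double-underscore variants use __this_cpu_inc()/__this_cpu_add() and so
assume the caller is already in a non-preemptible context; the plain versions
do not require the caller to disable preemption first.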
 
 extern void all_vm_events(unsigned long *);
 #ifdef CONFIG_HOTPLUG
 extern void vm_events_fold_cpu(int cpu);
 #else
 static inline void vm_events_fold_cpu(int cpu)
 {
 }
 #endif
 
 #else
 
 /* Disable counters */
 static inline void count_vm_event(enum vm_event_item item)
 {
 }
 static inline void count_vm_events(enum vm_event_item item, long delta)
 {
 }
 static inline void __count_vm_event(enum vm_event_item item)
 {
 }
 static inline void __count_vm_events(enum vm_event_item item, long delta)
 {
 }
 static inline void all_vm_events(unsigned long *ret)
 {
 }
 static inline void vm_events_fold_cpu(int cpu)
 {
 }
 
 #endif /* CONFIG_VM_EVENT_COUNTERS */
 
 #define __count_zone_vm_events(item, zone, delta) \
 		__count_vm_events(item##_NORMAL - ZONE_NORMAL + \
 		zone_idx(zone), delta)
 
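An editorial aside, not part of the diff: the macro above relies on
FOR_ALL_ZONES() emitting the per-zone items in the same order as enum
zone_type, so the item for any zone can be derived from the _NORMAL item by
plain arithmetic:

	/*
	 * __count_zone_vm_events(PGALLOC, z, 1) with zone_idx(z) == ZONE_HIGHMEM:
	 *
	 *	PGALLOC_NORMAL - ZONE_NORMAL + ZONE_HIGHMEM == PGALLOC_HIGH
	 *
	 * i.e. the call resolves to __count_vm_events(PGALLOC_HIGH, 1).
	 */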
 /*
  * Zone based page accounting with per cpu differentials.
  */
 extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
 
 static inline void zone_page_state_add(long x, struct zone *zone,
 				 enum zone_stat_item item)
 {
 	atomic_long_add(x, &zone->vm_stat[item]);
 	atomic_long_add(x, &vm_stat[item]);
 }
 
 static inline unsigned long global_page_state(enum zone_stat_item item)
 {
 	long x = atomic_long_read(&vm_stat[item]);
 #ifdef CONFIG_SMP
 	if (x < 0)
 		x = 0;
 #endif
 	return x;
 }
 
 static inline unsigned long zone_page_state(struct zone *zone,
 					enum zone_stat_item item)
 {
 	long x = atomic_long_read(&zone->vm_stat[item]);
 #ifdef CONFIG_SMP
 	if (x < 0)
 		x = 0;
 #endif
 	return x;
 }
 
 /*
  * More accurate version that also considers the currently pending
  * deltas. For that we need to loop over all cpus to find the current
  * deltas. There is no synchronization so the result cannot be
  * exactly accurate either.
  */
 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
 					enum zone_stat_item item)
 {
 	long x = atomic_long_read(&zone->vm_stat[item]);
 
 #ifdef CONFIG_SMP
 	int cpu;
 	for_each_online_cpu(cpu)
 		x += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[item];
 
 	if (x < 0)
 		x = 0;
 #endif
 	return x;
 }
 
 extern unsigned long global_reclaimable_pages(void);
 extern unsigned long zone_reclaimable_pages(struct zone *zone);
 
 #ifdef CONFIG_NUMA
 /*
  * Determine the per node value of a stat item. This function
  * is called frequently in a NUMA machine, so try to be as
  * frugal as possible.
  */
 static inline unsigned long node_page_state(int node,
 				 enum zone_stat_item item)
 {
 	struct zone *zones = NODE_DATA(node)->node_zones;
 
 	return
 #ifdef CONFIG_ZONE_DMA
 		zone_page_state(&zones[ZONE_DMA], item) +
 #endif
 #ifdef CONFIG_ZONE_DMA32
 		zone_page_state(&zones[ZONE_DMA32], item) +
 #endif
 #ifdef CONFIG_HIGHMEM
 		zone_page_state(&zones[ZONE_HIGHMEM], item) +
 #endif
 		zone_page_state(&zones[ZONE_NORMAL], item) +
 		zone_page_state(&zones[ZONE_MOVABLE], item);
 }
 
 extern void zone_statistics(struct zone *, struct zone *, gfp_t gfp);
 
 #else
 
 #define node_page_state(node, item) global_page_state(item)
 #define zone_statistics(_zl, _z, gfp) do { } while (0)
 
 #endif /* CONFIG_NUMA */
 
 #define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
 #define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))
 
 static inline void zap_zone_vm_stats(struct zone *zone)
 {
 	memset(zone->vm_stat, 0, sizeof(zone->vm_stat));
 }
 
 extern void inc_zone_state(struct zone *, enum zone_stat_item);
 
 #ifdef CONFIG_SMP
 void __mod_zone_page_state(struct zone *, enum zone_stat_item item, int);
 void __inc_zone_page_state(struct page *, enum zone_stat_item);
 void __dec_zone_page_state(struct page *, enum zone_stat_item);
 
 void mod_zone_page_state(struct zone *, enum zone_stat_item, int);
 void inc_zone_page_state(struct page *, enum zone_stat_item);
 void dec_zone_page_state(struct page *, enum zone_stat_item);
 
 extern void inc_zone_state(struct zone *, enum zone_stat_item);
 extern void __inc_zone_state(struct zone *, enum zone_stat_item);
 extern void dec_zone_state(struct zone *, enum zone_stat_item);
 extern void __dec_zone_state(struct zone *, enum zone_stat_item);
 
 void refresh_cpu_vm_stats(int);
 void refresh_zone_stat_thresholds(void);
 
 int calculate_pressure_threshold(struct zone *zone);
 int calculate_normal_threshold(struct zone *zone);
 void set_pgdat_percpu_threshold(pg_data_t *pgdat,
 				int (*calculate_pressure)(struct zone *));
 #else /* CONFIG_SMP */
 
 /*
  * We do not maintain differentials in a single processor configuration.
  * The functions directly modify the zone and global counters.
  */
 static inline void __mod_zone_page_state(struct zone *zone,
 			enum zone_stat_item item, int delta)
 {
 	zone_page_state_add(delta, zone, item);
 }
 
 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
 {
 	atomic_long_inc(&zone->vm_stat[item]);
 	atomic_long_inc(&vm_stat[item]);
 }
 
 static inline void __inc_zone_page_state(struct page *page,
 			enum zone_stat_item item)
 {
 	__inc_zone_state(page_zone(page), item);
 }
 
 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
 {
 	atomic_long_dec(&zone->vm_stat[item]);
 	atomic_long_dec(&vm_stat[item]);
 }
 
 static inline void __dec_zone_page_state(struct page *page,
 			enum zone_stat_item item)
 {
 	__dec_zone_state(page_zone(page), item);
 }
 
 /*
  * We only use atomic operations to update counters. So there is no need to
  * disable interrupts.
  */
 #define inc_zone_page_state __inc_zone_page_state
 #define dec_zone_page_state __dec_zone_page_state
 #define mod_zone_page_state __mod_zone_page_state
 
 #define set_pgdat_percpu_threshold(pgdat, callback) { }
 
 static inline void refresh_cpu_vm_stats(int cpu) { }
 static inline void refresh_zone_stat_thresholds(void) { }
 
 #endif /* CONFIG_SMP */
 
 extern const char * const vmstat_text[];
 
 #endif /* _LINUX_VMSTAT_H */