Commit 1edf223485c42c99655dcd001db1e46ad5e5d2d7
Committed by: Linus Torvalds
1 parent: e4e11180df
Exists in: master and 20 other branches
mm/page-writeback.c: make determine_dirtyable_memory static again
The tracing ring-buffer used this function briefly, but not anymore.
Make it local to the writeback code again.

Also, move the function so that no forward declaration needs to be
reintroduced.

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Mel Gorman <mgorman@suse.de>
Reviewed-by: Michal Hocko <mhocko@suse.cz>
Cc: Wu Fengguang <fengguang.wu@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Showing 2 changed files with 60 additions and 64 deletions
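For readers less familiar with the "forward declaration" point in the commit message, here is a minimal hypothetical C sketch (not taken from the patch) of the before/after ordering it describes:

#include <stdio.h>

/* Before the move: the caller precedes the definition, so a forward
 * declaration (or an extern in a header) must exist. */
static unsigned long helper(void);	/* forward declaration */

static unsigned long caller(void)
{
	return helper();
}

static unsigned long helper(void)
{
	return 1;
}

int main(void)
{
	printf("%lu\n", caller());
	return 0;
}

/* After the move: defining helper() above its caller makes the forward
 * declaration unnecessary, which is what the patch achieves for
 * determine_dirtyable_memory() in mm/page-writeback.c. */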
include/linux/writeback.h
@@ -138,8 +138,6 @@
 extern int block_dump;
 extern int laptop_mode;
 
-extern unsigned long determine_dirtyable_memory(void);
-
 extern int dirty_background_ratio_handler(struct ctl_table *table, int write,
 					  void __user *buffer, size_t *lenp,
 					  loff_t *ppos);
mm/page-writeback.c
@@ -130,6 +130,66 @@
 static struct prop_descriptor vm_completions;
 
 /*
+ * Work out the current dirty-memory clamping and background writeout
+ * thresholds.
+ *
+ * The main aim here is to lower them aggressively if there is a lot of mapped
+ * memory around. To avoid stressing page reclaim with lots of unreclaimable
+ * pages. It is better to clamp down on writers than to start swapping, and
+ * performing lots of scanning.
+ *
+ * We only allow 1/2 of the currently-unmapped memory to be dirtied.
+ *
+ * We don't permit the clamping level to fall below 5% - that is getting rather
+ * excessive.
+ *
+ * We make sure that the background writeout level is below the adjusted
+ * clamping level.
+ */
+static unsigned long highmem_dirtyable_memory(unsigned long total)
+{
+#ifdef CONFIG_HIGHMEM
+	int node;
+	unsigned long x = 0;
+
+	for_each_node_state(node, N_HIGH_MEMORY) {
+		struct zone *z =
+			&NODE_DATA(node)->node_zones[ZONE_HIGHMEM];
+
+		x += zone_page_state(z, NR_FREE_PAGES) +
+		     zone_reclaimable_pages(z);
+	}
+	/*
+	 * Make sure that the number of highmem pages is never larger
+	 * than the number of the total dirtyable memory. This can only
+	 * occur in very strange VM situations but we want to make sure
+	 * that this does not occur.
+	 */
+	return min(x, total);
+#else
+	return 0;
+#endif
+}
+
+/**
+ * determine_dirtyable_memory - amount of memory that may be used
+ *
+ * Returns the numebr of pages that can currently be freed and used
+ * by the kernel for direct mappings.
+ */
+static unsigned long determine_dirtyable_memory(void)
+{
+	unsigned long x;
+
+	x = global_page_state(NR_FREE_PAGES) + global_reclaimable_pages();
+
+	if (!vm_highmem_is_dirtyable)
+		x -= highmem_dirtyable_memory(x);
+
+	return x + 1;	/* Ensure that we never return 0 */
+}
+
+/*
  * couple the period to the dirty_ratio:
  *
  * period/2 ~ roundup_pow_of_two(dirty limit)
@@ -196,7 +256,6 @@
 	return ret;
 }
 
-
 int dirty_bytes_handler(struct ctl_table *table, int write,
 			void __user *buffer, size_t *lenp,
 			loff_t *ppos)
@@ -290,67 +349,6 @@
 	return ret;
 }
 EXPORT_SYMBOL(bdi_set_max_ratio);
-
-/*
- * Work out the current dirty-memory clamping and background writeout
- * thresholds.
- *
- * The main aim here is to lower them aggressively if there is a lot of mapped
- * memory around. To avoid stressing page reclaim with lots of unreclaimable
- * pages. It is better to clamp down on writers than to start swapping, and
- * performing lots of scanning.
- *
- * We only allow 1/2 of the currently-unmapped memory to be dirtied.
- *
- * We don't permit the clamping level to fall below 5% - that is getting rather
- * excessive.
- *
- * We make sure that the background writeout level is below the adjusted
- * clamping level.
- */
-
-static unsigned long highmem_dirtyable_memory(unsigned long total)
-{
-#ifdef CONFIG_HIGHMEM
-	int node;
-	unsigned long x = 0;
-
-	for_each_node_state(node, N_HIGH_MEMORY) {
-		struct zone *z =
-			&NODE_DATA(node)->node_zones[ZONE_HIGHMEM];
-
-		x += zone_page_state(z, NR_FREE_PAGES) +
-		     zone_reclaimable_pages(z);
-	}
-	/*
-	 * Make sure that the number of highmem pages is never larger
-	 * than the number of the total dirtyable memory. This can only
-	 * occur in very strange VM situations but we want to make sure
-	 * that this does not occur.
-	 */
-	return min(x, total);
-#else
-	return 0;
-#endif
-}
-
-/**
- * determine_dirtyable_memory - amount of memory that may be used
- *
- * Returns the numebr of pages that can currently be freed and used
- * by the kernel for direct mappings.
- */
-unsigned long determine_dirtyable_memory(void)
-{
-	unsigned long x;
-
-	x = global_page_state(NR_FREE_PAGES) + global_reclaimable_pages();
-
-	if (!vm_highmem_is_dirtyable)
-		x -= highmem_dirtyable_memory(x);
-
-	return x + 1;	/* Ensure that we never return 0 */
-}
 
 static unsigned long dirty_freerun_ceiling(unsigned long thresh,
 					   unsigned long bg_thresh)
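To make the arithmetic of the moved function concrete, here is a minimal standalone sketch with made-up page counts. The constants stand in for the kernel's global_page_state(), global_reclaimable_pages(), highmem_dirtyable_memory(), and vm_highmem_is_dirtyable; this is an illustration under those assumptions, not real kernel API usage:

#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
	unsigned long free_pages  = 50000;	/* stand-in for NR_FREE_PAGES (assumed) */
	unsigned long reclaimable = 30000;	/* stand-in for global_reclaimable_pages() (assumed) */
	unsigned long highmem     = 20000;	/* stand-in for free + reclaimable highmem pages (assumed) */
	int highmem_is_dirtyable  = 0;		/* stand-in for vm_highmem_is_dirtyable */

	/* Start from all free plus reclaimable pages. */
	unsigned long x = free_pages + reclaimable;

	/* If highmem may not be dirtied, subtract it, clamped to x the
	 * way highmem_dirtyable_memory() clamps with min(x, total). */
	if (!highmem_is_dirtyable)
		x -= MIN(highmem, x);

	/* The +1 mirrors the function's guarantee of never returning 0. */
	printf("dirtyable pages: %lu\n", x + 1);
	return 0;
}

With these numbers the result is 60001 pages: 80000 total minus 20000 highmem, plus the never-zero guard.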