Blame view
mm/page-writeback.c
75.2 KB
1da177e4c Linux-2.6.12-rc2
/*
f30c22695 fix file specific...
 * mm/page-writeback.c
1da177e4c Linux-2.6.12-rc2
 *
 * Copyright (C) 2002, Linus Torvalds.
04fbfdc14 mm: per device di...
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
1da177e4c Linux-2.6.12-rc2
 *
 * Contains functions related to writing back dirty pages at the
 * address_space level.
 *
e1f8e8744 Remove Andrew Mor...
 * 10Apr2002    Andrew Morton
1da177e4c Linux-2.6.12-rc2
 *              Initial version
 */

#include <linux/kernel.h>
b95f1b31b mm: Map most file...
#include <linux/export.h>
1da177e4c Linux-2.6.12-rc2
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/init.h>
#include <linux/backing-dev.h>
55e829af0 [PATCH] io-accoun...
#include <linux/task_io_accounting_ops.h>
1da177e4c Linux-2.6.12-rc2
#include <linux/blkdev.h>
#include <linux/mpage.h>
d08b3851d [PATCH] mm: track...
#include <linux/rmap.h>
1da177e4c Linux-2.6.12-rc2
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>
ff01bb483 fs: move code out...
#include <linux/buffer_head.h> /* __set_page_dirty_buffers */
811d736f9 [PATCH] BLOCK: Di...
#include <linux/pagevec.h>
eb608e3a3 block: Convert BD...
#include <linux/timer.h>
8bd75c77b sched/rt: Move rt...
#include <linux/sched/rt.h>
6e543d578 mm: vmscan: fix d...
#include <linux/mm_inline.h>
028c2dd18 writeback: Add tr...
#include <trace/events/writeback.h>
1da177e4c Linux-2.6.12-rc2

6e543d578 mm: vmscan: fix d...
#include "internal.h"
1da177e4c Linux-2.6.12-rc2
/*
ffd1f609a writeback: introd...
 * Sleep at most 200ms at a time in balance_dirty_pages().
 */
#define MAX_PAUSE               max(HZ/5, 1)

/*
5b9b35743 writeback: avoid ...
 * Try to keep balance_dirty_pages() call intervals higher than this many pages
 * by raising pause time to max_pause when falls below it.
 */
#define DIRTY_POLL_THRESH       (128 >> (PAGE_SHIFT - 10))

/*
e98be2d59 writeback: bdi wr...
 * Estimate write bandwidth at 200ms intervals.
 */
#define BANDWIDTH_INTERVAL      max(HZ/5, 1)
6c14ae1e9 writeback: dirty ...
#define RATELIMIT_CALC_SHIFT    10
e98be2d59 writeback: bdi wr...

/*
1da177e4c Linux-2.6.12-rc2
 * After a CPU has dirtied this many pages, balance_dirty_pages_ratelimited
 * will look to see if it needs to force writeback or throttling.
 */
static long ratelimit_pages = 32;

/* The following parameters are exported via /proc/sys/vm */

/*
5b0830cb9 writeback: get ri...
 * Start background writeback (via writeback threads) at this percentage
1da177e4c Linux-2.6.12-rc2
 */
1b5e62b42 writeback: double...
int dirty_background_ratio = 10;
1da177e4c Linux-2.6.12-rc2

/*
2da02997e mm: add dirty_bac...
 * dirty_background_bytes starts at 0 (disabled) so that it is a function of
 * dirty_background_ratio * the amount of dirtyable memory
 */
unsigned long dirty_background_bytes;

/*
195cf453d mm/page-writeback...
 * free highmem will not be subtracted from the total free memory
 * for calculating free ratios if vm_highmem_is_dirtyable is true
 */
int vm_highmem_is_dirtyable;

/*
1da177e4c Linux-2.6.12-rc2
 * The generator of dirty data starts writeback at this percentage
 */
1b5e62b42 writeback: double...
int vm_dirty_ratio = 20;
1da177e4c Linux-2.6.12-rc2

/*
2da02997e mm: add dirty_bac...
 * vm_dirty_bytes starts at 0 (disabled) so that it is a function of
 * vm_dirty_ratio * the amount of dirtyable memory
 */
unsigned long vm_dirty_bytes;

/*
704503d83 mm: fix proc_doin...
 * The interval between `kupdate'-style writebacks
1da177e4c Linux-2.6.12-rc2
 */
22ef37eed page-writeback: f...
unsigned int dirty_writeback_interval = 5 * 100; /* centiseconds */
1da177e4c Linux-2.6.12-rc2

91913a294 mm: export dirty_...
EXPORT_SYMBOL_GPL(dirty_writeback_interval);
1da177e4c Linux-2.6.12-rc2

/*
704503d83 mm: fix proc_doin...
 * The longest time for which data is allowed to remain dirty
1da177e4c Linux-2.6.12-rc2
 */
22ef37eed page-writeback: f...
unsigned int dirty_expire_interval = 30 * 100; /* centiseconds */
1da177e4c Linux-2.6.12-rc2

/*
 * Flag that makes the machine dump writes/reads and block dirtyings.
 */
int block_dump;

/*
ed5b43f15 [PATCH] Represent...
 * Flag that puts the machine in "laptop mode". Doubles as a timeout in jiffies:
 * a full sync is triggered after this time elapses without any disk activity.
1da177e4c Linux-2.6.12-rc2
 */
int laptop_mode;

EXPORT_SYMBOL(laptop_mode);

/* End of sysctl-exported parameters */

c42843f2f writeback: introd...
unsigned long global_dirty_limit;
1da177e4c Linux-2.6.12-rc2

/*
04fbfdc14 mm: per device di...
 * Scale the writeback cache size proportional to the relative writeout speeds.
 *
 * We do this by keeping a floating proportion between BDIs, based on page
 * writeback completions [end_page_writeback()]. Those devices that write out
 * pages fastest will get the larger share, while the slower will get a smaller
 * share.
 *
 * We use page writeout completions because we are interested in getting rid of
 * dirty pages. Having them written out is the primary goal.
 *
 * We introduce a concept of time, a period over which we measure these events,
 * because demand can/will vary over time. The length of this period itself is
 * measured in page writeback completions.
 *
 */
eb608e3a3 block: Convert BD...
static struct fprop_global writeout_completions;

static void writeout_period(unsigned long t);
/* Timer for aging of writeout_completions */
static struct timer_list writeout_period_timer =
                TIMER_DEFERRED_INITIALIZER(writeout_period, 0, 0);
static unsigned long writeout_period_time = 0;

/*
 * Length of period for aging writeout fractions of bdis. This is an
 * arbitrarily chosen number. The longer the period, the slower fractions will
 * reflect changes in current writeout rate.
 */
#define VM_COMPLETIONS_PERIOD_LEN (3*HZ)
04fbfdc14 mm: per device di...

/*
1edf22348 mm/page-writeback...
 * Work out the current dirty-memory clamping and background writeout
 * thresholds.
 *
 * The main aim here is to lower them aggressively if there is a lot of mapped
 * memory around.  To avoid stressing page reclaim with lots of unreclaimable
 * pages.  It is better to clamp down on writers than to start swapping, and
 * performing lots of scanning.
 *
 * We only allow 1/2 of the currently-unmapped memory to be dirtied.
 *
 * We don't permit the clamping level to fall below 5% - that is getting rather
 * excessive.
 *
 * We make sure that the background writeout level is below the adjusted
 * clamping level.
 */
ccafa2879 mm: writeback: cl...

a756cf590 mm: try to distri...
/*
 * In a memory zone, there is a certain amount of pages we consider
 * available for the page cache, which is essentially the number of
 * free and reclaimable pages, minus some zone reserves to protect
 * lowmem and the ability to uphold the zone's watermarks without
 * requiring writeback.
 *
 * This number of dirtyable pages is the base value of which the
 * user-configurable dirty ratio is the effective number of pages that
 * are allowed to be actually dirtied.  Per individual zone, or
 * globally by using the sum of dirtyable pages over all zones.
 *
 * Because the user is allowed to specify the dirty limit globally as
 * absolute number of bytes, calculating the per-zone dirty limit can
 * require translating the configured limit into a percentage of
 * global dirtyable memory first.
 */
a804552b9 mm/page-writeback...
/**
 * zone_dirtyable_memory - number of dirtyable pages in a zone
 * @zone: the zone
 *
 * Returns the zone's number of pages potentially available for dirty
 * page cache.  This is the base value for the per-zone dirty limits.
 */
static unsigned long zone_dirtyable_memory(struct zone *zone)
{
        unsigned long nr_pages;

        nr_pages = zone_page_state(zone, NR_FREE_PAGES);
        nr_pages -= min(nr_pages, zone->dirty_balance_reserve);
a1c3bfb2f mm/page-writeback...
        nr_pages += zone_page_state(zone, NR_INACTIVE_FILE);
        nr_pages += zone_page_state(zone, NR_ACTIVE_FILE);
a804552b9 mm/page-writeback...

        return nr_pages;
}
1edf22348 mm/page-writeback...
static unsigned long highmem_dirtyable_memory(unsigned long total)
{
#ifdef CONFIG_HIGHMEM
        int node;
        unsigned long x = 0;

        for_each_node_state(node, N_HIGH_MEMORY) {
a804552b9 mm/page-writeback...
                struct zone *z = &NODE_DATA(node)->node_zones[ZONE_HIGHMEM];
1edf22348 mm/page-writeback...

a804552b9 mm/page-writeback...
                x += zone_dirtyable_memory(z);
1edf22348 mm/page-writeback...
        }
        /*
c8b74c2f6 mm: fix calculati...
         * Unreclaimable memory (kernel memory or anonymous memory
         * without swap) can bring down the dirtyable pages below
         * the zone's dirty balance reserve and the above calculation
         * will underflow.  However we still want to add in nodes
         * which are below threshold (negative values) to get a more
         * accurate calculation but make sure that the total never
         * underflows.
         */
        if ((long)x < 0)
                x = 0;

        /*
1edf22348 mm/page-writeback...
         * Make sure that the number of highmem pages is never larger
         * than the number of the total dirtyable memory. This can only
         * occur in very strange VM situations but we want to make sure
         * that this does not occur.
         */
        return min(x, total);
#else
        return 0;
#endif
}

/**
ccafa2879 mm: writeback: cl...
 * global_dirtyable_memory - number of globally dirtyable pages
1edf22348 mm/page-writeback...
 *
ccafa2879 mm: writeback: cl...
 * Returns the global number of pages potentially available for dirty
 * page cache.  This is the base value for the global dirty limits.
1edf22348 mm/page-writeback...
 */
18cf8cf8b mm: page-writebac...
static unsigned long global_dirtyable_memory(void)
1edf22348 mm/page-writeback...
{
        unsigned long x;
a804552b9 mm/page-writeback...
        x = global_page_state(NR_FREE_PAGES);
c8b74c2f6 mm: fix calculati...
        x -= min(x, dirty_balance_reserve);
1edf22348 mm/page-writeback...

a1c3bfb2f mm/page-writeback...
        x += global_page_state(NR_INACTIVE_FILE);
        x += global_page_state(NR_ACTIVE_FILE);
a804552b9 mm/page-writeback...

1edf22348 mm/page-writeback...
        if (!vm_highmem_is_dirtyable)
                x -= highmem_dirtyable_memory(x);

        return x + 1;   /* Ensure that we never return 0 */
}

/*
ccafa2879 mm: writeback: cl...
 * global_dirty_limits - background-writeback and dirty-throttling thresholds
 *
 * Calculate the dirty thresholds based on sysctl parameters
 * - vm.dirty_background_ratio  or  vm.dirty_background_bytes
 * - vm.dirty_ratio             or  vm.dirty_bytes
 * The dirty limits will be lifted by 1/4 for PF_LESS_THROTTLE (ie. nfsd) and
 * real-time tasks.
 */
void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty)
{
        unsigned long background;
        unsigned long dirty;
        unsigned long uninitialized_var(available_memory);
        struct task_struct *tsk;

        if (!vm_dirty_bytes || !dirty_background_bytes)
                available_memory = global_dirtyable_memory();

        if (vm_dirty_bytes)
                dirty = DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE);
        else
                dirty = (vm_dirty_ratio * available_memory) / 100;

        if (dirty_background_bytes)
                background = DIV_ROUND_UP(dirty_background_bytes, PAGE_SIZE);
        else
                background = (dirty_background_ratio * available_memory) / 100;

        if (background >= dirty)
                background = dirty / 2;
        tsk = current;
        if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk)) {
                background += background / 4;
                dirty += dirty / 4;
        }
        *pbackground = background;
        *pdirty = dirty;
        trace_global_dirty_state(background, dirty);
}
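/*
 * Editor's illustration -- not part of mm/page-writeback.c. The threshold
 * arithmetic in global_dirty_limits() above, redone standalone with made-up
 * numbers: 1,000,000 dirtyable pages, the default vm_dirty_ratio = 20 and
 * dirty_background_ratio = 10, byte-based limits disabled. Compile and run
 * in userspace; every value below is an assumption.
 */
#include <stdio.h>

int main(void)
{
        unsigned long available_memory = 1000000;       /* pages, assumed */
        unsigned long dirty = 20 * available_memory / 100;
        unsigned long background = 10 * available_memory / 100;

        if (background >= dirty)        /* keep background below dirty */
                background = dirty / 2;

        /* the 1/4 lift given to PF_LESS_THROTTLE and rt tasks */
        printf("background=%lu dirty=%lu (lifted: %lu/%lu)\n",
               background, dirty,
               background + background / 4, dirty + dirty / 4);
        return 0;       /* background=100000 dirty=200000, lifted 125000/250000 */
}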
a756cf590 mm: try to distri...
/**
 * zone_dirty_limit - maximum number of dirty pages allowed in a zone
 * @zone: the zone
 *
 * Returns the maximum number of dirty pages allowed in a zone, based
 * on the zone's dirtyable memory.
 */
static unsigned long zone_dirty_limit(struct zone *zone)
{
        unsigned long zone_memory = zone_dirtyable_memory(zone);
        struct task_struct *tsk = current;
        unsigned long dirty;

        if (vm_dirty_bytes)
                dirty = DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE) *
                        zone_memory / global_dirtyable_memory();
        else
                dirty = vm_dirty_ratio * zone_memory / 100;

        if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk))
                dirty += dirty / 4;

        return dirty;
}

/**
 * zone_dirty_ok - tells whether a zone is within its dirty limits
 * @zone: the zone to check
 *
 * Returns %true when the dirty pages in @zone are within the zone's
 * dirty limit, %false if the limit is exceeded.
 */
bool zone_dirty_ok(struct zone *zone)
{
        unsigned long limit = zone_dirty_limit(zone);

        return zone_page_state(zone, NR_FILE_DIRTY) +
               zone_page_state(zone, NR_UNSTABLE_NFS) +
               zone_page_state(zone, NR_WRITEBACK) <= limit;
}
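/*
 * Editor's illustration -- not part of the kernel. zone_dirty_limit() above
 * scales the global policy by the zone's share of dirtyable memory, so a
 * hypothetical zone holding 125,000 dirtyable pages with vm_dirty_ratio = 20
 * is allowed 25,000 dirty pages.
 */
#include <stdio.h>

int main(void)
{
        unsigned long zone_memory = 125000;     /* assumed zone share, pages */
        unsigned long dirty = 20 * zone_memory / 100;

        printf("zone dirty limit = %lu pages\n", dirty);        /* 25000 */
        return 0;
}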
2da02997e mm: add dirty_bac...
int dirty_background_ratio_handler(struct ctl_table *table, int write,
8d65af789 sysctl: remove "s...
                void __user *buffer, size_t *lenp,
2da02997e mm: add dirty_bac...
                loff_t *ppos)
{
        int ret;
8d65af789 sysctl: remove "s...
        ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
2da02997e mm: add dirty_bac...
        if (ret == 0 && write)
                dirty_background_bytes = 0;
        return ret;
}

int dirty_background_bytes_handler(struct ctl_table *table, int write,
8d65af789 sysctl: remove "s...
                void __user *buffer, size_t *lenp,
2da02997e mm: add dirty_bac...
                loff_t *ppos)
{
        int ret;
8d65af789 sysctl: remove "s...
        ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
2da02997e mm: add dirty_bac...
        if (ret == 0 && write)
                dirty_background_ratio = 0;
        return ret;
}
04fbfdc14 mm: per device di...
int dirty_ratio_handler(struct ctl_table *table, int write,
8d65af789 sysctl: remove "s...
                void __user *buffer, size_t *lenp,
04fbfdc14 mm: per device di...
                loff_t *ppos)
{
        int old_ratio = vm_dirty_ratio;
2da02997e mm: add dirty_bac...
        int ret;
8d65af789 sysctl: remove "s...
        ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
04fbfdc14 mm: per device di...
        if (ret == 0 && write && vm_dirty_ratio != old_ratio) {
eb608e3a3 block: Convert BD...
                writeback_set_ratelimit();
2da02997e mm: add dirty_bac...
                vm_dirty_bytes = 0;
        }
        return ret;
}

int dirty_bytes_handler(struct ctl_table *table, int write,
8d65af789 sysctl: remove "s...
                void __user *buffer, size_t *lenp,
2da02997e mm: add dirty_bac...
                loff_t *ppos)
{
fc3501d41 mm: fix dirty_byt...
        unsigned long old_bytes = vm_dirty_bytes;
2da02997e mm: add dirty_bac...
        int ret;
8d65af789 sysctl: remove "s...
        ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
2da02997e mm: add dirty_bac...
        if (ret == 0 && write && vm_dirty_bytes != old_bytes) {
eb608e3a3 block: Convert BD...
                writeback_set_ratelimit();
2da02997e mm: add dirty_bac...
                vm_dirty_ratio = 0;
04fbfdc14 mm: per device di...
        }
        return ret;
}
eb608e3a3 block: Convert BD...
static unsigned long wp_next_time(unsigned long cur_time)
{
        cur_time += VM_COMPLETIONS_PERIOD_LEN;
        /* 0 has a special meaning... */
        if (!cur_time)
                return 1;
        return cur_time;
}
04fbfdc14 mm: per device di...
/*
 * Increment the BDI's writeout completion count and the global writeout
 * completion count. Called from test_clear_page_writeback().
 */
static inline void __bdi_writeout_inc(struct backing_dev_info *bdi)
{
f7d2b1ecd writeback: accoun...
        __inc_bdi_stat(bdi, BDI_WRITTEN);
eb608e3a3 block: Convert BD...
        __fprop_inc_percpu_max(&writeout_completions, &bdi->completions,
                               bdi->max_prop_frac);
        /* First event after period switching was turned off? */
        if (!unlikely(writeout_period_time)) {
                /*
                 * We can race with other __bdi_writeout_inc calls here but
                 * it does not cause any harm since the resulting time when
                 * timer will fire and what is in writeout_period_time will be
                 * roughly the same.
                 */
                writeout_period_time = wp_next_time(jiffies);
                mod_timer(&writeout_period_timer, writeout_period_time);
        }
04fbfdc14 mm: per device di...
}
dd5656e59 mm: bdi: export b...

void bdi_writeout_inc(struct backing_dev_info *bdi)
{
        unsigned long flags;

        local_irq_save(flags);
        __bdi_writeout_inc(bdi);
        local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(bdi_writeout_inc);
04fbfdc14 mm: per device di...
/*
 * Obtain an accurate fraction of the BDI's portion.
 */
static void bdi_writeout_fraction(struct backing_dev_info *bdi,
                long *numerator, long *denominator)
{
eb608e3a3 block: Convert BD...
        fprop_fraction_percpu(&writeout_completions, &bdi->completions,
04fbfdc14 mm: per device di...
                                numerator, denominator);
}

/*
eb608e3a3 block: Convert BD...
 * On idle system, we can be called long after we scheduled because we use
 * deferred timers so count with missed periods.
 */
static void writeout_period(unsigned long t)
{
        int miss_periods = (jiffies - writeout_period_time) /
                                 VM_COMPLETIONS_PERIOD_LEN;

        if (fprop_new_period(&writeout_completions, miss_periods + 1)) {
                writeout_period_time = wp_next_time(writeout_period_time +
                                miss_periods * VM_COMPLETIONS_PERIOD_LEN);
                mod_timer(&writeout_period_timer, writeout_period_time);
        } else {
                /*
                 * Aging has zeroed all fractions. Stop wasting CPU on period
                 * updates.
                 */
                writeout_period_time = 0;
        }
}

/*
d08c429b0 mm/page-writeback...
 * bdi_min_ratio keeps the sum of the minimum dirty shares of all
 * registered backing devices, which, for obvious reasons, can not
 * exceed 100%.
189d3c4a9 mm: bdi: allow se...
 */
static unsigned int bdi_min_ratio;

int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio)
{
        int ret = 0;

cfc4ba536 writeback: use RC...
        spin_lock_bh(&bdi_lock);
a42dde041 mm: bdi: allow se...
        if (min_ratio > bdi->max_ratio) {
189d3c4a9 mm: bdi: allow se...
                ret = -EINVAL;
a42dde041 mm: bdi: allow se...
        } else {
                min_ratio -= bdi->min_ratio;
                if (bdi_min_ratio + min_ratio < 100) {
                        bdi_min_ratio += min_ratio;
                        bdi->min_ratio += min_ratio;
                } else {
                        ret = -EINVAL;
                }
        }
cfc4ba536 writeback: use RC...
        spin_unlock_bh(&bdi_lock);
a42dde041 mm: bdi: allow se...
        return ret;
}

int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned max_ratio)
{
        int ret = 0;

        if (max_ratio > 100)
                return -EINVAL;
cfc4ba536 writeback: use RC...
        spin_lock_bh(&bdi_lock);
a42dde041 mm: bdi: allow se...
        if (bdi->min_ratio > max_ratio) {
                ret = -EINVAL;
        } else {
                bdi->max_ratio = max_ratio;
eb608e3a3 block: Convert BD...
                bdi->max_prop_frac = (FPROP_FRAC_BASE * max_ratio) / 100;
a42dde041 mm: bdi: allow se...
        }
cfc4ba536 writeback: use RC...
        spin_unlock_bh(&bdi_lock);
189d3c4a9 mm: bdi: allow se...

        return ret;
}
a42dde041 mm: bdi: allow se...
EXPORT_SYMBOL(bdi_set_max_ratio);
6c14ae1e9 writeback: dirty ...
static unsigned long dirty_freerun_ceiling(unsigned long thresh,
                                           unsigned long bg_thresh)
{
        return (thresh + bg_thresh) / 2;
}
ffd1f609a writeback: introd...
static unsigned long hard_dirty_limit(unsigned long thresh)
{
        return max(thresh, global_dirty_limit);
}
6f7186562 writeback: add bd...
/**
1babe1838 writeback: add co...
 * bdi_dirty_limit - @bdi's share of dirty throttling threshold
6f7186562 writeback: add bd...
 * @bdi: the backing_dev_info to query
 * @dirty: global dirty limit in pages
1babe1838 writeback: add co...
 *
6f7186562 writeback: add bd...
 * Returns @bdi's dirty limit in pages. The term "dirty" in the context of
 * dirty balancing includes all PG_dirty, PG_writeback and NFS unstable pages.
aed21ad28 writeback: commen...
 *
 * Note that balance_dirty_pages() will only seriously take it as a hard limit
 * when sleeping max_pause per page is not enough to keep the dirty pages under
 * control. For example, when the device is completely stalled due to some error
 * conditions, or when there are 1000 dd tasks writing to a slow 10MB/s USB key.
 * In the other normal situations, it acts more gently by throttling the tasks
 * more (rather than completely block them) when the bdi dirty pages go high.
1babe1838 writeback: add co...
 *
6f7186562 writeback: add bd...
 * It allocates high/low dirty limits to fast/slow devices, in order to prevent
1babe1838 writeback: add co...
 * - starving fast devices
 * - piling up dirty pages (that will take long time to sync) on slow devices
 *
 * The bdi's share of dirty limit will be adapting to its throughput and
 * bounded by the bdi->min_ratio and/or bdi->max_ratio parameters, if set.
 */
unsigned long bdi_dirty_limit(struct backing_dev_info *bdi, unsigned long dirty)
16c4042f0 writeback: avoid ...
{
        u64 bdi_dirty;
        long numerator, denominator;
04fbfdc14 mm: per device di...

16c4042f0 writeback: avoid ...
        /*
         * Calculate this BDI's share of the dirty ratio.
         */
        bdi_writeout_fraction(bdi, &numerator, &denominator);
04fbfdc14 mm: per device di...

16c4042f0 writeback: avoid ...
        bdi_dirty = (dirty * (100 - bdi_min_ratio)) / 100;
        bdi_dirty *= numerator;
        do_div(bdi_dirty, denominator);
04fbfdc14 mm: per device di...

16c4042f0 writeback: avoid ...
        bdi_dirty += (dirty * bdi->min_ratio) / 100;
        if (bdi_dirty > (dirty * bdi->max_ratio) / 100)
                bdi_dirty = dirty * bdi->max_ratio / 100;

        return bdi_dirty;
1da177e4c Linux-2.6.12-rc2
}
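/*
 * Editor's illustration -- not part of the kernel. bdi_dirty_limit() above
 * splits the global limit by each device's measured share of writeout
 * completions. Hypothetical inputs: a 200,000-page global limit, a device
 * that completed 3 of every 4 recent writeouts, and no min/max ratio set:
 */
#include <stdio.h>

int main(void)
{
        unsigned long long dirty = 200000;      /* global limit, pages */
        long numerator = 3, denominator = 4;    /* fprop fraction, assumed */
        unsigned int bdi_min_ratio = 0, min_ratio = 0, max_ratio = 100;
        unsigned long long bdi_dirty;

        bdi_dirty = dirty * (100 - bdi_min_ratio) / 100;
        bdi_dirty = bdi_dirty * numerator / denominator;
        bdi_dirty += dirty * min_ratio / 100;
        if (bdi_dirty > dirty * max_ratio / 100)
                bdi_dirty = dirty * max_ratio / 100;

        printf("bdi share = %llu pages\n", bdi_dirty);  /* 150000 */
        return 0;
}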
6c14ae1e9 writeback: dirty ...
/*
5a5374856 mm/page-writeback...
 *                           setpoint - dirty 3
 *        f(dirty) := 1.0 + (----------------)
 *                           limit - setpoint
 *
 * it's a 3rd order polynomial that subjects to
 *
 * (1) f(freerun)  = 2.0 => rampup dirty_ratelimit reasonably fast
 * (2) f(setpoint) = 1.0 => the balance point
 * (3) f(limit)    = 0   => the hard limit
 * (4) df/dx      <= 0   => negative feedback control
 * (5) the closer to setpoint, the smaller |df/dx| (and the reverse)
 *     => fast response on large errors; small oscillation near setpoint
 */
static inline long long pos_ratio_polynom(unsigned long setpoint,
                                          unsigned long dirty,
                                          unsigned long limit)
{
        long long pos_ratio;
        long x;

        x = div_s64(((s64)setpoint - (s64)dirty) << RATELIMIT_CALC_SHIFT,
                    limit - setpoint + 1);
        pos_ratio = x;
        pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
        pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
        pos_ratio += 1 << RATELIMIT_CALC_SHIFT;

        return clamp(pos_ratio, 0LL, 2LL << RATELIMIT_CALC_SHIFT);
}
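/*
 * Editor's illustration -- not part of the kernel. The polynomial above is
 * documented to hit f(freerun) = 2.0, f(setpoint) = 1.0 and f(limit) = 0.
 * A userspace re-implementation checks the three anchor points with
 * hypothetical freerun/limit values; 1.0 is 1024 in this fixed-point scale
 * (RATELIMIT_CALC_SHIFT = 10).
 */
#include <stdio.h>

#define UNIT (1 << 10)          /* mirrors 1 << RATELIMIT_CALC_SHIFT */

static long long f(long setpoint, long dirty, long limit)
{
        long long x = (long long)(setpoint - dirty) * UNIT /
                      (limit - setpoint + 1);
        long long p = x * x / UNIT * x / UNIT;  /* x^3, kept scaled */

        p += UNIT;
        return p < 0 ? 0 : (p > 2 * UNIT ? 2 * UNIT : p);
}

int main(void)
{
        long freerun = 100000, limit = 200000;          /* assumed, pages */
        long setpoint = (freerun + limit) / 2;

        printf("f(freerun)=%lld f(setpoint)=%lld f(limit)=%lld\n",
               f(setpoint, freerun, limit),     /* ~2045, i.e. ~2.0 */
               f(setpoint, setpoint, limit),    /* 1024, i.e. 1.0 */
               f(setpoint, limit, limit));      /* ~3, i.e. ~0 */
        return 0;
}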
6c14ae1e9 writeback: dirty ...
/*
 * Dirty position control.
 *
 * (o) global/bdi setpoints
 *
 * We want the dirty pages be balanced around the global/bdi setpoints.
 * When the number of dirty pages is higher/lower than the setpoint, the
 * dirty position control ratio (and hence task dirty ratelimit) will be
 * decreased/increased to bring the dirty pages back to the setpoint.
 *
 *     pos_ratio = 1 << RATELIMIT_CALC_SHIFT
 *
 *     if (dirty < setpoint) scale up   pos_ratio
 *     if (dirty > setpoint) scale down pos_ratio
 *
 *     if (bdi_dirty < bdi_setpoint) scale up   pos_ratio
 *     if (bdi_dirty > bdi_setpoint) scale down pos_ratio
 *
 *     task_ratelimit = dirty_ratelimit * pos_ratio >> RATELIMIT_CALC_SHIFT
 *
 * (o) global control line
 *
 *     ^ pos_ratio
 *     |
 *     |            |<===== global dirty control scope ======>|
 * 2.0 .............*
 *     |            .*
 *     |            . *
 *     |            .   *
 *     |            .     *
 *     |            .        *
 *     |            .           *
 * 1.0 ................................*
 *     |            .                  .     *
 *     |            .                  .        *
 *     |            .                  .           *
 *     |            .                  .              *
 *     |            .                  .                 *
 *   0 +------------.------------------.----------------------*------------->
 *           freerun^          setpoint^                 limit^   dirty pages
 *
 * (o) bdi control line
 *
 *     ^ pos_ratio
 *     |
 *     |            *
 *     |              *
 *     |                *
 *     |                  *
 *     |                    * |<=========== span ============>|
 * 1.0 .......................*
 *     |                      . *
 *     |                      .   *
 *     |                      .     *
 *     |                      .       *
 *     |                      .         *
 *     |                      .           *
 *     |                      .             *
 *     |                      .               *
 *     |                      .                 *
 *     |                      .                   *
 *     |                      .                     *
 * 1/4 ...............................................* * * * * * * * * * * *
 *     |                      .                         .
 *     |                      .                           .
 *     |                      .                             .
 *   0 +----------------------.-------------------------------.------------->
 *                bdi_setpoint^                    x_intercept^
 *
 * The bdi control line won't drop below pos_ratio=1/4, so that bdi_dirty can
 * be smoothly throttled down to normal if it starts high in situations like
 * - start writing to a slow SD card and a fast disk at the same time. The SD
 *   card's bdi_dirty may rush to many times higher than bdi_setpoint.
 * - the bdi dirty thresh drops quickly due to change of JBOD workload
 */
static unsigned long bdi_position_ratio(struct backing_dev_info *bdi,
                                        unsigned long thresh,
                                        unsigned long bg_thresh,
                                        unsigned long dirty,
                                        unsigned long bdi_thresh,
                                        unsigned long bdi_dirty)
{
        unsigned long write_bw = bdi->avg_write_bandwidth;
        unsigned long freerun = dirty_freerun_ceiling(thresh, bg_thresh);
        unsigned long limit = hard_dirty_limit(thresh);
        unsigned long x_intercept;
        unsigned long setpoint;         /* dirty pages' target balance point */
        unsigned long bdi_setpoint;
        unsigned long span;
        long long pos_ratio;            /* for scaling up/down the rate limit */
        long x;

        if (unlikely(dirty >= limit))
                return 0;

        /*
         * global setpoint
         *
5a5374856 mm/page-writeback...
         * See comment for pos_ratio_polynom().
         */
        setpoint = (freerun + limit) / 2;
        pos_ratio = pos_ratio_polynom(setpoint, dirty, limit);

        /*
         * The strictlimit feature is a tool preventing mistrusted filesystems
         * from growing a large number of dirty pages before throttling. For
         * such filesystems balance_dirty_pages always checks bdi counters
         * against bdi limits. Even if global "nr_dirty" is under "freerun".
         * This is especially important for fuse which sets bdi->max_ratio to
         * 1% by default. Without strictlimit feature, fuse writeback may
         * consume arbitrary amount of RAM because it is accounted in
         * NR_WRITEBACK_TEMP which is not involved in calculating "nr_dirty".
6c14ae1e9 writeback: dirty ...
         *
5a5374856 mm/page-writeback...
         * Here, in bdi_position_ratio(), we calculate pos_ratio based on
         * two values: bdi_dirty and bdi_thresh. Let's consider an example:
         * total amount of RAM is 16GB, bdi->max_ratio is equal to 1%, global
         * limits are set by default to 10% and 20% (background and throttle).
         * Then bdi_thresh is 1% of 20% of 16GB. This amounts to ~8K pages.
         * bdi_dirty_limit(bdi, bg_thresh) is about ~4K pages. bdi_setpoint is
         * about ~6K pages (as the average of background and throttle bdi
         * limits). The 3rd order polynomial will provide positive feedback if
         * bdi_dirty is under bdi_setpoint and vice versa.
6c14ae1e9 writeback: dirty ...
         *
5a5374856 mm/page-writeback...
         * Note, that we cannot use global counters in these calculations
         * because we want to throttle process writing to a strictlimit BDI
         * much earlier than global "freerun" is reached (~23MB vs. ~2.3GB
         * in the example above).
6c14ae1e9 writeback: dirty ...
         */
5a5374856 mm/page-writeback...
        if (unlikely(bdi->capabilities & BDI_CAP_STRICTLIMIT)) {
                long long bdi_pos_ratio;
                unsigned long bdi_bg_thresh;

                if (bdi_dirty < 8)
                        return min_t(long long, pos_ratio * 2,
                                     2 << RATELIMIT_CALC_SHIFT);

                if (bdi_dirty >= bdi_thresh)
                        return 0;

                bdi_bg_thresh = div_u64((u64)bdi_thresh * bg_thresh, thresh);
                bdi_setpoint = dirty_freerun_ceiling(bdi_thresh,
                                                     bdi_bg_thresh);

                if (bdi_setpoint == 0 || bdi_setpoint == bdi_thresh)
                        return 0;

                bdi_pos_ratio = pos_ratio_polynom(bdi_setpoint, bdi_dirty,
                                                  bdi_thresh);

                /*
                 * Typically, for strictlimit case, bdi_setpoint << setpoint
                 * and pos_ratio >> bdi_pos_ratio. In the other words global
                 * state ("dirty") is not limiting factor and we have to
                 * make decision based on bdi counters. But there is an
                 * important case when global pos_ratio should get precedence:
                 * global limits are exceeded (e.g. due to activities on other
                 * BDIs) while given strictlimit BDI is below limit.
                 *
                 * "pos_ratio * bdi_pos_ratio" would work for the case above,
                 * but it would look too non-natural for the case of all
                 * activity in the system coming from a single strictlimit BDI
                 * with bdi->max_ratio == 100%.
                 *
                 * Note that min() below somewhat changes the dynamics of the
                 * control system. Normally, pos_ratio value can be well over 3
                 * (when globally we are at freerun and bdi is well below bdi
                 * setpoint). Now the maximum pos_ratio in the same situation
                 * is 2. We might want to tweak this if we observe the control
                 * system is too slow to adapt.
                 */
                return min(pos_ratio, bdi_pos_ratio);
        }
6c14ae1e9 writeback: dirty ...

        /*
         * We have computed basic pos_ratio above based on global situation. If
         * the bdi is over/under its share of dirty pages, we want to scale
         * pos_ratio further down/up. That is done by the following mechanism.
         */

        /*
         * bdi setpoint
         *
         *        f(bdi_dirty) := 1.0 + k * (bdi_dirty - bdi_setpoint)
         *
         *                        x_intercept - bdi_dirty
         *                     := --------------------------
         *                        x_intercept - bdi_setpoint
         *
         * The main bdi control line is a linear function that subjects to
         *
         * (1) f(bdi_setpoint) = 1.0
         * (2) k = - 1 / (8 * write_bw)  (in single bdi case)
         *     or equally: x_intercept = bdi_setpoint + 8 * write_bw
         *
         * For single bdi case, the dirty pages are observed to fluctuate
         * regularly within range
         *        [bdi_setpoint - write_bw/2, bdi_setpoint + write_bw/2]
         * for various filesystems, where (2) can yield in a reasonable 12.5%
         * fluctuation range for pos_ratio.
         *
         * For JBOD case, bdi_thresh (not bdi_dirty!) could fluctuate up to its
         * own size, so move the slope over accordingly and choose a slope that
         * yields 100% pos_ratio fluctuation on suddenly doubled bdi_thresh.
         */
        if (unlikely(bdi_thresh > thresh))
                bdi_thresh = thresh;
aed21ad28 writeback: commen...
        /*
         * It's very possible that bdi_thresh is close to 0 not because the
         * device is slow, but that it has remained inactive for long time.
         * Honour such devices a reasonable good (hopefully IO efficient)
         * threshold, so that the occasional writes won't be blocked and active
         * writes can rampup the threshold quickly.
         */
8927f66c4 writeback: dirty ...
        bdi_thresh = max(bdi_thresh, (limit - dirty) / 8);
6c14ae1e9 writeback: dirty ...
        /*
         * scale global setpoint to bdi's:
         *      bdi_setpoint = setpoint * bdi_thresh / thresh
         */
        x = div_u64((u64)bdi_thresh << 16, thresh + 1);
        bdi_setpoint = setpoint * (u64)x >> 16;
        /*
         * Use span=(8*write_bw) in single bdi case as indicated by
         * (thresh - bdi_thresh ~= 0) and transit to bdi_thresh in JBOD case.
         *
         *        bdi_thresh                    thresh - bdi_thresh
         * span = ---------- * (8 * write_bw) + ------------------- * bdi_thresh
         *          thresh                            thresh
         */
        span = (thresh - bdi_thresh + 8 * write_bw) * (u64)x >> 16;
        x_intercept = bdi_setpoint + span;

        if (bdi_dirty < x_intercept - span / 4) {
50657fc4d writeback: fix pp...
                pos_ratio = div_u64(pos_ratio * (x_intercept - bdi_dirty),
                                    x_intercept - bdi_setpoint + 1);
6c14ae1e9 writeback: dirty ...
        } else
                pos_ratio /= 4;
8927f66c4 writeback: dirty ...
        /*
         * bdi reserve area, safeguard against dirty pool underrun and disk idle
         * It may push the desired control point of global dirty pages higher
         * than setpoint.
         */
        x_intercept = bdi_thresh / 2;
        if (bdi_dirty < x_intercept) {
50657fc4d writeback: fix pp...
                if (bdi_dirty > x_intercept / 8)
                        pos_ratio = div_u64(pos_ratio * x_intercept, bdi_dirty);
                else
8927f66c4 writeback: dirty ...
                        pos_ratio *= 8;
        }
6c14ae1e9 writeback: dirty ...
        return pos_ratio;
}
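/*
 * Editor's illustration -- not part of the kernel. The main bdi control
 * line above is linear: factor = (x_intercept - bdi_dirty) /
 * (x_intercept - bdi_setpoint), with x_intercept = bdi_setpoint +
 * 8 * write_bw in the single-bdi case. So the typical +-write_bw/2
 * fluctuation around the setpoint sweeps the factor across the 12.5%
 * range the comment mentions. Hypothetical numbers:
 */
#include <stdio.h>

int main(void)
{
        double write_bw = 25600;                /* pages/s, assumed */
        double bdi_setpoint = 100000;           /* pages, assumed */
        double x_intercept = bdi_setpoint + 8 * write_bw;
        double d;

        for (d = bdi_setpoint - write_bw / 2;
             d <= bdi_setpoint + write_bw / 2; d += write_bw / 2)
                printf("bdi_dirty=%.0f -> factor %.4f\n", d,
                       (x_intercept - d) / (x_intercept - bdi_setpoint));
        return 0;       /* prints 1.0625, 1.0000, 0.9375 */
}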
e98be2d59 writeback: bdi wr...
static void bdi_update_write_bandwidth(struct backing_dev_info *bdi,
                                       unsigned long elapsed,
                                       unsigned long written)
{
        const unsigned long period = roundup_pow_of_two(3 * HZ);
        unsigned long avg = bdi->avg_write_bandwidth;
        unsigned long old = bdi->write_bandwidth;
        u64 bw;

        /*
         * bw = written * HZ / elapsed
         *
         *                   bw * elapsed + write_bandwidth * (period - elapsed)
         * write_bandwidth = ---------------------------------------------------
         *                                          period
         */
        bw = written - bdi->written_stamp;
        bw *= HZ;
        if (unlikely(elapsed > period)) {
                do_div(bw, elapsed);
                avg = bw;
                goto out;
        }
        bw += (u64)bdi->write_bandwidth * (period - elapsed);
        bw >>= ilog2(period);

        /*
         * one more level of smoothing, for filtering out sudden spikes
         */
        if (avg > old && old >= (unsigned long)bw)
                avg -= (avg - old) >> 3;

        if (avg < old && old <= (unsigned long)bw)
                avg += (old - avg) >> 3;

out:
        bdi->write_bandwidth = bw;
        bdi->avg_write_bandwidth = avg;
}
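/*
 * Editor's illustration -- not part of the kernel. The estimator above is a
 * period-weighted average: a 200ms sample only nudges the estimate by
 * elapsed/period. Hypothetical numbers with HZ = 1000 (period rounds up to
 * 4096 jiffies), an old estimate of 25600 pages/s and a sample suddenly
 * measuring twice that:
 */
#include <stdio.h>

int main(void)
{
        unsigned long hz = 1000;                /* assumed */
        unsigned long period = 4096;            /* roundup_pow_of_two(3 * hz) */
        unsigned long elapsed = hz / 5;         /* ~200ms */
        unsigned long long old_bw = 25600, sample = 51200;      /* pages/s */
        unsigned long long bw;

        bw = sample * elapsed + old_bw * (period - elapsed);
        bw /= period;           /* the kernel's bw >>= ilog2(period) */

        printf("new estimate = %llu pages/s\n", bw);
        return 0;       /* 26850: only ~5% of the way toward the spike */
}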
c42843f2f writeback: introd...
/*
 * The global dirtyable memory and dirty threshold could be suddenly knocked
 * down by a large amount (eg. on the startup of KVM in a swapless system).
 * This may throw the system into deep dirty exceeded state and throttle
 * heavy/light dirtiers alike. To retain good responsiveness, maintain
 * global_dirty_limit for tracking slowly down to the knocked down dirty
 * threshold.
 */
static void update_dirty_limit(unsigned long thresh, unsigned long dirty)
{
        unsigned long limit = global_dirty_limit;

        /*
         * Follow up in one step.
         */
        if (limit < thresh) {
                limit = thresh;
                goto update;
        }

        /*
         * Follow down slowly. Use the higher one as the target, because thresh
         * may drop below dirty. This is exactly the reason to introduce
         * global_dirty_limit which is guaranteed to lie above the dirty pages.
         */
        thresh = max(thresh, dirty);
        if (limit > thresh) {
                limit -= (limit - thresh) >> 5;
                goto update;
        }
        return;
update:
        global_dirty_limit = limit;
}

static void global_update_bandwidth(unsigned long thresh,
                                    unsigned long dirty,
                                    unsigned long now)
{
        static DEFINE_SPINLOCK(dirty_lock);
        static unsigned long update_time;

        /*
         * check locklessly first to optimize away locking for the most time
         */
        if (time_before(now, update_time + BANDWIDTH_INTERVAL))
                return;

        spin_lock(&dirty_lock);
        if (time_after_eq(now, update_time + BANDWIDTH_INTERVAL)) {
                update_dirty_limit(thresh, dirty);
                update_time = now;
        }
        spin_unlock(&dirty_lock);
}
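/*
 * Editor's illustration -- not part of the kernel. update_dirty_limit()
 * above follows a knocked-down threshold at 1/32 of the remaining gap per
 * BANDWIDTH_INTERVAL (~200ms), i.e. geometrically. A hypothetical drop from
 * 200,000 to 50,000 pages takes roughly half a minute to track down:
 */
#include <stdio.h>

int main(void)
{
        unsigned long limit = 200000, thresh = 50000;   /* assumed, pages */
        int steps = 0;

        while (limit - thresh > 1000) {         /* until within 1000 pages */
                limit -= (limit - thresh) >> 5;
                steps++;
        }
        printf("within 1000 pages after %d updates (~%d s)\n",
               steps, steps / 5);               /* ~158 updates, ~31 s */
        return 0;
}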
be3ffa276 writeback: dirty ...
/*
 * Maintain bdi->dirty_ratelimit, the base dirty throttle rate.
 *
 * Normal bdi tasks will be curbed at or below it in long term.
 * Obviously it should be around (write_bw / N) when there are N dd tasks.
 */
static void bdi_update_dirty_ratelimit(struct backing_dev_info *bdi,
                                       unsigned long thresh,
                                       unsigned long bg_thresh,
                                       unsigned long dirty,
                                       unsigned long bdi_thresh,
                                       unsigned long bdi_dirty,
                                       unsigned long dirtied,
                                       unsigned long elapsed)
{
7381131cb writeback: stabil...
        unsigned long freerun = dirty_freerun_ceiling(thresh, bg_thresh);
        unsigned long limit = hard_dirty_limit(thresh);
        unsigned long setpoint = (freerun + limit) / 2;
be3ffa276 writeback: dirty ...
        unsigned long write_bw = bdi->avg_write_bandwidth;
        unsigned long dirty_ratelimit = bdi->dirty_ratelimit;
        unsigned long dirty_rate;
        unsigned long task_ratelimit;
        unsigned long balanced_dirty_ratelimit;
        unsigned long pos_ratio;
7381131cb writeback: stabil...
        unsigned long step;
        unsigned long x;
be3ffa276 writeback: dirty ...

        /*
         * The dirty rate will match the writeout rate in long term, except
         * when dirty pages are truncated by userspace or re-dirtied by FS.
         */
        dirty_rate = (dirtied - bdi->dirtied_stamp) * HZ / elapsed;

        pos_ratio = bdi_position_ratio(bdi, thresh, bg_thresh, dirty,
                                       bdi_thresh, bdi_dirty);
        /*
         * task_ratelimit reflects each dd's dirty rate for the past 200ms.
         */
        task_ratelimit = (u64)dirty_ratelimit *
                                        pos_ratio >> RATELIMIT_CALC_SHIFT;
        task_ratelimit++; /* it helps rampup dirty_ratelimit from tiny values */

        /*
         * A linear estimation of the "balanced" throttle rate. The theory is,
         * if there are N dd tasks, each throttled at task_ratelimit, the bdi's
         * dirty_rate will be measured to be (N * task_ratelimit). So the below
         * formula will yield the balanced rate limit (write_bw / N).
         *
         * Note that the expanded form is not a pure rate feedback:
         *      rate_(i+1) = rate_(i) * (write_bw / dirty_rate)              (1)
         * but also takes pos_ratio into account:
         *      rate_(i+1) = rate_(i) * (write_bw / dirty_rate) * pos_ratio  (2)
         *
         * (1) is not realistic because pos_ratio also takes part in balancing
         * the dirty rate.  Consider the state
         *      pos_ratio = 0.5                                              (3)
         *      rate = 2 * (write_bw / N)                                    (4)
         * If (1) is used, it will stuck in that state! Because each dd will
         * be throttled at
         *      task_ratelimit = pos_ratio * rate = (write_bw / N)           (5)
         * yielding
         *      dirty_rate = N * task_ratelimit = write_bw                   (6)
         * put (6) into (1) we get
         *      rate_(i+1) = rate_(i)                                        (7)
         *
         * So we end up using (2) to always keep
         *      rate_(i+1) ~= (write_bw / N)                                 (8)
         * regardless of the value of pos_ratio. As long as (8) is satisfied,
         * pos_ratio is able to drive itself to 1.0, which is not only where
         * the dirty count meet the setpoint, but also where the slope of
         * pos_ratio is most flat and hence task_ratelimit is least fluctuated.
         */
        balanced_dirty_ratelimit = div_u64((u64)task_ratelimit * write_bw,
                                           dirty_rate | 1);
bdaac4902 writeback: balanc...
        /*
         * balanced_dirty_ratelimit ~= (write_bw / N) <= write_bw
         */
        if (unlikely(balanced_dirty_ratelimit > write_bw))
                balanced_dirty_ratelimit = write_bw;
be3ffa276 writeback: dirty ...

7381131cb writeback: stabil...
        /*
         * We could safely do this and return immediately:
         *
         *      bdi->dirty_ratelimit = balanced_dirty_ratelimit;
         *
         * However to get a more stable dirty_ratelimit, the below elaborated
331cbdeed writeback: Fix so...
         * code makes use of task_ratelimit to filter out singular points and
7381131cb writeback: stabil...
         * limit the step size.
         *
         * The below code essentially only uses the relative value of
         *
         *      task_ratelimit - dirty_ratelimit
         *      = (pos_ratio - 1) * dirty_ratelimit
         *
         * which reflects the direction and size of dirty position error.
         */

        /*
         * dirty_ratelimit will follow balanced_dirty_ratelimit iff
         * task_ratelimit is on the same side of dirty_ratelimit, too.
         * For example, when
         * - dirty_ratelimit > balanced_dirty_ratelimit
         * - dirty_ratelimit > task_ratelimit (dirty pages are above setpoint)
         * lowering dirty_ratelimit will help meet both the position and rate
         * control targets. Otherwise, don't update dirty_ratelimit if it will
         * only help meet the rate target. After all, what the users ultimately
         * feel and care are stable dirty rate and small position error.
         *
         * |task_ratelimit - dirty_ratelimit| is used to limit the step size
331cbdeed writeback: Fix so...
         * and filter out the singular points of balanced_dirty_ratelimit. Which
7381131cb writeback: stabil...
         * keeps jumping around randomly and can even leap far away at times
         * due to the small 200ms estimation period of dirty_rate (we want to
         * keep that period small to reduce time lags).
         */
        step = 0;
5a5374856 mm/page-writeback...

        /*
         * For strictlimit case, calculations above were based on bdi counters
         * and limits (starting from pos_ratio = bdi_position_ratio() and up to
         * balanced_dirty_ratelimit = task_ratelimit * write_bw / dirty_rate).
         * Hence, to calculate "step" properly, we have to use bdi_dirty as
         * "dirty" and bdi_setpoint as "setpoint".
         *
         * We rampup dirty_ratelimit forcibly if bdi_dirty is low because
         * it's possible that bdi_thresh is close to zero due to inactivity
         * of backing device (see the implementation of bdi_dirty_limit()).
         */
        if (unlikely(bdi->capabilities & BDI_CAP_STRICTLIMIT)) {
                dirty = bdi_dirty;
                if (bdi_dirty < 8)
                        setpoint = bdi_dirty + 1;
                else
                        setpoint = (bdi_thresh +
                                    bdi_dirty_limit(bdi, bg_thresh)) / 2;
        }
7381131cb writeback: stabil...
        if (dirty < setpoint) {
                x = min(bdi->balanced_dirty_ratelimit,
                         min(balanced_dirty_ratelimit, task_ratelimit));
                if (dirty_ratelimit < x)
                        step = x - dirty_ratelimit;
        } else {
                x = max(bdi->balanced_dirty_ratelimit,
                         max(balanced_dirty_ratelimit, task_ratelimit));
                if (dirty_ratelimit > x)
                        step = dirty_ratelimit - x;
        }

        /*
         * Don't pursue 100% rate matching. It's impossible since the balanced
         * rate itself is constantly fluctuating. So decrease the track speed
         * when it gets close to the target. Helps eliminate pointless tremors.
         */
        step >>= dirty_ratelimit / (2 * step + 1);
        /*
         * Limit the tracking speed to avoid overshooting.
         */
        step = (step + 7) / 8;

        if (dirty_ratelimit < balanced_dirty_ratelimit)
                dirty_ratelimit += step;
        else
                dirty_ratelimit -= step;

        bdi->dirty_ratelimit = max(dirty_ratelimit, 1UL);
        bdi->balanced_dirty_ratelimit = balanced_dirty_ratelimit;
b48c104d2 writeback: trace ...

        trace_bdi_dirty_ratelimit(bdi, dirty_rate, task_ratelimit);
be3ffa276 writeback: dirty ...
}
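/*
 * Editor's illustration -- not part of the kernel. The feedback documented
 * above, rate_(i+1) = rate_(i) * (write_bw / dirty_rate) * pos_ratio,
 * settles at write_bw / N for N dirtier tasks no matter what pos_ratio is:
 * each task dirties at rate * pos_ratio, so dirty_rate = N * rate *
 * pos_ratio and pos_ratio cancels. Hypothetical numbers, 4 tasks on a
 * 40,000 pages/s device:
 */
#include <stdio.h>

int main(void)
{
        double write_bw = 40000, pos_ratio = 0.5, rate = 25000; /* assumed */
        int n_tasks = 4, i;

        for (i = 0; i < 3; i++) {
                double task_ratelimit = rate * pos_ratio;
                double dirty_rate = n_tasks * task_ratelimit;

                rate = task_ratelimit * write_bw / dirty_rate;
                printf("iteration %d: rate = %.0f pages/s\n", i, rate);
        }
        return 0;       /* pins at write_bw / n_tasks = 10000 */
}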
e98be2d59 writeback: bdi wr...
void __bdi_update_bandwidth(struct backing_dev_info *bdi,
c42843f2f writeback: introd...
                            unsigned long thresh,
af6a31138 writeback: add bg...
                            unsigned long bg_thresh,
c42843f2f writeback: introd...
                            unsigned long dirty,
                            unsigned long bdi_thresh,
                            unsigned long bdi_dirty,
e98be2d59 writeback: bdi wr...
                            unsigned long start_time)
{
        unsigned long now = jiffies;
        unsigned long elapsed = now - bdi->bw_time_stamp;
be3ffa276 writeback: dirty ...
        unsigned long dirtied;
e98be2d59 writeback: bdi wr...
        unsigned long written;

        /*
         * rate-limit, only update once every 200ms.
         */
        if (elapsed < BANDWIDTH_INTERVAL)
                return;
be3ffa276 writeback: dirty ...
        dirtied = percpu_counter_read(&bdi->bdi_stat[BDI_DIRTIED]);
e98be2d59 writeback: bdi wr...
        written = percpu_counter_read(&bdi->bdi_stat[BDI_WRITTEN]);

        /*
         * Skip quiet periods when disk bandwidth is under-utilized.
         * (at least 1s idle time between two flusher runs)
         */
        if (elapsed > HZ && time_before(bdi->bw_time_stamp, start_time))
                goto snapshot;
be3ffa276 writeback: dirty ...
        if (thresh) {
c42843f2f writeback: introd...
                global_update_bandwidth(thresh, dirty, now);
be3ffa276 writeback: dirty ...
                bdi_update_dirty_ratelimit(bdi, thresh, bg_thresh, dirty,
                                           bdi_thresh, bdi_dirty,
                                           dirtied, elapsed);
        }
e98be2d59 writeback: bdi wr...
        bdi_update_write_bandwidth(bdi, elapsed, written);

snapshot:
be3ffa276 writeback: dirty ...
        bdi->dirtied_stamp = dirtied;
e98be2d59 writeback: bdi wr...
        bdi->written_stamp = written;
        bdi->bw_time_stamp = now;
}

static void bdi_update_bandwidth(struct backing_dev_info *bdi,
c42843f2f writeback: introd...
                                 unsigned long thresh,
af6a31138 writeback: add bg...
                                 unsigned long bg_thresh,
c42843f2f writeback: introd...
                                 unsigned long dirty,
                                 unsigned long bdi_thresh,
                                 unsigned long bdi_dirty,
e98be2d59 writeback: bdi wr...
                                 unsigned long start_time)
{
        if (time_is_after_eq_jiffies(bdi->bw_time_stamp + BANDWIDTH_INTERVAL))
                return;
        spin_lock(&bdi->wb.list_lock);
af6a31138 writeback: add bg...
        __bdi_update_bandwidth(bdi, thresh, bg_thresh, dirty, bdi_thresh,
                               bdi_dirty, start_time);
e98be2d59 writeback: bdi wr...
        spin_unlock(&bdi->wb.list_lock);
}
1da177e4c Linux-2.6.12-rc2
/*
d0e1d66b5 writeback: remove...
 * After a task dirtied this many pages, balance_dirty_pages_ratelimited()
9d823e8f6 writeback: per ta...
 * will look to see if it needs to start dirty throttling.
 *
 * If dirty_poll_interval is too low, big NUMA machines will call the expensive
 * global_page_state() too often. So scale it near-sqrt to the safety margin
 * (the number of pages we may dirty without exceeding the dirty limits).
 */
static unsigned long dirty_poll_interval(unsigned long dirty,
                                         unsigned long thresh)
{
        if (thresh > dirty)
                return 1UL << (ilog2(thresh - dirty) >> 1);

        return 1;
}
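/*
 * Editor's illustration -- not part of the kernel. dirty_poll_interval()
 * above returns roughly the square root of the safety margin, so tasks
 * re-check the limits more and more often as they approach them:
 */
#include <stdio.h>

int main(void)
{
        unsigned long margin;

        for (margin = 1000000; margin >= 100; margin /= 10) {
                unsigned long m = margin;
                int log2 = 0;

                while (m >>= 1)                 /* open-coded ilog2() */
                        log2++;
                printf("margin %7lu pages -> poll every %4lu pages dirtied\n",
                       margin, 1UL << (log2 >> 1));
        }
        return 0;       /* 512, 256, 64, 16, 8 */
}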
e3b6c655b writeback: fix ne...
static unsigned long bdi_max_pause(struct backing_dev_info *bdi,
                                   unsigned long bdi_dirty)
c8462cc9d writeback: limit ...
{
e3b6c655b writeback: fix ne...
        unsigned long bw = bdi->avg_write_bandwidth;
        unsigned long t;
c8462cc9d writeback: limit ...

7ccb9ad53 writeback: max, m...
        /*
         * Limit pause time for small memory systems. If sleeping for too long
         * time, a small pool of dirty/writeback pages may go empty and disk go
         * idle.
         *
         * 8 serves as the safety ratio.
         */
        t = bdi_dirty / (1 + bw / roundup_pow_of_two(1 + HZ / 8));
        t++;
e3b6c655b writeback: fix ne...
        return min_t(unsigned long, t, MAX_PAUSE);
7ccb9ad53 writeback: max, m...
}

static long bdi_min_pause(struct backing_dev_info *bdi,
                          long max_pause,
                          unsigned long task_ratelimit,
                          unsigned long dirty_ratelimit,
                          int *nr_dirtied_pause)
c8462cc9d writeback: limit ...
{
7ccb9ad53 writeback: max, m...
        long hi = ilog2(bdi->avg_write_bandwidth);
        long lo = ilog2(bdi->dirty_ratelimit);
        long t;         /* target pause */
        long pause;     /* estimated next pause */
        int pages;      /* target nr_dirtied_pause */
c8462cc9d writeback: limit ...

7ccb9ad53 writeback: max, m...
        /* target for 10ms pause on 1-dd case */
        t = max(1, HZ / 100);
c8462cc9d writeback: limit ...

        /*
         * Scale up pause time for concurrent dirtiers in order to reduce CPU
         * overheads.
         *
7ccb9ad53 writeback: max, m...
         * (N * 10ms) on 2^N concurrent tasks.
c8462cc9d writeback: limit ...
         */
        if (hi > lo)
7ccb9ad53 writeback: max, m...
                t += (hi - lo) * (10 * HZ) / 1024;
c8462cc9d writeback: limit ...

        /*
7ccb9ad53 writeback: max, m...
         * This is a bit convoluted. We try to base the next nr_dirtied_pause
         * on the much more stable dirty_ratelimit. However the next pause time
         * will be computed based on task_ratelimit and the two rate limits may
         * depart considerably at some time. Especially if task_ratelimit goes
         * below dirty_ratelimit/2 and the target pause is max_pause, the next
         * pause time will be max_pause*2 _trimmed down_ to max_pause. As a
         * result task_ratelimit won't be executed faithfully, which could
         * eventually bring down dirty_ratelimit.
c8462cc9d writeback: limit ...
         *
7ccb9ad53 writeback: max, m...
         * We apply two rules to fix it up:
         * 1) try to estimate the next pause time and if necessary, use a lower
         *    nr_dirtied_pause so as not to exceed max_pause. When this happens,
         *    nr_dirtied_pause will be "dancing" with task_ratelimit.
         * 2) limit the target pause time to max_pause/2, so that the normal
         *    small fluctuations of task_ratelimit won't trigger rule (1) and
         *    nr_dirtied_pause will remain as stable as dirty_ratelimit.
c8462cc9d writeback: limit ...
         */
7ccb9ad53 writeback: max, m...
        t = min(t, 1 + max_pause / 2);
        pages = dirty_ratelimit * t / roundup_pow_of_two(HZ);
c8462cc9d writeback: limit ...

        /*
5b9b35743 writeback: avoid ...
         * Tiny nr_dirtied_pause is found to hurt I/O performance in the test
         * case fio-mmap-randwrite-64k, which does 16*{sync read, async write}.
         * When the 16 consecutive reads are often interrupted by some dirty
         * throttling pause during the async writes, cfq will go into idles
         * (deadline is fine). So push nr_dirtied_pause as high as possible
         * until reaches DIRTY_POLL_THRESH=32 pages.
c8462cc9d writeback: limit ...
         */
5b9b35743 writeback: avoid ...
        if (pages < DIRTY_POLL_THRESH) {
                t = max_pause;
                pages = dirty_ratelimit * t / roundup_pow_of_two(HZ);
                if (pages > DIRTY_POLL_THRESH) {
                        pages = DIRTY_POLL_THRESH;
                        t = HZ * DIRTY_POLL_THRESH / dirty_ratelimit;
                }
        }
7ccb9ad53 writeback: max, m...
        pause = HZ * pages / (task_ratelimit + 1);
        if (pause > max_pause) {
                t = max_pause;
                pages = task_ratelimit * t / roundup_pow_of_two(HZ);
        }
c8462cc9d writeback: limit ...

7ccb9ad53 writeback: max, m...
        *nr_dirtied_pause = pages;
c8462cc9d writeback: limit ...
        /*
7ccb9ad53 writeback: max, m...
         * The minimal pause time will normally be half the target pause time.
c8462cc9d writeback: limit ...
         */
5b9b35743 writeback: avoid ...
        return pages >= DIRTY_POLL_THRESH ? 1 + t / 2 : t;
c8462cc9d writeback: limit ...
}
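/*
 * Editor's illustration -- not part of the kernel. bdi_max_pause() above
 * caps a sleep so a small dirty pool cannot drain dry while the task
 * blocks; with the 8x safety ratio only a BDI holding many pause-lengths
 * of pages in flight gets the full 200ms. Hypothetical numbers, HZ = 1000
 * so one jiffy is one millisecond:
 */
#include <stdio.h>

int main(void)
{
        unsigned long max_pause = 1000 / 5;     /* MAX_PAUSE at assumed HZ */
        unsigned long bw = 25600;       /* avg write bandwidth, pages/s */
        unsigned long bdi_dirty;

        for (bdi_dirty = 400; bdi_dirty <= 40000; bdi_dirty *= 10) {
                /* 128 = roundup_pow_of_two(1 + HZ / 8) */
                unsigned long t = bdi_dirty / (1 + bw / 128) + 1;

                if (t > max_pause)
                        t = max_pause;
                printf("bdi_dirty=%5lu -> max pause %3lu ms\n", bdi_dirty, t);
        }
        return 0;       /* 2 ms, 20 ms, 200 ms */
}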
5a5374856 mm/page-writeback...
static inline void bdi_dirty_limits(struct backing_dev_info *bdi,
                                    unsigned long dirty_thresh,
                                    unsigned long background_thresh,
                                    unsigned long *bdi_dirty,
                                    unsigned long *bdi_thresh,
                                    unsigned long *bdi_bg_thresh)
{
        unsigned long bdi_reclaimable;

        /*
         * bdi_thresh is not treated as some limiting factor as
         * dirty_thresh, due to reasons
         * - in JBOD setup, bdi_thresh can fluctuate a lot
         * - in a system with HDD and USB key, the USB key may somehow
         *   go into state (bdi_dirty >> bdi_thresh) either because
         *   bdi_dirty starts high, or because bdi_thresh drops low.
         *   In this case we don't want to hard throttle the USB key
         *   dirtiers for 100 seconds until bdi_dirty drops under
         *   bdi_thresh. Instead the auxiliary bdi control line in
         *   bdi_position_ratio() will let the dirtier task progress
         *   at some rate <= (write_bw / 2) for bringing down bdi_dirty.
         */
        *bdi_thresh = bdi_dirty_limit(bdi, dirty_thresh);

        if (bdi_bg_thresh)
                *bdi_bg_thresh = div_u64((u64)*bdi_thresh *
                                         background_thresh,
                                         dirty_thresh);

        /*
         * In order to avoid the stacked BDI deadlock we need
         * to ensure we accurately count the 'dirty' pages when
         * the threshold is low.
         *
         * Otherwise it would be possible to get thresh+n pages
         * reported dirty, even though there are thresh-m pages
         * actually dirty; with m+n sitting in the percpu
         * deltas.
         */
        if (*bdi_thresh < 2 * bdi_stat_error(bdi)) {
                bdi_reclaimable = bdi_stat_sum(bdi, BDI_RECLAIMABLE);
                *bdi_dirty = bdi_reclaimable +
                             bdi_stat_sum(bdi, BDI_WRITEBACK);
        } else {
                bdi_reclaimable = bdi_stat(bdi, BDI_RECLAIMABLE);
                *bdi_dirty = bdi_reclaimable +
                             bdi_stat(bdi, BDI_WRITEBACK);
        }
}
9d823e8f6 writeback: per ta...
/*
1da177e4c Linux-2.6.12-rc2
 * balance_dirty_pages() must be called by processes which are generating dirty
 * data. It looks at the number of dirty pages in the machine and will force
143dfe861 writeback: IO-les...
 * the caller to wait once crossing the (background_thresh + dirty_thresh) / 2.
5b0830cb9 writeback: get ri...
 * If we're over `background_thresh' then the writeback threads are woken to
 * perform some writeout.
1da177e4c Linux-2.6.12-rc2
 */
3a2e9a5a2 writeback: balanc...
static void balance_dirty_pages(struct address_space *mapping,
143dfe861 writeback: IO-les...
                                unsigned long pages_dirtied)
1da177e4c Linux-2.6.12-rc2
{
143dfe861 writeback: IO-les...
        unsigned long nr_reclaimable;   /* = file_dirty + unstable_nfs */
7762741e3 writeback: consol...
        unsigned long nr_dirty;  /* = file_dirty + writeback + unstable_nfs */
364aeb284 mm: change dirty ...
        unsigned long background_thresh;
        unsigned long dirty_thresh;
83712358b writeback: dirty ...
        long period;
7ccb9ad53 writeback: max, m...
        long pause;
        long max_pause;
        long min_pause;
        int nr_dirtied_pause;
e50e37201 writeback: balanc...
        bool dirty_exceeded = false;
143dfe861 writeback: IO-les...
        unsigned long task_ratelimit;
7ccb9ad53 writeback: max, m...
        unsigned long dirty_ratelimit;
143dfe861 writeback: IO-les...
        unsigned long pos_ratio;
1da177e4c Linux-2.6.12-rc2
        struct backing_dev_info *bdi = mapping->backing_dev_info;
5a5374856 mm/page-writeback...
        bool strictlimit = bdi->capabilities & BDI_CAP_STRICTLIMIT;
e98be2d59 writeback: bdi wr...
        unsigned long start_time = jiffies;
1da177e4c Linux-2.6.12-rc2

        for (;;) {
83712358b writeback: dirty ... |
1338 |
unsigned long now = jiffies; |
5a5374856 mm/page-writeback... |
1339 1340 1341 1342 1343 |
unsigned long uninitialized_var(bdi_thresh); unsigned long thresh; unsigned long uninitialized_var(bdi_dirty); unsigned long dirty; unsigned long bg_thresh; |
83712358b writeback: dirty ... |
1344 |
|
143dfe861 writeback: IO-les... |
1345 1346 1347 1348 1349 1350 |
/* * Unstable writes are a feature of certain networked * filesystems (i.e. NFS) in which data may have been * written to the server's write cache, but has not yet * been flushed to permanent storage. */ |
5fce25a9d mm: speed up writ... |
1351 1352 |
nr_reclaimable = global_page_state(NR_FILE_DIRTY) + global_page_state(NR_UNSTABLE_NFS); |
7762741e3 writeback: consol... |
1353 |
nr_dirty = nr_reclaimable + global_page_state(NR_WRITEBACK); |
5fce25a9d mm: speed up writ... |
1354 |
|
16c4042f0 writeback: avoid ... |
1355 |
global_dirty_limits(&background_thresh, &dirty_thresh); |
5a5374856 mm/page-writeback... |
1356 1357 1358 1359 1360 1361 1362 1363 1364 1365 1366 |
if (unlikely(strictlimit)) { bdi_dirty_limits(bdi, dirty_thresh, background_thresh, &bdi_dirty, &bdi_thresh, &bg_thresh); dirty = bdi_dirty; thresh = bdi_thresh; } else { dirty = nr_dirty; thresh = dirty_thresh; bg_thresh = background_thresh; } |
16c4042f0 writeback: avoid ... |
1367 1368 1369 |
/* * Throttle it only when the background writeback cannot * catch up. This avoids (excessively) small writeouts
5a5374856 mm/page-writeback... |
1370 1371 1372 1373 1374 |
* when the bdi limits are ramping up in the !strictlimit case. * * In the strictlimit case, make the decision based on the bdi counters * and limits. Small writeouts when the bdi limits are ramping * up are the price we consciously pay for strictlimit-ing.
16c4042f0 writeback: avoid ... |
1375 |
*/ |
5a5374856 mm/page-writeback... |
1376 |
if (dirty <= dirty_freerun_ceiling(thresh, bg_thresh)) { |
83712358b writeback: dirty ... |
1377 1378 |
current->dirty_paused_when = now; current->nr_dirtied = 0; |
7ccb9ad53 writeback: max, m... |
1379 |
current->nr_dirtied_pause = |
5a5374856 mm/page-writeback... |
1380 |
dirty_poll_interval(dirty, thresh); |
16c4042f0 writeback: avoid ... |
1381 |
break; |
83712358b writeback: dirty ... |
1382 |
} |
16c4042f0 writeback: avoid ... |
1383 |
|
143dfe861 writeback: IO-les... |
1384 1385 |
if (unlikely(!writeback_in_progress(bdi))) bdi_start_background_writeback(bdi); |
5a5374856 mm/page-writeback... |
1386 1387 1388 |
if (!strictlimit) bdi_dirty_limits(bdi, dirty_thresh, background_thresh, &bdi_dirty, &bdi_thresh, NULL); |
5fce25a9d mm: speed up writ... |
1389 |
|
827919405 writeback: do str... |
1390 |
dirty_exceeded = (bdi_dirty > bdi_thresh) && |
5a5374856 mm/page-writeback... |
1391 |
((nr_dirty > dirty_thresh) || strictlimit); |
143dfe861 writeback: IO-les... |
1392 |
if (dirty_exceeded && !bdi->dirty_exceeded) |
04fbfdc14 mm: per device di... |
1393 |
bdi->dirty_exceeded = 1; |
1da177e4c Linux-2.6.12-rc2 |
1394 |
|
af6a31138 writeback: add bg... |
1395 1396 1397 |
bdi_update_bandwidth(bdi, dirty_thresh, background_thresh, nr_dirty, bdi_thresh, bdi_dirty, start_time); |
e98be2d59 writeback: bdi wr... |
1398 |
|
143dfe861 writeback: IO-les... |
1399 1400 1401 1402 |
dirty_ratelimit = bdi->dirty_ratelimit; pos_ratio = bdi_position_ratio(bdi, dirty_thresh, background_thresh, nr_dirty, bdi_thresh, bdi_dirty); |
3a73dbbc9 writeback: fix un... |
1403 1404 |
task_ratelimit = ((u64)dirty_ratelimit * pos_ratio) >> RATELIMIT_CALC_SHIFT; |
7ccb9ad53 writeback: max, m... |
1405 1406 1407 1408 |
max_pause = bdi_max_pause(bdi, bdi_dirty); min_pause = bdi_min_pause(bdi, max_pause, task_ratelimit, dirty_ratelimit, &nr_dirtied_pause); |
3a73dbbc9 writeback: fix un... |
1409 |
if (unlikely(task_ratelimit == 0)) { |
83712358b writeback: dirty ... |
1410 |
period = max_pause; |
c8462cc9d writeback: limit ... |
1411 |
pause = max_pause; |
143dfe861 writeback: IO-les... |
1412 |
goto pause; |
04fbfdc14 mm: per device di... |
1413 |
} |
83712358b writeback: dirty ... |
1414 1415 1416 1417 1418 1419 1420 1421 1422 1423 1424 |
period = HZ * pages_dirtied / task_ratelimit; pause = period; if (current->dirty_paused_when) pause -= now - current->dirty_paused_when; /* * For think time of less than 1s (ext3/4 may block the dirtier * for up to 800ms from time to time on a single HDD; so does xfs, * though much less frequently), try to compensate for it in * future periods by updating the virtual time; otherwise just * do a reset, as it may be a light dirtier. */
7ccb9ad53 writeback: max, m... |
1425 |
if (pause < min_pause) { |
ece13ac31 writeback: trace ... |
1426 1427 1428 1429 1430 1431 1432 1433 1434 |
trace_balance_dirty_pages(bdi, dirty_thresh, background_thresh, nr_dirty, bdi_thresh, bdi_dirty, dirty_ratelimit, task_ratelimit, pages_dirtied, |
83712358b writeback: dirty ... |
1435 |
period, |
7ccb9ad53 writeback: max, m... |
1436 |
min(pause, 0L), |
ece13ac31 writeback: trace ... |
1437 |
start_time); |
83712358b writeback: dirty ... |
1438 1439 1440 1441 1442 1443 |
if (pause < -HZ) { current->dirty_paused_when = now; current->nr_dirtied = 0; } else if (period) { current->dirty_paused_when += period; current->nr_dirtied = 0; |
7ccb9ad53 writeback: max, m... |
1444 1445 |
} else if (current->nr_dirtied_pause <= pages_dirtied) current->nr_dirtied_pause += pages_dirtied; |
57fc978cf writeback: contro... |
1446 |
break; |
04fbfdc14 mm: per device di... |
1447 |
} |
7ccb9ad53 writeback: max, m... |
1448 1449 1450 1451 1452 |
if (unlikely(pause > max_pause)) { /* for occasional dropped task_ratelimit */ now += min(pause - max_pause, max_pause); pause = max_pause; } |
143dfe861 writeback: IO-les... |
1453 1454 |
pause: |
ece13ac31 writeback: trace ... |
1455 1456 1457 1458 1459 1460 1461 1462 1463 |
trace_balance_dirty_pages(bdi, dirty_thresh, background_thresh, nr_dirty, bdi_thresh, bdi_dirty, dirty_ratelimit, task_ratelimit, pages_dirtied, |
83712358b writeback: dirty ... |
1464 |
period, |
ece13ac31 writeback: trace ... |
1465 1466 |
pause, start_time); |
499d05ecf mm: Make task in ... |
1467 |
__set_current_state(TASK_KILLABLE); |
d25105e89 writeback: accoun... |
1468 |
io_schedule_timeout(pause); |
87c6a9b25 writeback: make b... |
1469 |
|
83712358b writeback: dirty ... |
1470 1471 |
current->dirty_paused_when = now + pause; current->nr_dirtied = 0; |
7ccb9ad53 writeback: max, m... |
1472 |
current->nr_dirtied_pause = nr_dirtied_pause; |
83712358b writeback: dirty ... |
1473 |
|
ffd1f609a writeback: introd... |
1474 |
/* |
1df647197 writeback: hard t... |
1475 1476 |
* This is typically equal to (nr_dirty < dirty_thresh) and can * also keep "1000+ dd on a slow USB stick" under control. |
ffd1f609a writeback: introd... |
1477 |
*/ |
1df647197 writeback: hard t... |
1478 |
if (task_ratelimit) |
ffd1f609a writeback: introd... |
1479 |
break; |
499d05ecf mm: Make task in ... |
1480 |
|
c5c6343c4 writeback: permit... |
1481 1482 1483 1484 1485 1486 1487 1488 1489 1490 1491 1492 |
/* * In the case of an unresponsive NFS server whose dirty pages * exceed dirty_thresh, give the other good bdi's a pipe to go * through, so that tasks on them still remain responsive. * * In theory 1 page is enough to keep the consumer-producer * pipe going: the flusher cleans 1 page => the task dirties 1 * more page. However bdi_dirty has accounting errors. So use * the larger and more IO friendly bdi_stat_error. */ if (bdi_dirty <= bdi_stat_error(bdi)) break;
499d05ecf mm: Make task in ... |
1493 1494 |
if (fatal_signal_pending(current)) break; |
1da177e4c Linux-2.6.12-rc2 |
1495 |
} |
143dfe861 writeback: IO-les... |
1496 |
if (!dirty_exceeded && bdi->dirty_exceeded) |
04fbfdc14 mm: per device di... |
1497 |
bdi->dirty_exceeded = 0; |
1da177e4c Linux-2.6.12-rc2 |
1498 1499 |
if (writeback_in_progress(bdi)) |
5b0830cb9 writeback: get ri... |
1500 |
return; |
1da177e4c Linux-2.6.12-rc2 |
1501 1502 1503 1504 1505 1506 1507 1508 1509 |
/* * In laptop mode, we wait until hitting the higher threshold before * starting background writeout, and then write out all the way down * to the lower threshold. So slow writers cause minimal disk activity. * * In normal mode, we start background writeout at the lower * background_thresh, to keep the amount of dirty memory low. */ |
143dfe861 writeback: IO-les... |
1510 1511 1512 1513 |
if (laptop_mode) return; if (nr_reclaimable > background_thresh) |
c5444198c writeback: simpli... |
1514 |
bdi_start_background_writeback(bdi); |
1da177e4c Linux-2.6.12-rc2 |
1515 |
} |
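The heart of the loop above is the conversion from a per-task rate to a sleep time: period = HZ * pages_dirtied / task_ratelimit, with any "think time" since the last pause credited back. A minimal userspace model of just that computation (an editor's sketch; the HZ value and field names are assumptions borrowed for the example):

#include <stdio.h>

#define HZ 1000	/* assume 1000 ticks per second for the example */

/* Models the pause computation in balance_dirty_pages(): dirtying
 * pages_dirtied pages at task_ratelimit pages/sec costs `period`
 * ticks; think time since the last pause is subtracted. */
static long compute_pause(long pages_dirtied, long task_ratelimit,
			  long now, long dirty_paused_when)
{
	long period = HZ * pages_dirtied / task_ratelimit;
	long pause = period;

	if (dirty_paused_when)
		pause -= now - dirty_paused_when;
	return pause;
}

int main(void)
{
	/* 32 pages at 1600 pages/sec costs a 20ms period; 5ms of think
	 * time since the last pause leaves a 15ms sleep. */
	printf("%ld\n", compute_pause(32, 1600, 1005, 1000));
	return 0;
}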
ed6d7c8e5 mm: remove unused... |
1516 |
void set_page_dirty_balance(struct page *page) |
edc79b2a4 [PATCH] mm: balan... |
1517 |
{ |
ed6d7c8e5 mm: remove unused... |
1518 |
if (set_page_dirty(page)) { |
edc79b2a4 [PATCH] mm: balan... |
1519 1520 1521 1522 1523 1524 |
struct address_space *mapping = page_mapping(page); if (mapping) balance_dirty_pages_ratelimited(mapping); } } |
9d823e8f6 writeback: per ta... |
1525 |
static DEFINE_PER_CPU(int, bdp_ratelimits); |
245b2e70e percpu: clean up ... |
1526 |
|
54848d73f writeback: charge... |
1527 1528 1529 1530 1531 1532 1533 1534 1535 1536 1537 1538 1539 1540 1541 |
/* * Normal tasks are throttled by * loop { * dirty tsk->nr_dirtied_pause pages; * take a snap in balance_dirty_pages(); * } * However there is a worst case. If every task exits immediately after * dirtying (tsk->nr_dirtied_pause - 1) pages, balance_dirty_pages() will * never be called to throttle the page dirties. The solution is to save the * not yet throttled page dirties in dirty_throttle_leaks on task exit and * charge them randomly into the running tasks. This works well for the above * worst case, as the new task will pick up and accumulate the old task's * leaked dirty count and eventually get throttled. */ DEFINE_PER_CPU(int, dirty_throttle_leaks) = 0;
1da177e4c Linux-2.6.12-rc2 |
1542 |
/** |
d0e1d66b5 writeback: remove... |
1543 |
* balance_dirty_pages_ratelimited - balance dirty memory state |
67be2dd1b [PATCH] DocBook: ... |
1544 |
* @mapping: address_space which was dirtied |
1da177e4c Linux-2.6.12-rc2 |
1545 1546 1547 1548 1549 1550 1551 1552 1553 1554 |
* * Processes which are dirtying memory should call in here once for each page * which was newly dirtied. The function will periodically check the system's * dirty state and will initiate writeback if needed. * * On really big machines, get_writeback_state is expensive, so try to avoid * calling it too often (ratelimiting). But once we're over the dirty memory * limit we decrease the ratelimiting by a lot, to prevent individual processes * from overshooting the limit by (ratelimit_pages) each. */ |
d0e1d66b5 writeback: remove... |
1555 |
void balance_dirty_pages_ratelimited(struct address_space *mapping) |
1da177e4c Linux-2.6.12-rc2 |
1556 |
{ |
36715cef0 writeback: skip t... |
1557 |
struct backing_dev_info *bdi = mapping->backing_dev_info; |
9d823e8f6 writeback: per ta... |
1558 1559 |
int ratelimit; int *p; |
1da177e4c Linux-2.6.12-rc2 |
1560 |
|
36715cef0 writeback: skip t... |
1561 1562 |
if (!bdi_cap_account_dirty(bdi)) return; |
9d823e8f6 writeback: per ta... |
1563 1564 1565 |
ratelimit = current->nr_dirtied_pause; if (bdi->dirty_exceeded) ratelimit = min(ratelimit, 32 >> (PAGE_SHIFT - 10)); |
9d823e8f6 writeback: per ta... |
1566 |
preempt_disable(); |
1da177e4c Linux-2.6.12-rc2 |
1567 |
/* |
9d823e8f6 writeback: per ta... |
1568 1569 1570 1571 |
* This prevents one CPU from accumulating too many dirtied pages without * calling into balance_dirty_pages(), which can happen when there are * 1000+ tasks that all start dirtying pages at exactly the same time and * hence all honour a too-large initial task->nr_dirtied_pause.
1da177e4c Linux-2.6.12-rc2 |
1572 |
*/ |
245b2e70e percpu: clean up ... |
1573 |
p = &__get_cpu_var(bdp_ratelimits); |
9d823e8f6 writeback: per ta... |
1574 |
if (unlikely(current->nr_dirtied >= ratelimit)) |
fa5a734e4 [PATCH] balance_d... |
1575 |
*p = 0; |
d3bc1fef9 writeback: fix di... |
1576 1577 1578 |
else if (unlikely(*p >= ratelimit_pages)) { *p = 0; ratelimit = 0; |
1da177e4c Linux-2.6.12-rc2 |
1579 |
} |
54848d73f writeback: charge... |
1580 1581 1582 1583 1584 1585 1586 |
/* * Pick up the dirtied pages left behind by exited tasks. This avoids lots of * short-lived tasks (e.g. gcc invocations in a kernel build) escaping * the dirty throttling and livelocking other long-running dirtiers. */ p = &__get_cpu_var(dirty_throttle_leaks); if (*p > 0 && current->nr_dirtied < ratelimit) {
d0e1d66b5 writeback: remove... |
1587 |
unsigned long nr_pages_dirtied; |
54848d73f writeback: charge... |
1588 1589 1590 |
nr_pages_dirtied = min(*p, ratelimit - current->nr_dirtied); *p -= nr_pages_dirtied; current->nr_dirtied += nr_pages_dirtied; |
1da177e4c Linux-2.6.12-rc2 |
1591 |
} |
fa5a734e4 [PATCH] balance_d... |
1592 |
preempt_enable(); |
9d823e8f6 writeback: per ta... |
1593 1594 1595 |
if (unlikely(current->nr_dirtied >= ratelimit)) balance_dirty_pages(mapping, current->nr_dirtied); |
1da177e4c Linux-2.6.12-rc2 |
1596 |
} |
d0e1d66b5 writeback: remove... |
1597 |
EXPORT_SYMBOL(balance_dirty_pages_ratelimited); |
1da177e4c Linux-2.6.12-rc2 |
1598 |
|
232ea4d69 [PATCH] throttle_... |
1599 |
void throttle_vm_writeout(gfp_t gfp_mask) |
1da177e4c Linux-2.6.12-rc2 |
1600 |
{ |
364aeb284 mm: change dirty ... |
1601 1602 |
unsigned long background_thresh; unsigned long dirty_thresh; |
1da177e4c Linux-2.6.12-rc2 |
1603 1604 |
for ( ; ; ) { |
16c4042f0 writeback: avoid ... |
1605 |
global_dirty_limits(&background_thresh, &dirty_thresh); |
47a133339 mm: use global_di... |
1606 |
dirty_thresh = hard_dirty_limit(dirty_thresh); |
1da177e4c Linux-2.6.12-rc2 |
1607 1608 1609 1610 1611 1612 |
/* * Boost the allowable dirty threshold a bit for page * allocators so they don't get DoS'ed by heavy writers */ dirty_thresh += dirty_thresh / 10; /* wheeee... */ |
c24f21bda [PATCH] zoned vm ... |
1613 1614 1615 |
if (global_page_state(NR_UNSTABLE_NFS) + global_page_state(NR_WRITEBACK) <= dirty_thresh) break; |
8aa7e847d Fix congestion_wa... |
1616 |
congestion_wait(BLK_RW_ASYNC, HZ/10); |
369f2389e writeback: remove... |
1617 1618 1619 1620 1621 1622 1623 1624 |
/* * The caller might hold locks which can prevent IO completion * or progress in the filesystem. So we cannot just sit here * waiting for IO to complete. */ if ((gfp_mask & (__GFP_FS|__GFP_IO)) != (__GFP_FS|__GFP_IO)) break; |
1da177e4c Linux-2.6.12-rc2 |
1625 1626 |
} } |
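The 10% boost in throttle_vm_writeout() simply inflates the limit before comparing it with writeback plus unstable pages, so the page allocator only blocks above that headroom. A worked instance (an editor's userspace model, not kernel code):

#include <stdio.h>

/* Models throttle_vm_writeout()'s exit condition: the allocator is
 * only throttled while writeback+unstable exceed a 10%-boosted limit. */
static int should_throttle(unsigned long nr_writeback_unstable,
			   unsigned long dirty_thresh)
{
	dirty_thresh += dirty_thresh / 10;	/* allocator headroom */
	return nr_writeback_unstable > dirty_thresh;
}

int main(void)
{
	printf("%d %d\n",
	       should_throttle(1050, 1000),	/* 0: under the 1100 limit */
	       should_throttle(1200, 1000));	/* 1: keep waiting */
	return 0;
}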
1da177e4c Linux-2.6.12-rc2 |
1627 |
/* |
1da177e4c Linux-2.6.12-rc2 |
1628 1629 1630 |
* sysctl handler for /proc/sys/vm/dirty_writeback_centisecs */ int dirty_writeback_centisecs_handler(ctl_table *table, int write, |
8d65af789 sysctl: remove "s... |
1631 |
void __user *buffer, size_t *length, loff_t *ppos) |
1da177e4c Linux-2.6.12-rc2 |
1632 |
{ |
8d65af789 sysctl: remove "s... |
1633 |
proc_dointvec(table, write, buffer, length, ppos); |
1da177e4c Linux-2.6.12-rc2 |
1634 1635 |
return 0; } |
c2c4986ed writeback: fix pr... |
1636 |
#ifdef CONFIG_BLOCK |
31373d09d laptop-mode: Make... |
1637 |
void laptop_mode_timer_fn(unsigned long data) |
1da177e4c Linux-2.6.12-rc2 |
1638 |
{ |
31373d09d laptop-mode: Make... |
1639 1640 1641 |
struct request_queue *q = (struct request_queue *)data; int nr_pages = global_page_state(NR_FILE_DIRTY) + global_page_state(NR_UNSTABLE_NFS); |
1da177e4c Linux-2.6.12-rc2 |
1642 |
|
31373d09d laptop-mode: Make... |
1643 1644 1645 1646 |
/* * We want to write everything out, not just down to the dirty * threshold */ |
31373d09d laptop-mode: Make... |
1647 |
if (bdi_has_dirty_io(&q->backing_dev_info)) |
0e175a183 writeback: Add a ... |
1648 1649 |
bdi_start_writeback(&q->backing_dev_info, nr_pages, WB_REASON_LAPTOP_TIMER); |
1da177e4c Linux-2.6.12-rc2 |
1650 1651 1652 1653 1654 1655 1656 |
} /* * We've spun up the disk and we're in laptop mode: schedule writeback * of all dirty data a few seconds from now. If the flush is already scheduled * then push it back - the user is still using the disk. */ |
31373d09d laptop-mode: Make... |
1657 |
void laptop_io_completion(struct backing_dev_info *info) |
1da177e4c Linux-2.6.12-rc2 |
1658 |
{ |
31373d09d laptop-mode: Make... |
1659 |
mod_timer(&info->laptop_mode_wb_timer, jiffies + laptop_mode); |
1da177e4c Linux-2.6.12-rc2 |
1660 1661 1662 1663 1664 1665 1666 1667 1668 |
} /* * We're in laptop mode and we've just synced. The sync's writes will have * caused another writeback to be scheduled by laptop_io_completion. * Nothing needs to be written back anymore, so we unschedule the writeback. */ void laptop_sync_completion(void) { |
31373d09d laptop-mode: Make... |
1669 1670 1671 1672 1673 1674 1675 1676 |
struct backing_dev_info *bdi; rcu_read_lock(); list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) del_timer(&bdi->laptop_mode_wb_timer); rcu_read_unlock(); |
1da177e4c Linux-2.6.12-rc2 |
1677 |
} |
c2c4986ed writeback: fix pr... |
1678 |
#endif |
1da177e4c Linux-2.6.12-rc2 |
1679 1680 1681 1682 1683 1684 1685 1686 1687 |
/* * If ratelimit_pages is too high then we can get into dirty-data overload * if a large number of processes all perform writes at the same time. * If it is too low then SMP machines will call the (expensive) * get_writeback_state too often. * * Here we set ratelimit_pages to a level which ensures that when all CPUs are * dirtying in parallel, we cannot go more than 3% (1/32) over the dirty memory |
9d823e8f6 writeback: per ta... |
1688 |
* thresholds. |
1da177e4c Linux-2.6.12-rc2 |
1689 |
*/ |
2d1d43f6a [PATCH] call mm/p... |
1690 |
void writeback_set_ratelimit(void) |
1da177e4c Linux-2.6.12-rc2 |
1691 |
{ |
9d823e8f6 writeback: per ta... |
1692 1693 1694 |
unsigned long background_thresh; unsigned long dirty_thresh; global_dirty_limits(&background_thresh, &dirty_thresh); |
68809c710 writeback: initia... |
1695 |
global_dirty_limit = dirty_thresh; |
9d823e8f6 writeback: per ta... |
1696 |
ratelimit_pages = dirty_thresh / (num_online_cpus() * 32); |
1da177e4c Linux-2.6.12-rc2 |
1697 1698 |
if (ratelimit_pages < 16) ratelimit_pages = 16; |
1da177e4c Linux-2.6.12-rc2 |
1699 |
} |
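A worked instance of the sizing rule above, where the 1/32 factor bounds the collective overshoot to roughly 3% of dirty_thresh (an editor's userspace model with example numbers):

#include <stdio.h>

/* Models writeback_set_ratelimit(): each CPU may batch this many
 * dirtied pages before checking the limits. */
static long set_ratelimit_model(unsigned long dirty_thresh, int online_cpus)
{
	long ratelimit_pages = dirty_thresh / (online_cpus * 32);

	if (ratelimit_pages < 16)
		ratelimit_pages = 16;
	return ratelimit_pages;
}

int main(void)
{
	/* 100000 dirtyable pages on 8 CPUs: each CPU may batch up to
	 * 390 pages between checks; 8 * 390 / 100000 is about 3.1%. */
	printf("%ld\n", set_ratelimit_model(100000, 8));
	return 0;
}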
0db0628d9 kernel: delete __... |
1700 |
static int |
2f60d628f CPU hotplug, writ... |
1701 1702 |
ratelimit_handler(struct notifier_block *self, unsigned long action, void *hcpu) |
1da177e4c Linux-2.6.12-rc2 |
1703 |
{ |
2f60d628f CPU hotplug, writ... |
1704 1705 1706 1707 1708 1709 1710 1711 1712 |
switch (action & ~CPU_TASKS_FROZEN) { case CPU_ONLINE: case CPU_DEAD: writeback_set_ratelimit(); return NOTIFY_OK; default: return NOTIFY_DONE; } |
1da177e4c Linux-2.6.12-rc2 |
1713 |
} |
0db0628d9 kernel: delete __... |
1714 |
static struct notifier_block ratelimit_nb = { |
1da177e4c Linux-2.6.12-rc2 |
1715 1716 1717 1718 1719 |
.notifier_call = ratelimit_handler, .next = NULL, }; /* |
dc6e29da9 Fix balance_dirty... |
1720 1721 1722 1723 1724 1725 1726 1727 1728 1729 1730 1731 1732 1733 1734 1735 |
* Called early on to tune the page writeback dirty limits. * * We used to scale dirty pages according to how total memory * related to pages that could be allocated for buffers (by * comparing nr_free_buffer_pages() to vm_total_pages). * * However, that was when we used "dirty_ratio" to scale with * all memory, and we don't do that any more. "dirty_ratio" * is now applied to total non-HIGHPAGE memory (by subtracting * totalhigh_pages from vm_total_pages), and as such we can't * get into the old insane situation any more where we had * large amounts of dirty pages compared to a small amount of * non-HIGHMEM memory. * * But we might still want to scale the dirty_ratio by how * much memory the box has.
1da177e4c Linux-2.6.12-rc2 |
1736 1737 1738 |
*/ void __init page_writeback_init(void) { |
2d1d43f6a [PATCH] call mm/p... |
1739 |
writeback_set_ratelimit(); |
1da177e4c Linux-2.6.12-rc2 |
1740 |
register_cpu_notifier(&ratelimit_nb); |
04fbfdc14 mm: per device di... |
1741 |
|
eb608e3a3 block: Convert BD... |
1742 |
fprop_global_init(&writeout_completions); |
1da177e4c Linux-2.6.12-rc2 |
1743 |
} |
811d736f9 [PATCH] BLOCK: Di... |
1744 |
/** |
f446daaea mm: implement wri... |
1745 1746 1747 1748 1749 1750 1751 1752 1753 1754 1755 1756 1757 1758 1759 1760 |
* tag_pages_for_writeback - tag pages to be written by write_cache_pages * @mapping: address space structure to write * @start: starting page index * @end: ending page index (inclusive) * * This function scans the page range from @start to @end (inclusive) and tags * all pages that have DIRTY tag set with a special TOWRITE tag. The idea is * that write_cache_pages (or whoever calls this function) will then use * TOWRITE tag to identify pages eligible for writeback. This mechanism is * used to avoid livelocking of writeback by a process steadily creating new * dirty pages in the file (thus it is important for this function to be quick * so that it can tag pages faster than a dirtying process can create them). */ /* * We tag pages in batches of WRITEBACK_TAG_BATCH to reduce tree_lock latency. */ |
f446daaea mm: implement wri... |
1761 1762 1763 |
void tag_pages_for_writeback(struct address_space *mapping, pgoff_t start, pgoff_t end) { |
3c111a071 mm: fix fatal ker... |
1764 |
#define WRITEBACK_TAG_BATCH 4096 |
f446daaea mm: implement wri... |
1765 1766 1767 1768 1769 1770 1771 1772 1773 1774 |
unsigned long tagged; do { spin_lock_irq(&mapping->tree_lock); tagged = radix_tree_range_tag_if_tagged(&mapping->page_tree, &start, end, WRITEBACK_TAG_BATCH, PAGECACHE_TAG_DIRTY, PAGECACHE_TAG_TOWRITE); spin_unlock_irq(&mapping->tree_lock); WARN_ON_ONCE(tagged > WRITEBACK_TAG_BATCH); cond_resched(); |
d5ed3a4af lib/radix-tree.c:... |
1775 1776 |
/* We check 'start' to handle wrapping when end == ~0UL */ } while (tagged >= WRITEBACK_TAG_BATCH && start); |
f446daaea mm: implement wri... |
1777 1778 1779 1780 |
} EXPORT_SYMBOL(tag_pages_for_writeback); /** |
0ea971801 consolidate gener... |
1781 |
* write_cache_pages - walk the list of dirty pages of the given address space and write all of them. |
811d736f9 [PATCH] BLOCK: Di... |
1782 1783 |
* @mapping: address space structure to write * @wbc: subtract the number of written pages from *@wbc->nr_to_write |
0ea971801 consolidate gener... |
1784 1785 |
* @writepage: function called for each page * @data: data passed to writepage function |
811d736f9 [PATCH] BLOCK: Di... |
1786 |
* |
0ea971801 consolidate gener... |
1787 |
* If a page is already under I/O, write_cache_pages() skips it, even |
811d736f9 [PATCH] BLOCK: Di... |
1788 1789 1790 1791 1792 1793 |
* if it's dirty. This is desirable behaviour for memory-cleaning writeback, * but it is INCORRECT for data-integrity system calls such as fsync(). fsync() * and msync() need to guarantee that all the data which was dirty at the time * the call was made get new I/O started against them. If wbc->sync_mode is * WB_SYNC_ALL then we were called for data integrity and we must wait for * existing IO to complete. |
f446daaea mm: implement wri... |
1794 1795 1796 1797 1798 1799 1800 |
* * To avoid livelocks (when other process dirties new pages), we first tag * pages which should be written back with TOWRITE tag and only then start * writing them. For data-integrity sync we have to be careful so that we do * not miss some pages (e.g., because some other process has cleared TOWRITE * tag we set). The rule we follow is that TOWRITE tag can be cleared only * by the process clearing the DIRTY tag (and submitting the page for IO). |
811d736f9 [PATCH] BLOCK: Di... |
1801 |
*/ |
0ea971801 consolidate gener... |
1802 1803 1804 |
int write_cache_pages(struct address_space *mapping, struct writeback_control *wbc, writepage_t writepage, void *data) |
811d736f9 [PATCH] BLOCK: Di... |
1805 |
{ |
811d736f9 [PATCH] BLOCK: Di... |
1806 1807 |
int ret = 0; int done = 0; |
811d736f9 [PATCH] BLOCK: Di... |
1808 1809 |
struct pagevec pvec; int nr_pages; |
31a12666d mm: write_cache_p... |
1810 |
pgoff_t uninitialized_var(writeback_index); |
811d736f9 [PATCH] BLOCK: Di... |
1811 1812 |
pgoff_t index; pgoff_t end; /* Inclusive */ |
bd19e012f mm: write_cache_p... |
1813 |
pgoff_t done_index; |
31a12666d mm: write_cache_p... |
1814 |
int cycled; |
811d736f9 [PATCH] BLOCK: Di... |
1815 |
int range_whole = 0; |
f446daaea mm: implement wri... |
1816 |
int tag; |
811d736f9 [PATCH] BLOCK: Di... |
1817 |
|
811d736f9 [PATCH] BLOCK: Di... |
1818 1819 |
pagevec_init(&pvec, 0); if (wbc->range_cyclic) { |
31a12666d mm: write_cache_p... |
1820 1821 1822 1823 1824 1825 |
writeback_index = mapping->writeback_index; /* prev offset */ index = writeback_index; if (index == 0) cycled = 1; else cycled = 0; |
811d736f9 [PATCH] BLOCK: Di... |
1826 1827 1828 1829 1830 1831 |
end = -1; } else { index = wbc->range_start >> PAGE_CACHE_SHIFT; end = wbc->range_end >> PAGE_CACHE_SHIFT; if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) range_whole = 1; |
31a12666d mm: write_cache_p... |
1832 |
cycled = 1; /* ignore range_cyclic tests */ |
811d736f9 [PATCH] BLOCK: Di... |
1833 |
} |
6e6938b6d writeback: introd... |
1834 |
if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages) |
f446daaea mm: implement wri... |
1835 1836 1837 |
tag = PAGECACHE_TAG_TOWRITE; else tag = PAGECACHE_TAG_DIRTY; |
811d736f9 [PATCH] BLOCK: Di... |
1838 |
retry: |
6e6938b6d writeback: introd... |
1839 |
if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages) |
f446daaea mm: implement wri... |
1840 |
tag_pages_for_writeback(mapping, index, end); |
bd19e012f mm: write_cache_p... |
1841 |
done_index = index; |
5a3d5c981 mm: write_cache_p... |
1842 1843 |
while (!done && (index <= end)) { int i; |
f446daaea mm: implement wri... |
1844 |
nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag, |
5a3d5c981 mm: write_cache_p... |
1845 1846 1847 |
min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1); if (nr_pages == 0) break; |
811d736f9 [PATCH] BLOCK: Di... |
1848 |
|
811d736f9 [PATCH] BLOCK: Di... |
1849 1850 1851 1852 |
for (i = 0; i < nr_pages; i++) { struct page *page = pvec.pages[i]; /* |
d5482cdf8 mm: write_cache_p... |
1853 1854 1855 1856 1857 |
* At this point, the page may be truncated or * invalidated (changing page->mapping to NULL), or * even swizzled back from swapper_space to tmpfs file * mapping. However, page->index will not change * because we have a reference on the page. |
811d736f9 [PATCH] BLOCK: Di... |
1858 |
*/ |
d5482cdf8 mm: write_cache_p... |
1859 1860 1861 1862 1863 1864 1865 1866 |
if (page->index > end) { /* * can't be range_cyclic (1st pass) because * end == -1 in that case. */ done = 1; break; } |
cf15b07cf writeback: make m... |
1867 |
done_index = page->index; |
d5482cdf8 mm: write_cache_p... |
1868 |
|
811d736f9 [PATCH] BLOCK: Di... |
1869 |
lock_page(page); |
5a3d5c981 mm: write_cache_p... |
1870 1871 1872 1873 1874 1875 1876 1877 |
/* * Page truncated or invalidated. We can freely skip it * then, even for data integrity operations: the page * has disappeared concurrently, so there could be no * real expectation of this data integrity operation * even if there is now a new, dirty page at the same * pagecache address. */
811d736f9 [PATCH] BLOCK: Di... |
1878 |
if (unlikely(page->mapping != mapping)) { |
5a3d5c981 mm: write_cache_p... |
1879 |
continue_unlock: |
811d736f9 [PATCH] BLOCK: Di... |
1880 1881 1882 |
unlock_page(page); continue; } |
515f4a037 mm: write_cache_p... |
1883 1884 1885 1886 1887 1888 1889 1890 1891 1892 1893 |
if (!PageDirty(page)) { /* someone wrote it for us */ goto continue_unlock; } if (PageWriteback(page)) { if (wbc->sync_mode != WB_SYNC_NONE) wait_on_page_writeback(page); else goto continue_unlock; } |
811d736f9 [PATCH] BLOCK: Di... |
1894 |
|
515f4a037 mm: write_cache_p... |
1895 1896 |
BUG_ON(PageWriteback(page)); if (!clear_page_dirty_for_io(page)) |
5a3d5c981 mm: write_cache_p... |
1897 |
goto continue_unlock; |
811d736f9 [PATCH] BLOCK: Di... |
1898 |
|
9e094383b writeback: Add tr... |
1899 |
trace_wbc_writepage(wbc, mapping->backing_dev_info); |
0ea971801 consolidate gener... |
1900 |
ret = (*writepage)(page, wbc, data); |
00266770b mm: write_cache_p... |
1901 1902 1903 1904 1905 1906 1907 1908 1909 1910 1911 1912 1913 1914 |
if (unlikely(ret)) { if (ret == AOP_WRITEPAGE_ACTIVATE) { unlock_page(page); ret = 0; } else { /* * done_index is set past this page, * so media errors will not choke * background writeout for the entire * file. This has consequences for * range_cyclic semantics (ie. it may * not be suitable for data integrity * writeout). */ |
cf15b07cf writeback: make m... |
1915 |
done_index = page->index + 1; |
00266770b mm: write_cache_p... |
1916 1917 1918 |
done = 1; break; } |
0b5649278 writeback: pay at... |
1919 |
} |
00266770b mm: write_cache_p... |
1920 |
|
546a19242 writeback: write_... |
1921 1922 1923 1924 1925 1926 1927 1928 1929 1930 |
/* * We stop writing back only if we are not doing * integrity sync. In case of integrity sync we have to * keep going until we have written all the pages * we tagged for writeback prior to entering this loop. */ if (--wbc->nr_to_write <= 0 && wbc->sync_mode == WB_SYNC_NONE) { done = 1; break; |
05fe478dd mm: write_cache_p... |
1931 |
} |
811d736f9 [PATCH] BLOCK: Di... |
1932 1933 1934 1935 |
} pagevec_release(&pvec); cond_resched(); } |
3a4c6800f Fix page writebac... |
1936 |
if (!cycled && !done) { |
811d736f9 [PATCH] BLOCK: Di... |
1937 |
/* |
31a12666d mm: write_cache_p... |
1938 |
* range_cyclic: |
811d736f9 [PATCH] BLOCK: Di... |
1939 1940 1941 |
* We hit the last page and there is more work to be done: wrap * back to the start of the file */ |
31a12666d mm: write_cache_p... |
1942 |
cycled = 1; |
811d736f9 [PATCH] BLOCK: Di... |
1943 |
index = 0; |
31a12666d mm: write_cache_p... |
1944 |
end = writeback_index - 1; |
811d736f9 [PATCH] BLOCK: Di... |
1945 1946 |
goto retry; } |
0b5649278 writeback: pay at... |
1947 1948 |
if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) mapping->writeback_index = done_index; |
06d6cf695 mm: Add range_con... |
1949 |
|
811d736f9 [PATCH] BLOCK: Di... |
1950 1951 |
return ret; } |
0ea971801 consolidate gener... |
1952 1953 1954 1955 1956 1957 1958 1959 1960 1961 1962 1963 1964 1965 1966 1967 1968 1969 1970 1971 1972 1973 1974 1975 1976 1977 |
EXPORT_SYMBOL(write_cache_pages); /* * Function used by generic_writepages to call the real writepage * function and set the mapping flags on error */ static int __writepage(struct page *page, struct writeback_control *wbc, void *data) { struct address_space *mapping = data; int ret = mapping->a_ops->writepage(page, wbc); mapping_set_error(mapping, ret); return ret; } /** * generic_writepages - walk the list of dirty pages of the given address space and writepage() all of them. * @mapping: address space structure to write * @wbc: subtract the number of written pages from *@wbc->nr_to_write * * This is a library function, which implements the writepages() * address_space_operation. */ int generic_writepages(struct address_space *mapping, struct writeback_control *wbc) { |
9b6096a65 mm: make generic_... |
1978 1979 |
struct blk_plug plug; int ret; |
0ea971801 consolidate gener... |
1980 1981 1982 |
/* deal with chardevs and other special file */ if (!mapping->a_ops->writepage) return 0; |
9b6096a65 mm: make generic_... |
1983 1984 1985 1986 |
blk_start_plug(&plug); ret = write_cache_pages(mapping, wbc, __writepage, mapping); blk_finish_plug(&plug); return ret; |
0ea971801 consolidate gener... |
1987 |
} |
811d736f9 [PATCH] BLOCK: Di... |
1988 1989 |
EXPORT_SYMBOL(generic_writepages); |
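An editor's sketch of the usual consumer of this API: a filesystem's own ->writepages() plugging a private per-page callback into write_cache_pages(), in the same shape as __writepage() above. The "examplefs" names are hypothetical; the callback receives each page locked and already cleaned for I/O by write_cache_pages(), and the real I/O submission is elided.

/* Hypothetical per-page callback; matches the writepage_t signature. */
static int examplefs_writepage_cb(struct page *page,
				  struct writeback_control *wbc, void *data)
{
	set_page_writeback(page);	/* tag + account, as done below */
	unlock_page(page);
	/* ... submit the real I/O for @page here; the completion path
	 * must call end_page_writeback() ... */
	end_page_writeback(page);	/* placeholder: pretend I/O finished */
	return 0;
}

static int examplefs_writepages(struct address_space *mapping,
				struct writeback_control *wbc)
{
	return write_cache_pages(mapping, wbc, examplefs_writepage_cb,
				 mapping);
}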
1da177e4c Linux-2.6.12-rc2 |
1990 1991 |
int do_writepages(struct address_space *mapping, struct writeback_control *wbc) { |
22905f775 identify multipag... |
1992 |
int ret; |
1da177e4c Linux-2.6.12-rc2 |
1993 1994 1995 |
if (wbc->nr_to_write <= 0) return 0; if (mapping->a_ops->writepages) |
d08b3851d [PATCH] mm: track... |
1996 |
ret = mapping->a_ops->writepages(mapping, wbc); |
22905f775 identify multipag... |
1997 1998 |
else ret = generic_writepages(mapping, wbc); |
22905f775 identify multipag... |
1999 |
return ret; |
1da177e4c Linux-2.6.12-rc2 |
2000 2001 2002 2003 |
} /** * write_one_page - write out a single page and optionally wait on I/O |
67be2dd1b [PATCH] DocBook: ... |
2004 2005 |
* @page: the page to write * @wait: if true, wait on writeout |
1da177e4c Linux-2.6.12-rc2 |
2006 2007 2008 2009 2010 2011 2012 2013 2014 2015 2016 2017 2018 2019 2020 2021 2022 2023 2024 2025 2026 2027 2028 2029 2030 2031 2032 2033 2034 2035 2036 2037 2038 2039 2040 2041 |
* * The page must be locked by the caller and will be unlocked upon return. * * write_one_page() returns a negative error code if I/O failed. */ int write_one_page(struct page *page, int wait) { struct address_space *mapping = page->mapping; int ret = 0; struct writeback_control wbc = { .sync_mode = WB_SYNC_ALL, .nr_to_write = 1, }; BUG_ON(!PageLocked(page)); if (wait) wait_on_page_writeback(page); if (clear_page_dirty_for_io(page)) { page_cache_get(page); ret = mapping->a_ops->writepage(page, &wbc); if (ret == 0 && wait) { wait_on_page_writeback(page); if (PageError(page)) ret = -EIO; } page_cache_release(page); } else { unlock_page(page); } return ret; } EXPORT_SYMBOL(write_one_page); /* |
767193253 [PATCH] simplify ... |
2042 2043 2044 2045 2046 |
* For address_spaces which neither use buffers nor write back. */ int __set_page_dirty_no_writeback(struct page *page) { if (!PageDirty(page))
c3f0da631 mm/page-writeback... |
2047 |
return !TestSetPageDirty(page); |
767193253 [PATCH] simplify ... |
2048 2049 2050 2051 |
return 0; } /* |
e3a7cca1e vfs: add/use acco... |
2052 2053 2054 2055 2056 |
* Helper function for set_page_dirty family. * NOTE: This relies on being atomic wrt interrupts. */ void account_page_dirtied(struct page *page, struct address_space *mapping) { |
9fb0a7da0 writeback: add mo... |
2057 |
trace_writeback_dirty_page(page, mapping); |
e3a7cca1e vfs: add/use acco... |
2058 2059 |
if (mapping_cap_account_dirty(mapping)) { __inc_zone_page_state(page, NR_FILE_DIRTY); |
ea941f0e2 writeback: add nr... |
2060 |
__inc_zone_page_state(page, NR_DIRTIED); |
e3a7cca1e vfs: add/use acco... |
2061 |
__inc_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE); |
c8e28ce04 writeback: accoun... |
2062 |
__inc_bdi_stat(mapping->backing_dev_info, BDI_DIRTIED); |
e3a7cca1e vfs: add/use acco... |
2063 |
task_io_account_write(PAGE_CACHE_SIZE); |
d3bc1fef9 writeback: fix di... |
2064 2065 |
current->nr_dirtied++; this_cpu_inc(bdp_ratelimits); |
e3a7cca1e vfs: add/use acco... |
2066 2067 |
} } |
679ceace8 mm: exporting acc... |
2068 |
EXPORT_SYMBOL(account_page_dirtied); |
e3a7cca1e vfs: add/use acco... |
2069 2070 |
/* |
f629d1c9b mm: add account_p... |
2071 |
* Helper function for set_page_writeback family. |
3ea67d06e memcg: add per cg... |
2072 2073 2074 2075 2076 |
* * The caller must hold mem_cgroup_begin/end_update_page_stat() lock * while calling this function. * See test_set_page_writeback for example. * |
f629d1c9b mm: add account_p... |
2077 2078 2079 2080 2081 |
* NOTE: Unlike account_page_dirtied this does not rely on being atomic * wrt interrupts. */ void account_page_writeback(struct page *page) { |
3ea67d06e memcg: add per cg... |
2082 |
mem_cgroup_inc_page_stat(page, MEM_CGROUP_STAT_WRITEBACK); |
f629d1c9b mm: add account_p... |
2083 2084 2085 2086 2087 |
inc_zone_page_state(page, NR_WRITEBACK); } EXPORT_SYMBOL(account_page_writeback); /* |
1da177e4c Linux-2.6.12-rc2 |
2088 2089 2090 2091 2092 2093 2094 2095 2096 2097 2098 2099 |
* For address_spaces which do not use buffers. Just tag the page as dirty in * its radix tree. * * This is also used when a single buffer is being dirtied: we want to set the * page dirty in that case, but not all the buffers. This is a "bottom-up" * dirtying, whereas __set_page_dirty_buffers() is a "top-down" dirtying. * * Most callers have locked the page, which pins the address_space in memory. * But zap_pte_range() does not lock the page; in that case the * mapping is pinned by the vma's ->vm_file reference. * * We take care to handle the case where the page was truncated from the
183ff22bb spelling fixes: mm/ |
2100 |
* mapping by re-checking page_mapping() inside tree_lock. |
1da177e4c Linux-2.6.12-rc2 |
2101 2102 2103 |
*/ int __set_page_dirty_nobuffers(struct page *page) { |
1da177e4c Linux-2.6.12-rc2 |
2104 2105 2106 |
if (!TestSetPageDirty(page)) { struct address_space *mapping = page_mapping(page); struct address_space *mapping2; |
a85d9df1e mm: __set_page_di... |
2107 |
unsigned long flags; |
1da177e4c Linux-2.6.12-rc2 |
2108 |
|
8c08540f8 [PATCH] clean up ... |
2109 2110 |
if (!mapping) return 1; |
a85d9df1e mm: __set_page_di... |
2111 |
spin_lock_irqsave(&mapping->tree_lock, flags); |
8c08540f8 [PATCH] clean up ... |
2112 2113 2114 |
mapping2 = page_mapping(page); if (mapping2) { /* Race with truncate? */ BUG_ON(mapping2 != mapping); |
787d2214c fs: introduce som... |
2115 |
WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page)); |
e3a7cca1e vfs: add/use acco... |
2116 |
account_page_dirtied(page, mapping); |
8c08540f8 [PATCH] clean up ... |
2117 2118 2119 |
radix_tree_tag_set(&mapping->page_tree, page_index(page), PAGECACHE_TAG_DIRTY); } |
a85d9df1e mm: __set_page_di... |
2120 |
spin_unlock_irqrestore(&mapping->tree_lock, flags); |
8c08540f8 [PATCH] clean up ... |
2121 2122 2123 |
if (mapping->host) { /* !PageAnon && !swapper_space */ __mark_inode_dirty(mapping->host, I_DIRTY_PAGES); |
1da177e4c Linux-2.6.12-rc2 |
2124 |
} |
4741c9fd3 [PATCH] set_page_... |
2125 |
return 1; |
1da177e4c Linux-2.6.12-rc2 |
2126 |
} |
4741c9fd3 [PATCH] set_page_... |
2127 |
return 0; |
1da177e4c Linux-2.6.12-rc2 |
2128 2129 2130 2131 |
} EXPORT_SYMBOL(__set_page_dirty_nobuffers); /* |
2f800fbd7 writeback: fix di... |
2132 2133 2134 2135 2136 2137 2138 2139 2140 2141 2142 2143 2144 2145 2146 2147 2148 2149 |
* Call this whenever redirtying a page, to de-account the dirty counters * (NR_DIRTIED, BDI_DIRTIED, tsk->nr_dirtied), so that they match the written * counters (NR_WRITTEN, BDI_WRITTEN) in the long term. The mismatches will * lead to systematic errors in balanced_dirty_ratelimit and the dirty pages * position control. */ void account_page_redirty(struct page *page) { struct address_space *mapping = page->mapping; if (mapping && mapping_cap_account_dirty(mapping)) { current->nr_dirtied--; dec_zone_page_state(page, NR_DIRTIED); dec_bdi_stat(mapping->backing_dev_info, BDI_DIRTIED); } } EXPORT_SYMBOL(account_page_redirty); /*
1da177e4c Linux-2.6.12-rc2 |
2150 2151 2152 2153 2154 2155 2156 |
* When a writepage implementation decides that it doesn't want to write this * page for some reason, it should redirty the locked page via * redirty_page_for_writepage() and it should then unlock the page and return 0 */ int redirty_page_for_writepage(struct writeback_control *wbc, struct page *page) { wbc->pages_skipped++; |
2f800fbd7 writeback: fix di... |
2157 |
account_page_redirty(page); |
1da177e4c Linux-2.6.12-rc2 |
2158 2159 2160 2161 2162 |
return __set_page_dirty_nobuffers(page); } EXPORT_SYMBOL(redirty_page_for_writepage); /* |
6746aff74 HWPOISON: shmem: ... |
2163 2164 2165 2166 2167 2168 2169 |
* Dirty a page. * * For pages with a mapping this should be done under the page lock * for the benefit of asynchronous memory errors which prefer a consistent * dirty state. This rule can be broken in some special cases, * but it is better not to. *
1da177e4c Linux-2.6.12-rc2 |
2170 2171 2172 |
* If the mapping doesn't provide a set_page_dirty a_op, then * just fall through and assume that it wants buffer_heads. */ |
1cf6e7d83 mm: task dirty ac... |
2173 |
int set_page_dirty(struct page *page) |
1da177e4c Linux-2.6.12-rc2 |
2174 2175 2176 2177 2178 |
{ struct address_space *mapping = page_mapping(page); if (likely(mapping)) { int (*spd)(struct page *) = mapping->a_ops->set_page_dirty; |
278df9f45 mm: reclaim inval... |
2179 2180 2181 2182 2183 2184 2185 2186 2187 2188 2189 |
/* * readahead/lru_deactivate_page could leave PG_readahead/PG_reclaim * set due to a race with end_page_writeback(). * For readahead, if the page gets written, the flags are reset, so * there is no problem. * For lru_deactivate_page, if the page is redirtied, the flag is * reset, so there is no problem; but if the page is used by readahead, * it will confuse readahead and make it restart the size ramp-up * process. That is only a trivial problem, though. */ ClearPageReclaim(page);
9361401eb [PATCH] BLOCK: Ma... |
2190 2191 2192 2193 2194 |
#ifdef CONFIG_BLOCK if (!spd) spd = __set_page_dirty_buffers; #endif return (*spd)(page); |
1da177e4c Linux-2.6.12-rc2 |
2195 |
} |
4741c9fd3 [PATCH] set_page_... |
2196 2197 2198 2199 |
if (!PageDirty(page)) { if (!TestSetPageDirty(page)) return 1; } |
1da177e4c Linux-2.6.12-rc2 |
2200 2201 2202 2203 2204 2205 2206 2207 2208 2209 2210 2211 2212 2213 2214 2215 2216 |
return 0; } EXPORT_SYMBOL(set_page_dirty); /* * set_page_dirty() is racy if the caller has no reference against * page->mapping->host, and if the page is unlocked. This is because another * CPU could truncate the page off the mapping and then free the mapping. * * Usually, the page _is_ locked, or the caller is a user-space process which * holds a reference on the inode by having an open file. * * In other cases, the page should be locked before running set_page_dirty(). */ int set_page_dirty_lock(struct page *page) { int ret; |
7eaceacca block: remove per... |
2217 |
lock_page(page); |
1da177e4c Linux-2.6.12-rc2 |
2218 2219 2220 2221 2222 2223 2224 |
ret = set_page_dirty(page); unlock_page(page); return ret; } EXPORT_SYMBOL(set_page_dirty_lock); /* |
1da177e4c Linux-2.6.12-rc2 |
2225 2226 2227 2228 2229 2230 2231 2232 2233 2234 2235 2236 2237 2238 2239 2240 |
* Clear a page's dirty flag, while caring for dirty memory accounting. * Returns true if the page was previously dirty. * * This is for preparing to put the page under writeout. We leave the page * tagged as dirty in the radix tree so that a concurrent write-for-sync * can discover it via a PAGECACHE_TAG_DIRTY walk. The ->writepage * implementation will run either set_page_writeback() or set_page_dirty(), * at which stage we bring the page's dirty flag and radix-tree dirty tag * back into sync. * * This incoherency between the page's dirty flag and radix-tree tag is * unfortunate, but it only exists while the page is locked. */ int clear_page_dirty_for_io(struct page *page) { struct address_space *mapping = page_mapping(page); |
79352894b mm: fix clear_pag... |
2241 |
BUG_ON(!PageLocked(page)); |
7658cc289 VM: Fix nasty and... |
2242 2243 2244 2245 2246 2247 2248 2249 2250 2251 2252 2253 2254 2255 2256 2257 2258 2259 2260 2261 2262 2263 2264 2265 2266 |
if (mapping && mapping_cap_account_dirty(mapping)) { /* * Yes, Virginia, this is indeed insane. * * We use this sequence to make sure that * (a) we account for dirty stats properly * (b) we tell the low-level filesystem to * mark the whole page dirty if it was * dirty in a pagetable. Only to then * (c) clean the page again and return 1 to * cause the writeback. * * This way we avoid all nasty races with the * dirty bit in multiple places and clearing * them concurrently from different threads. * * Note! Normally the "set_page_dirty(page)" * has no effect on the actual dirty bit - since * that will already usually be set. But we * need the side effects, and it can help us * avoid races. * * We basically use the page "master dirty bit" * as a serialization point for all the different * threads doing their things. |
7658cc289 VM: Fix nasty and... |
2267 2268 2269 |
*/ if (page_mkclean(page)) set_page_dirty(page); |
79352894b mm: fix clear_pag... |
2270 2271 2272 2273 2274 2275 2276 2277 2278 2279 |
/* * We carefully synchronise fault handlers against * installing a dirty pte and marking the page dirty * at this point. We do this by having them hold the * page lock at some point after installing their * pte, but before marking the page dirty. * Pages are always locked coming in here, so we get * the desired exclusion. See mm/memory.c:do_wp_page() * for more comments. */ |
7658cc289 VM: Fix nasty and... |
2280 |
if (TestClearPageDirty(page)) { |
8c08540f8 [PATCH] clean up ... |
2281 |
dec_zone_page_state(page, NR_FILE_DIRTY); |
c9e51e418 mm: count reclaim... |
2282 2283 |
dec_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE); |
7658cc289 VM: Fix nasty and... |
2284 |
return 1; |
1da177e4c Linux-2.6.12-rc2 |
2285 |
} |
7658cc289 VM: Fix nasty and... |
2286 |
return 0; |
1da177e4c Linux-2.6.12-rc2 |
2287 |
} |
7658cc289 VM: Fix nasty and... |
2288 |
return TestClearPageDirty(page); |
1da177e4c Linux-2.6.12-rc2 |
2289 |
} |
58bb01a9c [PATCH] re-export... |
2290 |
EXPORT_SYMBOL(clear_page_dirty_for_io); |
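An editor's sketch (not part of this file) of the caller-side sequence the comment above describes, condensed from write_one_page(): the page must already be locked, and ->writepage() unlocks it.

static int example_start_writeout(struct page *page,
				  struct writeback_control *wbc)
{
	struct address_space *mapping = page_mapping(page);

	BUG_ON(!PageLocked(page));
	if (!clear_page_dirty_for_io(page)) {
		unlock_page(page);	/* already clean: nothing to write */
		return 0;
	}
	/*
	 * The radix tree still carries PAGECACHE_TAG_DIRTY here;
	 * ->writepage() brings tag and dirty flag back into sync via
	 * set_page_writeback() or set_page_dirty().
	 */
	return mapping->a_ops->writepage(page, wbc);
}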
1da177e4c Linux-2.6.12-rc2 |
2291 2292 2293 2294 2295 |
int test_clear_page_writeback(struct page *page) { struct address_space *mapping = page_mapping(page); int ret; |
3ea67d06e memcg: add per cg... |
2296 2297 |
bool locked; unsigned long memcg_flags; |
1da177e4c Linux-2.6.12-rc2 |
2298 |
|
3ea67d06e memcg: add per cg... |
2299 |
mem_cgroup_begin_update_page_stat(page, &locked, &memcg_flags); |
1da177e4c Linux-2.6.12-rc2 |
2300 |
if (mapping) { |
69cb51d18 mm: count writeba... |
2301 |
struct backing_dev_info *bdi = mapping->backing_dev_info; |
1da177e4c Linux-2.6.12-rc2 |
2302 |
unsigned long flags; |
19fd62312 mm: spinlock tree... |
2303 |
spin_lock_irqsave(&mapping->tree_lock, flags); |
1da177e4c Linux-2.6.12-rc2 |
2304 |
ret = TestClearPageWriteback(page); |
69cb51d18 mm: count writeba... |
2305 |
if (ret) { |
1da177e4c Linux-2.6.12-rc2 |
2306 2307 2308 |
radix_tree_tag_clear(&mapping->page_tree, page_index(page), PAGECACHE_TAG_WRITEBACK); |
e4ad08fe6 mm: bdi: add sepa... |
2309 |
if (bdi_cap_account_writeback(bdi)) { |
69cb51d18 mm: count writeba... |
2310 |
__dec_bdi_stat(bdi, BDI_WRITEBACK); |
04fbfdc14 mm: per device di... |
2311 2312 |
__bdi_writeout_inc(bdi); } |
69cb51d18 mm: count writeba... |
2313 |
} |
19fd62312 mm: spinlock tree... |
2314 |
spin_unlock_irqrestore(&mapping->tree_lock, flags); |
1da177e4c Linux-2.6.12-rc2 |
2315 2316 2317 |
} else { ret = TestClearPageWriteback(page); } |
99b12e3d8 writeback: accoun... |
2318 |
if (ret) { |
3ea67d06e memcg: add per cg... |
2319 |
mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_WRITEBACK); |
d688abf50 move page writeba... |
2320 |
dec_zone_page_state(page, NR_WRITEBACK); |
99b12e3d8 writeback: accoun... |
2321 2322 |
inc_zone_page_state(page, NR_WRITTEN); } |
3ea67d06e memcg: add per cg... |
2323 |
mem_cgroup_end_update_page_stat(page, &locked, &memcg_flags); |
1da177e4c Linux-2.6.12-rc2 |
2324 2325 2326 2327 2328 2329 2330 |
return ret; } int test_set_page_writeback(struct page *page) { struct address_space *mapping = page_mapping(page); int ret; |
3ea67d06e memcg: add per cg... |
2331 2332 |
bool locked; unsigned long memcg_flags; |
1da177e4c Linux-2.6.12-rc2 |
2333 |
|
3ea67d06e memcg: add per cg... |
2334 |
mem_cgroup_begin_update_page_stat(page, &locked, &memcg_flags); |
1da177e4c Linux-2.6.12-rc2 |
2335 |
if (mapping) { |
69cb51d18 mm: count writeba... |
2336 |
struct backing_dev_info *bdi = mapping->backing_dev_info; |
1da177e4c Linux-2.6.12-rc2 |
2337 |
unsigned long flags; |
19fd62312 mm: spinlock tree... |
2338 |
spin_lock_irqsave(&mapping->tree_lock, flags); |
1da177e4c Linux-2.6.12-rc2 |
2339 |
ret = TestSetPageWriteback(page); |
69cb51d18 mm: count writeba... |
2340 |
if (!ret) { |
1da177e4c Linux-2.6.12-rc2 |
2341 2342 2343 |
radix_tree_tag_set(&mapping->page_tree, page_index(page), PAGECACHE_TAG_WRITEBACK); |
e4ad08fe6 mm: bdi: add sepa... |
2344 |
if (bdi_cap_account_writeback(bdi)) |
69cb51d18 mm: count writeba... |
2345 2346 |
__inc_bdi_stat(bdi, BDI_WRITEBACK); } |
1da177e4c Linux-2.6.12-rc2 |
2347 2348 2349 2350 |
if (!PageDirty(page)) radix_tree_tag_clear(&mapping->page_tree, page_index(page), PAGECACHE_TAG_DIRTY); |
f446daaea mm: implement wri... |
2351 2352 2353 |
radix_tree_tag_clear(&mapping->page_tree, page_index(page), PAGECACHE_TAG_TOWRITE); |
19fd62312 mm: spinlock tree... |
2354 |
spin_unlock_irqrestore(&mapping->tree_lock, flags); |
1da177e4c Linux-2.6.12-rc2 |
2355 2356 2357 |
} else { ret = TestSetPageWriteback(page); } |
d688abf50 move page writeba... |
2358 |
if (!ret) |
f629d1c9b mm: add account_p... |
2359 |
account_page_writeback(page); |
3ea67d06e memcg: add per cg... |
2360 |
mem_cgroup_end_update_page_stat(page, &locked, &memcg_flags); |
1da177e4c Linux-2.6.12-rc2 |
2361 2362 2363 2364 2365 2366 |
return ret; } EXPORT_SYMBOL(test_set_page_writeback); /* |
001281881 mm: use lockless ... |
2367 |
* Return true if any of the pages in the mapping are marked with the |
1da177e4c Linux-2.6.12-rc2 |
2368 2369 2370 2371 |
* passed tag. */ int mapping_tagged(struct address_space *mapping, int tag) { |
72c478321 mm: remove useles... |
2372 |
return radix_tree_tagged(&mapping->page_tree, tag); |
1da177e4c Linux-2.6.12-rc2 |
2373 2374 |
} EXPORT_SYMBOL(mapping_tagged); |
1d1d1a767 mm: only enforce ... |
2375 2376 2377 2378 2379 2380 2381 2382 2383 2384 2385 2386 2387 2388 2389 2390 2391 2392 2393 2394 |
/** * wait_for_stable_page() - wait for writeback to finish, if necessary. * @page: The page to wait on. * * This function determines if the given page is related to a backing device * that requires page contents to be held stable during writeback. If so, then * it will wait for any pending writeback to complete. */ void wait_for_stable_page(struct page *page) { struct address_space *mapping = page_mapping(page); struct backing_dev_info *bdi = mapping->backing_dev_info; if (!bdi_cap_stable_pages_required(bdi)) return; wait_on_page_writeback(page); } EXPORT_SYMBOL_GPL(wait_for_stable_page); |
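An editor's sketch of the intended caller (cf. filemap_page_mkwrite()): a ->page_mkwrite() handler that must not let userspace scribble on a page while a stable-pages bdi is writing it out. The "examplefs" name is hypothetical, and a real handler would also recheck page->mapping under the lock to guard against truncation.

static int examplefs_page_mkwrite(struct vm_area_struct *vma,
				  struct vm_fault *vmf)
{
	struct page *page = vmf->page;

	lock_page(page);
	/* returns immediately unless the bdi requires stable pages */
	wait_for_stable_page(page);
	set_page_dirty(page);
	return VM_FAULT_LOCKED;		/* page handed back locked */
}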