  /*
   * mm/page-writeback.c
   *
   * Copyright (C) 2002, Linus Torvalds.
   * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
   *
   * Contains functions related to writing back dirty pages at the
   * address_space level.
   *
   * 10Apr2002	Andrew Morton
   *		Initial version
   */
  
  #include <linux/kernel.h>
  #include <linux/export.h>
  #include <linux/spinlock.h>
  #include <linux/fs.h>
  #include <linux/mm.h>
  #include <linux/swap.h>
  #include <linux/slab.h>
  #include <linux/pagemap.h>
  #include <linux/writeback.h>
  #include <linux/init.h>
  #include <linux/backing-dev.h>
  #include <linux/task_io_accounting_ops.h>
  #include <linux/blkdev.h>
  #include <linux/mpage.h>
  #include <linux/rmap.h>
  #include <linux/percpu.h>
  #include <linux/notifier.h>
  #include <linux/smp.h>
  #include <linux/sysctl.h>
  #include <linux/cpu.h>
  #include <linux/syscalls.h>
  #include <linux/buffer_head.h>
  #include <linux/pagevec.h>
  #include <trace/events/writeback.h>
  
  /*
   * Sleep at most 200ms at a time in balance_dirty_pages().
   */
  #define MAX_PAUSE		max(HZ/5, 1)
  
  /*
   * Estimate write bandwidth at 200ms intervals.
   */
  #define BANDWIDTH_INTERVAL	max(HZ/5, 1)
  #define RATELIMIT_CALC_SHIFT	10
  /*
   * After a CPU has dirtied this many pages, balance_dirty_pages_ratelimited
   * will look to see if it needs to force writeback or throttling.
   */
  static long ratelimit_pages = 32;
  /* The following parameters are exported via /proc/sys/vm */
  
  /*
   * Start background writeback (via writeback threads) at this percentage
   */
  int dirty_background_ratio = 10;
  
  /*
   * dirty_background_bytes starts at 0 (disabled) so that it is a function of
   * dirty_background_ratio * the amount of dirtyable memory
   */
  unsigned long dirty_background_bytes;
  
  /*
   * free highmem will not be subtracted from the total free memory
   * for calculating free ratios if vm_highmem_is_dirtyable is true
   */
  int vm_highmem_is_dirtyable;
  
  /*
   * The generator of dirty data starts writeback at this percentage
   */
  int vm_dirty_ratio = 20;
  
  /*
   * vm_dirty_bytes starts at 0 (disabled) so that it is a function of
   * vm_dirty_ratio * the amount of dirtyable memory
   */
  unsigned long vm_dirty_bytes;
  
  /*
   * The interval between `kupdate'-style writebacks
   */
  unsigned int dirty_writeback_interval = 5 * 100; /* centiseconds */
  
  /*
   * The longest time for which data is allowed to remain dirty
   */
  unsigned int dirty_expire_interval = 30 * 100; /* centiseconds */
  
  /*
   * Flag that makes the machine dump writes/reads and block dirtyings.
   */
  int block_dump;
  
  /*
   * Flag that puts the machine in "laptop mode". Doubles as a timeout in jiffies:
   * a full sync is triggered after this time elapses without any disk activity.
   */
  int laptop_mode;
  
  EXPORT_SYMBOL(laptop_mode);
  
  /* End of sysctl-exported parameters */
  unsigned long global_dirty_limit;

  /*
   * Scale the writeback cache size proportional to the relative writeout speeds.
   *
   * We do this by keeping a floating proportion between BDIs, based on page
   * writeback completions [end_page_writeback()]. Those devices that write out
   * pages fastest will get the larger share, while the slower will get a smaller
   * share.
   *
   * We use page writeout completions because we are interested in getting rid of
   * dirty pages. Having them written out is the primary goal.
   *
   * We introduce a concept of time, a period over which we measure these events,
   * because demand can/will vary over time. The length of this period itself is
   * measured in page writeback completions.
   *
   */
  static struct prop_descriptor vm_completions;
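
  /*
   * Worked example (illustrative numbers, not from the original source):
   * if, over one completion period, a disk finishes 3000 page writebacks
   * while a USB key finishes 1000, the floating proportion converges
   * towards 3/4 vs 1/4, and bdi_dirty_limit() below grants the disk the
   * correspondingly larger share of the dirty limit.
   */
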
  /*
   * couple the period to the dirty_ratio:
   *
   *   period/2 ~ roundup_pow_of_two(dirty limit)
   */
  static int calc_period_shift(void)
  {
  	unsigned long dirty_total;
  	if (vm_dirty_bytes)
  		dirty_total = vm_dirty_bytes / PAGE_SIZE;
  	else
  		dirty_total = (vm_dirty_ratio * determine_dirtyable_memory()) /
  				100;
  	return 2 + ilog2(dirty_total - 1);
  }
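
  /*
   * Worked example (illustrative numbers): with 4GB of dirtyable memory
   * (1048576 4k pages) and vm_dirty_ratio = 20, dirty_total is 209715
   * pages and calc_period_shift() returns 2 + ilog2(209714) = 19, i.e.
   * half of the proportion's aging period corresponds to
   * roundup_pow_of_two(dirty limit) = 2^18 writeback completions.
   */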
  
  /*
   * update the period when the dirty threshold changes.
   */
  static void update_completion_period(void)
  {
  	int shift = calc_period_shift();
  	prop_change_shift(&vm_completions, shift);
  
  	writeback_set_ratelimit();
  }
  
  int dirty_background_ratio_handler(struct ctl_table *table, int write,
  		void __user *buffer, size_t *lenp,
  		loff_t *ppos)
  {
  	int ret;
  	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
  	if (ret == 0 && write)
  		dirty_background_bytes = 0;
  	return ret;
  }
  
  int dirty_background_bytes_handler(struct ctl_table *table, int write,
  		void __user *buffer, size_t *lenp,
  		loff_t *ppos)
  {
  	int ret;
  	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
  	if (ret == 0 && write)
  		dirty_background_ratio = 0;
  	return ret;
  }
  int dirty_ratio_handler(struct ctl_table *table, int write,
  		void __user *buffer, size_t *lenp,
  		loff_t *ppos)
  {
  	int old_ratio = vm_dirty_ratio;
  	int ret;
  	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
  	if (ret == 0 && write && vm_dirty_ratio != old_ratio) {
  		update_completion_period();
  		vm_dirty_bytes = 0;
  	}
  	return ret;
  }
  
  
  int dirty_bytes_handler(struct ctl_table *table, int write,
  		void __user *buffer, size_t *lenp,
  		loff_t *ppos)
  {
  	unsigned long old_bytes = vm_dirty_bytes;
  	int ret;
  	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
  	if (ret == 0 && write && vm_dirty_bytes != old_bytes) {
  		update_completion_period();
  		vm_dirty_ratio = 0;
  	}
  	return ret;
  }
  
  /*
   * Increment the BDI's writeout completion count and the global writeout
   * completion count. Called from test_clear_page_writeback().
   */
  static inline void __bdi_writeout_inc(struct backing_dev_info *bdi)
  {
  	__inc_bdi_stat(bdi, BDI_WRITTEN);
  	__prop_inc_percpu_max(&vm_completions, &bdi->completions,
  			      bdi->max_prop_frac);
  }
  void bdi_writeout_inc(struct backing_dev_info *bdi)
  {
  	unsigned long flags;
  
  	local_irq_save(flags);
  	__bdi_writeout_inc(bdi);
  	local_irq_restore(flags);
  }
  EXPORT_SYMBOL_GPL(bdi_writeout_inc);
  /*
   * Obtain an accurate fraction of the BDI's portion.
   */
  static void bdi_writeout_fraction(struct backing_dev_info *bdi,
  		long *numerator, long *denominator)
  {
  	prop_fraction_percpu(&vm_completions, &bdi->completions,
  				numerator, denominator);
  }
  /*
   * bdi_min_ratio keeps the sum of the minimum dirty shares of all
   * registered backing devices, which, for obvious reasons, can not
   * exceed 100%.
   */
  static unsigned int bdi_min_ratio;
  
  int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio)
  {
  	int ret = 0;

  	spin_lock_bh(&bdi_lock);
  	if (min_ratio > bdi->max_ratio) {
  		ret = -EINVAL;
  	} else {
  		min_ratio -= bdi->min_ratio;
  		if (bdi_min_ratio + min_ratio < 100) {
  			bdi_min_ratio += min_ratio;
  			bdi->min_ratio += min_ratio;
  		} else {
  			ret = -EINVAL;
  		}
  	}
  	spin_unlock_bh(&bdi_lock);
  
  	return ret;
  }
  
  int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned max_ratio)
  {
  	int ret = 0;
  
  	if (max_ratio > 100)
  		return -EINVAL;
  	spin_lock_bh(&bdi_lock);
  	if (bdi->min_ratio > max_ratio) {
  		ret = -EINVAL;
  	} else {
  		bdi->max_ratio = max_ratio;
  		bdi->max_prop_frac = (PROP_FRAC_BASE * max_ratio) / 100;
  	}
  	spin_unlock_bh(&bdi_lock);
  
  	return ret;
  }
  EXPORT_SYMBOL(bdi_set_max_ratio);
  
  /*
   * Work out the current dirty-memory clamping and background writeout
   * thresholds.
   *
   * The main aim here is to lower them aggressively if there is a lot of mapped
   * memory around, to avoid stressing page reclaim with lots of unreclaimable
   * pages.  It is better to clamp down on writers than to start swapping and
   * performing lots of scanning.
   *
   * We only allow 1/2 of the currently-unmapped memory to be dirtied.
   *
   * We don't permit the clamping level to fall below 5% - that is getting rather
   * excessive.
   *
   * We make sure that the background writeout level is below the adjusted
   * clamping level.
   */
  
  static unsigned long highmem_dirtyable_memory(unsigned long total)
  {
  #ifdef CONFIG_HIGHMEM
  	int node;
  	unsigned long x = 0;
  	for_each_node_state(node, N_HIGH_MEMORY) {
  		struct zone *z =
  			&NODE_DATA(node)->node_zones[ZONE_HIGHMEM];
  		x += zone_page_state(z, NR_FREE_PAGES) +
  		     zone_reclaimable_pages(z);
  	}
  	/*
  	 * Make sure that the number of highmem pages is never larger
  	 * than the total amount of dirtyable memory. This can only
  	 * occur in very strange VM situations but we want to make sure
  	 * that this does not occur.
  	 */
  	return min(x, total);
  #else
  	return 0;
  #endif
  }
  /**
   * determine_dirtyable_memory - amount of memory that may be used
   *
   * Returns the number of pages that can currently be freed and used
   * by the kernel for direct mappings.
   */
  unsigned long determine_dirtyable_memory(void)
  {
  	unsigned long x;
  	x = global_page_state(NR_FREE_PAGES) + global_reclaimable_pages();
  
  	if (!vm_highmem_is_dirtyable)
  		x -= highmem_dirtyable_memory(x);
  	return x + 1;	/* Ensure that we never return 0 */
  }
  static unsigned long dirty_freerun_ceiling(unsigned long thresh,
  					   unsigned long bg_thresh)
  {
  	return (thresh + bg_thresh) / 2;
  }
  static unsigned long hard_dirty_limit(unsigned long thresh)
  {
  	return max(thresh, global_dirty_limit);
  }
  /*
   * global_dirty_limits - background-writeback and dirty-throttling thresholds
   *
   * Calculate the dirty thresholds based on sysctl parameters
   * - vm.dirty_background_ratio  or  vm.dirty_background_bytes
   * - vm.dirty_ratio             or  vm.dirty_bytes
   * The dirty limits will be lifted by 1/4 for PF_LESS_THROTTLE (i.e. nfsd) and
   * real-time tasks.
   */
  void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty)
  {
  	unsigned long background;
  	unsigned long dirty;
  	unsigned long uninitialized_var(available_memory);
  	struct task_struct *tsk;
  	if (!vm_dirty_bytes || !dirty_background_bytes)
  		available_memory = determine_dirtyable_memory();
  	if (vm_dirty_bytes)
  		dirty = DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE);
  	else
  		dirty = (vm_dirty_ratio * available_memory) / 100;

  	if (dirty_background_bytes)
  		background = DIV_ROUND_UP(dirty_background_bytes, PAGE_SIZE);
  	else
  		background = (dirty_background_ratio * available_memory) / 100;

  	if (background >= dirty)
  		background = dirty / 2;
  	tsk = current;
  	if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk)) {
  		background += background / 4;
  		dirty += dirty / 4;
  	}
  	*pbackground = background;
  	*pdirty = dirty;
  	trace_global_dirty_state(background, dirty);
  }
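
  /*
   * Worked example (illustrative numbers): with 1GB of dirtyable memory
   * (262144 4k pages), vm_dirty_ratio = 20 and dirty_background_ratio =
   * 10, this yields dirty = 52428 and background = 26214 pages; a
   * PF_LESS_THROTTLE task (e.g. nfsd) sees both lifted by 1/4, to
   * 65535 and 32767 pages.
   */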

  /**
   * bdi_dirty_limit - @bdi's share of dirty throttling threshold
   * @bdi: the backing_dev_info to query
   * @dirty: global dirty limit in pages
   *
   * Returns @bdi's dirty limit in pages. The term "dirty" in the context of
   * dirty balancing includes all PG_dirty, PG_writeback and NFS unstable pages.
   *
   * Note that balance_dirty_pages() will only seriously take it as a hard limit
   * when sleeping max_pause per page is not enough to keep the dirty pages under
   * control. For example, when the device is completely stalled due to some error
   * conditions, or when there are 1000 dd tasks writing to a slow 10MB/s USB key.
   * In other normal situations, it acts more gently by throttling the tasks
   * more (rather than completely blocking them) when the bdi dirty pages go high.
   *
   * It allocates high/low dirty limits to fast/slow devices, in order to prevent
   * - starving fast devices
   * - piling up dirty pages (that will take long time to sync) on slow devices
   *
   * The bdi's share of dirty limit will be adapting to its throughput and
   * bounded by the bdi->min_ratio and/or bdi->max_ratio parameters, if set.
   */
  unsigned long bdi_dirty_limit(struct backing_dev_info *bdi, unsigned long dirty)
  {
  	u64 bdi_dirty;
  	long numerator, denominator;

  	/*
  	 * Calculate this BDI's share of the dirty ratio.
  	 */
  	bdi_writeout_fraction(bdi, &numerator, &denominator);

  	bdi_dirty = (dirty * (100 - bdi_min_ratio)) / 100;
  	bdi_dirty *= numerator;
  	do_div(bdi_dirty, denominator);

  	bdi_dirty += (dirty * bdi->min_ratio) / 100;
  	if (bdi_dirty > (dirty * bdi->max_ratio) / 100)
  		bdi_dirty = dirty * bdi->max_ratio / 100;
  
  	return bdi_dirty;
  }
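
  /*
   * Worked example (illustrative numbers): with a global dirty limit of
   * 100000 pages, bdi_min_ratio == 0 and a bdi that contributed 30% of
   * the recent writeout completions (numerator/denominator = 3/10), the
   * bdi's share is 30000 pages; bdi->max_ratio == 20 would cap that at
   * 20000 pages.
   */
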
  /*
   * Dirty position control.
   *
   * (o) global/bdi setpoints
   *
   * We want the dirty pages be balanced around the global/bdi setpoints.
   * When the number of dirty pages is higher/lower than the setpoint, the
   * dirty position control ratio (and hence task dirty ratelimit) will be
   * decreased/increased to bring the dirty pages back to the setpoint.
   *
   *     pos_ratio = 1 << RATELIMIT_CALC_SHIFT
   *
   *     if (dirty < setpoint) scale up   pos_ratio
   *     if (dirty > setpoint) scale down pos_ratio
   *
   *     if (bdi_dirty < bdi_setpoint) scale up   pos_ratio
   *     if (bdi_dirty > bdi_setpoint) scale down pos_ratio
   *
   *     task_ratelimit = dirty_ratelimit * pos_ratio >> RATELIMIT_CALC_SHIFT
   *
   * (o) global control line
   *
   *     ^ pos_ratio
   *     |
   *     |            |<===== global dirty control scope ======>|
   * 2.0 .............*
   *     |            .*
   *     |            . *
   *     |            .   *
   *     |            .     *
   *     |            .        *
   *     |            .            *
   * 1.0 ................................*
   *     |            .                  .     *
   *     |            .                  .          *
   *     |            .                  .              *
   *     |            .                  .                 *
   *     |            .                  .                    *
   *   0 +------------.------------------.----------------------*------------->
   *           freerun^          setpoint^                 limit^   dirty pages
   *
   * (o) bdi control line
   *
   *     ^ pos_ratio
   *     |
   *     |            *
   *     |              *
   *     |                *
   *     |                  *
   *     |                    * |<=========== span ============>|
   * 1.0 .......................*
   *     |                      . *
   *     |                      .   *
   *     |                      .     *
   *     |                      .       *
   *     |                      .         *
   *     |                      .           *
   *     |                      .             *
   *     |                      .               *
   *     |                      .                 *
   *     |                      .                   *
   *     |                      .                     *
   * 1/4 ...............................................* * * * * * * * * * * *
   *     |                      .                         .
   *     |                      .                           .
   *     |                      .                             .
   *   0 +----------------------.-------------------------------.------------->
   *                bdi_setpoint^                    x_intercept^
   *
   * The bdi control line won't drop below pos_ratio=1/4, so that bdi_dirty can
   * be smoothly throttled down to normal if it starts high in situations like
   * - start writing to a slow SD card and a fast disk at the same time. The SD
   *   card's bdi_dirty may rush to many times higher than bdi_setpoint.
   * - the bdi dirty thresh drops quickly due to change of JBOD workload
   */
  static unsigned long bdi_position_ratio(struct backing_dev_info *bdi,
  					unsigned long thresh,
  					unsigned long bg_thresh,
  					unsigned long dirty,
  					unsigned long bdi_thresh,
  					unsigned long bdi_dirty)
  {
  	unsigned long write_bw = bdi->avg_write_bandwidth;
  	unsigned long freerun = dirty_freerun_ceiling(thresh, bg_thresh);
  	unsigned long limit = hard_dirty_limit(thresh);
  	unsigned long x_intercept;
  	unsigned long setpoint;		/* dirty pages' target balance point */
  	unsigned long bdi_setpoint;
  	unsigned long span;
  	long long pos_ratio;		/* for scaling up/down the rate limit */
  	long x;
  
  	if (unlikely(dirty >= limit))
  		return 0;
  
  	/*
  	 * global setpoint
  	 *
  	 *                           setpoint - dirty 3
  	 *        f(dirty) := 1.0 + (----------------)
  	 *                           limit - setpoint
  	 *
  	 * it's a 3rd order polynomial that subjects to
  	 *
  	 * (1) f(freerun)  = 2.0 => ramp up dirty_ratelimit reasonably fast
  	 * (2) f(setpoint) = 1.0 => the balance point
  	 * (3) f(limit)    = 0   => the hard limit
  	 * (4) df/dx      <= 0	 => negative feedback control
  	 * (5) the closer to setpoint, the smaller |df/dx| (and the reverse)
  	 *     => fast response on large errors; small oscillation near setpoint
  	 */
  	setpoint = (freerun + limit) / 2;
  	x = div_s64((setpoint - dirty) << RATELIMIT_CALC_SHIFT,
  		    limit - setpoint + 1);
  	pos_ratio = x;
  	pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
  	pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
  	pos_ratio += 1 << RATELIMIT_CALC_SHIFT;
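  	/*
  	 * Worked example (illustrative numbers): freerun = 100000 and
  	 * limit = 200000 give setpoint = 150000.  At dirty = 125000,
  	 * (setpoint - dirty) / (limit - setpoint) == 0.5, so pos_ratio =
  	 * 1.0 + 0.5^3 = 1.125 (a mild speedup); at dirty == freerun it
  	 * approaches 2.0 and at dirty == limit it reaches 0, matching
  	 * properties (1)-(3) above.
  	 */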
  
  	/*
  	 * We have computed basic pos_ratio above based on global situation. If
  	 * the bdi is over/under its share of dirty pages, we want to scale
  	 * pos_ratio further down/up. That is done by the following mechanism.
  	 */
  
  	/*
  	 * bdi setpoint
  	 *
  	 *        f(bdi_dirty) := 1.0 + k * (bdi_dirty - bdi_setpoint)
  	 *
  	 *                        x_intercept - bdi_dirty
  	 *                     := --------------------------
  	 *                        x_intercept - bdi_setpoint
  	 *
  	 * The main bdi control line is a linear function that subjects to
  	 *
  	 * (1) f(bdi_setpoint) = 1.0
  	 * (2) k = - 1 / (8 * write_bw)  (in single bdi case)
  	 *     or equally: x_intercept = bdi_setpoint + 8 * write_bw
  	 *
  	 * For single bdi case, the dirty pages are observed to fluctuate
  	 * regularly within range
  	 *        [bdi_setpoint - write_bw/2, bdi_setpoint + write_bw/2]
  	 * for various filesystems, where (2) can yield in a reasonable 12.5%
  	 * fluctuation range for pos_ratio.
  	 *
  	 * For JBOD case, bdi_thresh (not bdi_dirty!) could fluctuate up to its
  	 * own size, so move the slope over accordingly and choose a slope that
  	 * yields 100% pos_ratio fluctuation on suddenly doubled bdi_thresh.
  	 */
  	if (unlikely(bdi_thresh > thresh))
  		bdi_thresh = thresh;
  	/*
  	 * It's very possible that bdi_thresh is close to 0 not because the
  	 * device is slow, but because it has remained inactive for a long time.
  	 * Grant such devices a reasonably good (hopefully IO-efficient)
  	 * threshold, so that occasional writes won't be blocked and active
  	 * writes can ramp up the threshold quickly.
  	 */
  	bdi_thresh = max(bdi_thresh, (limit - dirty) / 8);
  	/*
  	 * scale global setpoint to bdi's:
  	 *	bdi_setpoint = setpoint * bdi_thresh / thresh
  	 */
  	x = div_u64((u64)bdi_thresh << 16, thresh + 1);
  	bdi_setpoint = setpoint * (u64)x >> 16;
  	/*
  	 * Use span=(8*write_bw) in single bdi case as indicated by
  	 * (thresh - bdi_thresh ~= 0) and transit to bdi_thresh in JBOD case.
  	 *
  	 *        bdi_thresh                    thresh - bdi_thresh
  	 * span = ---------- * (8 * write_bw) + ------------------- * bdi_thresh
  	 *          thresh                            thresh
  	 */
  	span = (thresh - bdi_thresh + 8 * write_bw) * (u64)x >> 16;
  	x_intercept = bdi_setpoint + span;
  
  	if (bdi_dirty < x_intercept - span / 4) {
  		pos_ratio = div_u64(pos_ratio * (x_intercept - bdi_dirty),
  				    x_intercept - bdi_setpoint + 1);
  	} else
  		pos_ratio /= 4;
  	/*
  	 * bdi reserve area, safeguard against dirty pool underrun and disk idle
  	 * It may push the desired control point of global dirty pages higher
  	 * than setpoint.
  	 */
  	x_intercept = bdi_thresh / 2;
  	if (bdi_dirty < x_intercept) {
  		if (bdi_dirty > x_intercept / 8)
  			pos_ratio = div_u64(pos_ratio * x_intercept, bdi_dirty);
  		else
  			pos_ratio *= 8;
  	}
  	return pos_ratio;
  }
  static void bdi_update_write_bandwidth(struct backing_dev_info *bdi,
  				       unsigned long elapsed,
  				       unsigned long written)
  {
  	const unsigned long period = roundup_pow_of_two(3 * HZ);
  	unsigned long avg = bdi->avg_write_bandwidth;
  	unsigned long old = bdi->write_bandwidth;
  	u64 bw;
  
  	/*
  	 * bw = written * HZ / elapsed
  	 *
  	 *                   bw * elapsed + write_bandwidth * (period - elapsed)
  	 * write_bandwidth = ---------------------------------------------------
  	 *                                          period
  	 */
  	bw = written - bdi->written_stamp;
  	bw *= HZ;
  	if (unlikely(elapsed > period)) {
  		do_div(bw, elapsed);
  		avg = bw;
  		goto out;
  	}
  	bw += (u64)bdi->write_bandwidth * (period - elapsed);
  	bw >>= ilog2(period);
  
  	/*
  	 * one more level of smoothing, for filtering out sudden spikes
  	 */
  	if (avg > old && old >= (unsigned long)bw)
  		avg -= (avg - old) >> 3;
  
  	if (avg < old && old <= (unsigned long)bw)
  		avg += (old - avg) >> 3;
  
  out:
  	bdi->write_bandwidth = bw;
  	bdi->avg_write_bandwidth = avg;
  }
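
  /*
   * Worked example (illustrative numbers, assuming HZ == 1000): 1000
   * pages written during elapsed = 200 jiffies means an instantaneous
   * bandwidth of 1000 * HZ / 200 = 5000 pages/s (~20MB/s with 4k
   * pages).  With period = roundup_pow_of_two(3 * HZ) = 4096, the
   * stored estimate only moves towards that value with weight
   * elapsed/period (~5%), smoothing out bursty measurements.
   */
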
  /*
   * The global dirtyable memory and dirty threshold could be suddenly knocked
   * down by a large amount (e.g. on the startup of KVM in a swapless system).
   * This may throw the system into a deep dirty-exceeded state and throttle
   * heavy/light dirtiers alike. To retain good responsiveness, maintain
   * global_dirty_limit, tracking it slowly down to the knocked-down dirty
   * threshold.
   */
  static void update_dirty_limit(unsigned long thresh, unsigned long dirty)
  {
  	unsigned long limit = global_dirty_limit;
  
  	/*
  	 * Follow up in one step.
  	 */
  	if (limit < thresh) {
  		limit = thresh;
  		goto update;
  	}
  
  	/*
  	 * Follow down slowly. Use the higher one as the target, because thresh
  	 * may drop below dirty. This is exactly the reason to introduce
  	 * global_dirty_limit which is guaranteed to lie above the dirty pages.
  	 */
  	thresh = max(thresh, dirty);
  	if (limit > thresh) {
  		limit -= (limit - thresh) >> 5;
  		goto update;
  	}
  	return;
  update:
  	global_dirty_limit = limit;
  }
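
  /*
   * Worked example (illustrative numbers): if thresh suddenly drops
   * from limit = 100000 to 60000 pages, each update walks limit down by
   * (limit - thresh) >> 5, i.e. ~3% of the remaining gap; at one update
   * per BANDWIDTH_INTERVAL (200ms), halving the gap takes about 22
   * steps (~4.4s) instead of happening abruptly.
   */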
  
  static void global_update_bandwidth(unsigned long thresh,
  				    unsigned long dirty,
  				    unsigned long now)
  {
  	static DEFINE_SPINLOCK(dirty_lock);
  	static unsigned long update_time;
  
  	/*
  	 * check locklessly first to optimize away locking for the most time
  	 */
  	if (time_before(now, update_time + BANDWIDTH_INTERVAL))
  		return;
  
  	spin_lock(&dirty_lock);
  	if (time_after_eq(now, update_time + BANDWIDTH_INTERVAL)) {
  		update_dirty_limit(thresh, dirty);
  		update_time = now;
  	}
  	spin_unlock(&dirty_lock);
  }
  /*
   * Maintain bdi->dirty_ratelimit, the base dirty throttle rate.
   *
   * Normal bdi tasks will be curbed at or below it in long term.
   * Obviously it should be around (write_bw / N) when there are N dd tasks.
   */
  static void bdi_update_dirty_ratelimit(struct backing_dev_info *bdi,
  				       unsigned long thresh,
  				       unsigned long bg_thresh,
  				       unsigned long dirty,
  				       unsigned long bdi_thresh,
  				       unsigned long bdi_dirty,
  				       unsigned long dirtied,
  				       unsigned long elapsed)
  {
  	unsigned long freerun = dirty_freerun_ceiling(thresh, bg_thresh);
  	unsigned long limit = hard_dirty_limit(thresh);
  	unsigned long setpoint = (freerun + limit) / 2;
  	unsigned long write_bw = bdi->avg_write_bandwidth;
  	unsigned long dirty_ratelimit = bdi->dirty_ratelimit;
  	unsigned long dirty_rate;
  	unsigned long task_ratelimit;
  	unsigned long balanced_dirty_ratelimit;
  	unsigned long pos_ratio;
  	unsigned long step;
  	unsigned long x;
  
  	/*
  	 * The dirty rate will match the writeout rate in long term, except
  	 * when dirty pages are truncated by userspace or re-dirtied by FS.
  	 */
  	dirty_rate = (dirtied - bdi->dirtied_stamp) * HZ / elapsed;
  
  	pos_ratio = bdi_position_ratio(bdi, thresh, bg_thresh, dirty,
  				       bdi_thresh, bdi_dirty);
  	/*
  	 * task_ratelimit reflects each dd's dirty rate for the past 200ms.
  	 */
  	task_ratelimit = (u64)dirty_ratelimit *
  					pos_ratio >> RATELIMIT_CALC_SHIFT;
  	task_ratelimit++; /* it helps ramp up dirty_ratelimit from tiny values */
  
  	/*
  	 * A linear estimation of the "balanced" throttle rate. The theory is,
  	 * if there are N dd tasks, each throttled at task_ratelimit, the bdi's
  	 * dirty_rate will be measured to be (N * task_ratelimit). So the below
  	 * formula will yield the balanced rate limit (write_bw / N).
  	 *
  	 * Note that the expanded form is not a pure rate feedback:
  	 *	rate_(i+1) = rate_(i) * (write_bw / dirty_rate)		     (1)
  	 * but also takes pos_ratio into account:
  	 *	rate_(i+1) = rate_(i) * (write_bw / dirty_rate) * pos_ratio  (2)
  	 *
  	 * (1) is not realistic because pos_ratio also takes part in balancing
  	 * the dirty rate.  Consider the state
  	 *	pos_ratio = 0.5						     (3)
  	 *	rate = 2 * (write_bw / N)				     (4)
  	 * If (1) is used, it will get stuck in that state! Because each dd will
  	 * be throttled at
  	 *	task_ratelimit = pos_ratio * rate = (write_bw / N)	     (5)
  	 * yielding
  	 *	dirty_rate = N * task_ratelimit = write_bw		     (6)
  	 * put (6) into (1) we get
  	 *	rate_(i+1) = rate_(i)					     (7)
  	 *
  	 * So we end up using (2) to always keep
  	 *	rate_(i+1) ~= (write_bw / N)				     (8)
  	 * regardless of the value of pos_ratio. As long as (8) is satisfied,
  	 * pos_ratio is able to drive itself to 1.0, which is not only where
  	 * the dirty count meets the setpoint, but also where the slope of
  	 * pos_ratio is most flat and hence task_ratelimit is least fluctuated.
  	 */
  	balanced_dirty_ratelimit = div_u64((u64)task_ratelimit * write_bw,
  					   dirty_rate | 1);
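
  	/*
  	 * Worked example (illustrative numbers): write_bw = 10000 pages/s
  	 * shared by N = 4 dd tasks, each currently throttled at
  	 * task_ratelimit = 5000 pages/s, yields a measured dirty_rate of
  	 * ~20000 pages/s, so balanced_dirty_ratelimit = 5000 * 10000 /
  	 * 20000 = 2500 pages/s == write_bw / N.
  	 */
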
  	/*
  	 * We could safely do this and return immediately:
  	 *
  	 *	bdi->dirty_ratelimit = balanced_dirty_ratelimit;
  	 *
  	 * However, to get a more stable dirty_ratelimit, the code below makes
  	 * use of task_ratelimit to filter out singular points and
  	 * limit the step size.
  	 *
  	 * The below code essentially only uses the relative value of
  	 *
  	 *	task_ratelimit - dirty_ratelimit
  	 *	= (pos_ratio - 1) * dirty_ratelimit
  	 *
  	 * which reflects the direction and size of dirty position error.
  	 */
  
  	/*
  	 * dirty_ratelimit will follow balanced_dirty_ratelimit iff
  	 * task_ratelimit is on the same side of dirty_ratelimit, too.
  	 * For example, when
  	 * - dirty_ratelimit > balanced_dirty_ratelimit
  	 * - dirty_ratelimit > task_ratelimit (dirty pages are above setpoint)
  	 * lowering dirty_ratelimit will help meet both the position and rate
  	 * control targets. Otherwise, don't update dirty_ratelimit if it will
  	 * only help meet the rate target. After all, what the users ultimately
  	 * feel and care are stable dirty rate and small position error.
  	 *
  	 * |task_ratelimit - dirty_ratelimit| is used to limit the step size
  	 * and filter out the singular points of balanced_dirty_ratelimit, which
  	 * keeps jumping around randomly and can even leap far away at times
  	 * due to the small 200ms estimation period of dirty_rate (we want to
  	 * keep that period small to reduce time lags).
  	 */
  	step = 0;
  	if (dirty < setpoint) {
  		x = min(bdi->balanced_dirty_ratelimit,
  			 min(balanced_dirty_ratelimit, task_ratelimit));
  		if (dirty_ratelimit < x)
  			step = x - dirty_ratelimit;
  	} else {
  		x = max(bdi->balanced_dirty_ratelimit,
  			 max(balanced_dirty_ratelimit, task_ratelimit));
  		if (dirty_ratelimit > x)
  			step = dirty_ratelimit - x;
  	}
  
  	/*
  	 * Don't pursue 100% rate matching. It's impossible since the balanced
  	 * rate itself is constantly fluctuating. So decrease the track speed
  	 * when it gets close to the target. Helps eliminate pointless tremors.
  	 */
  	step >>= dirty_ratelimit / (2 * step + 1);
  	/*
  	 * Limit the tracking speed to avoid overshooting.
  	 */
  	step = (step + 7) / 8;
  
  	if (dirty_ratelimit < balanced_dirty_ratelimit)
  		dirty_ratelimit += step;
  	else
  		dirty_ratelimit -= step;
  
  	bdi->dirty_ratelimit = max(dirty_ratelimit, 1UL);
  	bdi->balanced_dirty_ratelimit = balanced_dirty_ratelimit;
  
  	trace_bdi_dirty_ratelimit(bdi, dirty_rate, task_ratelimit);
  }
  void __bdi_update_bandwidth(struct backing_dev_info *bdi,
  			    unsigned long thresh,
  			    unsigned long bg_thresh,
  			    unsigned long dirty,
  			    unsigned long bdi_thresh,
  			    unsigned long bdi_dirty,
  			    unsigned long start_time)
  {
  	unsigned long now = jiffies;
  	unsigned long elapsed = now - bdi->bw_time_stamp;
  	unsigned long dirtied;
  	unsigned long written;
  
  	/*
  	 * rate-limit, only update once every 200ms.
  	 */
  	if (elapsed < BANDWIDTH_INTERVAL)
  		return;
  	dirtied = percpu_counter_read(&bdi->bdi_stat[BDI_DIRTIED]);
  	written = percpu_counter_read(&bdi->bdi_stat[BDI_WRITTEN]);
  
  	/*
  	 * Skip quiet periods when disk bandwidth is under-utilized.
  	 * (at least 1s idle time between two flusher runs)
  	 */
  	if (elapsed > HZ && time_before(bdi->bw_time_stamp, start_time))
  		goto snapshot;
  	if (thresh) {
  		global_update_bandwidth(thresh, dirty, now);
  		bdi_update_dirty_ratelimit(bdi, thresh, bg_thresh, dirty,
  					   bdi_thresh, bdi_dirty,
  					   dirtied, elapsed);
  	}
  	bdi_update_write_bandwidth(bdi, elapsed, written);
  
  snapshot:
  	bdi->dirtied_stamp = dirtied;
  	bdi->written_stamp = written;
  	bdi->bw_time_stamp = now;
  }
  
  static void bdi_update_bandwidth(struct backing_dev_info *bdi,
  				 unsigned long thresh,
  				 unsigned long bg_thresh,
  				 unsigned long dirty,
  				 unsigned long bdi_thresh,
  				 unsigned long bdi_dirty,
  				 unsigned long start_time)
  {
  	if (time_is_after_eq_jiffies(bdi->bw_time_stamp + BANDWIDTH_INTERVAL))
  		return;
  	spin_lock(&bdi->wb.list_lock);
  	__bdi_update_bandwidth(bdi, thresh, bg_thresh, dirty,
  			       bdi_thresh, bdi_dirty, start_time);
  	spin_unlock(&bdi->wb.list_lock);
  }
  /*
   * After a task has dirtied this many pages, balance_dirty_pages_ratelimited_nr()
   * will look to see if it needs to start dirty throttling.
   *
   * If dirty_poll_interval is too low, big NUMA machines will call the expensive
   * global_page_state() too often. So scale it near-sqrt to the safety margin
   * (the number of pages we may dirty without exceeding the dirty limits).
   */
  static unsigned long dirty_poll_interval(unsigned long dirty,
  					 unsigned long thresh)
  {
  	if (thresh > dirty)
  		return 1UL << (ilog2(thresh - dirty) >> 1);
  
  	return 1;
  }
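
  /*
   * Worked example (illustrative numbers): with a safety margin of
   * thresh - dirty = 10000 pages, ilog2(10000) = 13 and tasks poll the
   * global state every 1 << (13 >> 1) = 64 dirtied pages; as the margin
   * shrinks to 100 pages the interval drops to 8 pages.
   */
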
  static unsigned long bdi_max_pause(struct backing_dev_info *bdi,
  				   unsigned long bdi_dirty)
  {
  	unsigned long bw = bdi->avg_write_bandwidth;
  	unsigned long hi = ilog2(bw);
  	unsigned long lo = ilog2(bdi->dirty_ratelimit);
  	unsigned long t;
  
  	/* target for 20ms max pause on 1-dd case */
  	t = HZ / 50;
  
  	/*
  	 * Scale up pause time for concurrent dirtiers in order to reduce CPU
  	 * overheads.
  	 *
  	 * (N * 20ms) on 2^N concurrent tasks.
  	 */
  	if (hi > lo)
  		t += (hi - lo) * (20 * HZ) / 1024;
  
  	/*
  	 * Limit pause time for small memory systems. If sleeping for too
  	 * long, a small pool of dirty/writeback pages may go empty and the disk go
  	 * idle.
  	 *
  	 * 8 serves as the safety ratio.
  	 */
  	t = min(t, bdi_dirty * HZ / (8 * bw + 1));
  
  	/*
  	 * The pause time will be settled within range (max_pause/4, max_pause).
  	 * Apply a minimal value of 4 to get a non-zero max_pause/4.
  	 */
  	return clamp_val(t, 4, MAX_PAUSE);
  }
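
  /*
   * Worked example (illustrative numbers): with 2^N dd tasks,
   * dirty_ratelimit settles near write_bw / 2^N, so hi - lo ~= N and
   * the pause target grows from 20ms to roughly 20ms + N * 20ms,
   * before being clamped to MAX_PAUSE and scaled down for small
   * bdi_dirty pools.
   */
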
  /*
   * balance_dirty_pages() must be called by processes which are generating dirty
   * data.  It looks at the number of dirty pages in the machine and will force
   * the caller to wait once crossing the (background_thresh + dirty_thresh) / 2.
   * If we're over `background_thresh' then the writeback threads are woken to
   * perform some writeout.
   */
  static void balance_dirty_pages(struct address_space *mapping,
  				unsigned long pages_dirtied)
  {
  	unsigned long nr_reclaimable;	/* = file_dirty + unstable_nfs */
  	unsigned long bdi_reclaimable;
  	unsigned long nr_dirty;  /* = file_dirty + writeback + unstable_nfs */
  	unsigned long bdi_dirty;
  	unsigned long freerun;
  	unsigned long background_thresh;
  	unsigned long dirty_thresh;
  	unsigned long bdi_thresh;
  	long pause = 0;
  	long uninitialized_var(max_pause);
  	bool dirty_exceeded = false;
  	unsigned long task_ratelimit;
  	unsigned long uninitialized_var(dirty_ratelimit);
  	unsigned long pos_ratio;
  	struct backing_dev_info *bdi = mapping->backing_dev_info;
  	unsigned long start_time = jiffies;
  
  	for (;;) {
  		/*
  		 * Unstable writes are a feature of certain networked
  		 * filesystems (e.g. NFS) in which data may have been
  		 * written to the server's write cache, but has not yet
  		 * been flushed to permanent storage.
  		 */
  		nr_reclaimable = global_page_state(NR_FILE_DIRTY) +
  					global_page_state(NR_UNSTABLE_NFS);
  		nr_dirty = nr_reclaimable + global_page_state(NR_WRITEBACK);

  		global_dirty_limits(&background_thresh, &dirty_thresh);
  
  		/*
  		 * Throttle it only when the background writeback cannot
  		 * catch up. This avoids (excessively) small writeouts
  		 * when the bdi limits are ramping up.
  		 */
  		freerun = dirty_freerun_ceiling(dirty_thresh,
  						background_thresh);
  		if (nr_dirty <= freerun)
  			break;
  		if (unlikely(!writeback_in_progress(bdi)))
  			bdi_start_background_writeback(bdi);
  
  		/*
  		 * bdi_thresh is not treated as some limiting factor as
  		 * dirty_thresh, due to reasons
  		 * - in JBOD setup, bdi_thresh can fluctuate a lot
  		 * - in a system with HDD and USB key, the USB key may somehow
  		 *   go into state (bdi_dirty >> bdi_thresh) either because
  		 *   bdi_dirty starts high, or because bdi_thresh drops low.
  		 *   In this case we don't want to hard throttle the USB key
  		 *   dirtiers for 100 seconds until bdi_dirty drops under
  		 *   bdi_thresh. Instead the auxiliary bdi control line in
  		 *   bdi_position_ratio() will let the dirtier task progress
  		 *   at some rate <= (write_bw / 2) for bringing down bdi_dirty.
  		 */
  		bdi_thresh = bdi_dirty_limit(bdi, dirty_thresh);

  		/*
  		 * In order to avoid the stacked BDI deadlock we need
  		 * to ensure we accurately count the 'dirty' pages when
  		 * the threshold is low.
  		 *
  		 * Otherwise it would be possible to get thresh+n pages
  		 * reported dirty, even though there are thresh-m pages
  		 * actually dirty; with m+n sitting in the percpu
  		 * deltas.
  		 */
  		if (bdi_thresh < 2 * bdi_stat_error(bdi)) {
  			bdi_reclaimable = bdi_stat_sum(bdi, BDI_RECLAIMABLE);
  			bdi_dirty = bdi_reclaimable +
  				    bdi_stat_sum(bdi, BDI_WRITEBACK);
  		} else {
  			bdi_reclaimable = bdi_stat(bdi, BDI_RECLAIMABLE);
  			bdi_dirty = bdi_reclaimable +
  				    bdi_stat(bdi, BDI_WRITEBACK);
  		}

  		dirty_exceeded = (bdi_dirty > bdi_thresh) ||
  				  (nr_dirty > dirty_thresh);
  		if (dirty_exceeded && !bdi->dirty_exceeded)
  			bdi->dirty_exceeded = 1;

  		bdi_update_bandwidth(bdi, dirty_thresh, background_thresh,
  				     nr_dirty, bdi_thresh, bdi_dirty,
  				     start_time);

  		max_pause = bdi_max_pause(bdi, bdi_dirty);
  		dirty_ratelimit = bdi->dirty_ratelimit;
  		pos_ratio = bdi_position_ratio(bdi, dirty_thresh,
  					       background_thresh, nr_dirty,
  					       bdi_thresh, bdi_dirty);
  		task_ratelimit = ((u64)dirty_ratelimit * pos_ratio) >>
  							RATELIMIT_CALC_SHIFT;
  		if (unlikely(task_ratelimit == 0)) {
  			pause = max_pause;
  			goto pause;
  		}
  		pause = HZ * pages_dirtied / task_ratelimit;
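  		/*
  		 * e.g. (illustrative numbers) task_ratelimit = 1000 pages/s
  		 * and pages_dirtied = 50 give pause = HZ * 50 / 1000, a 50ms
  		 * sleep that matches the task's allotted dirty rate.
  		 */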
  		if (unlikely(pause <= 0)) {
  			trace_balance_dirty_pages(bdi,
  						  dirty_thresh,
  						  background_thresh,
  						  nr_dirty,
  						  bdi_thresh,
  						  bdi_dirty,
  						  dirty_ratelimit,
  						  task_ratelimit,
  						  pages_dirtied,
  						  pause,
  						  start_time);
  			pause = 1; /* avoid resetting nr_dirtied_pause below */
  			break;
  		}
  		pause = min(pause, max_pause);
  
  pause:
  		trace_balance_dirty_pages(bdi,
  					  dirty_thresh,
  					  background_thresh,
  					  nr_dirty,
  					  bdi_thresh,
  					  bdi_dirty,
  					  dirty_ratelimit,
  					  task_ratelimit,
  					  pages_dirtied,
  					  pause,
  					  start_time);
  		__set_current_state(TASK_KILLABLE);
  		io_schedule_timeout(pause);

  		/*
  		 * This is typically equal to (nr_dirty < dirty_thresh) and can
  		 * also keep "1000+ dd on a slow USB stick" under control.
  		 */
  		if (task_ratelimit)
  			break;

  		/*
  		 * In the case of an unresponsive NFS server whose NFS dirty
  		 * pages exceed dirty_thresh, give the other good bdis a pipe
  		 * to go through, so that tasks on them still remain responsive.
  		 *
  		 * In theory 1 page is enough to keep the consumer-producer
  		 * pipe going: the flusher cleans 1 page => the task dirties 1
  		 * more page. However bdi_dirty has accounting errors.  So use
  		 * the larger and more IO friendly bdi_stat_error.
  		 */
  		if (bdi_dirty <= bdi_stat_error(bdi))
  			break;
  		if (fatal_signal_pending(current))
  			break;
  	}
  	if (!dirty_exceeded && bdi->dirty_exceeded)
  		bdi->dirty_exceeded = 0;

  	current->nr_dirtied = 0;
  	if (pause == 0) { /* in freerun area */
  		current->nr_dirtied_pause =
  				dirty_poll_interval(nr_dirty, dirty_thresh);
  	} else if (pause <= max_pause / 4 &&
  		   pages_dirtied >= current->nr_dirtied_pause) {
  		current->nr_dirtied_pause = clamp_val(
  					dirty_ratelimit * (max_pause / 2) / HZ,
  					pages_dirtied + pages_dirtied / 8,
  					pages_dirtied * 4);
  	} else if (pause >= max_pause) {
  		current->nr_dirtied_pause = 1 | clamp_val(
  					dirty_ratelimit * (max_pause / 2) / HZ,
  					pages_dirtied / 4,
  					pages_dirtied - pages_dirtied / 8);
  	}

  	if (writeback_in_progress(bdi))
  		return;
  
  	/*
  	 * In laptop mode, we wait until hitting the higher threshold before
  	 * starting background writeout, and then write out all the way down
  	 * to the lower threshold.  So slow writers cause minimal disk activity.
  	 *
  	 * In normal mode, we start background writeout at the lower
  	 * background_thresh, to keep the amount of dirty memory low.
  	 */
143dfe861   Wu Fengguang   writeback: IO-les...
1156
1157
1158
1159
  	if (laptop_mode)
  		return;
  
  	if (nr_reclaimable > background_thresh)
c5444198c   Christoph Hellwig   writeback: simpli...
1160
  		bdi_start_background_writeback(bdi);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1161
  }

void set_page_dirty_balance(struct page *page, int page_mkwrite)
{
	if (set_page_dirty(page) || page_mkwrite) {
		struct address_space *mapping = page_mapping(page);

		if (mapping)
			balance_dirty_pages_ratelimited(mapping);
	}
}

static DEFINE_PER_CPU(int, bdp_ratelimits);

/**
 * balance_dirty_pages_ratelimited_nr - balance dirty memory state
 * @mapping: address_space which was dirtied
 * @nr_pages_dirtied: number of pages which the caller has just dirtied
 *
 * Processes which are dirtying memory should call in here once for each page
 * which was newly dirtied.  The function will periodically check the system's
 * dirty state and will initiate writeback if needed.
 *
 * On really big machines, get_writeback_state is expensive, so try to avoid
 * calling it too often (ratelimiting).  But once we're over the dirty memory
 * limit we decrease the ratelimiting by a lot, to prevent individual processes
 * from overshooting the limit by (ratelimit_pages) each.
 */
void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
					unsigned long nr_pages_dirtied)
{
	struct backing_dev_info *bdi = mapping->backing_dev_info;
	int ratelimit;
	int *p;

	if (!bdi_cap_account_dirty(bdi))
		return;

	ratelimit = current->nr_dirtied_pause;
	if (bdi->dirty_exceeded)
		ratelimit = min(ratelimit, 32 >> (PAGE_SHIFT - 10));

	current->nr_dirtied += nr_pages_dirtied;

	preempt_disable();
	/*
	 * This prevents one CPU from accumulating too many dirtied pages
	 * without calling into balance_dirty_pages(), which can happen when
	 * 1000+ tasks all start dirtying pages at exactly the same time,
	 * hence all honouring a too-large initial task->nr_dirtied_pause.
	 */
	p = &__get_cpu_var(bdp_ratelimits);
	if (unlikely(current->nr_dirtied >= ratelimit))
		*p = 0;
	else {
		*p += nr_pages_dirtied;
		if (unlikely(*p >= ratelimit_pages)) {
			*p = 0;
			ratelimit = 0;
		}
	}
	preempt_enable();

	if (unlikely(current->nr_dirtied >= ratelimit))
		balance_dirty_pages(mapping, current->nr_dirtied);
}
  EXPORT_SYMBOL(balance_dirty_pages_ratelimited_nr);
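
/*
 * Illustrative sketch (not part of the original file): a writer that has
 * just dirtied pages calls in once per page, here via the single-page
 * balance_dirty_pages_ratelimited() wrapper assumed to come from
 * <linux/writeback.h>. The helper below is hypothetical and kept out of
 * the build.
 */
#if 0	/* example only */
static void example_dirty_pages(struct address_space *mapping,
				struct page **pages, int nr)
{
	int i;

	for (i = 0; i < nr; i++) {
		set_page_dirty(pages[i]);
		/* once for each newly dirtied page, as documented above */
		balance_dirty_pages_ratelimited(mapping);
	}
}
#endif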

void throttle_vm_writeout(gfp_t gfp_mask)
{
	unsigned long background_thresh;
	unsigned long dirty_thresh;

	for ( ; ; ) {
		global_dirty_limits(&background_thresh, &dirty_thresh);

		/*
		 * Boost the allowable dirty threshold a bit for page
		 * allocators so they don't get DoS'ed by heavy writers
		 */
		dirty_thresh += dirty_thresh / 10;      /* wheeee... */

		if (global_page_state(NR_UNSTABLE_NFS) +
		    global_page_state(NR_WRITEBACK) <= dirty_thresh)
			break;
		congestion_wait(BLK_RW_ASYNC, HZ/10);

		/*
		 * The caller might hold locks which can prevent IO completion
		 * or progress in the filesystem.  So we cannot just sit here
		 * waiting for IO to complete.
		 */
		if ((gfp_mask & (__GFP_FS|__GFP_IO)) != (__GFP_FS|__GFP_IO))
			break;
	}
}

/*
 * sysctl handler for /proc/sys/vm/dirty_writeback_centisecs
 */
int dirty_writeback_centisecs_handler(ctl_table *table, int write,
	void __user *buffer, size_t *length, loff_t *ppos)
{
	proc_dointvec(table, write, buffer, length, ppos);
	bdi_arm_supers_timer();
	return 0;
}

#ifdef CONFIG_BLOCK
void laptop_mode_timer_fn(unsigned long data)
{
	struct request_queue *q = (struct request_queue *)data;
	int nr_pages = global_page_state(NR_FILE_DIRTY) +
		global_page_state(NR_UNSTABLE_NFS);

	/*
	 * We want to write everything out, not just down to the dirty
	 * threshold
	 */
	if (bdi_has_dirty_io(&q->backing_dev_info))
		bdi_start_writeback(&q->backing_dev_info, nr_pages,
					WB_REASON_LAPTOP_TIMER);
}

/*
 * We've spun up the disk and we're in laptop mode: schedule writeback
 * of all dirty data a few seconds from now.  If the flush is already scheduled
 * then push it back - the user is still using the disk.
 */
void laptop_io_completion(struct backing_dev_info *info)
{
	mod_timer(&info->laptop_mode_wb_timer, jiffies + laptop_mode);
}

/*
 * We're in laptop mode and we've just synced. The sync's writes will have
 * caused another writeback to be scheduled by laptop_io_completion.
 * Nothing needs to be written back anymore, so we unschedule the writeback.
 */
void laptop_sync_completion(void)
{
	struct backing_dev_info *bdi;

	rcu_read_lock();

	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list)
		del_timer(&bdi->laptop_mode_wb_timer);

	rcu_read_unlock();
}
#endif

/*
 * If ratelimit_pages is too high then we can get into dirty-data overload
 * if a large number of processes all perform writes at the same time.
 * If it is too low then SMP machines will call the (expensive)
 * get_writeback_state too often.
 *
 * Here we set ratelimit_pages to a level which ensures that when all CPUs are
 * dirtying in parallel, we cannot go more than 3% (1/32) over the dirty memory
 * thresholds.
 */
void writeback_set_ratelimit(void)
{
	unsigned long background_thresh;
	unsigned long dirty_thresh;

	global_dirty_limits(&background_thresh, &dirty_thresh);
	ratelimit_pages = dirty_thresh / (num_online_cpus() * 32);
	if (ratelimit_pages < 16)
		ratelimit_pages = 16;
  }
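
/*
 * Worked example (illustrative numbers, not from the original source):
 * with dirty_thresh = 100000 pages and 4 online CPUs this gives
 * ratelimit_pages = 100000 / (4 * 32) = 781, so all CPUs dirtying at
 * full speed can overshoot by at most about 4 * 781 pages, i.e. the
 * advertised 3% (1/32) of the threshold.
 */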

static int __cpuinit
ratelimit_handler(struct notifier_block *self, unsigned long u, void *v)
{
	writeback_set_ratelimit();
	return NOTIFY_DONE;
}

static struct notifier_block __cpuinitdata ratelimit_nb = {
	.notifier_call	= ratelimit_handler,
	.next		= NULL,
};
  
/*
 * Called early on to tune the page writeback dirty limits.
 *
 * We used to scale dirty pages according to how total memory
 * related to pages that could be allocated for buffers (by
 * comparing nr_free_buffer_pages() to vm_total_pages).
 *
 * However, that was when we used "dirty_ratio" to scale with
 * all memory, and we don't do that any more. "dirty_ratio"
 * is now applied to total non-HIGHPAGE memory (by subtracting
 * totalhigh_pages from vm_total_pages), and as such we can't
 * get into the old insane situation any more where we had
 * large amounts of dirty pages compared to a small amount of
 * non-HIGHMEM memory.
 *
 * But we might still want to scale the dirty_ratio by how
 * much memory the box has..
 */
  void __init page_writeback_init(void)
  {
	int shift;

	writeback_set_ratelimit();
	register_cpu_notifier(&ratelimit_nb);

	shift = calc_period_shift();
	prop_descriptor_init(&vm_completions, shift);
}

  /**
 * tag_pages_for_writeback - tag pages to be written by write_cache_pages
 * @mapping: address space structure to write
 * @start: starting page index
 * @end: ending page index (inclusive)
 *
 * This function scans the page range from @start to @end (inclusive) and tags
 * all pages that have DIRTY tag set with a special TOWRITE tag. The idea is
 * that write_cache_pages (or whoever calls this function) will then use the
 * TOWRITE tag to identify pages eligible for writeback.  This mechanism is
 * used to avoid livelocking of writeback by a process steadily creating new
 * dirty pages in the file (thus it is important for this function to be quick
 * so that it can tag pages faster than a dirtying process can create them).
 */
/*
 * We tag pages in batches of WRITEBACK_TAG_BATCH to reduce tree_lock latency.
 */
void tag_pages_for_writeback(struct address_space *mapping,
			     pgoff_t start, pgoff_t end)
{
#define WRITEBACK_TAG_BATCH 4096
	unsigned long tagged;

	do {
		spin_lock_irq(&mapping->tree_lock);
		tagged = radix_tree_range_tag_if_tagged(&mapping->page_tree,
				&start, end, WRITEBACK_TAG_BATCH,
				PAGECACHE_TAG_DIRTY, PAGECACHE_TAG_TOWRITE);
		spin_unlock_irq(&mapping->tree_lock);
		WARN_ON_ONCE(tagged > WRITEBACK_TAG_BATCH);
		cond_resched();
		/* We check 'start' to handle wrapping when end == ~0UL */
	} while (tagged >= WRITEBACK_TAG_BATCH && start);
}
  EXPORT_SYMBOL(tag_pages_for_writeback);
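
/*
 * Sketch of the tag-then-write protocol (illustrative only; this is in
 * essence what write_cache_pages() below already does): tag the range
 * once, then look pages up by the TOWRITE tag, so pages dirtied after
 * the tagging pass cannot livelock the walk. Kept out of the build.
 */
#if 0	/* example only */
static void example_towrite_walk(struct address_space *mapping)
{
	struct pagevec pvec;
	pgoff_t index = 0;
	int i, nr;

	tag_pages_for_writeback(mapping, 0, (pgoff_t)-1);
	pagevec_init(&pvec, 0);
	while ((nr = pagevec_lookup_tag(&pvec, mapping, &index,
					PAGECACHE_TAG_TOWRITE,
					PAGEVEC_SIZE))) {
		for (i = 0; i < nr; i++) {
			/* lock, clean and write out pvec.pages[i] here */
		}
		pagevec_release(&pvec);
	}
}
#endif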
  
  /**
 * write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
 * @mapping: address space structure to write
 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
 * @writepage: function called for each page
 * @data: data passed to writepage function
 *
 * If a page is already under I/O, write_cache_pages() skips it, even
 * if it's dirty.  This is desirable behaviour for memory-cleaning writeback,
 * but it is INCORRECT for data-integrity system calls such as fsync().  fsync()
 * and msync() need to guarantee that all the data which was dirty at the time
 * the call was made gets new I/O started against it.  If wbc->sync_mode is
 * WB_SYNC_ALL then we were called for data integrity and we must wait for
 * existing IO to complete.
 *
 * To avoid livelocks (when other process dirties new pages), we first tag
 * pages which should be written back with TOWRITE tag and only then start
 * writing them. For data-integrity sync we have to be careful so that we do
 * not miss some pages (e.g., because some other process has cleared TOWRITE
 * tag we set). The rule we follow is that TOWRITE tag can be cleared only
 * by the process clearing the DIRTY tag (and submitting the page for IO).
 */
int write_cache_pages(struct address_space *mapping,
		      struct writeback_control *wbc, writepage_t writepage,
		      void *data)
{
	int ret = 0;
	int done = 0;
	struct pagevec pvec;
	int nr_pages;
	pgoff_t uninitialized_var(writeback_index);
	pgoff_t index;
	pgoff_t end;		/* Inclusive */
	pgoff_t done_index;
	int cycled;
	int range_whole = 0;
	int tag;

	pagevec_init(&pvec, 0);
	if (wbc->range_cyclic) {
		writeback_index = mapping->writeback_index; /* prev offset */
		index = writeback_index;
		if (index == 0)
			cycled = 1;
		else
			cycled = 0;
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_CACHE_SHIFT;
		end = wbc->range_end >> PAGE_CACHE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		cycled = 1; /* ignore range_cyclic tests */
	}
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag = PAGECACHE_TAG_TOWRITE;
	else
		tag = PAGECACHE_TAG_DIRTY;
retry:
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag_pages_for_writeback(mapping, index, end);
	done_index = index;
	while (!done && (index <= end)) {
		int i;

		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
			      min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/*
			 * At this point, the page may be truncated or
			 * invalidated (changing page->mapping to NULL), or
			 * even swizzled back from swapper_space to tmpfs file
			 * mapping. However, page->index will not change
			 * because we have a reference on the page.
			 */
			if (page->index > end) {
				/*
				 * can't be range_cyclic (1st pass) because
				 * end == -1 in that case.
				 */
				done = 1;
				break;
			}

			done_index = page->index;

			lock_page(page);

			/*
			 * Page truncated or invalidated. We can freely skip it
			 * then, even for data integrity operations: the page
			 * has disappeared concurrently, so there could be no
			 * real expectation of this data integrity operation
			 * even if there is now a new, dirty page at the same
			 * pagecache address.
			 */
			if (unlikely(page->mapping != mapping)) {
continue_unlock:
				unlock_page(page);
				continue;
			}

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			if (PageWriteback(page)) {
				if (wbc->sync_mode != WB_SYNC_NONE)
					wait_on_page_writeback(page);
				else
					goto continue_unlock;
			}

			BUG_ON(PageWriteback(page));
			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			trace_wbc_writepage(wbc, mapping->backing_dev_info);
			ret = (*writepage)(page, wbc, data);
			if (unlikely(ret)) {
				if (ret == AOP_WRITEPAGE_ACTIVATE) {
					unlock_page(page);
					ret = 0;
				} else {
					/*
					 * done_index is set past this page,
					 * so media errors will not choke
					 * background writeout for the entire
					 * file. This has consequences for
					 * range_cyclic semantics (ie. it may
					 * not be suitable for data integrity
					 * writeout).
					 */
					done_index = page->index + 1;
					done = 1;
					break;
				}
			}

			/*
			 * We stop writing back only if we are not doing
			 * integrity sync. In case of integrity sync we have to
			 * keep going until we have written all the pages
			 * we tagged for writeback prior to entering this loop.
			 */
			if (--wbc->nr_to_write <= 0 &&
			    wbc->sync_mode == WB_SYNC_NONE) {
				done = 1;
				break;
			}
		}
		pagevec_release(&pvec);
		cond_resched();
	}
	if (!cycled && !done) {
		/*
		 * range_cyclic:
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		cycled = 1;
		index = 0;
		end = writeback_index - 1;
		goto retry;
	}
	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = done_index;

	return ret;
}
  EXPORT_SYMBOL(write_cache_pages);
  
  /*
   * Function used by generic_writepages to call the real writepage
   * function and set the mapping flags on error
   */
  static int __writepage(struct page *page, struct writeback_control *wbc,
  		       void *data)
  {
  	struct address_space *mapping = data;
  	int ret = mapping->a_ops->writepage(page, wbc);
  	mapping_set_error(mapping, ret);
  	return ret;
  }
  
  /**
   * generic_writepages - walk the list of dirty pages of the given address space and writepage() all of them.
   * @mapping: address space structure to write
   * @wbc: subtract the number of written pages from *@wbc->nr_to_write
   *
   * This is a library function, which implements the writepages()
   * address_space_operation.
   */
  int generic_writepages(struct address_space *mapping,
  		       struct writeback_control *wbc)
  {
	struct blk_plug plug;
	int ret;

	/* deal with chardevs and other special file */
	if (!mapping->a_ops->writepage)
		return 0;

	blk_start_plug(&plug);
	ret = write_cache_pages(mapping, wbc, __writepage, mapping);
	blk_finish_plug(&plug);
	return ret;
}

  EXPORT_SYMBOL(generic_writepages);
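
/*
 * Illustrative wiring (not from this file): a simple filesystem can use
 * generic_writepages() as its ->writepages by only supplying ->writepage;
 * example_writepage and example_aops are made-up names.
 */
#if 0	/* example only */
static const struct address_space_operations example_aops = {
	.writepage	= example_writepage,	/* hypothetical callback */
	.writepages	= generic_writepages,
};
#endif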
  int do_writepages(struct address_space *mapping, struct writeback_control *wbc)
  {
	int ret;

	if (wbc->nr_to_write <= 0)
		return 0;
	if (mapping->a_ops->writepages)
		ret = mapping->a_ops->writepages(mapping, wbc);
	else
		ret = generic_writepages(mapping, wbc);
	return ret;
  }
  
  /**
   * write_one_page - write out a single page and optionally wait on I/O
 * @page: the page to write
 * @wait: if true, wait on writeout
   *
   * The page must be locked by the caller and will be unlocked upon return.
   *
   * write_one_page() returns a negative error code if I/O failed.
   */
  int write_one_page(struct page *page, int wait)
  {
  	struct address_space *mapping = page->mapping;
  	int ret = 0;
  	struct writeback_control wbc = {
  		.sync_mode = WB_SYNC_ALL,
  		.nr_to_write = 1,
  	};
  
  	BUG_ON(!PageLocked(page));
  
  	if (wait)
  		wait_on_page_writeback(page);
  
  	if (clear_page_dirty_for_io(page)) {
  		page_cache_get(page);
  		ret = mapping->a_ops->writepage(page, &wbc);
  		if (ret == 0 && wait) {
  			wait_on_page_writeback(page);
  			if (PageError(page))
  				ret = -EIO;
  		}
  		page_cache_release(page);
  	} else {
  		unlock_page(page);
  	}
  	return ret;
  }
  EXPORT_SYMBOL(write_one_page);
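
/*
 * Usage sketch (illustrative, kept out of the build): write_one_page()
 * expects a locked page and consumes the lock; a data-integrity caller
 * passes wait == 1 and checks the return value.
 */
#if 0	/* example only */
static int example_flush_page(struct page *page)
{
	lock_page(page);
	return write_one_page(page, 1);	/* unlocks the page */
}
#endif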
  
  /*
 * For address_spaces which do not use buffers nor write back.
 */
int __set_page_dirty_no_writeback(struct page *page)
{
	if (!PageDirty(page))
		return !TestSetPageDirty(page);
  	return 0;
  }
  
  /*
 * Helper function for set_page_dirty family.
 * NOTE: This relies on being atomic wrt interrupts.
 */
void account_page_dirtied(struct page *page, struct address_space *mapping)
{
	if (mapping_cap_account_dirty(mapping)) {
		__inc_zone_page_state(page, NR_FILE_DIRTY);
		__inc_zone_page_state(page, NR_DIRTIED);
		__inc_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE);
		__inc_bdi_stat(mapping->backing_dev_info, BDI_DIRTIED);
  		task_io_account_write(PAGE_CACHE_SIZE);
  	}
  }
EXPORT_SYMBOL(account_page_dirtied);

/*
   * Helper function for set_page_writeback family.
   * NOTE: Unlike account_page_dirtied this does not rely on being atomic
   * wrt interrupts.
   */
  void account_page_writeback(struct page *page)
  {
  	inc_zone_page_state(page, NR_WRITEBACK);
  }
  EXPORT_SYMBOL(account_page_writeback);
  
  /*
 * For address_spaces which do not use buffers.  Just tag the page as dirty in
 * its radix tree.
 *
 * This is also used when a single buffer is being dirtied: we want to set the
 * page dirty in that case, but not all the buffers.  This is a "bottom-up"
 * dirtying, whereas __set_page_dirty_buffers() is a "top-down" dirtying.
 *
 * Most callers have locked the page, which pins the address_space in memory.
 * But zap_pte_range() does not lock the page, however in that case the
 * mapping is pinned by the vma's ->vm_file reference.
 *
 * We take care to handle the case where the page was truncated from the
 * mapping by re-checking page_mapping() inside tree_lock.
 */
int __set_page_dirty_nobuffers(struct page *page)
{
	if (!TestSetPageDirty(page)) {
		struct address_space *mapping = page_mapping(page);
		struct address_space *mapping2;

		if (!mapping)
			return 1;

		spin_lock_irq(&mapping->tree_lock);
		mapping2 = page_mapping(page);
		if (mapping2) { /* Race with truncate? */
			BUG_ON(mapping2 != mapping);
			WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page));
			account_page_dirtied(page, mapping);
			radix_tree_tag_set(&mapping->page_tree,
				page_index(page), PAGECACHE_TAG_DIRTY);
		}
		spin_unlock_irq(&mapping->tree_lock);
		if (mapping->host) {
			/* !PageAnon && !swapper_space */
			__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
		}
		return 1;
	}
	return 0;
  }
  EXPORT_SYMBOL(__set_page_dirty_nobuffers);
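
/*
 * Illustrative use (not from this file): an address_space that keeps no
 * buffer_head state can point its ->set_page_dirty straight at the
 * helper above; example_nobh_aops is a made-up name.
 */
#if 0	/* example only */
static const struct address_space_operations example_nobh_aops = {
	.set_page_dirty	= __set_page_dirty_nobuffers,
};
#endif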
  
  /*
   * When a writepage implementation decides that it doesn't want to write this
   * page for some reason, it should redirty the locked page via
   * redirty_page_for_writepage() and it should then unlock the page and return 0
   */
  int redirty_page_for_writepage(struct writeback_control *wbc, struct page *page)
  {
  	wbc->pages_skipped++;
  	return __set_page_dirty_nobuffers(page);
  }
  EXPORT_SYMBOL(redirty_page_for_writepage);
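
/*
 * Illustrative ->writepage bail-out (example_writepage and
 * example_can_write() are hypothetical): when the implementation cannot
 * make progress it redirties the page, unlocks it and returns 0,
 * exactly as described above. Kept out of the build.
 */
#if 0	/* example only */
static int example_writepage(struct page *page, struct writeback_control *wbc)
{
	if (!example_can_write(page)) {
		redirty_page_for_writepage(wbc, page);
		unlock_page(page);
		return 0;
	}
	/* ... otherwise actually write the page ... */
	return 0;
}
#endif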
  
  /*
 * Dirty a page.
 *
 * For pages with a mapping this should be done under the page lock
 * for the benefit of asynchronous memory errors which prefer a consistent
 * dirty state. This rule can be broken in some special cases,
 * but it is better not to.
 *
 * If the mapping doesn't provide a set_page_dirty a_op, then
 * just fall through and assume that it wants buffer_heads.
 */
int set_page_dirty(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

	if (likely(mapping)) {
		int (*spd)(struct page *) = mapping->a_ops->set_page_dirty;
		/*
		 * readahead/lru_deactivate_page could leave
		 * PG_readahead/PG_reclaim set due to a race with
		 * end_page_writeback.
		 * For readahead: if the page gets written, the flag is
		 * reset, so there is no problem.
		 * For lru_deactivate_page: if the page is redirtied, the
		 * flag is reset, so no problem either; but if the page is
		 * later used by readahead, the stale flag confuses readahead
		 * and makes it restart the size ramp-up process. That is
		 * only a trivial problem, though.
		 */
		ClearPageReclaim(page);
#ifdef CONFIG_BLOCK
		if (!spd)
			spd = __set_page_dirty_buffers;
#endif
		return (*spd)(page);
	}
	if (!PageDirty(page)) {
		if (!TestSetPageDirty(page))
			return 1;
	}
  	return 0;
  }
  EXPORT_SYMBOL(set_page_dirty);
  
  /*
   * set_page_dirty() is racy if the caller has no reference against
   * page->mapping->host, and if the page is unlocked.  This is because another
   * CPU could truncate the page off the mapping and then free the mapping.
   *
   * Usually, the page _is_ locked, or the caller is a user-space process which
   * holds a reference on the inode by having an open file.
   *
   * In other cases, the page should be locked before running set_page_dirty().
   */
  int set_page_dirty_lock(struct page *page)
  {
  	int ret;

	lock_page(page);
  	ret = set_page_dirty(page);
  	unlock_page(page);
  	return ret;
  }
  EXPORT_SYMBOL(set_page_dirty_lock);
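
/*
 * Usage sketch (illustrative, kept out of the build): callers that hold
 * neither the page lock nor a mapping-pinning reference, e.g. code
 * completing direct-IO into user pages, use the locked variant.
 */
#if 0	/* example only */
static void example_dirty_user_pages(struct page **pages, int nr)
{
	int i;

	for (i = 0; i < nr; i++)
		set_page_dirty_lock(pages[i]);
}
#endif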
  
  /*
 * Clear a page's dirty flag, while caring for dirty memory accounting.
 * Returns true if the page was previously dirty.
 *
 * This is for preparing to put the page under writeout.  We leave the page
 * tagged as dirty in the radix tree so that a concurrent write-for-sync
 * can discover it via a PAGECACHE_TAG_DIRTY walk.  The ->writepage
 * implementation will run either set_page_writeback() or set_page_dirty(),
 * at which stage we bring the page's dirty flag and radix-tree dirty tag
 * back into sync.
 *
 * This incoherency between the page's dirty flag and radix-tree tag is
 * unfortunate, but it only exists while the page is locked.
 */
int clear_page_dirty_for_io(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

	BUG_ON(!PageLocked(page));

  	if (mapping && mapping_cap_account_dirty(mapping)) {
  		/*
  		 * Yes, Virginia, this is indeed insane.
  		 *
  		 * We use this sequence to make sure that
  		 *  (a) we account for dirty stats properly
  		 *  (b) we tell the low-level filesystem to
  		 *      mark the whole page dirty if it was
  		 *      dirty in a pagetable. Only to then
  		 *  (c) clean the page again and return 1 to
  		 *      cause the writeback.
  		 *
  		 * This way we avoid all nasty races with the
  		 * dirty bit in multiple places and clearing
  		 * them concurrently from different threads.
  		 *
  		 * Note! Normally the "set_page_dirty(page)"
  		 * has no effect on the actual dirty bit - since
  		 * that will already usually be set. But we
  		 * need the side effects, and it can help us
  		 * avoid races.
  		 *
  		 * We basically use the page "master dirty bit"
  		 * as a serialization point for all the different
  		 * threads doing their things.
		 */
		if (page_mkclean(page))
			set_page_dirty(page);
		/*
		 * We carefully synchronise fault handlers against
		 * installing a dirty pte and marking the page dirty
		 * at this point. We do this by having them hold the
		 * page lock at some point after installing their
		 * pte, but before marking the page dirty.
		 * Pages are always locked coming in here, so we get
		 * the desired exclusion. See mm/memory.c:do_wp_page()
		 * for more comments.
		 */
		if (TestClearPageDirty(page)) {
			dec_zone_page_state(page, NR_FILE_DIRTY);
			dec_bdi_stat(mapping->backing_dev_info,
					BDI_RECLAIMABLE);
			return 1;
		}
		return 0;
	}
	return TestClearPageDirty(page);
}
  EXPORT_SYMBOL(clear_page_dirty_for_io);
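
/*
 * Sketch of the canonical ->writepage ordering this helper is designed
 * for (illustrative; example_submit_io() is a made-up helper): clear the
 * dirty bit first, mark the page under writeback, then start the IO and
 * end writeback from the completion handler. Kept out of the build.
 */
#if 0	/* example only */
static int example_writepage_pattern(struct page *page,
				     struct writeback_control *wbc)
{
	if (!clear_page_dirty_for_io(page)) {
		unlock_page(page);	/* someone else cleaned it */
		return 0;
	}
	set_page_writeback(page);
	unlock_page(page);
	example_submit_io(page);	/* calls end_page_writeback() when done */
	return 0;
}
#endif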
  
  int test_clear_page_writeback(struct page *page)
  {
  	struct address_space *mapping = page_mapping(page);
  	int ret;
  
  	if (mapping) {
		struct backing_dev_info *bdi = mapping->backing_dev_info;
		unsigned long flags;

		spin_lock_irqsave(&mapping->tree_lock, flags);
		ret = TestClearPageWriteback(page);
		if (ret) {
			radix_tree_tag_clear(&mapping->page_tree,
						page_index(page),
						PAGECACHE_TAG_WRITEBACK);
			if (bdi_cap_account_writeback(bdi)) {
				__dec_bdi_stat(bdi, BDI_WRITEBACK);
				__bdi_writeout_inc(bdi);
			}
		}
		spin_unlock_irqrestore(&mapping->tree_lock, flags);
	} else {
		ret = TestClearPageWriteback(page);
	}
	if (ret) {
		dec_zone_page_state(page, NR_WRITEBACK);
		inc_zone_page_state(page, NR_WRITTEN);
	}
  	return ret;
  }
  
  int test_set_page_writeback(struct page *page)
  {
  	struct address_space *mapping = page_mapping(page);
  	int ret;
  
  	if (mapping) {
		struct backing_dev_info *bdi = mapping->backing_dev_info;
		unsigned long flags;

		spin_lock_irqsave(&mapping->tree_lock, flags);
		ret = TestSetPageWriteback(page);
		if (!ret) {
			radix_tree_tag_set(&mapping->page_tree,
						page_index(page),
						PAGECACHE_TAG_WRITEBACK);
			if (bdi_cap_account_writeback(bdi))
				__inc_bdi_stat(bdi, BDI_WRITEBACK);
		}
		if (!PageDirty(page))
			radix_tree_tag_clear(&mapping->page_tree,
						page_index(page),
						PAGECACHE_TAG_DIRTY);
		radix_tree_tag_clear(&mapping->page_tree,
				     page_index(page),
				     PAGECACHE_TAG_TOWRITE);
		spin_unlock_irqrestore(&mapping->tree_lock, flags);
	} else {
		ret = TestSetPageWriteback(page);
	}
	if (!ret)
		account_page_writeback(page);
	return ret;
}
  EXPORT_SYMBOL(test_set_page_writeback);
  
  /*
 * Return true if any of the pages in the mapping are marked with the
 * passed tag.
 */
int mapping_tagged(struct address_space *mapping, int tag)
{
	return radix_tree_tagged(&mapping->page_tree, tag);
  }
  EXPORT_SYMBOL(mapping_tagged);