  /*
   * mm/page-writeback.c
   *
   * Copyright (C) 2002, Linus Torvalds.
   * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
   *
   * Contains functions related to writing back dirty pages at the
   * address_space level.
   *
   * 10Apr2002	Andrew Morton
   *		Initial version
   */
  
  #include <linux/kernel.h>
  #include <linux/export.h>
  #include <linux/spinlock.h>
  #include <linux/fs.h>
  #include <linux/mm.h>
  #include <linux/swap.h>
  #include <linux/slab.h>
  #include <linux/pagemap.h>
  #include <linux/writeback.h>
  #include <linux/init.h>
  #include <linux/backing-dev.h>
  #include <linux/task_io_accounting_ops.h>
  #include <linux/blkdev.h>
  #include <linux/mpage.h>
  #include <linux/rmap.h>
  #include <linux/percpu.h>
  #include <linux/notifier.h>
  #include <linux/smp.h>
  #include <linux/sysctl.h>
  #include <linux/cpu.h>
  #include <linux/syscalls.h>
  #include <linux/buffer_head.h> /* __set_page_dirty_buffers */
  #include <linux/pagevec.h>
  #include <linux/timer.h>
  #include <trace/events/writeback.h>
  
  /*
   * Sleep at most 200ms at a time in balance_dirty_pages().
   */
  #define MAX_PAUSE		max(HZ/5, 1)
  
  /*
   * Try to keep balance_dirty_pages() call intervals higher than this many pages
   * by raising the pause time to max_pause when the interval falls below it.
   */
  #define DIRTY_POLL_THRESH	(128 >> (PAGE_SHIFT - 10))
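
  /*
   * Example (illustrative, assuming 4KB pages, i.e. PAGE_SHIFT == 12):
   * DIRTY_POLL_THRESH = 128 >> 2 = 32 pages, i.e. roughly 128KB may be
   * dirtied between two balance_dirty_pages() calls.
   */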
  
  /*
   * Estimate write bandwidth at 200ms intervals.
   */
  #define BANDWIDTH_INTERVAL	max(HZ/5, 1)
  #define RATELIMIT_CALC_SHIFT	10
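
  /*
   * Aside (editorial gloss): quantities scaled by RATELIMIT_CALC_SHIFT below
   * are fixed-point with 10 fractional bits, so 1 << 10 == 1024 stands for
   * 1.0 and a pos_ratio of 2048 means 2.0.
   */
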
  /*
   * After a CPU has dirtied this many pages, balance_dirty_pages_ratelimited
   * will look to see if it needs to force writeback or throttling.
   */
  static long ratelimit_pages = 32;
  /* The following parameters are exported via /proc/sys/vm */
  
  /*
   * Start background writeback (via writeback threads) at this percentage
   */
  int dirty_background_ratio = 10;
  
  /*
   * dirty_background_bytes starts at 0 (disabled) so that it is a function of
   * dirty_background_ratio * the amount of dirtyable memory
   */
  unsigned long dirty_background_bytes;
  
  /*
   * free highmem will not be subtracted from the total free memory
   * for calculating free ratios if vm_highmem_is_dirtyable is true
   */
  int vm_highmem_is_dirtyable;
  
  /*
   * The generator of dirty data starts writeback at this percentage
   */
  int vm_dirty_ratio = 20;
  
  /*
   * vm_dirty_bytes starts at 0 (disabled) so that it is a function of
   * vm_dirty_ratio * the amount of dirtyable memory
   */
  unsigned long vm_dirty_bytes;
  
  /*
   * The interval between `kupdate'-style writebacks
   */
  unsigned int dirty_writeback_interval = 5 * 100; /* centiseconds */

  EXPORT_SYMBOL_GPL(dirty_writeback_interval);
  /*
   * The longest time for which data is allowed to remain dirty
   */
  unsigned int dirty_expire_interval = 30 * 100; /* centiseconds */
  
  /*
   * Flag that makes the machine dump writes/reads and block dirtyings.
   */
  int block_dump;
  
  /*
   * Flag that puts the machine in "laptop mode". Doubles as a timeout in jiffies:
   * a full sync is triggered after this time elapses without any disk activity.
   */
  int laptop_mode;
  
  EXPORT_SYMBOL(laptop_mode);
  
  /* End of sysctl-exported parameters */
  unsigned long global_dirty_limit;

  /*
   * Scale the writeback cache size proportional to the relative writeout speeds.
   *
   * We do this by keeping a floating proportion between BDIs, based on page
   * writeback completions [end_page_writeback()]. Those devices that write out
   * pages fastest will get the larger share, while the slower will get a smaller
   * share.
   *
   * We use page writeout completions because we are interested in getting rid of
   * dirty pages. Having them written out is the primary goal.
   *
   * We introduce a concept of time, a period over which we measure these events,
   * because demand can/will vary over time. The length of this period itself is
   * measured in page writeback completions.
   *
   */
  static struct fprop_global writeout_completions;
  
  static void writeout_period(unsigned long t);
  /* Timer for aging of writeout_completions */
  static struct timer_list writeout_period_timer =
  		TIMER_DEFERRED_INITIALIZER(writeout_period, 0, 0);
  static unsigned long writeout_period_time = 0;
  
  /*
   * Length of period for aging writeout fractions of bdis. This is an
   * arbitrarily chosen number. The longer the period, the slower fractions will
   * reflect changes in current writeout rate.
   */
  #define VM_COMPLETIONS_PERIOD_LEN (3*HZ)

  /*
   * Work out the current dirty-memory clamping and background writeout
   * thresholds.
   *
   * The main aim here is to lower them aggressively if there is a lot of mapped
   * memory around, to avoid stressing page reclaim with lots of unreclaimable
   * pages.  It is better to clamp down on writers than to start swapping and
   * performing lots of scanning.
   *
   * We only allow 1/2 of the currently-unmapped memory to be dirtied.
   *
   * We don't permit the clamping level to fall below 5% - that is getting rather
   * excessive.
   *
   * We make sure that the background writeout level is below the adjusted
   * clamping level.
   */

  /*
   * In a memory zone, there is a certain number of pages we consider
   * available for the page cache, which is essentially the number of
   * free and reclaimable pages, minus some zone reserves to protect
   * lowmem and the ability to uphold the zone's watermarks without
   * requiring writeback.
   *
   * This number of dirtyable pages is the base value from which the
   * user-configurable dirty ratio yields the effective number of pages
   * that are allowed to be actually dirtied, either per individual zone
   * or globally, using the sum of dirtyable pages over all zones.
   *
   * Because the user is allowed to specify the dirty limit globally as
   * an absolute number of bytes, calculating the per-zone dirty limit can
   * require translating the configured limit into a percentage of
   * global dirtyable memory first.
   */
  static unsigned long highmem_dirtyable_memory(unsigned long total)
  {
  #ifdef CONFIG_HIGHMEM
  	int node;
  	unsigned long x = 0;
  
  	for_each_node_state(node, N_HIGH_MEMORY) {
  		struct zone *z =
  			&NODE_DATA(node)->node_zones[ZONE_HIGHMEM];
  
  		x += zone_page_state(z, NR_FREE_PAGES) +
  		     zone_reclaimable_pages(z) - z->dirty_balance_reserve;
  	}
  	/*
  	 * Make sure that the number of highmem pages is never larger
  	 * than the number of the total dirtyable memory. This can only
  	 * occur in very strange VM situations but we want to make sure
  	 * that this does not occur.
  	 */
  	return min(x, total);
  #else
  	return 0;
  #endif
  }
  
  /**
   * global_dirtyable_memory - number of globally dirtyable pages
   *
   * Returns the global number of pages potentially available for dirty
   * page cache.  This is the base value for the global dirty limits.
   */
  static unsigned long global_dirtyable_memory(void)
  {
  	unsigned long x;
  	x = global_page_state(NR_FREE_PAGES) + global_reclaimable_pages() -
  	    dirty_balance_reserve;
  
  	if (!vm_highmem_is_dirtyable)
  		x -= highmem_dirtyable_memory(x);
  
  	return x + 1;	/* Ensure that we never return 0 */
  }
  
  /*
   * global_dirty_limits - background-writeback and dirty-throttling thresholds
   *
   * Calculate the dirty thresholds based on sysctl parameters
   * - vm.dirty_background_ratio  or  vm.dirty_background_bytes
   * - vm.dirty_ratio             or  vm.dirty_bytes
   * The dirty limits will be lifted by 1/4 for PF_LESS_THROTTLE (i.e. nfsd) and
   * real-time tasks.
   */
  void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty)
  {
  	unsigned long background;
  	unsigned long dirty;
  	unsigned long uninitialized_var(available_memory);
  	struct task_struct *tsk;
  
  	if (!vm_dirty_bytes || !dirty_background_bytes)
  		available_memory = global_dirtyable_memory();
  
  	if (vm_dirty_bytes)
  		dirty = DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE);
  	else
  		dirty = (vm_dirty_ratio * available_memory) / 100;
  
  	if (dirty_background_bytes)
  		background = DIV_ROUND_UP(dirty_background_bytes, PAGE_SIZE);
  	else
  		background = (dirty_background_ratio * available_memory) / 100;
  
  	if (background >= dirty)
  		background = dirty / 2;
  	tsk = current;
  	if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk)) {
  		background += background / 4;
  		dirty += dirty / 4;
  	}
  	*pbackground = background;
  	*pdirty = dirty;
  	trace_global_dirty_state(background, dirty);
  }
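
  /*
   * Example (illustrative numbers): with 1GB of dirtyable memory (262144
   * 4KB pages) and the defaults vm_dirty_ratio = 20 and
   * dirty_background_ratio = 10, the above yields dirty = 262144 * 20 / 100
   * = 52428 pages (~200MB) and background = 26214 pages (~100MB).
   */
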
  /**
   * zone_dirtyable_memory - number of dirtyable pages in a zone
   * @zone: the zone
   *
   * Returns the zone's number of pages potentially available for dirty
   * page cache.  This is the base value for the per-zone dirty limits.
   */
  static unsigned long zone_dirtyable_memory(struct zone *zone)
  {
  	/*
  	 * The effective global number of dirtyable pages may exclude
  	 * highmem as a big-picture measure to keep the ratio between
  	 * dirty memory and lowmem reasonable.
  	 *
  	 * But this function is purely about the individual zone and a
  	 * highmem zone can hold its share of dirty pages, so we don't
  	 * care about vm_highmem_is_dirtyable here.
  	 */
  	return zone_page_state(zone, NR_FREE_PAGES) +
  	       zone_reclaimable_pages(zone) -
  	       zone->dirty_balance_reserve;
  }
  
  /**
   * zone_dirty_limit - maximum number of dirty pages allowed in a zone
   * @zone: the zone
   *
   * Returns the maximum number of dirty pages allowed in a zone, based
   * on the zone's dirtyable memory.
   */
  static unsigned long zone_dirty_limit(struct zone *zone)
  {
  	unsigned long zone_memory = zone_dirtyable_memory(zone);
  	struct task_struct *tsk = current;
  	unsigned long dirty;
  
  	if (vm_dirty_bytes)
  		dirty = DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE) *
  			zone_memory / global_dirtyable_memory();
  	else
  		dirty = vm_dirty_ratio * zone_memory / 100;
  
  	if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk))
  		dirty += dirty / 4;
  
  	return dirty;
  }
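
  /*
   * Example (illustrative): if a zone holds a quarter of the global
   * dirtyable memory and vm_dirty_ratio = 20, the zone's dirty limit is
   * 20% of its own dirtyable pages, i.e. a quarter of the global limit.
   */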
  
  /**
   * zone_dirty_ok - tells whether a zone is within its dirty limits
   * @zone: the zone to check
   *
   * Returns %true when the dirty pages in @zone are within the zone's
   * dirty limit, %false if the limit is exceeded.
   */
  bool zone_dirty_ok(struct zone *zone)
  {
  	unsigned long limit = zone_dirty_limit(zone);
  
  	return zone_page_state(zone, NR_FILE_DIRTY) +
  	       zone_page_state(zone, NR_UNSTABLE_NFS) +
  	       zone_page_state(zone, NR_WRITEBACK) <= limit;
  }
  int dirty_background_ratio_handler(struct ctl_table *table, int write,
  		void __user *buffer, size_t *lenp,
  		loff_t *ppos)
  {
  	int ret;
  	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
  	if (ret == 0 && write)
  		dirty_background_bytes = 0;
  	return ret;
  }
  
  int dirty_background_bytes_handler(struct ctl_table *table, int write,
  		void __user *buffer, size_t *lenp,
  		loff_t *ppos)
  {
  	int ret;
  	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
  	if (ret == 0 && write)
  		dirty_background_ratio = 0;
  	return ret;
  }
  int dirty_ratio_handler(struct ctl_table *table, int write,
  		void __user *buffer, size_t *lenp,
  		loff_t *ppos)
  {
  	int old_ratio = vm_dirty_ratio;
  	int ret;
  	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
  	if (ret == 0 && write && vm_dirty_ratio != old_ratio) {
  		writeback_set_ratelimit();
  		vm_dirty_bytes = 0;
  	}
  	return ret;
  }
  int dirty_bytes_handler(struct ctl_table *table, int write,
  		void __user *buffer, size_t *lenp,
  		loff_t *ppos)
  {
  	unsigned long old_bytes = vm_dirty_bytes;
  	int ret;
  	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
  	if (ret == 0 && write && vm_dirty_bytes != old_bytes) {
  		writeback_set_ratelimit();
  		vm_dirty_ratio = 0;
  	}
  	return ret;
  }
  static unsigned long wp_next_time(unsigned long cur_time)
  {
  	cur_time += VM_COMPLETIONS_PERIOD_LEN;
  	/* 0 has a special meaning... */
  	if (!cur_time)
  		return 1;
  	return cur_time;
  }
  /*
   * Increment the BDI's writeout completion count and the global writeout
   * completion count. Called from test_clear_page_writeback().
   */
  static inline void __bdi_writeout_inc(struct backing_dev_info *bdi)
  {
  	__inc_bdi_stat(bdi, BDI_WRITTEN);
  	__fprop_inc_percpu_max(&writeout_completions, &bdi->completions,
  			       bdi->max_prop_frac);
  	/* First event after period switching was turned off? */
  	if (unlikely(!writeout_period_time)) {
  		/*
  		 * We can race with other __bdi_writeout_inc calls here but
  		 * it does not cause any harm since the resulting time when
  		 * timer will fire and what is in writeout_period_time will be
  		 * roughly the same.
  		 */
  		writeout_period_time = wp_next_time(jiffies);
  		mod_timer(&writeout_period_timer, writeout_period_time);
  	}
  }
  void bdi_writeout_inc(struct backing_dev_info *bdi)
  {
  	unsigned long flags;
  
  	local_irq_save(flags);
  	__bdi_writeout_inc(bdi);
  	local_irq_restore(flags);
  }
  EXPORT_SYMBOL_GPL(bdi_writeout_inc);
  /*
   * Obtain an accurate fraction of the BDI's portion.
   */
  static void bdi_writeout_fraction(struct backing_dev_info *bdi,
  		long *numerator, long *denominator)
  {
  	fprop_fraction_percpu(&writeout_completions, &bdi->completions,
  				numerator, denominator);
  }
  /*
   * On an idle system, we can be called long after we were scheduled because
   * we use deferred timers, so account for any missed periods.
   */
  static void writeout_period(unsigned long t)
  {
  	int miss_periods = (jiffies - writeout_period_time) /
  						 VM_COMPLETIONS_PERIOD_LEN;
  
  	if (fprop_new_period(&writeout_completions, miss_periods + 1)) {
  		writeout_period_time = wp_next_time(writeout_period_time +
  				miss_periods * VM_COMPLETIONS_PERIOD_LEN);
  		mod_timer(&writeout_period_timer, writeout_period_time);
  	} else {
  		/*
  		 * Aging has zeroed all fractions. Stop wasting CPU on period
  		 * updates.
  		 */
  		writeout_period_time = 0;
  	}
  }
  
  /*
   * bdi_min_ratio keeps the sum of the minimum dirty shares of all
   * registered backing devices, which, for obvious reasons, cannot
   * exceed 100%.
   */
  static unsigned int bdi_min_ratio;
  
  int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio)
  {
  	int ret = 0;

  	spin_lock_bh(&bdi_lock);
  	if (min_ratio > bdi->max_ratio) {
  		ret = -EINVAL;
  	} else {
  		min_ratio -= bdi->min_ratio;
  		if (bdi_min_ratio + min_ratio < 100) {
  			bdi_min_ratio += min_ratio;
  			bdi->min_ratio += min_ratio;
  		} else {
  			ret = -EINVAL;
  		}
  	}
  	spin_unlock_bh(&bdi_lock);
  
  	return ret;
  }
  
  int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned max_ratio)
  {
  	int ret = 0;
  
  	if (max_ratio > 100)
  		return -EINVAL;
  	spin_lock_bh(&bdi_lock);
  	if (bdi->min_ratio > max_ratio) {
  		ret = -EINVAL;
  	} else {
  		bdi->max_ratio = max_ratio;
  		bdi->max_prop_frac = (FPROP_FRAC_BASE * max_ratio) / 100;
  	}
  	spin_unlock_bh(&bdi_lock);
  
  	return ret;
  }
  EXPORT_SYMBOL(bdi_set_max_ratio);

  static unsigned long dirty_freerun_ceiling(unsigned long thresh,
  					   unsigned long bg_thresh)
  {
  	return (thresh + bg_thresh) / 2;
  }
  static unsigned long hard_dirty_limit(unsigned long thresh)
  {
  	return max(thresh, global_dirty_limit);
  }
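
  /*
   * Example (illustrative numbers): with dirty_thresh worth 200MB of pages
   * and background_thresh worth 100MB, dirty_freerun_ceiling() is 150MB:
   * below it dirtiers run free, above it balance_dirty_pages() starts
   * pausing them, approaching a full stop at the 200MB hard limit.
   */
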
  /**
   * bdi_dirty_limit - @bdi's share of dirty throttling threshold
   * @bdi: the backing_dev_info to query
   * @dirty: global dirty limit in pages
   *
   * Returns @bdi's dirty limit in pages. The term "dirty" in the context of
   * dirty balancing includes all PG_dirty, PG_writeback and NFS unstable pages.
   *
   * Note that balance_dirty_pages() will only seriously take it as a hard limit
   * when sleeping max_pause per page is not enough to keep the dirty pages under
   * control. For example, when the device is completely stalled due to some error
   * conditions, or when there are 1000 dd tasks writing to a slow 10MB/s USB key.
   * In all other, more normal situations it acts more gently, throttling
   * the tasks harder (rather than blocking them completely) when the bdi
   * dirty pages go high.
   *
   * It allocates high/low dirty limits to fast/slow devices, in order to prevent
   * - starving fast devices
   * - piling up dirty pages (that will take long time to sync) on slow devices
   *
   * The bdi's share of the dirty limit adapts to its throughput and is
   * bounded by the bdi->min_ratio and/or bdi->max_ratio parameters, if set.
   */
  unsigned long bdi_dirty_limit(struct backing_dev_info *bdi, unsigned long dirty)
  {
  	u64 bdi_dirty;
  	long numerator, denominator;

  	/*
  	 * Calculate this BDI's share of the dirty ratio.
  	 */
  	bdi_writeout_fraction(bdi, &numerator, &denominator);

  	bdi_dirty = (dirty * (100 - bdi_min_ratio)) / 100;
  	bdi_dirty *= numerator;
  	do_div(bdi_dirty, denominator);

  	bdi_dirty += (dirty * bdi->min_ratio) / 100;
  	if (bdi_dirty > (dirty * bdi->max_ratio) / 100)
  		bdi_dirty = dirty * bdi->max_ratio / 100;
  
  	return bdi_dirty;
  }
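
  /*
   * Example (illustrative numbers): with a global limit of 100000 pages, no
   * min/max ratios configured, and a bdi that contributed 3/10 of the recent
   * writeout completions, the bdi's limit is 100000 * 3 / 10 = 30000 pages.
   */
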
  /*
   * Dirty position control.
   *
   * (o) global/bdi setpoints
   *
   * We want the dirty pages be balanced around the global/bdi setpoints.
   * When the number of dirty pages is higher/lower than the setpoint, the
   * dirty position control ratio (and hence task dirty ratelimit) will be
   * decreased/increased to bring the dirty pages back to the setpoint.
   *
   *     pos_ratio = 1 << RATELIMIT_CALC_SHIFT
   *
   *     if (dirty < setpoint) scale up   pos_ratio
   *     if (dirty > setpoint) scale down pos_ratio
   *
   *     if (bdi_dirty < bdi_setpoint) scale up   pos_ratio
   *     if (bdi_dirty > bdi_setpoint) scale down pos_ratio
   *
   *     task_ratelimit = dirty_ratelimit * pos_ratio >> RATELIMIT_CALC_SHIFT
   *
   * (o) global control line
   *
   *     ^ pos_ratio
   *     |
   *     |            |<===== global dirty control scope ======>|
   * 2.0 .............*
   *     |            .*
   *     |            . *
   *     |            .   *
   *     |            .     *
   *     |            .        *
   *     |            .            *
   * 1.0 ................................*
   *     |            .                  .     *
   *     |            .                  .          *
   *     |            .                  .              *
   *     |            .                  .                 *
   *     |            .                  .                    *
   *   0 +------------.------------------.----------------------*------------->
   *           freerun^          setpoint^                 limit^   dirty pages
   *
   * (o) bdi control line
   *
   *     ^ pos_ratio
   *     |
   *     |            *
   *     |              *
   *     |                *
   *     |                  *
   *     |                    * |<=========== span ============>|
   * 1.0 .......................*
   *     |                      . *
   *     |                      .   *
   *     |                      .     *
   *     |                      .       *
   *     |                      .         *
   *     |                      .           *
   *     |                      .             *
   *     |                      .               *
   *     |                      .                 *
   *     |                      .                   *
   *     |                      .                     *
   * 1/4 ...............................................* * * * * * * * * * * *
   *     |                      .                         .
   *     |                      .                           .
   *     |                      .                             .
   *   0 +----------------------.-------------------------------.------------->
   *                bdi_setpoint^                    x_intercept^
   *
   * The bdi control line won't drop below pos_ratio=1/4, so that bdi_dirty can
   * be smoothly throttled down to normal if it starts high in situations like
   * - start writing to a slow SD card and a fast disk at the same time. The SD
   *   card's bdi_dirty may rush to many times higher than bdi_setpoint.
   * - the bdi dirty thresh drops quickly due to change of JBOD workload
   */
  static unsigned long bdi_position_ratio(struct backing_dev_info *bdi,
  					unsigned long thresh,
  					unsigned long bg_thresh,
  					unsigned long dirty,
  					unsigned long bdi_thresh,
  					unsigned long bdi_dirty)
  {
  	unsigned long write_bw = bdi->avg_write_bandwidth;
  	unsigned long freerun = dirty_freerun_ceiling(thresh, bg_thresh);
  	unsigned long limit = hard_dirty_limit(thresh);
  	unsigned long x_intercept;
  	unsigned long setpoint;		/* dirty pages' target balance point */
  	unsigned long bdi_setpoint;
  	unsigned long span;
  	long long pos_ratio;		/* for scaling up/down the rate limit */
  	long x;
  
  	if (unlikely(dirty >= limit))
  		return 0;
  
  	/*
  	 * global setpoint
  	 *
  	 *                           setpoint - dirty 3
  	 *        f(dirty) := 1.0 + (----------------)
  	 *                           limit - setpoint
  	 *
  	 * it's a 3rd order polynomial subject to
  	 *
  	 * (1) f(freerun)  = 2.0 => ramp up dirty_ratelimit reasonably fast
  	 * (2) f(setpoint) = 1.0 => the balance point
  	 * (3) f(limit)    = 0   => the hard limit
  	 * (4) df/dx      <= 0	 => negative feedback control
  	 * (5) the closer to setpoint, the smaller |df/dx| (and the reverse)
  	 *     => fast response on large errors; small oscillation near setpoint
  	 */
  	setpoint = (freerun + limit) / 2;
  	x = div_s64((setpoint - dirty) << RATELIMIT_CALC_SHIFT,
  		    limit - setpoint + 1);
  	pos_ratio = x;
  	pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
  	pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
  	pos_ratio += 1 << RATELIMIT_CALC_SHIFT;
  
  	/*
  	 * We have computed basic pos_ratio above based on global situation. If
  	 * the bdi is over/under its share of dirty pages, we want to scale
  	 * pos_ratio further down/up. That is done by the following mechanism.
  	 */
  
  	/*
  	 * bdi setpoint
  	 *
  	 *        f(bdi_dirty) := 1.0 + k * (bdi_dirty - bdi_setpoint)
  	 *
  	 *                        x_intercept - bdi_dirty
  	 *                     := --------------------------
  	 *                        x_intercept - bdi_setpoint
  	 *
  	 * The main bdi control line is a linear function that subjects to
  	 *
  	 * (1) f(bdi_setpoint) = 1.0
  	 * (2) k = - 1 / (8 * write_bw)  (in single bdi case)
  	 *     or equally: x_intercept = bdi_setpoint + 8 * write_bw
  	 *
  	 * For single bdi case, the dirty pages are observed to fluctuate
  	 * regularly within range
  	 *        [bdi_setpoint - write_bw/2, bdi_setpoint + write_bw/2]
  	 * for various filesystems, where (2) can yield a reasonable 12.5%
  	 * fluctuation range for pos_ratio.
  	 *
  	 * For JBOD case, bdi_thresh (not bdi_dirty!) could fluctuate up to its
  	 * own size, so move the slope over accordingly and choose a slope that
  	 * yields 100% pos_ratio fluctuation on suddenly doubled bdi_thresh.
  	 */
  	if (unlikely(bdi_thresh > thresh))
  		bdi_thresh = thresh;
  	/*
  	 * It's very possible that bdi_thresh is close to 0 not because the
  	 * device is slow, but because it has remained inactive for a long time.
  	 * Grant such devices a reasonably good (hopefully IO-efficient)
  	 * threshold, so that occasional writes won't be blocked and active
  	 * writes can ramp up the threshold quickly.
  	 */
  	bdi_thresh = max(bdi_thresh, (limit - dirty) / 8);
  	/*
  	 * scale global setpoint to bdi's:
  	 *	bdi_setpoint = setpoint * bdi_thresh / thresh
  	 */
  	x = div_u64((u64)bdi_thresh << 16, thresh + 1);
  	bdi_setpoint = setpoint * (u64)x >> 16;
  	/*
  	 * Use span=(8*write_bw) in single bdi case as indicated by
  	 * (thresh - bdi_thresh ~= 0) and transit to bdi_thresh in JBOD case.
  	 *
  	 *        bdi_thresh                    thresh - bdi_thresh
  	 * span = ---------- * (8 * write_bw) + ------------------- * bdi_thresh
  	 *          thresh                            thresh
  	 */
  	span = (thresh - bdi_thresh + 8 * write_bw) * (u64)x >> 16;
  	x_intercept = bdi_setpoint + span;
  
  	if (bdi_dirty < x_intercept - span / 4) {
  		pos_ratio = div_u64(pos_ratio * (x_intercept - bdi_dirty),
  				    x_intercept - bdi_setpoint + 1);
  	} else
  		pos_ratio /= 4;
  	/*
  	 * bdi reserve area, a safeguard against dirty pool underrun and disk idle.
  	 * It may push the desired control point of global dirty pages higher
  	 * than setpoint.
  	 */
  	x_intercept = bdi_thresh / 2;
  	if (bdi_dirty < x_intercept) {
  		if (bdi_dirty > x_intercept / 8)
  			pos_ratio = div_u64(pos_ratio * x_intercept, bdi_dirty);
  		else
  			pos_ratio *= 8;
  	}
  	return pos_ratio;
  }
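
  /*
   * Spot checks of the global control line above (editorial, using the
   * 1024 == 1.0 fixed-point convention): at dirty == setpoint, x = 0 and
   * f = 1.0; at dirty == freerun, x = 1.0 and f = 2.0; at dirty == limit,
   * x = -1.0 and f = 0, i.e. dirtiers are fully stopped.
   */
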
  static void bdi_update_write_bandwidth(struct backing_dev_info *bdi,
  				       unsigned long elapsed,
  				       unsigned long written)
  {
  	const unsigned long period = roundup_pow_of_two(3 * HZ);
  	unsigned long avg = bdi->avg_write_bandwidth;
  	unsigned long old = bdi->write_bandwidth;
  	u64 bw;
  
  	/*
  	 * bw = written * HZ / elapsed
  	 *
  	 *                   bw * elapsed + write_bandwidth * (period - elapsed)
  	 * write_bandwidth = ---------------------------------------------------
  	 *                                          period
  	 */
  	bw = written - bdi->written_stamp;
  	bw *= HZ;
  	if (unlikely(elapsed > period)) {
  		do_div(bw, elapsed);
  		avg = bw;
  		goto out;
  	}
  	bw += (u64)bdi->write_bandwidth * (period - elapsed);
  	bw >>= ilog2(period);
  
  	/*
  	 * one more level of smoothing, for filtering out sudden spikes
  	 */
  	if (avg > old && old >= (unsigned long)bw)
  		avg -= (avg - old) >> 3;
  
  	if (avg < old && old <= (unsigned long)bw)
  		avg += (old - avg) >> 3;
  
  out:
  	bdi->write_bandwidth = bw;
  	bdi->avg_write_bandwidth = avg;
  }
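
  /*
   * Example (illustrative, assuming HZ == 1000, so that period ==
   * roundup_pow_of_two(3000) == 4096): if 5000 pages were written over
   * elapsed = 200 jiffies (25000 pages/s), the update computes
   * (5000 * HZ + old * (4096 - 200)) >> 12, i.e. the new sample is weighted
   * by elapsed/period, about 5% per 200ms interval.
   */
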
  /*
   * The global dirtyable memory and dirty threshold could be suddenly knocked
   * down by a large amount (e.g. on the startup of KVM in a swapless system).
   * This may throw the system into a deep dirty-exceeded state and throttle
   * heavy/light dirtiers alike. To retain good responsiveness, maintain
   * global_dirty_limit, which tracks slowly down toward the knocked-down
   * dirty threshold.
   */
  static void update_dirty_limit(unsigned long thresh, unsigned long dirty)
  {
  	unsigned long limit = global_dirty_limit;
  
  	/*
  	 * Follow up in one step.
  	 */
  	if (limit < thresh) {
  		limit = thresh;
  		goto update;
  	}
  
  	/*
  	 * Follow down slowly. Use the higher one as the target, because thresh
  	 * may drop below dirty. This is exactly the reason to introduce
  	 * global_dirty_limit which is guaranteed to lie above the dirty pages.
  	 */
  	thresh = max(thresh, dirty);
  	if (limit > thresh) {
  		limit -= (limit - thresh) >> 5;
  		goto update;
  	}
  	return;
  update:
  	global_dirty_limit = limit;
  }
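
  /*
   * Example (illustrative numbers): with global_dirty_limit = 100000 pages
   * and max(thresh, dirty) = 68000, one update lowers the limit by
   * (100000 - 68000) >> 5 = 1000 pages; repeated every BANDWIDTH_INTERVAL,
   * the limit decays toward the new threshold in ~3% steps of the gap
   * rather than in one jump.
   */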
  
  static void global_update_bandwidth(unsigned long thresh,
  				    unsigned long dirty,
  				    unsigned long now)
  {
  	static DEFINE_SPINLOCK(dirty_lock);
  	static unsigned long update_time;
  
  	/*
  	 * check locklessly first to optimize away locking for the most time
  	 */
  	if (time_before(now, update_time + BANDWIDTH_INTERVAL))
  		return;
  
  	spin_lock(&dirty_lock);
  	if (time_after_eq(now, update_time + BANDWIDTH_INTERVAL)) {
  		update_dirty_limit(thresh, dirty);
  		update_time = now;
  	}
  	spin_unlock(&dirty_lock);
  }
  /*
   * Maintain bdi->dirty_ratelimit, the base dirty throttle rate.
   *
   * Normal bdi tasks will be curbed at or below it in the long term.
   * Obviously it should be around (write_bw / N) when there are N dd tasks.
   */
  static void bdi_update_dirty_ratelimit(struct backing_dev_info *bdi,
  				       unsigned long thresh,
  				       unsigned long bg_thresh,
  				       unsigned long dirty,
  				       unsigned long bdi_thresh,
  				       unsigned long bdi_dirty,
  				       unsigned long dirtied,
  				       unsigned long elapsed)
  {
  	unsigned long freerun = dirty_freerun_ceiling(thresh, bg_thresh);
  	unsigned long limit = hard_dirty_limit(thresh);
  	unsigned long setpoint = (freerun + limit) / 2;
  	unsigned long write_bw = bdi->avg_write_bandwidth;
  	unsigned long dirty_ratelimit = bdi->dirty_ratelimit;
  	unsigned long dirty_rate;
  	unsigned long task_ratelimit;
  	unsigned long balanced_dirty_ratelimit;
  	unsigned long pos_ratio;
  	unsigned long step;
  	unsigned long x;
  
  	/*
  	 * The dirty rate will match the writeout rate in long term, except
  	 * when dirty pages are truncated by userspace or re-dirtied by FS.
  	 */
  	dirty_rate = (dirtied - bdi->dirtied_stamp) * HZ / elapsed;
  
  	pos_ratio = bdi_position_ratio(bdi, thresh, bg_thresh, dirty,
  				       bdi_thresh, bdi_dirty);
  	/*
  	 * task_ratelimit reflects each dd's dirty rate for the past 200ms.
  	 */
  	task_ratelimit = (u64)dirty_ratelimit *
  					pos_ratio >> RATELIMIT_CALC_SHIFT;
  	task_ratelimit++; /* it helps rampup dirty_ratelimit from tiny values */
  
  	/*
  	 * A linear estimation of the "balanced" throttle rate. The theory is,
  	 * if there are N dd tasks, each throttled at task_ratelimit, the bdi's
  	 * dirty_rate will be measured to be (N * task_ratelimit). So the below
  	 * formula will yield the balanced rate limit (write_bw / N).
  	 *
  	 * Note that the expanded form is not a pure rate feedback:
  	 *	rate_(i+1) = rate_(i) * (write_bw / dirty_rate)		     (1)
  	 * but also takes pos_ratio into account:
  	 *	rate_(i+1) = rate_(i) * (write_bw / dirty_rate) * pos_ratio  (2)
  	 *
  	 * (1) is not realistic because pos_ratio also takes part in balancing
  	 * the dirty rate.  Consider the state
  	 *	pos_ratio = 0.5						     (3)
  	 *	rate = 2 * (write_bw / N)				     (4)
  	 * If (1) is used, it will get stuck in that state! Because each dd will
  	 * be throttled at
  	 *	task_ratelimit = pos_ratio * rate = (write_bw / N)	     (5)
  	 * yielding
  	 *	dirty_rate = N * task_ratelimit = write_bw		     (6)
  	 * put (6) into (1) we get
  	 *	rate_(i+1) = rate_(i)					     (7)
  	 *
  	 * So we end up using (2) to always keep
  	 *	rate_(i+1) ~= (write_bw / N)				     (8)
  	 * regardless of the value of pos_ratio. As long as (8) is satisfied,
  	 * pos_ratio is able to drive itself to 1.0, which is not only where
  	 * the dirty count meets the setpoint, but also where the slope of
  	 * pos_ratio is most flat and hence task_ratelimit is least fluctuated.
  	 */
  	balanced_dirty_ratelimit = div_u64((u64)task_ratelimit * write_bw,
  					   dirty_rate | 1);
  	/*
  	 * balanced_dirty_ratelimit ~= (write_bw / N) <= write_bw
  	 */
  	if (unlikely(balanced_dirty_ratelimit > write_bw))
  		balanced_dirty_ratelimit = write_bw;

  	/*
  	 * We could safely do this and return immediately:
  	 *
  	 *	bdi->dirty_ratelimit = balanced_dirty_ratelimit;
  	 *
  	 * However, to get a more stable dirty_ratelimit, the elaborated
  	 * code below makes use of task_ratelimit to filter out singular points and
  	 * limit the step size.
  	 *
  	 * The below code essentially only uses the relative value of
  	 *
  	 *	task_ratelimit - dirty_ratelimit
  	 *	= (pos_ratio - 1) * dirty_ratelimit
  	 *
  	 * which reflects the direction and size of dirty position error.
  	 */
  
  	/*
  	 * dirty_ratelimit will follow balanced_dirty_ratelimit iff
  	 * task_ratelimit is on the same side of dirty_ratelimit, too.
  	 * For example, when
  	 * - dirty_ratelimit > balanced_dirty_ratelimit
  	 * - dirty_ratelimit > task_ratelimit (dirty pages are above setpoint)
  	 * lowering dirty_ratelimit will help meet both the position and rate
  	 * control targets. Otherwise, don't update dirty_ratelimit if it will
  	 * only help meet the rate target. After all, what the users ultimately
  	 * feel and care are stable dirty rate and small position error.
  	 *
  	 * |task_ratelimit - dirty_ratelimit| is used to limit the step size
  	 * and filter out the singular points of balanced_dirty_ratelimit, which
  	 * keeps jumping around randomly and can even leap far away at times
  	 * due to the small 200ms estimation period of dirty_rate (we want to
  	 * keep that period small to reduce time lags).
  	 */
  	step = 0;
  	if (dirty < setpoint) {
  		x = min(bdi->balanced_dirty_ratelimit,
  			 min(balanced_dirty_ratelimit, task_ratelimit));
  		if (dirty_ratelimit < x)
  			step = x - dirty_ratelimit;
  	} else {
  		x = max(bdi->balanced_dirty_ratelimit,
  			 max(balanced_dirty_ratelimit, task_ratelimit));
  		if (dirty_ratelimit > x)
  			step = dirty_ratelimit - x;
  	}
  
  	/*
  	 * Don't pursue 100% rate matching. It's impossible since the balanced
  	 * rate itself is constantly fluctuating. So decrease the track speed
  	 * when it gets close to the target. Helps eliminate pointless tremors.
  	 */
  	step >>= dirty_ratelimit / (2 * step + 1);
  	/*
  	 * Limit the tracking speed to avoid overshooting.
  	 */
  	step = (step + 7) / 8;
  
  	if (dirty_ratelimit < balanced_dirty_ratelimit)
  		dirty_ratelimit += step;
  	else
  		dirty_ratelimit -= step;
  
  	bdi->dirty_ratelimit = max(dirty_ratelimit, 1UL);
  	bdi->balanced_dirty_ratelimit = balanced_dirty_ratelimit;
  
  	trace_bdi_dirty_ratelimit(bdi, dirty_rate, task_ratelimit);
  }
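
  /*
   * Worked instance of the balanced-rate formula above (illustrative
   * numbers): with write_bw = 25000 pages/s and 10 dd tasks each throttled
   * at task_ratelimit = 5000 pages/s, the measured dirty_rate is about
   * 50000 pages/s, so balanced_dirty_ratelimit = 5000 * 25000 / 50000 =
   * 2500 pages/s, i.e. write_bw / N.
   */
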
  void __bdi_update_bandwidth(struct backing_dev_info *bdi,
  			    unsigned long thresh,
  			    unsigned long bg_thresh,
  			    unsigned long dirty,
  			    unsigned long bdi_thresh,
  			    unsigned long bdi_dirty,
  			    unsigned long start_time)
  {
  	unsigned long now = jiffies;
  	unsigned long elapsed = now - bdi->bw_time_stamp;
  	unsigned long dirtied;
  	unsigned long written;
  
  	/*
  	 * rate-limit, only update once every 200ms.
  	 */
  	if (elapsed < BANDWIDTH_INTERVAL)
  		return;
  	dirtied = percpu_counter_read(&bdi->bdi_stat[BDI_DIRTIED]);
  	written = percpu_counter_read(&bdi->bdi_stat[BDI_WRITTEN]);
  
  	/*
  	 * Skip quiet periods when disk bandwidth is under-utilized.
  	 * (at least 1s idle time between two flusher runs)
  	 */
  	if (elapsed > HZ && time_before(bdi->bw_time_stamp, start_time))
  		goto snapshot;
  	if (thresh) {
  		global_update_bandwidth(thresh, dirty, now);
  		bdi_update_dirty_ratelimit(bdi, thresh, bg_thresh, dirty,
  					   bdi_thresh, bdi_dirty,
  					   dirtied, elapsed);
  	}
  	bdi_update_write_bandwidth(bdi, elapsed, written);
  
  snapshot:
  	bdi->dirtied_stamp = dirtied;
  	bdi->written_stamp = written;
  	bdi->bw_time_stamp = now;
  }
  
  static void bdi_update_bandwidth(struct backing_dev_info *bdi,
  				 unsigned long thresh,
  				 unsigned long bg_thresh,
  				 unsigned long dirty,
  				 unsigned long bdi_thresh,
  				 unsigned long bdi_dirty,
  				 unsigned long start_time)
  {
  	if (time_is_after_eq_jiffies(bdi->bw_time_stamp + BANDWIDTH_INTERVAL))
  		return;
  	spin_lock(&bdi->wb.list_lock);
  	__bdi_update_bandwidth(bdi, thresh, bg_thresh, dirty,
  			       bdi_thresh, bdi_dirty, start_time);
  	spin_unlock(&bdi->wb.list_lock);
  }
  /*
   * After a task has dirtied this many pages, balance_dirty_pages_ratelimited_nr()
   * will look to see if it needs to start dirty throttling.
   *
   * If dirty_poll_interval is too low, big NUMA machines will call the expensive
   * global_page_state() too often. So scale it near-sqrt to the safety margin
   * (the number of pages we may dirty without exceeding the dirty limits).
   */
  static unsigned long dirty_poll_interval(unsigned long dirty,
  					 unsigned long thresh)
  {
  	if (thresh > dirty)
  		return 1UL << (ilog2(thresh - dirty) >> 1);
  
  	return 1;
  }
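
  /*
   * Example (illustrative): with thresh - dirty = 1024 pages of headroom,
   * ilog2(1024) = 10 and the poll interval is 1 << 5 = 32 pages; at 4096
   * pages of headroom it grows to 1 << 6 = 64 pages.
   */
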
  static long bdi_max_pause(struct backing_dev_info *bdi,
  			  unsigned long bdi_dirty)
  {
  	long bw = bdi->avg_write_bandwidth;
  	long t;

  	/*
  	 * Limit pause time for small memory systems. If we sleep for too long,
  	 * a small pool of dirty/writeback pages may go empty and the disk may
  	 * go idle.
  	 *
  	 * 8 serves as the safety ratio.
  	 */
  	t = bdi_dirty / (1 + bw / roundup_pow_of_two(1 + HZ / 8));
  	t++;
  
  	return min_t(long, t, MAX_PAUSE);
  }
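
  /*
   * Example (illustrative, assuming HZ == 1000): with avg_write_bandwidth =
   * 25600 pages/s, roundup_pow_of_two(1 + HZ / 8) = 128 and bw / 128 = 200,
   * so bdi_dirty = 3200 pages gives t = 3200 / 201 + 1 = 16 jiffies, well
   * under MAX_PAUSE = 200.
   */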
  
  static long bdi_min_pause(struct backing_dev_info *bdi,
  			  long max_pause,
  			  unsigned long task_ratelimit,
  			  unsigned long dirty_ratelimit,
  			  int *nr_dirtied_pause)
  {
  	long hi = ilog2(bdi->avg_write_bandwidth);
  	long lo = ilog2(bdi->dirty_ratelimit);
  	long t;		/* target pause */
  	long pause;	/* estimated next pause */
  	int pages;	/* target nr_dirtied_pause */

  	/* target for 10ms pause on 1-dd case */
  	t = max(1, HZ / 100);
  
  	/*
  	 * Scale up pause time for concurrent dirtiers in order to reduce CPU
  	 * overheads.
  	 *
  	 * (N * 10ms) on 2^N concurrent tasks.
  	 */
  	if (hi > lo)
  		t += (hi - lo) * (10 * HZ) / 1024;
  
  	/*
  	 * This is a bit convoluted. We try to base the next nr_dirtied_pause
  	 * on the much more stable dirty_ratelimit. However the next pause time
  	 * will be computed based on task_ratelimit, and the two rate limits may
  	 * diverge considerably at times. Especially if task_ratelimit goes
  	 * below dirty_ratelimit/2 and the target pause is max_pause, the next
  	 * pause time will be max_pause*2 _trimmed down_ to max_pause.  As a
  	 * result task_ratelimit won't be executed faithfully, which could
  	 * eventually bring down dirty_ratelimit.
  	 *
  	 * We apply two rules to fix it up:
  	 * 1) try to estimate the next pause time and if necessary, use a lower
  	 *    nr_dirtied_pause so as not to exceed max_pause. When this happens,
  	 *    nr_dirtied_pause will be "dancing" with task_ratelimit.
  	 * 2) limit the target pause time to max_pause/2, so that the normal
  	 *    small fluctuations of task_ratelimit won't trigger rule (1) and
  	 *    nr_dirtied_pause will remain as stable as dirty_ratelimit.
  	 */
  	t = min(t, 1 + max_pause / 2);
  	pages = dirty_ratelimit * t / roundup_pow_of_two(HZ);
  
  	/*
  	 * Tiny nr_dirtied_pause is found to hurt I/O performance in the test
  	 * case fio-mmap-randwrite-64k, which does 16*{sync read, async write}.
  	 * When the 16 consecutive reads are often interrupted by some dirty
  	 * throttling pause during the async writes, cfq will go into idle
  	 * (deadline is fine). So push nr_dirtied_pause as high as possible
  	 * until it reaches DIRTY_POLL_THRESH=32 pages.
  	 */
  	if (pages < DIRTY_POLL_THRESH) {
  		t = max_pause;
  		pages = dirty_ratelimit * t / roundup_pow_of_two(HZ);
  		if (pages > DIRTY_POLL_THRESH) {
  			pages = DIRTY_POLL_THRESH;
  			t = HZ * DIRTY_POLL_THRESH / dirty_ratelimit;
  		}
  	}
  	pause = HZ * pages / (task_ratelimit + 1);
  	if (pause > max_pause) {
  		t = max_pause;
  		pages = task_ratelimit * t / roundup_pow_of_two(HZ);
  	}

  	*nr_dirtied_pause = pages;
  	/*
  	 * The minimal pause time will normally be half the target pause time.
  	 */
  	return pages >= DIRTY_POLL_THRESH ? 1 + t / 2 : t;
  }
  /*
   * balance_dirty_pages() must be called by processes which are generating dirty
   * data.  It looks at the number of dirty pages in the machine and will force
   * the caller to wait once it crosses (background_thresh + dirty_thresh) / 2.
   * If we're over `background_thresh' then the writeback threads are woken to
   * perform some writeout.
   */
  static void balance_dirty_pages(struct address_space *mapping,
  				unsigned long pages_dirtied)
  {
  	unsigned long nr_reclaimable;	/* = file_dirty + unstable_nfs */
  	unsigned long bdi_reclaimable;
  	unsigned long nr_dirty;  /* = file_dirty + writeback + unstable_nfs */
  	unsigned long bdi_dirty;
  	unsigned long freerun;
  	unsigned long background_thresh;
  	unsigned long dirty_thresh;
  	unsigned long bdi_thresh;
  	long period;
  	long pause;
  	long max_pause;
  	long min_pause;
  	int nr_dirtied_pause;
  	bool dirty_exceeded = false;
  	unsigned long task_ratelimit;
  	unsigned long dirty_ratelimit;
  	unsigned long pos_ratio;
  	struct backing_dev_info *bdi = mapping->backing_dev_info;
  	unsigned long start_time = jiffies;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1175
1176
  
  	for (;;) {
		unsigned long now = jiffies;

		/*
		 * Unstable writes are a feature of certain networked
		 * filesystems (i.e. NFS) in which data may have been
		 * written to the server's write cache, but has not yet
		 * been flushed to permanent storage.
		 */
		nr_reclaimable = global_page_state(NR_FILE_DIRTY) +
					global_page_state(NR_UNSTABLE_NFS);
		nr_dirty = nr_reclaimable + global_page_state(NR_WRITEBACK);

		global_dirty_limits(&background_thresh, &dirty_thresh);

		/*
		 * Throttle it only when the background writeback cannot
		 * catch up. This avoids (excessively) small writeouts
		 * when the bdi limits are ramping up.
		 */
		freerun = dirty_freerun_ceiling(dirty_thresh,
						background_thresh);
		if (nr_dirty <= freerun) {
			current->dirty_paused_when = now;
			current->nr_dirtied = 0;
			current->nr_dirtied_pause =
				dirty_poll_interval(nr_dirty, dirty_thresh);
			break;
		}
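		/*
		 * Illustrative example (hypothetical ratios): with
		 * dirty_thresh at 20% and background_thresh at 10% of
		 * dirtyable memory, dirty_freerun_ceiling() returns their
		 * midpoint, 15%. Below that the task runs completely free
		 * of throttling and only recomputes its dirty poll interval.
		 */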

		if (unlikely(!writeback_in_progress(bdi)))
			bdi_start_background_writeback(bdi);

		/*
		 * bdi_thresh is not treated as a hard limiting factor the way
		 * dirty_thresh is, for two reasons:
		 * - in JBOD setup, bdi_thresh can fluctuate a lot
		 * - in a system with HDD and USB key, the USB key may somehow
		 *   go into state (bdi_dirty >> bdi_thresh) either because
		 *   bdi_dirty starts high, or because bdi_thresh drops low.
		 *   In this case we don't want to hard throttle the USB key
		 *   dirtiers for 100 seconds until bdi_dirty drops under
		 *   bdi_thresh. Instead the auxiliary bdi control line in
		 *   bdi_position_ratio() will let the dirtier task progress
		 *   at some rate <= (write_bw / 2) for bringing down bdi_dirty.
		 */
		bdi_thresh = bdi_dirty_limit(bdi, dirty_thresh);

		/*
		 * In order to avoid the stacked BDI deadlock we need
		 * to ensure we accurately count the 'dirty' pages when
		 * the threshold is low.
		 *
		 * Otherwise it would be possible to get thresh+n pages
		 * reported dirty, even though there are thresh-m pages
		 * actually dirty; with m+n sitting in the percpu
		 * deltas.
		 */
		if (bdi_thresh < 2 * bdi_stat_error(bdi)) {
			bdi_reclaimable = bdi_stat_sum(bdi, BDI_RECLAIMABLE);
			bdi_dirty = bdi_reclaimable +
				    bdi_stat_sum(bdi, BDI_WRITEBACK);
		} else {
			bdi_reclaimable = bdi_stat(bdi, BDI_RECLAIMABLE);
			bdi_dirty = bdi_reclaimable +
				    bdi_stat(bdi, BDI_WRITEBACK);
		}

		dirty_exceeded = (bdi_dirty > bdi_thresh) &&
				  (nr_dirty > dirty_thresh);
		if (dirty_exceeded && !bdi->dirty_exceeded)
			bdi->dirty_exceeded = 1;

		bdi_update_bandwidth(bdi, dirty_thresh, background_thresh,
				     nr_dirty, bdi_thresh, bdi_dirty,
				     start_time);

		dirty_ratelimit = bdi->dirty_ratelimit;
		pos_ratio = bdi_position_ratio(bdi, dirty_thresh,
					       background_thresh, nr_dirty,
					       bdi_thresh, bdi_dirty);
		task_ratelimit = ((u64)dirty_ratelimit * pos_ratio) >>
							RATELIMIT_CALC_SHIFT;
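		/*
		 * Worked example of the fixed-point math above (hypothetical
		 * numbers): with RATELIMIT_CALC_SHIFT=10, pos_ratio is scaled
		 * so that 1024 means 1.0. If dirty_ratelimit is 512 pages/s
		 * and pos_ratio is 512 (i.e. 0.5), then task_ratelimit =
		 * (512 * 512) >> 10 = 256 pages/s.
		 */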
		max_pause = bdi_max_pause(bdi, bdi_dirty);
		min_pause = bdi_min_pause(bdi, max_pause,
					  task_ratelimit, dirty_ratelimit,
					  &nr_dirtied_pause);

		if (unlikely(task_ratelimit == 0)) {
			period = max_pause;
			pause = max_pause;
			goto pause;
		}
		period = HZ * pages_dirtied / task_ratelimit;
		pause = period;
		if (current->dirty_paused_when)
			pause -= now - current->dirty_paused_when;
		/*
		 * For less than 1s think time (ext3/4 may block the dirtier
		 * for up to 800ms from time to time on 1-HDD; so does xfs,
		 * though much less frequently), try to compensate for it in
		 * future periods by advancing the virtual time; otherwise
		 * just do a reset, as it may be a light dirtier.
		 */
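		/*
		 * Example (hypothetical numbers): a task that dirtied 32
		 * pages at task_ratelimit=64 pages/s gets period = 500ms.
		 * If 400ms of "think time" already elapsed since its last
		 * pause, only the remaining 100ms needs to be slept.
		 */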
		if (pause < min_pause) {
			trace_balance_dirty_pages(bdi,
						  dirty_thresh,
						  background_thresh,
						  nr_dirty,
						  bdi_thresh,
						  bdi_dirty,
						  dirty_ratelimit,
						  task_ratelimit,
						  pages_dirtied,
						  period,
						  min(pause, 0L),
						  start_time);
			if (pause < -HZ) {
				current->dirty_paused_when = now;
				current->nr_dirtied = 0;
			} else if (period) {
				current->dirty_paused_when += period;
				current->nr_dirtied = 0;
			} else if (current->nr_dirtied_pause <= pages_dirtied)
				current->nr_dirtied_pause += pages_dirtied;
			break;
		}
		if (unlikely(pause > max_pause)) {
			/* for occasional dropped task_ratelimit */
			now += min(pause - max_pause, max_pause);
			pause = max_pause;
		}

pause:
		trace_balance_dirty_pages(bdi,
					  dirty_thresh,
					  background_thresh,
					  nr_dirty,
					  bdi_thresh,
					  bdi_dirty,
					  dirty_ratelimit,
					  task_ratelimit,
					  pages_dirtied,
					  period,
					  pause,
					  start_time);
		__set_current_state(TASK_KILLABLE);
		io_schedule_timeout(pause);

		current->dirty_paused_when = now + pause;
		current->nr_dirtied = 0;
		current->nr_dirtied_pause = nr_dirtied_pause;

		/*
		 * This is typically equal to (nr_dirty < dirty_thresh) and can
		 * also keep "1000+ dd on a slow USB stick" under control.
		 */
		if (task_ratelimit)
			break;

		/*
		 * In the case of an unresponsive NFS server whose dirty
		 * pages exceed dirty_thresh, give the other good bdi's a
		 * pipe to go through, so that tasks on them still remain
		 * responsive.
		 *
		 * In theory 1 page is enough to keep the consumer-producer
		 * pipe going: the flusher cleans 1 page => the task dirties 1
		 * more page. However bdi_dirty has accounting errors.  So use
		 * the larger and more IO friendly bdi_stat_error.
		 */
		if (bdi_dirty <= bdi_stat_error(bdi))
			break;

		if (fatal_signal_pending(current))
			break;
	}
	if (!dirty_exceeded && bdi->dirty_exceeded)
		bdi->dirty_exceeded = 0;

	if (writeback_in_progress(bdi))
		return;

	/*
	 * In laptop mode, we wait until hitting the higher threshold before
	 * starting background writeout, and then write out all the way down
	 * to the lower threshold.  So slow writers cause minimal disk activity.
	 *
	 * In normal mode, we start background writeout at the lower
	 * background_thresh, to keep the amount of dirty memory low.
	 */
	if (laptop_mode)
		return;

	if (nr_reclaimable > background_thresh)
		bdi_start_background_writeback(bdi);
}

void set_page_dirty_balance(struct page *page, int page_mkwrite)
{
	if (set_page_dirty(page) || page_mkwrite) {
		struct address_space *mapping = page_mapping(page);

		if (mapping)
			balance_dirty_pages_ratelimited(mapping);
	}
}

static DEFINE_PER_CPU(int, bdp_ratelimits);

/*
 * Normal tasks are throttled by
 *	loop {
 *		dirty tsk->nr_dirtied_pause pages;
 *		take a snap in balance_dirty_pages();
 *	}
 * However there is a worst case. If every task exits immediately after
 * dirtying (tsk->nr_dirtied_pause - 1) pages, balance_dirty_pages() will
 * never be called to throttle the page dirties. The solution is to save the
 * not yet throttled page dirties in dirty_throttle_leaks on task exit and
 * charge them randomly into the running tasks. This works well for the above
 * worst case, as the new task will pick up and accumulate the old task's
 * leaked dirty count and eventually get throttled.
 */
DEFINE_PER_CPU(int, dirty_throttle_leaks) = 0;
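/*
 * Illustrative scenario (hypothetical numbers): with nr_dirtied_pause = 96,
 * a short-lived gcc invocation that dirties 50 pages and exits leaks those
 * 50 pages into this per-CPU counter; the next dirtier on the same CPU picks
 * them up in balance_dirty_pages_ratelimited_nr() below and is throttled
 * correspondingly sooner.
 */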

/**
 * balance_dirty_pages_ratelimited_nr - balance dirty memory state
 * @mapping: address_space which was dirtied
 * @nr_pages_dirtied: number of pages which the caller has just dirtied
 *
 * Processes which are dirtying memory should call in here once for each page
 * which was newly dirtied.  The function will periodically check the system's
 * dirty state and will initiate writeback if needed.
 *
 * On really big machines, get_writeback_state is expensive, so try to avoid
 * calling it too often (ratelimiting).  But once we're over the dirty memory
 * limit we decrease the ratelimiting by a lot, to prevent individual processes
 * from overshooting the limit by (ratelimit_pages) each.
 */
void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
					unsigned long nr_pages_dirtied)
{
	struct backing_dev_info *bdi = mapping->backing_dev_info;
	int ratelimit;
	int *p;

	if (!bdi_cap_account_dirty(bdi))
		return;

	ratelimit = current->nr_dirtied_pause;
	if (bdi->dirty_exceeded)
		ratelimit = min(ratelimit, 32 >> (PAGE_SHIFT - 10));
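	/*
	 * Note: 32 >> (PAGE_SHIFT - 10) is 32KB worth of pages, e.g. 8 pages
	 * with 4KB pages (PAGE_SHIFT=12), so a task on a bdi over its dirty
	 * limit re-enters balance_dirty_pages() roughly every 8 dirtied
	 * pages.
	 */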

	preempt_disable();
	/*
	 * This prevents one CPU from accumulating too many dirtied pages
	 * without calling into balance_dirty_pages(), which can happen when
	 * 1000+ tasks all start dirtying pages at exactly the same time and
	 * hence all honour a too-large initial task->nr_dirtied_pause.
	 */
	p = &__get_cpu_var(bdp_ratelimits);
	if (unlikely(current->nr_dirtied >= ratelimit))
		*p = 0;
	else if (unlikely(*p >= ratelimit_pages)) {
		*p = 0;
		ratelimit = 0;
	}
	/*
	 * Pick up the dirtied pages left behind by exited tasks. This avoids
	 * lots of short-lived tasks (e.g. gcc invocations in a kernel build)
	 * escaping the dirty throttling and livelocking other long-running
	 * dirtiers.
	 */
	p = &__get_cpu_var(dirty_throttle_leaks);
	if (*p > 0 && current->nr_dirtied < ratelimit) {
		nr_pages_dirtied = min(*p, ratelimit - current->nr_dirtied);
		*p -= nr_pages_dirtied;
		current->nr_dirtied += nr_pages_dirtied;
	}
	preempt_enable();

	if (unlikely(current->nr_dirtied >= ratelimit))
		balance_dirty_pages(mapping, current->nr_dirtied);
}
EXPORT_SYMBOL(balance_dirty_pages_ratelimited_nr);

void throttle_vm_writeout(gfp_t gfp_mask)
{
	unsigned long background_thresh;
	unsigned long dirty_thresh;

	for ( ; ; ) {
		global_dirty_limits(&background_thresh, &dirty_thresh);
		dirty_thresh = hard_dirty_limit(dirty_thresh);

		/*
		 * Boost the allowable dirty threshold a bit for page
		 * allocators so they don't get DoS'ed by heavy writers
		 */
		dirty_thresh += dirty_thresh / 10;      /* wheeee... */
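		/* e.g. a dirty_thresh of 1000 pages becomes 1100 here */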

		if (global_page_state(NR_UNSTABLE_NFS) +
		    global_page_state(NR_WRITEBACK) <= dirty_thresh)
			break;
		congestion_wait(BLK_RW_ASYNC, HZ/10);

		/*
		 * The caller might hold locks which can prevent IO completion
		 * or progress in the filesystem.  So we cannot just sit here
		 * waiting for IO to complete.
		 */
		if ((gfp_mask & (__GFP_FS|__GFP_IO)) != (__GFP_FS|__GFP_IO))
			break;
	}
}

/*
 * sysctl handler for /proc/sys/vm/dirty_writeback_centisecs
 */
int dirty_writeback_centisecs_handler(ctl_table *table, int write,
	void __user *buffer, size_t *length, loff_t *ppos)
{
	proc_dointvec(table, write, buffer, length, ppos);
	return 0;
}

#ifdef CONFIG_BLOCK
void laptop_mode_timer_fn(unsigned long data)
{
	struct request_queue *q = (struct request_queue *)data;
	int nr_pages = global_page_state(NR_FILE_DIRTY) +
		global_page_state(NR_UNSTABLE_NFS);

	/*
	 * We want to write everything out, not just down to the dirty
	 * threshold
	 */
	if (bdi_has_dirty_io(&q->backing_dev_info))
		bdi_start_writeback(&q->backing_dev_info, nr_pages,
					WB_REASON_LAPTOP_TIMER);
  }
  
  /*
   * We've spun up the disk and we're in laptop mode: schedule writeback
   * of all dirty data a few seconds from now.  If the flush is already scheduled
   * then push it back - the user is still using the disk.
   */
void laptop_io_completion(struct backing_dev_info *info)
{
	mod_timer(&info->laptop_mode_wb_timer, jiffies + laptop_mode);
  }
  
  /*
   * We're in laptop mode and we've just synced. The sync's writes will have
   * caused another writeback to be scheduled by laptop_io_completion.
   * Nothing needs to be written back anymore, so we unschedule the writeback.
   */
  void laptop_sync_completion(void)
  {
	struct backing_dev_info *bdi;

	rcu_read_lock();

	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list)
		del_timer(&bdi->laptop_mode_wb_timer);

	rcu_read_unlock();
  }
  #endif

/*
 * If ratelimit_pages is too high then we can get into dirty-data overload
 * if a large number of processes all perform writes at the same time.
 * If it is too low then SMP machines will call the (expensive)
 * get_writeback_state too often.
 *
 * Here we set ratelimit_pages to a level which ensures that when all CPUs are
 * dirtying in parallel, we cannot go more than 3% (1/32) over the dirty memory
 * thresholds.
 */
void writeback_set_ratelimit(void)
{
	unsigned long background_thresh;
	unsigned long dirty_thresh;

	global_dirty_limits(&background_thresh, &dirty_thresh);
	global_dirty_limit = dirty_thresh;
	ratelimit_pages = dirty_thresh / (num_online_cpus() * 32);
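	/*
	 * Worked example (hypothetical numbers): dirty_thresh = 51200 pages
	 * (200MB with 4KB pages) on an 8-CPU box gives ratelimit_pages =
	 * 51200 / (8 * 32) = 200, so each CPU checks in at least every 200
	 * dirtied pages; the worst-case overshoot with all CPUs dirtying at
	 * once is 8 * 200 = 1600 pages, about 3% of dirty_thresh.
	 */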
	if (ratelimit_pages < 16)
		ratelimit_pages = 16;
  }

static int __cpuinit
ratelimit_handler(struct notifier_block *self, unsigned long u, void *v)
{
	writeback_set_ratelimit();
	return NOTIFY_DONE;
}

static struct notifier_block __cpuinitdata ratelimit_nb = {
	.notifier_call	= ratelimit_handler,
	.next		= NULL,
};
  
  /*
 * Called early on to tune the page writeback dirty limits.
 *
 * We used to scale dirty pages according to how total memory
 * related to pages that could be allocated for buffers (by
 * comparing nr_free_buffer_pages() to vm_total_pages).
 *
 * However, that was when we used "dirty_ratio" to scale with
 * all memory, and we don't do that any more. "dirty_ratio"
 * is now applied to total non-HIGHMEM memory (by subtracting
 * totalhigh_pages from vm_total_pages), and as such we can't
 * get into the old insane situation any more where we had
 * large amounts of dirty pages compared to a small amount of
 * non-HIGHMEM memory.
 *
 * But we might still want to scale the dirty_ratio by how
 * much memory the box has..
 */
  void __init page_writeback_init(void)
  {
	writeback_set_ratelimit();
	register_cpu_notifier(&ratelimit_nb);

	fprop_global_init(&writeout_completions);
  }

/**
   * tag_pages_for_writeback - tag pages to be written by write_cache_pages
   * @mapping: address space structure to write
   * @start: starting page index
   * @end: ending page index (inclusive)
   *
   * This function scans the page range from @start to @end (inclusive) and tags
   * all pages that have DIRTY tag set with a special TOWRITE tag. The idea is
   * that write_cache_pages (or whoever calls this function) will then use
   * TOWRITE tag to identify pages eligible for writeback.  This mechanism is
   * used to avoid livelocking of writeback by a process steadily creating new
   * dirty pages in the file (thus it is important for this function to be quick
   * so that it can tag pages faster than a dirtying process can create them).
   */
  /*
   * We tag pages in batches of WRITEBACK_TAG_BATCH to reduce tree_lock latency.
   */
  void tag_pages_for_writeback(struct address_space *mapping,
  			     pgoff_t start, pgoff_t end)
  {
  #define WRITEBACK_TAG_BATCH 4096
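	/*
	 * Example (hypothetical count): tagging 10000 dirty pages proceeds
	 * in three rounds of 4096, 4096 and 1808 pages, dropping tree_lock
	 * and calling cond_resched() between rounds to bound lock hold time.
	 */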
  	unsigned long tagged;
  
  	do {
  		spin_lock_irq(&mapping->tree_lock);
  		tagged = radix_tree_range_tag_if_tagged(&mapping->page_tree,
  				&start, end, WRITEBACK_TAG_BATCH,
  				PAGECACHE_TAG_DIRTY, PAGECACHE_TAG_TOWRITE);
  		spin_unlock_irq(&mapping->tree_lock);
  		WARN_ON_ONCE(tagged > WRITEBACK_TAG_BATCH);
  		cond_resched();
  		/* We check 'start' to handle wrapping when end == ~0UL */
  	} while (tagged >= WRITEBACK_TAG_BATCH && start);
  }
  EXPORT_SYMBOL(tag_pages_for_writeback);
  
  /**
 * write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
 * @mapping: address space structure to write
 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
 * @writepage: function called for each page
 * @data: data passed to writepage function
 *
 * If a page is already under I/O, write_cache_pages() skips it, even
 * if it's dirty.  This is desirable behaviour for memory-cleaning writeback,
 * but it is INCORRECT for data-integrity system calls such as fsync().  fsync()
 * and msync() need to guarantee that all the data which was dirty at the time
 * the call was made gets new I/O started against it.  If wbc->sync_mode is
 * WB_SYNC_ALL then we were called for data integrity and we must wait for
 * existing IO to complete.
 *
 * To avoid livelocks (when another process dirties new pages), we first tag
 * pages which should be written back with TOWRITE tag and only then start
 * writing them. For data-integrity sync we have to be careful so that we do
 * not miss some pages (e.g., because some other process has cleared TOWRITE
 * tag we set). The rule we follow is that TOWRITE tag can be cleared only
 * by the process clearing the DIRTY tag (and submitting the page for IO).
 */
int write_cache_pages(struct address_space *mapping,
		      struct writeback_control *wbc, writepage_t writepage,
		      void *data)
{
	int ret = 0;
	int done = 0;
	struct pagevec pvec;
	int nr_pages;
	pgoff_t uninitialized_var(writeback_index);
	pgoff_t index;
	pgoff_t end;		/* Inclusive */
	pgoff_t done_index;
	int cycled;
	int range_whole = 0;
	int tag;

	pagevec_init(&pvec, 0);
	if (wbc->range_cyclic) {
		writeback_index = mapping->writeback_index; /* prev offset */
		index = writeback_index;
		if (index == 0)
			cycled = 1;
		else
			cycled = 0;
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_CACHE_SHIFT;
		end = wbc->range_end >> PAGE_CACHE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		cycled = 1; /* ignore range_cyclic tests */
	}
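	/*
	 * Example: with 4KB pages (PAGE_CACHE_SHIFT=12), range_start=4096
	 * and range_end=16383 select page indices 1..3; range_start=0 with
	 * range_end=LLONG_MAX marks the whole file (range_whole = 1).
	 */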
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag = PAGECACHE_TAG_TOWRITE;
	else
		tag = PAGECACHE_TAG_DIRTY;
retry:
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag_pages_for_writeback(mapping, index, end);
	done_index = index;
	while (!done && (index <= end)) {
		int i;

		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
			      min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/*
			 * At this point, the page may be truncated or
			 * invalidated (changing page->mapping to NULL), or
			 * even swizzled back from swapper_space to tmpfs file
			 * mapping. However, page->index will not change
			 * because we have a reference on the page.
			 */
			if (page->index > end) {
				/*
				 * can't be range_cyclic (1st pass) because
				 * end == -1 in that case.
				 */
				done = 1;
				break;
			}

			done_index = page->index;

			lock_page(page);

			/*
			 * Page truncated or invalidated. We can freely skip it
			 * then, even for data integrity operations: the page
			 * has disappeared concurrently, so there could be no
			 * real expectation of this data integrity operation
			 * even if there is now a new, dirty page at the same
			 * pagecache address.
			 */
			if (unlikely(page->mapping != mapping)) {
continue_unlock:
				unlock_page(page);
				continue;
			}

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			if (PageWriteback(page)) {
				if (wbc->sync_mode != WB_SYNC_NONE)
					wait_on_page_writeback(page);
				else
					goto continue_unlock;
			}

			BUG_ON(PageWriteback(page));
			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			trace_wbc_writepage(wbc, mapping->backing_dev_info);
			ret = (*writepage)(page, wbc, data);
			if (unlikely(ret)) {
				if (ret == AOP_WRITEPAGE_ACTIVATE) {
					unlock_page(page);
					ret = 0;
				} else {
					/*
					 * done_index is set past this page,
					 * so media errors will not choke
					 * background writeout for the entire
					 * file. This has consequences for
					 * range_cyclic semantics (ie. it may
					 * not be suitable for data integrity
					 * writeout).
					 */
					done_index = page->index + 1;
					done = 1;
					break;
				}
			}

  			/*
  			 * We stop writing back only if we are not doing
  			 * integrity sync. In case of integrity sync we have to
  			 * keep going until we have written all the pages
  			 * we tagged for writeback prior to entering this loop.
  			 */
  			if (--wbc->nr_to_write <= 0 &&
  			    wbc->sync_mode == WB_SYNC_NONE) {
  				done = 1;
  				break;
  			}
  		}
  		pagevec_release(&pvec);
  		cond_resched();
  	}
  	if (!cycled && !done) {
		/*
		 * range_cyclic:
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		cycled = 1;
		index = 0;
		end = writeback_index - 1;
		goto retry;
	}
  	}
	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = done_index;

	return ret;
}
  EXPORT_SYMBOL(write_cache_pages);
  
  /*
   * Function used by generic_writepages to call the real writepage
   * function and set the mapping flags on error
   */
  static int __writepage(struct page *page, struct writeback_control *wbc,
  		       void *data)
  {
  	struct address_space *mapping = data;
  	int ret = mapping->a_ops->writepage(page, wbc);
  	mapping_set_error(mapping, ret);
  	return ret;
  }
  
  /**
   * generic_writepages - walk the list of dirty pages of the given address space and writepage() all of them.
   * @mapping: address space structure to write
   * @wbc: subtract the number of written pages from *@wbc->nr_to_write
   *
   * This is a library function, which implements the writepages()
   * address_space_operation.
   */
  int generic_writepages(struct address_space *mapping,
  		       struct writeback_control *wbc)
  {
	struct blk_plug plug;
	int ret;

	/* deal with chardevs and other special files */
	if (!mapping->a_ops->writepage)
		return 0;

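	/*
	 * Plugging lets the block layer hold back and merge the writepage
	 * submissions queued below; everything submitted between
	 * blk_start_plug() and blk_finish_plug() is flushed to the device
	 * in one batch, giving larger, better-merged requests.
	 */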
	blk_start_plug(&plug);
	ret = write_cache_pages(mapping, wbc, __writepage, mapping);
	blk_finish_plug(&plug);
	return ret;
}

EXPORT_SYMBOL(generic_writepages);

  int do_writepages(struct address_space *mapping, struct writeback_control *wbc)
  {
	int ret;

	if (wbc->nr_to_write <= 0)
		return 0;
	if (mapping->a_ops->writepages)
		ret = mapping->a_ops->writepages(mapping, wbc);
	else
		ret = generic_writepages(mapping, wbc);
	return ret;
}
  
  /**
   * write_one_page - write out a single page and optionally wait on I/O
   * @page: the page to write
   * @wait: if true, wait on writeout
   *
   * The page must be locked by the caller and will be unlocked upon return.
   *
   * write_one_page() returns a negative error code if I/O failed.
   */
  int write_one_page(struct page *page, int wait)
  {
  	struct address_space *mapping = page->mapping;
  	int ret = 0;
  	struct writeback_control wbc = {
  		.sync_mode = WB_SYNC_ALL,
  		.nr_to_write = 1,
  	};
  
  	BUG_ON(!PageLocked(page));
  
  	if (wait)
  		wait_on_page_writeback(page);
  
  	if (clear_page_dirty_for_io(page)) {
  		page_cache_get(page);
  		ret = mapping->a_ops->writepage(page, &wbc);
  		if (ret == 0 && wait) {
  			wait_on_page_writeback(page);
  			if (PageError(page))
  				ret = -EIO;
  		}
  		page_cache_release(page);
  	} else {
  		unlock_page(page);
  	}
  	return ret;
  }
  EXPORT_SYMBOL(write_one_page);
  
  /*
 * For address_spaces which do not use buffers nor write back.
 */
int __set_page_dirty_no_writeback(struct page *page)
{
	if (!PageDirty(page))
		return !TestSetPageDirty(page);
	return 0;
}
  
  /*
 * Helper function for set_page_dirty family.
 * NOTE: This relies on being atomic wrt interrupts.
 */
void account_page_dirtied(struct page *page, struct address_space *mapping)
{
	if (mapping_cap_account_dirty(mapping)) {
		__inc_zone_page_state(page, NR_FILE_DIRTY);
		__inc_zone_page_state(page, NR_DIRTIED);
		__inc_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE);
		__inc_bdi_stat(mapping->backing_dev_info, BDI_DIRTIED);
		task_io_account_write(PAGE_CACHE_SIZE);
		current->nr_dirtied++;
		this_cpu_inc(bdp_ratelimits);
	}
}
EXPORT_SYMBOL(account_page_dirtied);
  
  /*
   * Helper function for set_page_writeback family.
   * NOTE: Unlike account_page_dirtied this does not rely on being atomic
   * wrt interrupts.
   */
  void account_page_writeback(struct page *page)
  {
  	inc_zone_page_state(page, NR_WRITEBACK);
  }
  EXPORT_SYMBOL(account_page_writeback);
  
  /*
 * For address_spaces which do not use buffers.  Just tag the page as dirty in
 * its radix tree.
 *
 * This is also used when a single buffer is being dirtied: we want to set the
 * page dirty in that case, but not all the buffers.  This is a "bottom-up"
 * dirtying, whereas __set_page_dirty_buffers() is a "top-down" dirtying.
 *
 * Most callers have locked the page, which pins the address_space in memory.
 * But zap_pte_range() does not lock the page, however in that case the
 * mapping is pinned by the vma's ->vm_file reference.
 *
 * We take care to handle the case where the page was truncated from the
 * mapping by re-checking page_mapping() inside tree_lock.
 */
  int __set_page_dirty_nobuffers(struct page *page)
  {
	if (!TestSetPageDirty(page)) {
		struct address_space *mapping = page_mapping(page);
		struct address_space *mapping2;

		if (!mapping)
			return 1;

		spin_lock_irq(&mapping->tree_lock);
		mapping2 = page_mapping(page);
		if (mapping2) { /* Race with truncate? */
			BUG_ON(mapping2 != mapping);
			WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page));
			account_page_dirtied(page, mapping);
			radix_tree_tag_set(&mapping->page_tree,
				page_index(page), PAGECACHE_TAG_DIRTY);
		}
		spin_unlock_irq(&mapping->tree_lock);
		if (mapping->host) {
			/* !PageAnon && !swapper_space */
			__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
		}
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL(__set_page_dirty_nobuffers);
  
  /*
   * Call this whenever redirtying a page, to de-account the dirty counters
   * (NR_DIRTIED, BDI_DIRTIED, tsk->nr_dirtied), so that they match the written
   * counters (NR_WRITTEN, BDI_WRITTEN) in long term. The mismatches will lead to
   * systematic errors in balanced_dirty_ratelimit and the dirty pages position
   * control.
   */
  void account_page_redirty(struct page *page)
  {
  	struct address_space *mapping = page->mapping;
  	if (mapping && mapping_cap_account_dirty(mapping)) {
  		current->nr_dirtied--;
  		dec_zone_page_state(page, NR_DIRTIED);
  		dec_bdi_stat(mapping->backing_dev_info, BDI_DIRTIED);
  	}
  }
  EXPORT_SYMBOL(account_page_redirty);
  
  /*
   * When a writepage implementation decides that it doesn't want to write this
   * page for some reason, it should redirty the locked page via
   * redirty_page_for_writepage() and it should then unlock the page and return 0
   */
  int redirty_page_for_writepage(struct writeback_control *wbc, struct page *page)
  {
  	wbc->pages_skipped++;
  	account_page_redirty(page);
  	return __set_page_dirty_nobuffers(page);
  }
  EXPORT_SYMBOL(redirty_page_for_writepage);
  
  /*
 * Dirty a page.
 *
 * For pages with a mapping this should be done under the page lock
 * for the benefit of asynchronous memory errors which prefer a consistent
 * dirty state. This rule can be broken in some special cases,
 * but it is better not to.
 *
 * If the mapping doesn't provide a set_page_dirty a_op, then
 * just fall through and assume that it wants buffer_heads.
 */
int set_page_dirty(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

	if (likely(mapping)) {
		int (*spd)(struct page *) = mapping->a_ops->set_page_dirty;
		/*
		 * readahead/lru_deactivate_page could leave
		 * PG_readahead/PG_reclaim set due to a race with
		 * end_page_writeback.
		 * For readahead this is harmless: if the page gets written,
		 * the flags are reset. For lru_deactivate_page, if the page
		 * is redirtied, the flag is likewise reset. But a stale
		 * PG_reclaim can confuse readahead and make it restart the
		 * size ramp-up process. That is a trivial problem, though.
		 */
		ClearPageReclaim(page);
#ifdef CONFIG_BLOCK
		if (!spd)
			spd = __set_page_dirty_buffers;
#endif
		return (*spd)(page);
	}
	if (!PageDirty(page)) {
		if (!TestSetPageDirty(page))
			return 1;
	}
  	return 0;
  }
  EXPORT_SYMBOL(set_page_dirty);
  
  /*
   * set_page_dirty() is racy if the caller has no reference against
   * page->mapping->host, and if the page is unlocked.  This is because another
   * CPU could truncate the page off the mapping and then free the mapping.
   *
   * Usually, the page _is_ locked, or the caller is a user-space process which
   * holds a reference on the inode by having an open file.
   *
   * In other cases, the page should be locked before running set_page_dirty().
   */
  int set_page_dirty_lock(struct page *page)
  {
  	int ret;
  	lock_page(page);
  	ret = set_page_dirty(page);
  	unlock_page(page);
  	return ret;
  }
  EXPORT_SYMBOL(set_page_dirty_lock);
  
  /*
   * Clear a page's dirty flag, while caring for dirty memory accounting.
   * Returns true if the page was previously dirty.
   *
   * This is for preparing to put the page under writeout.  We leave the page
   * tagged as dirty in the radix tree so that a concurrent write-for-sync
   * can discover it via a PAGECACHE_TAG_DIRTY walk.  The ->writepage
   * implementation will run either set_page_writeback() or set_page_dirty(),
   * at which stage we bring the page's dirty flag and radix-tree dirty tag
   * back into sync.
   *
   * This incoherency between the page's dirty flag and radix-tree tag is
   * unfortunate, but it only exists while the page is locked.
   */
  int clear_page_dirty_for_io(struct page *page)
  {
  	struct address_space *mapping = page_mapping(page);
  	BUG_ON(!PageLocked(page));
  	if (mapping && mapping_cap_account_dirty(mapping)) {
  		/*
  		 * Yes, Virginia, this is indeed insane.
  		 *
  		 * We use this sequence to make sure that
  		 *  (a) we account for dirty stats properly
  		 *  (b) we tell the low-level filesystem to
  		 *      mark the whole page dirty if it was
  		 *      dirty in a pagetable. Only to then
  		 *  (c) clean the page again and return 1 to
  		 *      cause the writeback.
  		 *
  		 * This way we avoid all nasty races with the
  		 * dirty bit in multiple places and clearing
  		 * them concurrently from different threads.
  		 *
  		 * Note! Normally the "set_page_dirty(page)"
  		 * has no effect on the actual dirty bit - since
  		 * that will already usually be set. But we
  		 * need the side effects, and it can help us
  		 * avoid races.
  		 *
  		 * We basically use the page "master dirty bit"
  		 * as a serialization point for all the different
  		 * threads doing their things.
  		 */
  		if (page_mkclean(page))
  			set_page_dirty(page);
  		/*
  		 * We carefully synchronise fault handlers against
  		 * installing a dirty pte and marking the page dirty
  		 * at this point. We do this by having them hold the
  		 * page lock at some point after installing their
  		 * pte, but before marking the page dirty.
  		 * Pages are always locked coming in here, so we get
  		 * the desired exclusion. See mm/memory.c:do_wp_page()
  		 * for more comments.
  		 */
		if (TestClearPageDirty(page)) {
			dec_zone_page_state(page, NR_FILE_DIRTY);
			dec_bdi_stat(mapping->backing_dev_info,
					BDI_RECLAIMABLE);
			return 1;
		}
		return 0;
	}
	return TestClearPageDirty(page);
}
EXPORT_SYMBOL(clear_page_dirty_for_io);
  
  int test_clear_page_writeback(struct page *page)
  {
  	struct address_space *mapping = page_mapping(page);
  	int ret;
  
  	if (mapping) {
		struct backing_dev_info *bdi = mapping->backing_dev_info;
		unsigned long flags;

		spin_lock_irqsave(&mapping->tree_lock, flags);
		ret = TestClearPageWriteback(page);
		if (ret) {
			radix_tree_tag_clear(&mapping->page_tree,
						page_index(page),
						PAGECACHE_TAG_WRITEBACK);
			if (bdi_cap_account_writeback(bdi)) {
				__dec_bdi_stat(bdi, BDI_WRITEBACK);
				__bdi_writeout_inc(bdi);
			}
		}
		spin_unlock_irqrestore(&mapping->tree_lock, flags);
	} else {
		ret = TestClearPageWriteback(page);
	}
	if (ret) {
		dec_zone_page_state(page, NR_WRITEBACK);
		inc_zone_page_state(page, NR_WRITTEN);
	}
  	return ret;
  }
  
  int test_set_page_writeback(struct page *page)
  {
  	struct address_space *mapping = page_mapping(page);
  	int ret;
  
  	if (mapping) {
		struct backing_dev_info *bdi = mapping->backing_dev_info;
		unsigned long flags;

		spin_lock_irqsave(&mapping->tree_lock, flags);
		ret = TestSetPageWriteback(page);
		if (!ret) {
			radix_tree_tag_set(&mapping->page_tree,
						page_index(page),
						PAGECACHE_TAG_WRITEBACK);
			if (bdi_cap_account_writeback(bdi))
				__inc_bdi_stat(bdi, BDI_WRITEBACK);
		}
		if (!PageDirty(page))
			radix_tree_tag_clear(&mapping->page_tree,
						page_index(page),
						PAGECACHE_TAG_DIRTY);
		radix_tree_tag_clear(&mapping->page_tree,
				     page_index(page),
				     PAGECACHE_TAG_TOWRITE);
		spin_unlock_irqrestore(&mapping->tree_lock, flags);
	} else {
		ret = TestSetPageWriteback(page);
	}
	if (!ret)
		account_page_writeback(page);
  	return ret;
  
  }
  EXPORT_SYMBOL(test_set_page_writeback);
  
  /*
 * Return true if any of the pages in the mapping are marked with the
 * passed tag.
 */
int mapping_tagged(struct address_space *mapping, int tag)
{
	return radix_tree_tagged(&mapping->page_tree, tag);
  }
  EXPORT_SYMBOL(mapping_tagged);