  /*
   * mm/page-writeback.c
   *
   * Copyright (C) 2002, Linus Torvalds.
   * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
   *
   * Contains functions related to writing back dirty pages at the
   * address_space level.
   *
   * 10Apr2002	Andrew Morton
   *		Initial version
   */
  
  #include <linux/kernel.h>
  #include <linux/module.h>
  #include <linux/spinlock.h>
  #include <linux/fs.h>
  #include <linux/mm.h>
  #include <linux/swap.h>
  #include <linux/slab.h>
  #include <linux/pagemap.h>
  #include <linux/writeback.h>
  #include <linux/init.h>
  #include <linux/backing-dev.h>
  #include <linux/task_io_accounting_ops.h>
  #include <linux/blkdev.h>
  #include <linux/mpage.h>
  #include <linux/rmap.h>
  #include <linux/percpu.h>
  #include <linux/notifier.h>
  #include <linux/smp.h>
  #include <linux/sysctl.h>
  #include <linux/cpu.h>
  #include <linux/syscalls.h>
  #include <linux/buffer_head.h>
  #include <linux/pagevec.h>
  
  /*
   * After a CPU has dirtied this many pages, balance_dirty_pages_ratelimited
   * will look to see if it needs to force writeback or throttling.
   */
  static long ratelimit_pages = 32;
  /*
   * When balance_dirty_pages decides that the caller needs to perform some
   * non-background writeback, this is how many pages it will attempt to write.
   * It should be somewhat larger than dirtied pages to ensure that reasonably
   * large amounts of I/O are submitted.
   */
  static inline long sync_writeback_pages(unsigned long dirtied)
  {
  	if (dirtied < ratelimit_pages)
  		dirtied = ratelimit_pages;
  
  	return dirtied + dirtied / 2;
  }
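  /*
   * Illustrative worked example (not part of the original source): with
   * the default ratelimit_pages = 32, a caller that dirtied 100 pages is
   * asked to write 100 + 100/2 = 150 pages, while one that dirtied only
   * 10 is first rounded up to 32 and asked to write 32 + 32/2 = 48.
   */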
  
  /* The following parameters are exported via /proc/sys/vm */
  
  /*
   * Start background writeback (via writeback threads) at this percentage
   */
  int dirty_background_ratio = 10;
  
  /*
   * dirty_background_bytes starts at 0 (disabled) so that it is a function of
   * dirty_background_ratio * the amount of dirtyable memory
   */
  unsigned long dirty_background_bytes;
  
  /*
   * free highmem will not be subtracted from the total free memory
   * for calculating free ratios if vm_highmem_is_dirtyable is true
   */
  int vm_highmem_is_dirtyable;
  
  /*
   * The generator of dirty data starts writeback at this percentage
   */
  int vm_dirty_ratio = 20;
  
  /*
   * vm_dirty_bytes starts at 0 (disabled) so that it is a function of
   * vm_dirty_ratio * the amount of dirtyable memory
   */
  unsigned long vm_dirty_bytes;
  
  /*
   * The interval between `kupdate'-style writebacks
   */
  unsigned int dirty_writeback_interval = 5 * 100; /* centiseconds */
  
  /*
   * The longest time for which data is allowed to remain dirty
   */
  unsigned int dirty_expire_interval = 30 * 100; /* centiseconds */
  
  /*
   * Flag that makes the machine dump writes/reads and block dirtyings.
   */
  int block_dump;
  
  /*
   * Flag that puts the machine in "laptop mode". Doubles as a timeout in jiffies:
   * a full sync is triggered after this time elapses without any disk activity.
   */
  int laptop_mode;
  
  EXPORT_SYMBOL(laptop_mode);
  
  /* End of sysctl-exported parameters */
  /*
   * Scale the writeback cache size proportional to the relative writeout speeds.
   *
   * We do this by keeping a floating proportion between BDIs, based on page
   * writeback completions [end_page_writeback()]. Those devices that write out
   * pages fastest will get the larger share, while the slower will get a smaller
   * share.
   *
   * We use page writeout completions because we are interested in getting rid of
   * dirty pages. Having them written out is the primary goal.
   *
   * We introduce a concept of time, a period over which we measure these events,
   * because demand can/will vary over time. The length of this period itself is
   * measured in page writeback completions.
   *
   */
  static struct prop_descriptor vm_completions;
  static struct prop_descriptor vm_dirties;

  /*
   * couple the period to the dirty_ratio:
   *
   *   period/2 ~ roundup_pow_of_two(dirty limit)
   */
  static int calc_period_shift(void)
  {
  	unsigned long dirty_total;
  	if (vm_dirty_bytes)
  		dirty_total = vm_dirty_bytes / PAGE_SIZE;
  	else
  		dirty_total = (vm_dirty_ratio * determine_dirtyable_memory()) /
  				100;
  	return 2 + ilog2(dirty_total - 1);
  }
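  /*
   * Illustrative worked example (not part of the original source): with
   * vm_dirty_ratio = 20 and one million pages of dirtyable memory,
   * dirty_total = 200000, so the shift is 2 + ilog2(199999) = 19 and
   * period/2 = 2^18 = 262144 ~ roundup_pow_of_two(200000), matching the
   * relation documented above.
   */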
  
  /*
   * update the period when the dirty threshold changes.
   */
  static void update_completion_period(void)
  {
  	int shift = calc_period_shift();
  	prop_change_shift(&vm_completions, shift);
  	prop_change_shift(&vm_dirties, shift);
  }
  
  int dirty_background_ratio_handler(struct ctl_table *table, int write,
  		void __user *buffer, size_t *lenp,
  		loff_t *ppos)
  {
  	int ret;
  	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
  	if (ret == 0 && write)
  		dirty_background_bytes = 0;
  	return ret;
  }
  
  int dirty_background_bytes_handler(struct ctl_table *table, int write,
  		void __user *buffer, size_t *lenp,
  		loff_t *ppos)
  {
  	int ret;
  	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
  	if (ret == 0 && write)
  		dirty_background_ratio = 0;
  	return ret;
  }
  int dirty_ratio_handler(struct ctl_table *table, int write,
  		void __user *buffer, size_t *lenp,
  		loff_t *ppos)
  {
  	int old_ratio = vm_dirty_ratio;
  	int ret;
  	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
  	if (ret == 0 && write && vm_dirty_ratio != old_ratio) {
  		update_completion_period();
  		vm_dirty_bytes = 0;
  	}
  	return ret;
  }
  
  
  int dirty_bytes_handler(struct ctl_table *table, int write,
  		void __user *buffer, size_t *lenp,
  		loff_t *ppos)
  {
  	unsigned long old_bytes = vm_dirty_bytes;
  	int ret;
  	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
  	if (ret == 0 && write && vm_dirty_bytes != old_bytes) {
  		update_completion_period();
  		vm_dirty_ratio = 0;
  	}
  	return ret;
  }
  
  /*
   * Increment the BDI's writeout completion count and the global writeout
   * completion count. Called from test_clear_page_writeback().
   */
  static inline void __bdi_writeout_inc(struct backing_dev_info *bdi)
  {
  	__prop_inc_percpu_max(&vm_completions, &bdi->completions,
  			      bdi->max_prop_frac);
  }
  void bdi_writeout_inc(struct backing_dev_info *bdi)
  {
  	unsigned long flags;
  
  	local_irq_save(flags);
  	__bdi_writeout_inc(bdi);
  	local_irq_restore(flags);
  }
  EXPORT_SYMBOL_GPL(bdi_writeout_inc);
  void task_dirty_inc(struct task_struct *tsk)
  {
  	prop_inc_single(&vm_dirties, &tsk->dirties);
  }
  /*
   * Obtain an accurate fraction of the BDI's portion.
   */
  static void bdi_writeout_fraction(struct backing_dev_info *bdi,
  		long *numerator, long *denominator)
  {
  	if (bdi_cap_writeback_dirty(bdi)) {
  		prop_fraction_percpu(&vm_completions, &bdi->completions,
  				numerator, denominator);
  	} else {
  		*numerator = 0;
  		*denominator = 1;
  	}
  }
  
  /*
   * Clip the earned share of dirty pages to that which is actually available.
   * This avoids exceeding the total dirty_limit when the floating averages
   * fluctuate too quickly.
   */
  static void clip_bdi_dirty_limit(struct backing_dev_info *bdi,
  		unsigned long dirty, unsigned long *pbdi_dirty)
  {
  	unsigned long avail_dirty;

  	avail_dirty = global_page_state(NR_FILE_DIRTY) +
  		 global_page_state(NR_WRITEBACK) +
  		 global_page_state(NR_UNSTABLE_NFS) +
  		 global_page_state(NR_WRITEBACK_TEMP);

  	if (avail_dirty < dirty)
  		avail_dirty = dirty - avail_dirty;
  	else
  		avail_dirty = 0;
  
  	avail_dirty += bdi_stat(bdi, BDI_RECLAIMABLE) +
  		bdi_stat(bdi, BDI_WRITEBACK);
  
  	*pbdi_dirty = min(*pbdi_dirty, avail_dirty);
  }
  static inline void task_dirties_fraction(struct task_struct *tsk,
  		long *numerator, long *denominator)
  {
  	prop_fraction_single(&vm_dirties, &tsk->dirties,
  				numerator, denominator);
  }
  
  /*
   * scale the dirty limit
   *
   * task specific dirty limit:
   *
   *   dirty -= (dirty/8) * p_{t}
   */
  static void task_dirty_limit(struct task_struct *tsk, unsigned long *pdirty)
  {
  	long numerator, denominator;
  	unsigned long dirty = *pdirty;
  	u64 inv = dirty >> 3;
  
  	task_dirties_fraction(tsk, &numerator, &denominator);
  	inv *= numerator;
  	do_div(inv, denominator);
  
  	dirty -= inv;
  	if (dirty < *pdirty/2)
  		dirty = *pdirty/2;
  
  	*pdirty = dirty;
  }
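  /*
   * Illustrative worked example (not part of the original source): with a
   * dirty limit of 800 pages and a task owning half of the recently
   * dirtied pages (p = 1/2), inv = (800 >> 3) * 1/2 = 50 and the task's
   * limit becomes 750 pages.  The clamp keeps every task's limit at or
   * above *pdirty/2, i.e. 400 pages here.
   */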
  /*
   *
   */
  static unsigned int bdi_min_ratio;
  
  int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio)
  {
  	int ret = 0;

  	spin_lock_bh(&bdi_lock);
  	if (min_ratio > bdi->max_ratio) {
  		ret = -EINVAL;
  	} else {
  		min_ratio -= bdi->min_ratio;
  		if (bdi_min_ratio + min_ratio < 100) {
  			bdi_min_ratio += min_ratio;
  			bdi->min_ratio += min_ratio;
  		} else {
  			ret = -EINVAL;
  		}
  	}
  	spin_unlock_bh(&bdi_lock);
  
  	return ret;
  }
  
  int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned max_ratio)
  {
  	int ret = 0;
  
  	if (max_ratio > 100)
  		return -EINVAL;
  	spin_lock_bh(&bdi_lock);
  	if (bdi->min_ratio > max_ratio) {
  		ret = -EINVAL;
  	} else {
  		bdi->max_ratio = max_ratio;
  		bdi->max_prop_frac = (PROP_FRAC_BASE * max_ratio) / 100;
  	}
  	spin_unlock_bh(&bdi_lock);
  
  	return ret;
  }
  EXPORT_SYMBOL(bdi_set_max_ratio);
  
  /*
   * Work out the current dirty-memory clamping and background writeout
   * thresholds.
   *
   * The main aim here is to lower them aggressively if there is a lot of mapped
   * memory around, to avoid stressing page reclaim with lots of unreclaimable
   * pages.  It is better to clamp down on writers than to start swapping and
   * performing lots of scanning.
   *
   * We only allow 1/2 of the currently-unmapped memory to be dirtied.
   *
   * We don't permit the clamping level to fall below 5% - that is getting rather
   * excessive.
   *
   * We make sure that the background writeout level is below the adjusted
   * clamping level.
   */
  
  static unsigned long highmem_dirtyable_memory(unsigned long total)
  {
  #ifdef CONFIG_HIGHMEM
  	int node;
  	unsigned long x = 0;
  	for_each_node_state(node, N_HIGH_MEMORY) {
  		struct zone *z =
  			&NODE_DATA(node)->node_zones[ZONE_HIGHMEM];
  		x += zone_page_state(z, NR_FREE_PAGES) +
  		     zone_reclaimable_pages(z);
  	}
  	/*
  	 * Make sure that the number of highmem pages is never larger
  	 * than the total amount of dirtyable memory. This can only
  	 * occur in very strange VM situations but we want to make sure
  	 * that this does not occur.
  	 */
  	return min(x, total);
  #else
  	return 0;
  #endif
  }
  /**
   * determine_dirtyable_memory - amount of memory that may be used
   *
   * Returns the number of pages that can currently be freed and used
   * by the kernel for direct mappings.
   */
  unsigned long determine_dirtyable_memory(void)
  {
  	unsigned long x;
  	x = global_page_state(NR_FREE_PAGES) + global_reclaimable_pages();
  
  	if (!vm_highmem_is_dirtyable)
  		x -= highmem_dirtyable_memory(x);
  	return x + 1;	/* Ensure that we never return 0 */
  }
  void
  get_dirty_limits(unsigned long *pbackground, unsigned long *pdirty,
  		 unsigned long *pbdi_dirty, struct backing_dev_info *bdi)
  {
  	unsigned long background;
  	unsigned long dirty;
  	unsigned long available_memory = determine_dirtyable_memory();
  	struct task_struct *tsk;
  	if (vm_dirty_bytes)
  		dirty = DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE);
  	else {
  		int dirty_ratio;
  
  		dirty_ratio = vm_dirty_ratio;
  		if (dirty_ratio < 5)
  			dirty_ratio = 5;
  		dirty = (dirty_ratio * available_memory) / 100;
  	}

  	if (dirty_background_bytes)
  		background = DIV_ROUND_UP(dirty_background_bytes, PAGE_SIZE);
  	else
  		background = (dirty_background_ratio * available_memory) / 100;

  	if (background >= dirty)
  		background = dirty / 2;
  	tsk = current;
  	if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk)) {
  		background += background / 4;
  		dirty += dirty / 4;
  	}
  	*pbackground = background;
  	*pdirty = dirty;
  
  	if (bdi) {
  		u64 bdi_dirty;
  		long numerator, denominator;
  
  		/*
  		 * Calculate this BDI's share of the dirty ratio.
  		 */
  		bdi_writeout_fraction(bdi, &numerator, &denominator);
  		bdi_dirty = (dirty * (100 - bdi_min_ratio)) / 100;
  		bdi_dirty *= numerator;
  		do_div(bdi_dirty, denominator);
  		bdi_dirty += (dirty * bdi->min_ratio) / 100;
  		if (bdi_dirty > (dirty * bdi->max_ratio) / 100)
  			bdi_dirty = dirty * bdi->max_ratio / 100;
  
  		*pbdi_dirty = bdi_dirty;
  		clip_bdi_dirty_limit(bdi, dirty, pbdi_dirty);
  		task_dirty_limit(current, pbdi_dirty);
  	}
  }
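  /*
   * Illustrative worked example (not part of the original source): with
   * dirty = 1000 pages and a single BDI that has min_ratio = 10 (hence
   * bdi_min_ratio = 10) and a writeout-completion fraction of 3/10,
   * bdi_dirty = (1000 * 90/100) * 3/10 + 1000 * 10/100 = 270 + 100 = 370
   * pages, before the max_ratio cap, clipping and task correction above.
   */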
  
  /*
   * balance_dirty_pages() must be called by processes which are generating dirty
   * data.  It looks at the number of dirty pages in the machine and will force
   * the caller to perform writeback if the system is over `vm_dirty_ratio'.
   * If we're over `background_thresh' then the writeback threads are woken to
   * perform some writeout.
   */
  static void balance_dirty_pages(struct address_space *mapping,
  				unsigned long write_chunk)
  {
  	long nr_reclaimable, bdi_nr_reclaimable;
  	long nr_writeback, bdi_nr_writeback;
  	unsigned long background_thresh;
  	unsigned long dirty_thresh;
  	unsigned long bdi_thresh;
  	unsigned long pages_written = 0;
  	unsigned long pause = 1;
  
  	struct backing_dev_info *bdi = mapping->backing_dev_info;
  
  	for (;;) {
  		struct writeback_control wbc = {
  			.bdi		= bdi,
  			.sync_mode	= WB_SYNC_NONE,
  			.older_than_this = NULL,
  			.nr_to_write	= write_chunk,
  			.range_cyclic	= 1,
  		};
  		get_dirty_limits(&background_thresh, &dirty_thresh,
  				&bdi_thresh, bdi);
  
  		nr_reclaimable = global_page_state(NR_FILE_DIRTY) +
  					global_page_state(NR_UNSTABLE_NFS);
  		nr_writeback = global_page_state(NR_WRITEBACK);
  		bdi_nr_reclaimable = bdi_stat(bdi, BDI_RECLAIMABLE);
  		bdi_nr_writeback = bdi_stat(bdi, BDI_WRITEBACK);

  		if (bdi_nr_reclaimable + bdi_nr_writeback <= bdi_thresh)
  			break;

  		/*
  		 * Throttle it only when the background writeback cannot
  		 * catch-up. This avoids (excessively) small writeouts
  		 * when the bdi limits are ramping up.
  		 */
  		if (nr_reclaimable + nr_writeback <
  				(background_thresh + dirty_thresh) / 2)
  			break;
  		if (!bdi->dirty_exceeded)
  			bdi->dirty_exceeded = 1;
  
  		/* Note: nr_reclaimable denotes nr_dirty + nr_unstable.
  		 * Unstable writes are a feature of certain networked
  		 * filesystems (i.e. NFS) in which data may have been
  		 * written to the server's write cache, but has not yet
  		 * been flushed to permanent storage.
  		 * Only move pages to writeback if this bdi is over its
  		 * threshold otherwise wait until the disk writes catch
  		 * up.
  		 */
  		if (bdi_nr_reclaimable > bdi_thresh) {
  			writeback_inodes_wbc(&wbc);
  			pages_written += write_chunk - wbc.nr_to_write;
  			get_dirty_limits(&background_thresh, &dirty_thresh,
  				       &bdi_thresh, bdi);
  		}
  
  		/*
  		 * In order to avoid the stacked BDI deadlock we need
  		 * to ensure we accurately count the 'dirty' pages when
  		 * the threshold is low.
  		 *
  		 * Otherwise it would be possible to get thresh+n pages
  		 * reported dirty, even though there are thresh-m pages
  		 * actually dirty; with m+n sitting in the percpu
  		 * deltas.
  		 */
  		if (bdi_thresh < 2*bdi_stat_error(bdi)) {
  			bdi_nr_reclaimable = bdi_stat_sum(bdi, BDI_RECLAIMABLE);
  			bdi_nr_writeback = bdi_stat_sum(bdi, BDI_WRITEBACK);
  		} else if (bdi_nr_reclaimable) {
  			bdi_nr_reclaimable = bdi_stat(bdi, BDI_RECLAIMABLE);
  			bdi_nr_writeback = bdi_stat(bdi, BDI_WRITEBACK);
  		}
  
  		if (bdi_nr_reclaimable + bdi_nr_writeback <= bdi_thresh)
  			break;
  		if (pages_written >= write_chunk)
  			break;		/* We've done our duty */
  		__set_current_state(TASK_INTERRUPTIBLE);
  		io_schedule_timeout(pause);
  
  		/*
  		 * Increase the delay for each loop, up to our previous
  		 * default of taking a 100ms nap.
  		 */
  		pause <<= 1;
  		if (pause > HZ / 10)
  			pause = HZ / 10;
  	}
  	if (bdi_nr_reclaimable + bdi_nr_writeback < bdi_thresh &&
  			bdi->dirty_exceeded)
  		bdi->dirty_exceeded = 0;
  
  	if (writeback_in_progress(bdi))
  		return;
  
  	/*
  	 * In laptop mode, we wait until hitting the higher threshold before
  	 * starting background writeout, and then write out all the way down
  	 * to the lower threshold.  So slow writers cause minimal disk activity.
  	 *
  	 * In normal mode, we start background writeout at the lower
  	 * background_thresh, to keep the amount of dirty memory low.
  	 */
  	if ((laptop_mode && pages_written) ||
  	    (!laptop_mode && ((global_page_state(NR_FILE_DIRTY)
  			       + global_page_state(NR_UNSTABLE_NFS))
  					  > background_thresh)))
  		bdi_start_writeback(bdi, NULL, 0);
  }
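  /*
   * Illustrative timing note (not part of the original source): the pause
   * in the loop above grows 1, 2, 4, ... jiffies per iteration and is
   * capped at HZ / 10, so a throttled dirtier backs off exponentially up
   * to the previous fixed 100ms nap.
   */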
  void set_page_dirty_balance(struct page *page, int page_mkwrite)
  {
  	if (set_page_dirty(page) || page_mkwrite) {
  		struct address_space *mapping = page_mapping(page);
  
  		if (mapping)
  			balance_dirty_pages_ratelimited(mapping);
  	}
  }
  static DEFINE_PER_CPU(unsigned long, bdp_ratelimits) = 0;
  /**
   * balance_dirty_pages_ratelimited_nr - balance dirty memory state
   * @mapping: address_space which was dirtied
   * @nr_pages_dirtied: number of pages which the caller has just dirtied
   *
   * Processes which are dirtying memory should call in here once for each page
   * which was newly dirtied.  The function will periodically check the system's
   * dirty state and will initiate writeback if needed.
   *
   * On really big machines, get_writeback_state is expensive, so try to avoid
   * calling it too often (ratelimiting).  But once we're over the dirty memory
   * limit we decrease the ratelimiting by a lot, to prevent individual processes
   * from overshooting the limit by (ratelimit_pages) each.
   */
  void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
  					unsigned long nr_pages_dirtied)
  {
  	unsigned long ratelimit;
  	unsigned long *p;
  
  	ratelimit = ratelimit_pages;
  	if (mapping->backing_dev_info->dirty_exceeded)
  		ratelimit = 8;
  
  	/*
  	 * Check the rate limiting. Also, we do not want to throttle real-time
  	 * tasks in balance_dirty_pages(). Period.
  	 */
  	preempt_disable();
  	p =  &__get_cpu_var(bdp_ratelimits);
  	*p += nr_pages_dirtied;
  	if (unlikely(*p >= ratelimit)) {
  		ratelimit = sync_writeback_pages(*p);
  		*p = 0;
  		preempt_enable();
  		balance_dirty_pages(mapping, ratelimit);
  		return;
  	}
  	preempt_enable();
  }
  EXPORT_SYMBOL(balance_dirty_pages_ratelimited_nr);
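  /*
   * Illustrative worked example (not part of the original source): with
   * ratelimit_pages = 1024, a task may dirty up to about 1024 pages per
   * CPU before the per-CPU counter trips and balance_dirty_pages() runs;
   * once the bdi reports dirty_exceeded, the threshold drops to 8 pages,
   * so heavy dirtiers are re-checked almost every page.
   */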

  void throttle_vm_writeout(gfp_t gfp_mask)
  {
  	unsigned long background_thresh;
  	unsigned long dirty_thresh;
  
          for ( ; ; ) {
  		get_dirty_limits(&background_thresh, &dirty_thresh, NULL, NULL);
  
                  /*
                   * Boost the allowable dirty threshold a bit for page
                   * allocators so they don't get DoS'ed by heavy writers
                   */
                  dirty_thresh += dirty_thresh / 10;      /* wheeee... */
                  if (global_page_state(NR_UNSTABLE_NFS) +
  			global_page_state(NR_WRITEBACK) <= dirty_thresh)
                          break;
                  congestion_wait(BLK_RW_ASYNC, HZ/10);
  
  		/*
  		 * The caller might hold locks which can prevent IO completion
  		 * or progress in the filesystem.  So we cannot just sit here
  		 * waiting for IO to complete.
  		 */
  		if ((gfp_mask & (__GFP_FS|__GFP_IO)) != (__GFP_FS|__GFP_IO))
  			break;
          }
  }
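  /*
   * Illustrative worked example (not part of the original source): if
   * get_dirty_limits() reports dirty_thresh = 1000 pages, the boost above
   * raises it to 1000 + 1000/10 = 1100 pages before page allocators are
   * made to wait on congestion.
   */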
  static void laptop_timer_fn(unsigned long unused);
  static DEFINE_TIMER(laptop_mode_wb_timer, laptop_timer_fn, 0, 0);
  
  /*
   * sysctl handler for /proc/sys/vm/dirty_writeback_centisecs
   */
  int dirty_writeback_centisecs_handler(ctl_table *table, int write,
  	void __user *buffer, size_t *length, loff_t *ppos)
  {
  	proc_dointvec(table, write, buffer, length, ppos);
  	return 0;
  }
  static void do_laptop_sync(struct work_struct *work)
  {
  	wakeup_flusher_threads(0);
  	kfree(work);
  }
  
  static void laptop_timer_fn(unsigned long unused)
  {
  	struct work_struct *work;
  
  	work = kmalloc(sizeof(*work), GFP_ATOMIC);
  	if (work) {
  		INIT_WORK(work, do_laptop_sync);
  		schedule_work(work);
  	}
  }
  
  /*
   * We've spun up the disk and we're in laptop mode: schedule writeback
   * of all dirty data a few seconds from now.  If the flush is already scheduled
   * then push it back - the user is still using the disk.
   */
  void laptop_io_completion(void)
  {
  	mod_timer(&laptop_mode_wb_timer, jiffies + laptop_mode);
  }
  
  /*
   * We're in laptop mode and we've just synced. The sync's writes will have
   * caused another writeback to be scheduled by laptop_io_completion.
   * Nothing needs to be written back anymore, so we unschedule the writeback.
   */
  void laptop_sync_completion(void)
  {
  	del_timer(&laptop_mode_wb_timer);
  }
  
  /*
   * If ratelimit_pages is too high then we can get into dirty-data overload
   * if a large number of processes all perform writes at the same time.
   * If it is too low then SMP machines will call the (expensive)
   * get_writeback_state too often.
   *
   * Here we set ratelimit_pages to a level which ensures that when all CPUs are
   * dirtying in parallel, we cannot go more than 3% (1/32) over the dirty memory
   * thresholds before writeback cuts in.
   *
   * But the limit should not be set too high.  Because it also controls the
   * amount of memory which the balance_dirty_pages() caller has to write back.
   * If this is too large then the caller will block on the IO queue all the
   * time.  So limit it to four megabytes - the balance_dirty_pages() caller
   * will write six megabyte chunks, max.
   */
  void writeback_set_ratelimit(void)
  {
  	ratelimit_pages = vm_total_pages / (num_online_cpus() * 32);
  	if (ratelimit_pages < 16)
  		ratelimit_pages = 16;
  	if (ratelimit_pages * PAGE_CACHE_SIZE > 4096 * 1024)
  		ratelimit_pages = (4096 * 1024) / PAGE_CACHE_SIZE;
  }
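  /*
   * Illustrative worked example (not part of the original source): on a
   * 4-CPU machine with 1 GiB of RAM (262144 pages of 4 KiB),
   * ratelimit_pages = 262144 / (4 * 32) = 2048; the 4MB cap then clamps
   * it to (4096 * 1024) / 4096 = 1024 pages.
   */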
  static int __cpuinit
  ratelimit_handler(struct notifier_block *self, unsigned long u, void *v)
  {
  	writeback_set_ratelimit();
  	return NOTIFY_DONE;
  }
  static struct notifier_block __cpuinitdata ratelimit_nb = {
  	.notifier_call	= ratelimit_handler,
  	.next		= NULL,
  };
  
  /*
   * Called early on to tune the page writeback dirty limits.
   *
   * We used to scale dirty pages according to how total memory
   * related to pages that could be allocated for buffers (by
   * comparing nr_free_buffer_pages() to vm_total_pages).
   *
   * However, that was when we used "dirty_ratio" to scale with
   * all memory, and we don't do that any more. "dirty_ratio"
   * is now applied to total non-HIGHPAGE memory (by subtracting
   * totalhigh_pages from vm_total_pages), and as such we can't
   * get into the old insane situation any more where we had
   * large amounts of dirty pages compared to a small amount of
   * non-HIGHMEM memory.
   *
   * But we might still want to scale the dirty_ratio by how
   * much memory the box has..
   */
  void __init page_writeback_init(void)
  {
  	int shift;
  	writeback_set_ratelimit();
  	register_cpu_notifier(&ratelimit_nb);
  
  	shift = calc_period_shift();
  	prop_descriptor_init(&vm_completions, shift);
  	prop_descriptor_init(&vm_dirties, shift);
  }
  /**
   * write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
   * @mapping: address space structure to write
   * @wbc: subtract the number of written pages from *@wbc->nr_to_write
   * @writepage: function called for each page
   * @data: data passed to writepage function
   *
   * If a page is already under I/O, write_cache_pages() skips it, even
   * if it's dirty.  This is desirable behaviour for memory-cleaning writeback,
   * but it is INCORRECT for data-integrity system calls such as fsync().  fsync()
   * and msync() need to guarantee that all the data which was dirty at the time
   * the call was made get new I/O started against them.  If wbc->sync_mode is
   * WB_SYNC_ALL then we were called for data integrity and we must wait for
   * existing IO to complete.
   */
  int write_cache_pages(struct address_space *mapping,
  		      struct writeback_control *wbc, writepage_t writepage,
  		      void *data)
  {
  	struct backing_dev_info *bdi = mapping->backing_dev_info;
  	int ret = 0;
  	int done = 0;
  	struct pagevec pvec;
  	int nr_pages;
  	pgoff_t uninitialized_var(writeback_index);
  	pgoff_t index;
  	pgoff_t end;		/* Inclusive */
  	pgoff_t done_index;
  	int cycled;
  	int range_whole = 0;
  	long nr_to_write = wbc->nr_to_write;
  
  	if (wbc->nonblocking && bdi_write_congested(bdi)) {
  		wbc->encountered_congestion = 1;
  		return 0;
  	}
  	pagevec_init(&pvec, 0);
  	if (wbc->range_cyclic) {
  		writeback_index = mapping->writeback_index; /* prev offset */
  		index = writeback_index;
  		if (index == 0)
  			cycled = 1;
  		else
  			cycled = 0;
  		end = -1;
  	} else {
  		index = wbc->range_start >> PAGE_CACHE_SHIFT;
  		end = wbc->range_end >> PAGE_CACHE_SHIFT;
  		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
  			range_whole = 1;
  		cycled = 1; /* ignore range_cyclic tests */
  	}
  retry:
  	done_index = index;
  	while (!done && (index <= end)) {
  		int i;
  
  		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
  			      PAGECACHE_TAG_DIRTY,
  			      min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
  		if (nr_pages == 0)
  			break;

  		for (i = 0; i < nr_pages; i++) {
  			struct page *page = pvec.pages[i];
  
  			/*
  			 * At this point, the page may be truncated or
  			 * invalidated (changing page->mapping to NULL), or
  			 * even swizzled back from swapper_space to tmpfs file
  			 * mapping. However, page->index will not change
  			 * because we have a reference on the page.
  			 */
  			if (page->index > end) {
  				/*
  				 * can't be range_cyclic (1st pass) because
  				 * end == -1 in that case.
  				 */
  				done = 1;
  				break;
  			}
  
  			done_index = page->index + 1;
  			lock_page(page);
  			/*
  			 * Page truncated or invalidated. We can freely skip it
  			 * then, even for data integrity operations: the page
  			 * has disappeared concurrently, so there could be no
  			 * real expectation of this data integrity operation
  			 * even if there is now a new, dirty page at the same
  			 * pagecache address.
  			 */
  			if (unlikely(page->mapping != mapping)) {
  continue_unlock:
  				unlock_page(page);
  				continue;
  			}
  			if (!PageDirty(page)) {
  				/* someone wrote it for us */
  				goto continue_unlock;
  			}
  
  			if (PageWriteback(page)) {
  				if (wbc->sync_mode != WB_SYNC_NONE)
  					wait_on_page_writeback(page);
  				else
  					goto continue_unlock;
  			}

  			BUG_ON(PageWriteback(page));
  			if (!clear_page_dirty_for_io(page))
  				goto continue_unlock;

  			ret = (*writepage)(page, wbc, data);
  			if (unlikely(ret)) {
  				if (ret == AOP_WRITEPAGE_ACTIVATE) {
  					unlock_page(page);
  					ret = 0;
  				} else {
  					/*
  					 * done_index is set past this page,
  					 * so media errors will not choke
  					 * background writeout for the entire
  					 * file. This has consequences for
  					 * range_cyclic semantics (ie. it may
  					 * not be suitable for data integrity
  					 * writeout).
  					 */
  					done = 1;
  					break;
  				}
   			}
  			if (nr_to_write > 0) {
  				nr_to_write--;
  				if (nr_to_write == 0 &&
  				    wbc->sync_mode == WB_SYNC_NONE) {
  					/*
  					 * We stop writing back only if we are
  					 * not doing integrity sync. In case of
  					 * integrity sync we have to keep going
  					 * because someone may be concurrently
  					 * dirtying pages, and we might have
  					 * synced a lot of newly appeared dirty
  					 * pages, but have not synced all of the
  					 * old dirty pages.
  					 */
  					done = 1;
  					break;
  				}
  			}

  			if (wbc->nonblocking && bdi_write_congested(bdi)) {
  				wbc->encountered_congestion = 1;
  				done = 1;
  				break;
  			}
  		}
  		pagevec_release(&pvec);
  		cond_resched();
  	}
  	if (!cycled && !done) {
  		/*
  		 * range_cyclic:
  		 * We hit the last page and there is more work to be done: wrap
  		 * back to the start of the file
  		 */
  		cycled = 1;
  		index = 0;
  		end = writeback_index - 1;
  		goto retry;
  	}
  	if (!wbc->no_nrwrite_index_update) {
  		if (wbc->range_cyclic || (range_whole && nr_to_write > 0))
  			mapping->writeback_index = done_index;
  		wbc->nr_to_write = nr_to_write;
  	}

  	return ret;
  }
  EXPORT_SYMBOL(write_cache_pages);
  
  /*
   * Function used by generic_writepages to call the real writepage
   * function and set the mapping flags on error
   */
  static int __writepage(struct page *page, struct writeback_control *wbc,
  		       void *data)
  {
  	struct address_space *mapping = data;
  	int ret = mapping->a_ops->writepage(page, wbc);
  	mapping_set_error(mapping, ret);
  	return ret;
  }
  
  /**
   * generic_writepages - walk the list of dirty pages of the given address space and writepage() all of them.
   * @mapping: address space structure to write
   * @wbc: subtract the number of written pages from *@wbc->nr_to_write
   *
   * This is a library function, which implements the writepages()
   * address_space_operation.
   */
  int generic_writepages(struct address_space *mapping,
  		       struct writeback_control *wbc)
  {
  	/* deal with chardevs and other special file */
  	if (!mapping->a_ops->writepage)
  		return 0;
  
  	return write_cache_pages(mapping, wbc, __writepage, mapping);
  }
  
  EXPORT_SYMBOL(generic_writepages);
  int do_writepages(struct address_space *mapping, struct writeback_control *wbc)
  {
  	int ret;
  	if (wbc->nr_to_write <= 0)
  		return 0;
  	if (mapping->a_ops->writepages)
  		ret = mapping->a_ops->writepages(mapping, wbc);
  	else
  		ret = generic_writepages(mapping, wbc);
  	return ret;
  }
  
  /**
   * write_one_page - write out a single page and optionally wait on I/O
   * @page: the page to write
   * @wait: if true, wait on writeout
   *
   * The page must be locked by the caller and will be unlocked upon return.
   *
   * write_one_page() returns a negative error code if I/O failed.
   */
  int write_one_page(struct page *page, int wait)
  {
  	struct address_space *mapping = page->mapping;
  	int ret = 0;
  	struct writeback_control wbc = {
  		.sync_mode = WB_SYNC_ALL,
  		.nr_to_write = 1,
  	};
  
  	BUG_ON(!PageLocked(page));
  
  	if (wait)
  		wait_on_page_writeback(page);
  
  	if (clear_page_dirty_for_io(page)) {
  		page_cache_get(page);
  		ret = mapping->a_ops->writepage(page, &wbc);
  		if (ret == 0 && wait) {
  			wait_on_page_writeback(page);
  			if (PageError(page))
  				ret = -EIO;
  		}
  		page_cache_release(page);
  	} else {
  		unlock_page(page);
  	}
  	return ret;
  }
  EXPORT_SYMBOL(write_one_page);
  
  /*
   * For address_spaces which do not use buffers nor write back.
   */
  int __set_page_dirty_no_writeback(struct page *page)
  {
  	if (!PageDirty(page))
  		SetPageDirty(page);
  	return 0;
  }
  
  /*
   * Helper function for set_page_dirty family.
   * NOTE: This relies on being atomic wrt interrupts.
   */
  void account_page_dirtied(struct page *page, struct address_space *mapping)
  {
  	if (mapping_cap_account_dirty(mapping)) {
  		__inc_zone_page_state(page, NR_FILE_DIRTY);
  		__inc_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE);
  		task_dirty_inc(current);
  		task_io_account_write(PAGE_CACHE_SIZE);
  	}
  }
  
  /*
   * For address_spaces which do not use buffers.  Just tag the page as dirty in
   * its radix tree.
   *
   * This is also used when a single buffer is being dirtied: we want to set the
   * page dirty in that case, but not all the buffers.  This is a "bottom-up"
   * dirtying, whereas __set_page_dirty_buffers() is a "top-down" dirtying.
   *
   * Most callers have locked the page, which pins the address_space in memory.
   * But zap_pte_range() does not lock the page, however in that case the
   * mapping is pinned by the vma's ->vm_file reference.
   *
   * We take care to handle the case where the page was truncated from the
   * mapping by re-checking page_mapping() inside tree_lock.
   */
  int __set_page_dirty_nobuffers(struct page *page)
  {
  	if (!TestSetPageDirty(page)) {
  		struct address_space *mapping = page_mapping(page);
  		struct address_space *mapping2;
  		if (!mapping)
  			return 1;
  		spin_lock_irq(&mapping->tree_lock);
  		mapping2 = page_mapping(page);
  		if (mapping2) { /* Race with truncate? */
  			BUG_ON(mapping2 != mapping);
  			WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page));
  			account_page_dirtied(page, mapping);
  			radix_tree_tag_set(&mapping->page_tree,
  				page_index(page), PAGECACHE_TAG_DIRTY);
  		}
  		spin_unlock_irq(&mapping->tree_lock);
  		if (mapping->host) {
  			/* !PageAnon && !swapper_space */
  			__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
  		}
  		return 1;
  	}
  	return 0;
  }
  EXPORT_SYMBOL(__set_page_dirty_nobuffers);
  
  /*
   * When a writepage implementation decides that it doesn't want to write this
   * page for some reason, it should redirty the locked page via
   * redirty_page_for_writepage() and it should then unlock the page and return 0.
   */
  int redirty_page_for_writepage(struct writeback_control *wbc, struct page *page)
  {
  	wbc->pages_skipped++;
  	return __set_page_dirty_nobuffers(page);
  }
  EXPORT_SYMBOL(redirty_page_for_writepage);
  
  /*
   * Dirty a page.
   *
   * For pages with a mapping this should be done under the page lock
   * for the benefit of asynchronous memory errors which prefer a consistent
   * dirty state. This rule can be broken in some special cases,
   * but it is better not to.
   *
   * If the mapping doesn't provide a set_page_dirty a_op, then
   * just fall through and assume that it wants buffer_heads.
   */
  int set_page_dirty(struct page *page)
  {
  	struct address_space *mapping = page_mapping(page);
  
  	if (likely(mapping)) {
  		int (*spd)(struct page *) = mapping->a_ops->set_page_dirty;
  #ifdef CONFIG_BLOCK
  		if (!spd)
  			spd = __set_page_dirty_buffers;
  #endif
  		return (*spd)(page);
  	}
  	if (!PageDirty(page)) {
  		if (!TestSetPageDirty(page))
  			return 1;
  	}
  	return 0;
  }
  EXPORT_SYMBOL(set_page_dirty);
  
  /*
   * set_page_dirty() is racy if the caller has no reference against
   * page->mapping->host, and if the page is unlocked.  This is because another
   * CPU could truncate the page off the mapping and then free the mapping.
   *
   * Usually, the page _is_ locked, or the caller is a user-space process which
   * holds a reference on the inode by having an open file.
   *
   * In other cases, the page should be locked before running set_page_dirty().
   */
  int set_page_dirty_lock(struct page *page)
  {
  	int ret;
  	lock_page_nosync(page);
  	ret = set_page_dirty(page);
  	unlock_page(page);
  	return ret;
  }
  EXPORT_SYMBOL(set_page_dirty_lock);
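
/*
 * Example (editor's illustration, not part of this file): releasing pages
 * pinned with get_user_pages() after a device wrote into them by DMA.
 * The pages are not locked here, so the _lock variant is the safe choice.
 * example_release_user_pages is a hypothetical helper.
 */
static void example_release_user_pages(struct page **pages, int npages)
{
	int i;

	for (i = 0; i < npages; i++) {
		set_page_dirty_lock(pages[i]);
		put_page(pages[i]);
	}
}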
  
  /*
   * Clear a page's dirty flag, while caring for dirty memory accounting.
   * Returns true if the page was previously dirty.
   *
   * This is for preparing to put the page under writeout.  We leave the page
   * tagged as dirty in the radix tree so that a concurrent write-for-sync
   * can discover it via a PAGECACHE_TAG_DIRTY walk.  The ->writepage
   * implementation will run either set_page_writeback() or set_page_dirty(),
   * at which stage we bring the page's dirty flag and radix-tree dirty tag
   * back into sync.
   *
   * This incoherency between the page's dirty flag and radix-tree tag is
   * unfortunate, but it only exists while the page is locked.
   */
  int clear_page_dirty_for_io(struct page *page)
  {
  	struct address_space *mapping = page_mapping(page);
  	BUG_ON(!PageLocked(page));
  	ClearPageReclaim(page);
  	if (mapping && mapping_cap_account_dirty(mapping)) {
  		/*
  		 * Yes, Virginia, this is indeed insane.
  		 *
  		 * We use this sequence to make sure that
  		 *  (a) we account for dirty stats properly
  		 *  (b) we tell the low-level filesystem to
  		 *      mark the whole page dirty if it was
  		 *      dirty in a pagetable. Only to then
  		 *  (c) clean the page again and return 1 to
  		 *      cause the writeback.
  		 *
  		 * This way we avoid all nasty races with the
  		 * dirty bit in multiple places and clearing
  		 * them concurrently from different threads.
  		 *
  		 * Note! Normally the "set_page_dirty(page)"
  		 * has no effect on the actual dirty bit - since
  		 * that will already usually be set. But we
  		 * need the side effects, and it can help us
  		 * avoid races.
  		 *
  		 * We basically use the page "master dirty bit"
  		 * as a serialization point for all the different
  		 * threads doing their things.
  		 */
  		if (page_mkclean(page))
  			set_page_dirty(page);
  		/*
  		 * We carefully synchronise fault handlers against
  		 * installing a dirty pte and marking the page dirty
  		 * at this point. We do this by having them hold the
  		 * page lock at some point after installing their
  		 * pte, but before marking the page dirty.
  		 * Pages are always locked coming in here, so we get
  		 * the desired exclusion. See mm/memory.c:do_wp_page()
  		 * for more comments.
  		 */
  		if (TestClearPageDirty(page)) {
  			dec_zone_page_state(page, NR_FILE_DIRTY);
  			dec_bdi_stat(mapping->backing_dev_info,
  					BDI_RECLAIMABLE);
  			return 1;
  		}
  		return 0;
  	}
  	return TestClearPageDirty(page);
  }
  EXPORT_SYMBOL(clear_page_dirty_for_io);
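
/*
 * Example (editor's illustration, not part of this file): the canonical
 * calling sequence, as in write_one_page()/write_cache_pages(): lock the
 * page, move its dirty state to the radix tree, then hand it to
 * ->writepage(), which is responsible for unlocking it.
 */
static int example_write_one(struct page *page, struct writeback_control *wbc)
{
	struct address_space *mapping = page->mapping;
	int ret = 0;

	lock_page(page);
	if (clear_page_dirty_for_io(page))
		ret = mapping->a_ops->writepage(page, wbc);	/* unlocks the page */
	else
		unlock_page(page);
	return ret;
}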
  
  int test_clear_page_writeback(struct page *page)
  {
  	struct address_space *mapping = page_mapping(page);
  	int ret;
  
  	if (mapping) {
  		struct backing_dev_info *bdi = mapping->backing_dev_info;
  		unsigned long flags;
  		spin_lock_irqsave(&mapping->tree_lock, flags);
  		ret = TestClearPageWriteback(page);
  		if (ret) {
  			radix_tree_tag_clear(&mapping->page_tree,
  						page_index(page),
  						PAGECACHE_TAG_WRITEBACK);
  			if (bdi_cap_account_writeback(bdi)) {
  				__dec_bdi_stat(bdi, BDI_WRITEBACK);
  				__bdi_writeout_inc(bdi);
  			}
  		}
  		spin_unlock_irqrestore(&mapping->tree_lock, flags);
  	} else {
  		ret = TestClearPageWriteback(page);
  	}
  	if (ret)
  		dec_zone_page_state(page, NR_WRITEBACK);
  	return ret;
  }
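
/*
 * Example (editor's illustration, not part of this file): the function
 * above is normally reached via end_page_writeback() from an I/O
 * completion path.  example_end_io is a hypothetical completion helper.
 */
static void example_end_io(struct page *page, int error)
{
	if (error)
		SetPageError(page);
	end_page_writeback(page);	/* uses test_clear_page_writeback() */
}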
  
  int test_set_page_writeback(struct page *page)
  {
  	struct address_space *mapping = page_mapping(page);
  	int ret;
  
  	if (mapping) {
  		struct backing_dev_info *bdi = mapping->backing_dev_info;
  		unsigned long flags;
  		spin_lock_irqsave(&mapping->tree_lock, flags);
  		ret = TestSetPageWriteback(page);
  		if (!ret) {
  			radix_tree_tag_set(&mapping->page_tree,
  						page_index(page),
  						PAGECACHE_TAG_WRITEBACK);
  			if (bdi_cap_account_writeback(bdi))
  				__inc_bdi_stat(bdi, BDI_WRITEBACK);
  		}
  		if (!PageDirty(page))
  			radix_tree_tag_clear(&mapping->page_tree,
  						page_index(page),
  						PAGECACHE_TAG_DIRTY);
  		spin_unlock_irqrestore(&mapping->tree_lock, flags);
  	} else {
  		ret = TestSetPageWriteback(page);
  	}
  	if (!ret)
  		inc_zone_page_state(page, NR_WRITEBACK);
	return ret;
}
  EXPORT_SYMBOL(test_set_page_writeback);
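
/*
 * Example (editor's illustration, not part of this file): the submission
 * side of writeout.  set_page_writeback() is the usual wrapper around
 * test_set_page_writeback().  example_submit_io is hypothetical and must
 * end with end_page_writeback() when the I/O completes.
 */
static void example_submit_io(struct page *page);	/* hypothetical */

static int example_writepage(struct page *page, struct writeback_control *wbc)
{
	/* The caller has locked the page and run clear_page_dirty_for_io(). */
	set_page_writeback(page);
	unlock_page(page);
	example_submit_io(page);
	return 0;
}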
  
  /*
   * Return true if any of the pages in the mapping are marked with the
   * passed tag.
   */
  int mapping_tagged(struct address_space *mapping, int tag)
  {
  	int ret;
  	rcu_read_lock();
  	ret = radix_tree_tagged(&mapping->page_tree, tag);
  	rcu_read_unlock();
  	return ret;
  }
  EXPORT_SYMBOL(mapping_tagged);
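
/*
 * Example (editor's illustration, not part of this file): a cheap early
 * exit in a sync path when a mapping has nothing dirty and nothing under
 * writeback.  example_sync_mapping is a hypothetical helper.
 */
static int example_sync_mapping(struct address_space *mapping)
{
	if (!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) &&
	    !mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK))
		return 0;	/* nothing to flush or wait for */
	return filemap_write_and_wait(mapping);
}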