fs/fs-writeback.c 74.6 KB
  // SPDX-License-Identifier: GPL-2.0-only
  /*
   * fs/fs-writeback.c
   *
   * Copyright (C) 2002, Linus Torvalds.
   *
   * Contains all the functions related to writing back and waiting
   * upon dirty inodes against superblocks, and writing back dirty
   * pages against inodes.  ie: data writeback.  Writeout of the
   * inode itself is not handled here.
   *
   * 10Apr2002	Andrew Morton
   *		Split out of fs/inode.c
   *		Additions for address_space-based writeback
   */
  
  #include <linux/kernel.h>
  #include <linux/export.h>
  #include <linux/spinlock.h>
  #include <linux/slab.h>
  #include <linux/sched.h>
  #include <linux/fs.h>
  #include <linux/mm.h>
  #include <linux/pagemap.h>
  #include <linux/kthread.h>
  #include <linux/writeback.h>
  #include <linux/blkdev.h>
  #include <linux/backing-dev.h>
  #include <linux/tracepoint.h>
  #include <linux/device.h>
  #include <linux/memcontrol.h>
  #include "internal.h"

  /*
   * 4MB minimal write chunk size
   */
  #define MIN_WRITEBACK_PAGES	(4096UL >> (PAGE_SHIFT - 10))
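/*
 * For reference (illustrative, assuming 4KiB pages, i.e. PAGE_SHIFT == 12):
 * 4096UL >> (12 - 10) == 1024 pages, which is the 4MB chunk noted above.
 */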
  
  /*
   * Passed into wb_writeback(), essentially a subset of writeback_control
   */
  struct wb_writeback_work {
  	long nr_pages;
  	struct super_block *sb;
  	unsigned long *older_than_this;
  	enum writeback_sync_modes sync_mode;
  	unsigned int tagged_writepages:1;
  	unsigned int for_kupdate:1;
  	unsigned int range_cyclic:1;
  	unsigned int for_background:1;
  	unsigned int for_sync:1;	/* sync(2) WB_SYNC_ALL writeback */
  	unsigned int auto_free:1;	/* free on completion */
  	enum wb_reason reason;		/* why was writeback initiated? */

  	struct list_head list;		/* pending work list */
  	struct wb_completion *done;	/* set if the caller waits */
  };
  /*
   * If an inode is constantly having its pages dirtied, but then the
   * updates stop dirtytime_expire_interval seconds in the past, it's
   * possible for the worst case time between when an inode has its
   * timestamps updated and when they finally get written out to be two
   * dirtytime_expire_intervals.  We set the default to 12 hours (in
   * seconds), which means most of the time inodes will have their
   * timestamps written to disk after 12 hours, but in the worst case a
 * few inodes might not have their timestamps updated for 24 hours.
   */
  unsigned int dirtytime_expire_interval = 12 * 60 * 60;
  static inline struct inode *wb_inode(struct list_head *head)
  {
  	return list_entry(head, struct inode, i_io_list);
  }
  /*
   * Include the creation of the trace points after defining the
   * wb_writeback_work structure and inline functions so that the definition
   * remains local to this file.
   */
  #define CREATE_TRACE_POINTS
  #include <trace/events/writeback.h>
  EXPORT_TRACEPOINT_SYMBOL_GPL(wbc_writepage);
  static bool wb_io_lists_populated(struct bdi_writeback *wb)
  {
  	if (wb_has_dirty_io(wb)) {
  		return false;
  	} else {
  		set_bit(WB_has_dirty_io, &wb->state);
  		WARN_ON_ONCE(!wb->avg_write_bandwidth);
  		atomic_long_add(wb->avg_write_bandwidth,
  				&wb->bdi->tot_write_bandwidth);
  		return true;
  	}
  }
  
  static void wb_io_lists_depopulated(struct bdi_writeback *wb)
  {
  	if (wb_has_dirty_io(wb) && list_empty(&wb->b_dirty) &&
  	    list_empty(&wb->b_io) && list_empty(&wb->b_more_io)) {
  		clear_bit(WB_has_dirty_io, &wb->state);
  		WARN_ON_ONCE(atomic_long_sub_return(wb->avg_write_bandwidth,
  					&wb->bdi->tot_write_bandwidth) < 0);
  	}
  }
  
  /**
   * inode_io_list_move_locked - move an inode onto a bdi_writeback IO list
   * @inode: inode to be moved
   * @wb: target bdi_writeback
   * @head: one of @wb->b_{dirty|io|more_io|dirty_time}
   *
 * Move @inode->i_io_list to @head of @wb and set %WB_has_dirty_io.
   * Returns %true if @inode is the first occupant of the !dirty_time IO
   * lists; otherwise, %false.
   */
  static bool inode_io_list_move_locked(struct inode *inode,
  				      struct bdi_writeback *wb,
  				      struct list_head *head)
  {
  	assert_spin_locked(&wb->list_lock);
  	list_move(&inode->i_io_list, head);
  
  	/* dirty_time doesn't count as dirty_io until expiration */
  	if (head != &wb->b_dirty_time)
  		return wb_io_lists_populated(wb);
  
  	wb_io_lists_depopulated(wb);
  	return false;
  }
  
  /**
   * inode_io_list_del_locked - remove an inode from its bdi_writeback IO list
   * @inode: inode to be removed
   * @wb: bdi_writeback @inode is being removed from
   *
   * Remove @inode which may be on one of @wb->b_{dirty|io|more_io} lists and
   * clear %WB_has_dirty_io if all are empty afterwards.
   */
  static void inode_io_list_del_locked(struct inode *inode,
  				     struct bdi_writeback *wb)
  {
  	assert_spin_locked(&wb->list_lock);
  	list_del_init(&inode->i_io_list);
  	wb_io_lists_depopulated(wb);
  }
  static void wb_wakeup(struct bdi_writeback *wb)
  {
  	spin_lock_bh(&wb->work_lock);
  	if (test_bit(WB_registered, &wb->state))
  		mod_delayed_work(bdi_wq, &wb->dwork, 0);
  	spin_unlock_bh(&wb->work_lock);
  }
  static void finish_writeback_work(struct bdi_writeback *wb,
  				  struct wb_writeback_work *work)
  {
  	struct wb_completion *done = work->done;
  
  	if (work->auto_free)
  		kfree(work);
  	if (done) {
  		wait_queue_head_t *waitq = done->waitq;
  
  		/* @done can't be accessed after the following dec */
  		if (atomic_dec_and_test(&done->cnt))
  			wake_up_all(waitq);
  	}
  }
  static void wb_queue_work(struct bdi_writeback *wb,
  			  struct wb_writeback_work *work)
  {
  	trace_writeback_queue(wb, work);

  	if (work->done)
  		atomic_inc(&work->done->cnt);
  
  	spin_lock_bh(&wb->work_lock);
  
  	if (test_bit(WB_registered, &wb->state)) {
  		list_add_tail(&work->list, &wb->work_list);
  		mod_delayed_work(bdi_wq, &wb->dwork, 0);
  	} else
  		finish_writeback_work(wb, work);
  	spin_unlock_bh(&wb->work_lock);
  }
  /**
   * wb_wait_for_completion - wait for completion of bdi_writeback_works
   * @done: target wb_completion
   *
 * Wait for one or more work items issued to a bdi with their ->done field
   * set to @done, which should have been initialized with
   * DEFINE_WB_COMPLETION().  This function returns after all such work items
   * are completed.  Work items which are waited upon aren't freed
   * automatically on completion.
   */
  void wb_wait_for_completion(struct wb_completion *done)
  {
  	atomic_dec(&done->cnt);		/* put down the initial count */
  	wait_event(*done->waitq, !atomic_read(&done->cnt));
  }
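
/*
 * Typical usage, a sketch mirroring the on-stack fallback path in
 * bdi_split_work_to_wbs() below (names here are illustrative):
 *
 *	DEFINE_WB_COMPLETION(done, bdi);
 *	struct wb_writeback_work work = { .done = &done, ... };
 *
 *	wb_queue_work(wb, &work);
 *	wb_wait_for_completion(&done);
 */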
  #ifdef CONFIG_CGROUP_WRITEBACK
  /*
   * Parameters for foreign inode detection, see wbc_detach_inode() to see
   * how they're used.
   *
 * These parameters are inherently heuristic as the detection target
 * itself is fuzzy.  All we want to do is detach an inode from its
 * current owner if it's being written to too much by some other cgroup.
   *
   * The current cgroup writeback is built on the assumption that multiple
   * cgroups writing to the same inode concurrently is very rare and a mode
   * of operation which isn't well supported.  As such, the goal is not
   * taking too long when a different cgroup takes over an inode while
   * avoiding too aggressive flip-flops from occasional foreign writes.
   *
   * We record, very roughly, 2s worth of IO time history and if more than
   * half of that is foreign, trigger the switch.  The recording is quantized
   * to 16 slots.  To avoid tiny writes from swinging the decision too much,
   * writes smaller than 1/8 of avg size are ignored.
   */
#define WB_FRN_TIME_SHIFT	13	/* 1s = 2^13, up to 8 secs w/ 16bit */
  #define WB_FRN_TIME_AVG_SHIFT	3	/* avg = avg * 7/8 + new * 1/8 */
  #define WB_FRN_TIME_CUT_DIV	8	/* ignore rounds < avg / 8 */
  #define WB_FRN_TIME_PERIOD	(2 * (1 << WB_FRN_TIME_SHIFT))	/* 2s */
  
  #define WB_FRN_HIST_SLOTS	16	/* inode->i_wb_frn_history is 16bit */
  #define WB_FRN_HIST_UNIT	(WB_FRN_TIME_PERIOD / WB_FRN_HIST_SLOTS)
  					/* each slot's duration is 2s / 16 */
  #define WB_FRN_HIST_THR_SLOTS	(WB_FRN_HIST_SLOTS / 2)
  					/* if foreign slots >= 8, switch */
  #define WB_FRN_HIST_MAX_SLOTS	(WB_FRN_HIST_THR_SLOTS / 2 + 1)
					/* one round can affect up to 5 slots */
  #define WB_FRN_MAX_IN_FLIGHT	1024	/* don't queue too many concurrently */
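
/*
 * For reference, the definitions above work out to: WB_FRN_TIME_PERIOD ==
 * 2 << 13 == 16384 time units (~2s), WB_FRN_HIST_UNIT == 16384 / 16 == 1024
 * units (~125ms per history slot), the switch threshold
 * WB_FRN_HIST_THR_SLOTS == 8 slots (~1s worth of foreign IO time), and a
 * single round can shift in at most WB_FRN_HIST_MAX_SLOTS == 5 slots.
 */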

  static atomic_t isw_nr_in_flight = ATOMIC_INIT(0);
  static struct workqueue_struct *isw_wq;
  void __inode_attach_wb(struct inode *inode, struct page *page)
  {
  	struct backing_dev_info *bdi = inode_to_bdi(inode);
  	struct bdi_writeback *wb = NULL;
  
  	if (inode_cgwb_enabled(inode)) {
  		struct cgroup_subsys_state *memcg_css;
  
  		if (page) {
  			memcg_css = mem_cgroup_css_from_page(page);
  			wb = wb_get_create(bdi, memcg_css, GFP_ATOMIC);
  		} else {
  			/* must pin memcg_css, see wb_get_create() */
  			memcg_css = task_get_css(current, memory_cgrp_id);
  			wb = wb_get_create(bdi, memcg_css, GFP_ATOMIC);
  			css_put(memcg_css);
  		}
  	}
  
  	if (!wb)
  		wb = &bdi->wb;
  
  	/*
  	 * There may be multiple instances of this function racing to
  	 * update the same inode.  Use cmpxchg() to tell the winner.
  	 */
  	if (unlikely(cmpxchg(&inode->i_wb, NULL, wb)))
  		wb_put(wb);
  }
  EXPORT_SYMBOL_GPL(__inode_attach_wb);

  /**
   * locked_inode_to_wb_and_lock_list - determine a locked inode's wb and lock it
   * @inode: inode of interest with i_lock held
   *
   * Returns @inode's wb with its list_lock held.  @inode->i_lock must be
   * held on entry and is released on return.  The returned wb is guaranteed
   * to stay @inode's associated wb until its list_lock is released.
   */
  static struct bdi_writeback *
  locked_inode_to_wb_and_lock_list(struct inode *inode)
  	__releases(&inode->i_lock)
  	__acquires(&wb->list_lock)
  {
  	while (true) {
  		struct bdi_writeback *wb = inode_to_wb(inode);
  
  		/*
  		 * inode_to_wb() association is protected by both
  		 * @inode->i_lock and @wb->list_lock but list_lock nests
  		 * outside i_lock.  Drop i_lock and verify that the
  		 * association hasn't changed after acquiring list_lock.
  		 */
  		wb_get(wb);
  		spin_unlock(&inode->i_lock);
  		spin_lock(&wb->list_lock);

		/* i_wb may have changed in between, can't use inode_to_wb() */
  		if (likely(wb == inode->i_wb)) {
  			wb_put(wb);	/* @inode already has ref */
  			return wb;
  		}
  
  		spin_unlock(&wb->list_lock);
  		wb_put(wb);
  		cpu_relax();
  		spin_lock(&inode->i_lock);
  	}
  }
  
  /**
   * inode_to_wb_and_lock_list - determine an inode's wb and lock it
   * @inode: inode of interest
   *
   * Same as locked_inode_to_wb_and_lock_list() but @inode->i_lock isn't held
   * on entry.
   */
  static struct bdi_writeback *inode_to_wb_and_lock_list(struct inode *inode)
  	__acquires(&wb->list_lock)
  {
  	spin_lock(&inode->i_lock);
  	return locked_inode_to_wb_and_lock_list(inode);
  }
  struct inode_switch_wbs_context {
  	struct inode		*inode;
  	struct bdi_writeback	*new_wb;
  
  	struct rcu_head		rcu_head;
  	struct work_struct	work;
  };
  static void bdi_down_write_wb_switch_rwsem(struct backing_dev_info *bdi)
  {
  	down_write(&bdi->wb_switch_rwsem);
  }
  
  static void bdi_up_write_wb_switch_rwsem(struct backing_dev_info *bdi)
  {
  	up_write(&bdi->wb_switch_rwsem);
  }
  static void inode_switch_wbs_work_fn(struct work_struct *work)
  {
  	struct inode_switch_wbs_context *isw =
  		container_of(work, struct inode_switch_wbs_context, work);
  	struct inode *inode = isw->inode;
  	struct backing_dev_info *bdi = inode_to_bdi(inode);
  	struct address_space *mapping = inode->i_mapping;
  	struct bdi_writeback *old_wb = inode->i_wb;
  	struct bdi_writeback *new_wb = isw->new_wb;
  	XA_STATE(xas, &mapping->i_pages, 0);
  	struct page *page;
  	bool switched = false;
  
  	/*
  	 * If @inode switches cgwb membership while sync_inodes_sb() is
  	 * being issued, sync_inodes_sb() might miss it.  Synchronize.
  	 */
  	down_read(&bdi->wb_switch_rwsem);
  
  	/*
  	 * By the time control reaches here, RCU grace period has passed
  	 * since I_WB_SWITCH assertion and all wb stat update transactions
  	 * between unlocked_inode_to_wb_begin/end() are guaranteed to be
  	 * synchronizing against the i_pages lock.
  	 *
  	 * Grabbing old_wb->list_lock, inode->i_lock and the i_pages lock
  	 * gives us exclusion against all wb related operations on @inode
  	 * including IO list manipulations and stat updates.
  	 */
  	if (old_wb < new_wb) {
  		spin_lock(&old_wb->list_lock);
  		spin_lock_nested(&new_wb->list_lock, SINGLE_DEPTH_NESTING);
  	} else {
  		spin_lock(&new_wb->list_lock);
  		spin_lock_nested(&old_wb->list_lock, SINGLE_DEPTH_NESTING);
  	}
  	spin_lock(&inode->i_lock);
  	xa_lock_irq(&mapping->i_pages);
  
  	/*
  	 * Once I_FREEING is visible under i_lock, the eviction path owns
  	 * the inode and we shouldn't modify ->i_io_list.
  	 */
  	if (unlikely(inode->i_state & I_FREEING))
  		goto skip_switch;
  	trace_inode_switch_wbs(inode, old_wb, new_wb);
  	/*
  	 * Count and transfer stats.  Note that PAGECACHE_TAG_DIRTY points
  	 * to possibly dirty pages while PAGECACHE_TAG_WRITEBACK points to
  	 * pages actually under writeback.
  	 */
  	xas_for_each_marked(&xas, page, ULONG_MAX, PAGECACHE_TAG_DIRTY) {
  		if (PageDirty(page)) {
  			dec_wb_stat(old_wb, WB_RECLAIMABLE);
  			inc_wb_stat(new_wb, WB_RECLAIMABLE);
  		}
  	}
  	xas_set(&xas, 0);
  	xas_for_each_marked(&xas, page, ULONG_MAX, PAGECACHE_TAG_WRITEBACK) {
  		WARN_ON_ONCE(!PageWriteback(page));
  		dec_wb_stat(old_wb, WB_WRITEBACK);
  		inc_wb_stat(new_wb, WB_WRITEBACK);
  	}
  
  	wb_get(new_wb);
  
  	/*
  	 * Transfer to @new_wb's IO list if necessary.  The specific list
  	 * @inode was on is ignored and the inode is put on ->b_dirty which
  	 * is always correct including from ->b_dirty_time.  The transfer
  	 * preserves @inode->dirtied_when ordering.
  	 */
  	if (!list_empty(&inode->i_io_list)) {
  		struct inode *pos;
  		inode_io_list_del_locked(inode, old_wb);
  		inode->i_wb = new_wb;
  		list_for_each_entry(pos, &new_wb->b_dirty, i_io_list)
  			if (time_after_eq(inode->dirtied_when,
  					  pos->dirtied_when))
  				break;
  		inode_io_list_move_locked(inode, new_wb, pos->i_io_list.prev);
  	} else {
  		inode->i_wb = new_wb;
  	}

  	/* ->i_wb_frn updates may race wbc_detach_inode() but doesn't matter */
  	inode->i_wb_frn_winner = 0;
  	inode->i_wb_frn_avg_time = 0;
  	inode->i_wb_frn_history = 0;
  	switched = true;
  skip_switch:
  	/*
  	 * Paired with load_acquire in unlocked_inode_to_wb_begin() and
  	 * ensures that the new wb is visible if they see !I_WB_SWITCH.
  	 */
  	smp_store_release(&inode->i_state, inode->i_state & ~I_WB_SWITCH);
  	xa_unlock_irq(&mapping->i_pages);
  	spin_unlock(&inode->i_lock);
  	spin_unlock(&new_wb->list_lock);
  	spin_unlock(&old_wb->list_lock);

  	up_read(&bdi->wb_switch_rwsem);
  	if (switched) {
  		wb_wakeup(new_wb);
  		wb_put(old_wb);
  	}
  	wb_put(new_wb);
  
  	iput(inode);
  	kfree(isw);
  
  	atomic_dec(&isw_nr_in_flight);
  }
  
  static void inode_switch_wbs_rcu_fn(struct rcu_head *rcu_head)
  {
  	struct inode_switch_wbs_context *isw = container_of(rcu_head,
  				struct inode_switch_wbs_context, rcu_head);
  
  	/* needs to grab bh-unsafe locks, bounce to work item */
  	INIT_WORK(&isw->work, inode_switch_wbs_work_fn);
  	queue_work(isw_wq, &isw->work);
  }
  
  /**
   * inode_switch_wbs - change the wb association of an inode
   * @inode: target inode
   * @new_wb_id: ID of the new wb
   *
   * Switch @inode's wb association to the wb identified by @new_wb_id.  The
   * switching is performed asynchronously and may fail silently.
   */
  static void inode_switch_wbs(struct inode *inode, int new_wb_id)
  {
  	struct backing_dev_info *bdi = inode_to_bdi(inode);
  	struct cgroup_subsys_state *memcg_css;
  	struct inode_switch_wbs_context *isw;
  
  	/* noop if seems to be already in progress */
  	if (inode->i_state & I_WB_SWITCH)
  		return;
  	/* avoid queueing a new switch if too many are already in flight */
  	if (atomic_read(&isw_nr_in_flight) > WB_FRN_MAX_IN_FLIGHT)
  		return;
  	isw = kzalloc(sizeof(*isw), GFP_ATOMIC);
  	if (!isw)
  		return;
  
  	/* find and pin the new wb */
  	rcu_read_lock();
  	memcg_css = css_from_id(new_wb_id, &memory_cgrp_subsys);
  	if (memcg_css)
  		isw->new_wb = wb_get_create(bdi, memcg_css, GFP_ATOMIC);
  	rcu_read_unlock();
  	if (!isw->new_wb)
  		goto out_free;
  
  	/* while holding I_WB_SWITCH, no one else can update the association */
  	spin_lock(&inode->i_lock);
  	if (!(inode->i_sb->s_flags & SB_ACTIVE) ||
  	    inode->i_state & (I_WB_SWITCH | I_FREEING) ||
  	    inode_to_wb(inode) == isw->new_wb) {
  		spin_unlock(&inode->i_lock);
  		goto out_free;
  	}
  	inode->i_state |= I_WB_SWITCH;
  	__iget(inode);
  	spin_unlock(&inode->i_lock);
  	isw->inode = inode;
  
  	/*
  	 * In addition to synchronizing among switchers, I_WB_SWITCH tells
	 * the RCU protected stat update paths to grab the i_pages
  	 * lock so that stat transfer can synchronize against them.
  	 * Let's continue after I_WB_SWITCH is guaranteed to be visible.
  	 */
  	call_rcu(&isw->rcu_head, inode_switch_wbs_rcu_fn);
  
  	atomic_inc(&isw_nr_in_flight);
  	return;
  
  out_free:
  	if (isw->new_wb)
  		wb_put(isw->new_wb);
  	kfree(isw);
  }
  /**
   * wbc_attach_and_unlock_inode - associate wbc with target inode and unlock it
   * @wbc: writeback_control of interest
   * @inode: target inode
   *
   * @inode is locked and about to be written back under the control of @wbc.
   * Record @inode's writeback context into @wbc and unlock the i_lock.  On
   * writeback completion, wbc_detach_inode() should be called.  This is used
   * to track the cgroup writeback context.
   */
  void wbc_attach_and_unlock_inode(struct writeback_control *wbc,
  				 struct inode *inode)
  {
  	if (!inode_cgwb_enabled(inode)) {
  		spin_unlock(&inode->i_lock);
  		return;
  	}
  	wbc->wb = inode_to_wb(inode);
  	wbc->inode = inode;
  
  	wbc->wb_id = wbc->wb->memcg_css->id;
  	wbc->wb_lcand_id = inode->i_wb_frn_winner;
  	wbc->wb_tcand_id = 0;
  	wbc->wb_bytes = 0;
  	wbc->wb_lcand_bytes = 0;
  	wbc->wb_tcand_bytes = 0;
  	wb_get(wbc->wb);
  	spin_unlock(&inode->i_lock);
  
  	/*
  	 * A dying wb indicates that either the blkcg associated with the
  	 * memcg changed or the associated memcg is dying.  In the first
  	 * case, a replacement wb should already be available and we should
  	 * refresh the wb immediately.  In the second case, trying to
  	 * refresh will keep failing.
  	 */
  	if (unlikely(wb_dying(wbc->wb) && !css_is_dying(wbc->wb->memcg_css)))
  		inode_switch_wbs(inode, wbc->wb_id);
  }
  EXPORT_SYMBOL_GPL(wbc_attach_and_unlock_inode);
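
/*
 * A minimal caller sketch (illustrative, not taken verbatim from any one
 * call site): the writeback path locks the inode, attaches the wbc, writes
 * pages back while accounting them, then detaches:
 *
 *	spin_lock(&inode->i_lock);
 *	wbc_attach_and_unlock_inode(&wbc, inode);
 *	... write back pages, calling wbc_account_cgroup_owner() per page ...
 *	wbc_detach_inode(&wbc);
 */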
  
  /**
   * wbc_detach_inode - disassociate wbc from inode and perform foreign detection
   * @wbc: writeback_control of the just finished writeback
   *
   * To be called after a writeback attempt of an inode finishes and undoes
   * wbc_attach_and_unlock_inode().  Can be called under any context.
   *
   * As concurrent write sharing of an inode is expected to be very rare and
 * memcg only tracks page ownership on a first-use basis, severely confining
   * the usefulness of such sharing, cgroup writeback tracks ownership
   * per-inode.  While the support for concurrent write sharing of an inode
   * is deemed unnecessary, an inode being written to by different cgroups at
   * different points in time is a lot more common, and, more importantly,
   * charging only by first-use can too readily lead to grossly incorrect
   * behaviors (single foreign page can lead to gigabytes of writeback to be
   * incorrectly attributed).
   *
   * To resolve this issue, cgroup writeback detects the majority dirtier of
 * an inode and transfers the ownership to it.  To avoid unnecessary
   * oscillation, the detection mechanism keeps track of history and gives
   * out the switch verdict only if the foreign usage pattern is stable over
   * a certain amount of time and/or writeback attempts.
   *
   * On each writeback attempt, @wbc tries to detect the majority writer
   * using Boyer-Moore majority vote algorithm.  In addition to the byte
   * count from the majority voting, it also counts the bytes written for the
   * current wb and the last round's winner wb (max of last round's current
   * wb, the winner from two rounds ago, and the last round's majority
   * candidate).  Keeping track of the historical winner helps the algorithm
   * to semi-reliably detect the most active writer even when it's not the
   * absolute majority.
   *
   * Once the winner of the round is determined, whether the winner is
   * foreign or not and how much IO time the round consumed is recorded in
   * inode->i_wb_frn_history.  If the amount of recorded foreign IO time is
   * over a certain threshold, the switch verdict is given.
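 *
 * As a rough, illustrative example (numbers hypothetical): a round in which
 * the winner wrote 4MB while wb->avg_write_bandwidth is 8192 pages/s (with
 * 4KiB pages) consumes (1024 << WB_FRN_TIME_SHIFT) / 8192 == 1024 time
 * units, i.e. about 125ms or one WB_FRN_HIST_UNIT slot of the 16 bit
 * history.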
   */
  void wbc_detach_inode(struct writeback_control *wbc)
  {
  	struct bdi_writeback *wb = wbc->wb;
  	struct inode *inode = wbc->inode;
  	unsigned long avg_time, max_bytes, max_time;
  	u16 history;
  	int max_id;
  	if (!wb)
  		return;
  
  	history = inode->i_wb_frn_history;
  	avg_time = inode->i_wb_frn_avg_time;
  	/* pick the winner of this round */
  	if (wbc->wb_bytes >= wbc->wb_lcand_bytes &&
  	    wbc->wb_bytes >= wbc->wb_tcand_bytes) {
  		max_id = wbc->wb_id;
  		max_bytes = wbc->wb_bytes;
  	} else if (wbc->wb_lcand_bytes >= wbc->wb_tcand_bytes) {
  		max_id = wbc->wb_lcand_id;
  		max_bytes = wbc->wb_lcand_bytes;
  	} else {
  		max_id = wbc->wb_tcand_id;
  		max_bytes = wbc->wb_tcand_bytes;
  	}
  
  	/*
  	 * Calculate the amount of IO time the winner consumed and fold it
  	 * into the running average kept per inode.  If the consumed IO
	 * time is lower than avg / WB_FRN_TIME_CUT_DIV, ignore it for
  	 * deciding whether to switch or not.  This is to prevent one-off
  	 * small dirtiers from skewing the verdict.
  	 */
  	max_time = DIV_ROUND_UP((max_bytes >> PAGE_SHIFT) << WB_FRN_TIME_SHIFT,
  				wb->avg_write_bandwidth);
  	if (avg_time)
  		avg_time += (max_time >> WB_FRN_TIME_AVG_SHIFT) -
  			    (avg_time >> WB_FRN_TIME_AVG_SHIFT);
  	else
  		avg_time = max_time;	/* immediate catch up on first run */
  
  	if (max_time >= avg_time / WB_FRN_TIME_CUT_DIV) {
  		int slots;
  
  		/*
  		 * The switch verdict is reached if foreign wb's consume
  		 * more than a certain proportion of IO time in a
  		 * WB_FRN_TIME_PERIOD.  This is loosely tracked by 16 slot
  		 * history mask where each bit represents one sixteenth of
  		 * the period.  Determine the number of slots to shift into
  		 * history from @max_time.
  		 */
  		slots = min(DIV_ROUND_UP(max_time, WB_FRN_HIST_UNIT),
  			    (unsigned long)WB_FRN_HIST_MAX_SLOTS);
  		history <<= slots;
  		if (wbc->wb_id != max_id)
  			history |= (1U << slots) - 1;
  		if (history)
  			trace_inode_foreign_history(inode, wbc, history);
  		/*
  		 * Switch if the current wb isn't the consistent winner.
  		 * If there are multiple closely competing dirtiers, the
  		 * inode may switch across them repeatedly over time, which
  		 * is okay.  The main goal is avoiding keeping an inode on
  		 * the wrong wb for an extended period of time.
  		 */
  		if (hweight32(history) > WB_FRN_HIST_THR_SLOTS)
  			inode_switch_wbs(inode, max_id);
  	}
  
  	/*
  	 * Multiple instances of this function may race to update the
	 * following fields but we don't mind occasional inaccuracies.
  	 */
  	inode->i_wb_frn_winner = max_id;
  	inode->i_wb_frn_avg_time = min(avg_time, (unsigned long)U16_MAX);
  	inode->i_wb_frn_history = history;
  	wb_put(wbc->wb);
  	wbc->wb = NULL;
  }
  EXPORT_SYMBOL_GPL(wbc_detach_inode);
  
  /**
   * wbc_account_cgroup_owner - account writeback to update inode cgroup ownership
   * @wbc: writeback_control of the writeback in progress
   * @page: page being written out
   * @bytes: number of bytes being written out
   *
 * @bytes from @page are about to be written out during the writeback
   * controlled by @wbc.  Keep the book for foreign inode detection.  See
   * wbc_detach_inode().
   */
  void wbc_account_cgroup_owner(struct writeback_control *wbc, struct page *page,
  			      size_t bytes)
  {
  	struct cgroup_subsys_state *css;
  	int id;
  
  	/*
  	 * pageout() path doesn't attach @wbc to the inode being written
  	 * out.  This is intentional as we don't want the function to block
  	 * behind a slow cgroup.  Ultimately, we want pageout() to kick off
  	 * regular writeback instead of writing things out itself.
  	 */
  	if (!wbc->wb || wbc->no_cgroup_owner)
  		return;
  	css = mem_cgroup_css_from_page(page);
  	/* dead cgroups shouldn't contribute to inode ownership arbitration */
  	if (!(css->flags & CSS_ONLINE))
  		return;
  
  	id = css->id;
  
  	if (id == wbc->wb_id) {
  		wbc->wb_bytes += bytes;
  		return;
  	}
  
  	if (id == wbc->wb_lcand_id)
  		wbc->wb_lcand_bytes += bytes;
  
  	/* Boyer-Moore majority vote algorithm */
  	if (!wbc->wb_tcand_bytes)
  		wbc->wb_tcand_id = id;
  	if (id == wbc->wb_tcand_id)
  		wbc->wb_tcand_bytes += bytes;
  	else
  		wbc->wb_tcand_bytes -= min(bytes, wbc->wb_tcand_bytes);
  }
  EXPORT_SYMBOL_GPL(wbc_account_cgroup_owner);
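
/*
 * Illustrative walk-through of the vote above (cgroup names hypothetical):
 * with the current wb owned by cgroup C, foreign writes accounted in the
 * order A:8k, B:4k, A:8k leave wb_tcand_id == A and wb_tcand_bytes == 12k
 * (8k, then 8k - 4k == 4k, then 4k + 8k == 12k), so A ends the round as the
 * majority-vote candidate while C's own bytes accumulate in wb_bytes.
 */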
  
  /**
   * inode_congested - test whether an inode is congested
   * @inode: inode to test for congestion (may be NULL)
   * @cong_bits: mask of WB_[a]sync_congested bits to test
   *
   * Tests whether @inode is congested.  @cong_bits is the mask of congestion
   * bits to test and the return value is the mask of set bits.
   *
   * If cgroup writeback is enabled for @inode, the congestion state is
   * determined by whether the cgwb (cgroup bdi_writeback) for the blkcg
   * associated with @inode is congested; otherwise, the root wb's congestion
   * state is used.
   *
   * @inode is allowed to be NULL as this function is often called on
   * mapping->host which is NULL for the swapper space.
   */
  int inode_congested(struct inode *inode, int cong_bits)
  {
  	/*
  	 * Once set, ->i_wb never becomes NULL while the inode is alive.
  	 * Start transaction iff ->i_wb is visible.
  	 */
  	if (inode && inode_to_wb_is_valid(inode)) {
  		struct bdi_writeback *wb;
  		struct wb_lock_cookie lock_cookie = {};
  		bool congested;

  		wb = unlocked_inode_to_wb_begin(inode, &lock_cookie);
  		congested = wb_congested(wb, cong_bits);
  		unlocked_inode_to_wb_end(inode, &lock_cookie);
  		return congested;
  	}
  
  	return wb_congested(&inode_to_bdi(inode)->wb, cong_bits);
  }
  EXPORT_SYMBOL_GPL(inode_congested);
  /**
   * wb_split_bdi_pages - split nr_pages to write according to bandwidth
   * @wb: target bdi_writeback to split @nr_pages to
   * @nr_pages: number of pages to write for the whole bdi
   *
   * Split @wb's portion of @nr_pages according to @wb's write bandwidth in
   * relation to the total write bandwidth of all wb's w/ dirty inodes on
   * @wb->bdi.
   */
  static long wb_split_bdi_pages(struct bdi_writeback *wb, long nr_pages)
  {
  	unsigned long this_bw = wb->avg_write_bandwidth;
  	unsigned long tot_bw = atomic_long_read(&wb->bdi->tot_write_bandwidth);
  
  	if (nr_pages == LONG_MAX)
  		return LONG_MAX;
  
  	/*
  	 * This may be called on clean wb's and proportional distribution
  	 * may not make sense, just use the original @nr_pages in those
	 * cases.  In general, we want to err on the side of writing more.
  	 */
  	if (!tot_bw || this_bw >= tot_bw)
  		return nr_pages;
  	else
  		return DIV_ROUND_UP_ULL((u64)nr_pages * this_bw, tot_bw);
  }
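
/*
 * Worked example (illustrative numbers only): for nr_pages == 1024 with
 * this_bw == 25600 pages/s and tot_bw == 102400 pages/s, the proportional
 * share is DIV_ROUND_UP(1024 * 25600, 102400) == 256 pages for this wb.
 */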
  /**
   * bdi_split_work_to_wbs - split a wb_writeback_work to all wb's of a bdi
   * @bdi: target backing_dev_info
   * @base_work: wb_writeback_work to issue
   * @skip_if_busy: skip wb's which already have writeback in progress
   *
   * Split and issue @base_work to all wb's (bdi_writeback's) of @bdi which
 * have dirty inodes.  If @base_work->nr_pages isn't %LONG_MAX, it's
   * distributed to the busy wbs according to each wb's proportion in the
   * total active write bandwidth of @bdi.
   */
  static void bdi_split_work_to_wbs(struct backing_dev_info *bdi,
  				  struct wb_writeback_work *base_work,
  				  bool skip_if_busy)
  {
  	struct bdi_writeback *last_wb = NULL;
  	struct bdi_writeback *wb = list_entry(&bdi->wb_list,
  					      struct bdi_writeback, bdi_node);
  
  	might_sleep();
  restart:
  	rcu_read_lock();
  	list_for_each_entry_continue_rcu(wb, &bdi->wb_list, bdi_node) {
  		DEFINE_WB_COMPLETION(fallback_work_done, bdi);
  		struct wb_writeback_work fallback_work;
  		struct wb_writeback_work *work;
  		long nr_pages;
  		if (last_wb) {
  			wb_put(last_wb);
  			last_wb = NULL;
  		}
  		/* SYNC_ALL writes out I_DIRTY_TIME too */
  		if (!wb_has_dirty_io(wb) &&
  		    (base_work->sync_mode == WB_SYNC_NONE ||
  		     list_empty(&wb->b_dirty_time)))
  			continue;
  		if (skip_if_busy && writeback_in_progress(wb))
  			continue;
  		nr_pages = wb_split_bdi_pages(wb, base_work->nr_pages);
  
  		work = kmalloc(sizeof(*work), GFP_ATOMIC);
  		if (work) {
  			*work = *base_work;
  			work->nr_pages = nr_pages;
  			work->auto_free = 1;
  			wb_queue_work(wb, work);
  			continue;
  		}
  
  		/* alloc failed, execute synchronously using on-stack fallback */
  		work = &fallback_work;
  		*work = *base_work;
  		work->nr_pages = nr_pages;
  		work->auto_free = 0;
  		work->done = &fallback_work_done;
  
  		wb_queue_work(wb, work);
  		/*
  		 * Pin @wb so that it stays on @bdi->wb_list.  This allows
  		 * continuing iteration from @wb after dropping and
  		 * regrabbing rcu read lock.
  		 */
  		wb_get(wb);
  		last_wb = wb;
  		rcu_read_unlock();
  		wb_wait_for_completion(&fallback_work_done);
  		goto restart;
  	}
  	rcu_read_unlock();
  
  	if (last_wb)
  		wb_put(last_wb);
  }
  /**
   * cgroup_writeback_by_id - initiate cgroup writeback from bdi and memcg IDs
   * @bdi_id: target bdi id
   * @memcg_id: target memcg css id
   * @nr: number of pages to write, 0 for best-effort dirty flushing
 * @reason: reason why some writeback work was initiated
   * @done: target wb_completion
   *
   * Initiate flush of the bdi_writeback identified by @bdi_id and @memcg_id
   * with the specified parameters.
   */
  int cgroup_writeback_by_id(u64 bdi_id, int memcg_id, unsigned long nr,
  			   enum wb_reason reason, struct wb_completion *done)
  {
  	struct backing_dev_info *bdi;
  	struct cgroup_subsys_state *memcg_css;
  	struct bdi_writeback *wb;
  	struct wb_writeback_work *work;
  	int ret;
  
  	/* lookup bdi and memcg */
  	bdi = bdi_get_by_id(bdi_id);
  	if (!bdi)
  		return -ENOENT;
  
  	rcu_read_lock();
  	memcg_css = css_from_id(memcg_id, &memory_cgrp_subsys);
  	if (memcg_css && !css_tryget(memcg_css))
  		memcg_css = NULL;
  	rcu_read_unlock();
  	if (!memcg_css) {
  		ret = -ENOENT;
  		goto out_bdi_put;
  	}
  
  	/*
  	 * And find the associated wb.  If the wb isn't there already
  	 * there's nothing to flush, don't create one.
  	 */
  	wb = wb_get_lookup(bdi, memcg_css);
  	if (!wb) {
  		ret = -ENOENT;
  		goto out_css_put;
  	}
  
  	/*
  	 * If @nr is zero, the caller is attempting to write out most of
  	 * the currently dirty pages.  Let's take the current dirty page
  	 * count and inflate it by 25% which should be large enough to
  	 * flush out most dirty pages while avoiding getting livelocked by
  	 * concurrent dirtiers.
  	 */
  	if (!nr) {
  		unsigned long filepages, headroom, dirty, writeback;
  
  		mem_cgroup_wb_stats(wb, &filepages, &headroom, &dirty,
  				      &writeback);
  		nr = dirty * 10 / 8;
  	}
  
  	/* issue the writeback work */
  	work = kzalloc(sizeof(*work), GFP_NOWAIT | __GFP_NOWARN);
  	if (work) {
  		work->nr_pages = nr;
  		work->sync_mode = WB_SYNC_NONE;
  		work->range_cyclic = 1;
  		work->reason = reason;
  		work->done = done;
  		work->auto_free = 1;
  		wb_queue_work(wb, work);
  		ret = 0;
  	} else {
  		ret = -ENOMEM;
  	}
  
  	wb_put(wb);
  out_css_put:
  	css_put(memcg_css);
  out_bdi_put:
  	bdi_put(bdi);
  	return ret;
  }
  
  /**
   * cgroup_writeback_umount - flush inode wb switches for umount
   *
   * This function is called when a super_block is about to be destroyed and
   * flushes in-flight inode wb switches.  An inode wb switch goes through
   * RCU and then workqueue, so the two need to be flushed in order to ensure
   * that all previously scheduled switches are finished.  As wb switches are
   * rare occurrences and synchronize_rcu() can take a while, perform
   * flushing iff wb switches are in flight.
   */
  void cgroup_writeback_umount(void)
  {
  	if (atomic_read(&isw_nr_in_flight)) {
  		/*
  		 * Use rcu_barrier() to wait for all pending callbacks to
  		 * ensure that all in-flight wb switches are in the workqueue.
  		 */
  		rcu_barrier();
  		flush_workqueue(isw_wq);
  	}
  }
  
  static int __init cgroup_writeback_init(void)
  {
  	isw_wq = alloc_workqueue("inode_switch_wbs", 0, 0);
  	if (!isw_wq)
  		return -ENOMEM;
  	return 0;
  }
  fs_initcall(cgroup_writeback_init);
  #else	/* CONFIG_CGROUP_WRITEBACK */
  static void bdi_down_write_wb_switch_rwsem(struct backing_dev_info *bdi) { }
  static void bdi_up_write_wb_switch_rwsem(struct backing_dev_info *bdi) { }
  static struct bdi_writeback *
  locked_inode_to_wb_and_lock_list(struct inode *inode)
  	__releases(&inode->i_lock)
  	__acquires(&wb->list_lock)
  {
  	struct bdi_writeback *wb = inode_to_wb(inode);
  
  	spin_unlock(&inode->i_lock);
  	spin_lock(&wb->list_lock);
  	return wb;
  }
  
  static struct bdi_writeback *inode_to_wb_and_lock_list(struct inode *inode)
  	__acquires(&wb->list_lock)
  {
  	struct bdi_writeback *wb = inode_to_wb(inode);
  
  	spin_lock(&wb->list_lock);
  	return wb;
  }
  static long wb_split_bdi_pages(struct bdi_writeback *wb, long nr_pages)
  {
  	return nr_pages;
  }
  static void bdi_split_work_to_wbs(struct backing_dev_info *bdi,
  				  struct wb_writeback_work *base_work,
  				  bool skip_if_busy)
  {
  	might_sleep();
  	if (!skip_if_busy || !writeback_in_progress(&bdi->wb)) {
  		base_work->auto_free = 0;
  		wb_queue_work(&bdi->wb, base_work);
  	}
  }
  #endif	/* CONFIG_CGROUP_WRITEBACK */
  /*
   * Add in the number of potentially dirty inodes, because each inode
   * write can dirty pagecache in the underlying blockdev.
   */
  static unsigned long get_nr_dirty_pages(void)
  {
  	return global_node_page_state(NR_FILE_DIRTY) +
  		global_node_page_state(NR_UNSTABLE_NFS) +
  		get_nr_dirty_inodes();
  }
  
  static void wb_start_writeback(struct bdi_writeback *wb, enum wb_reason reason)
  {
  	if (!wb_has_dirty_io(wb))
  		return;
  
  	/*
  	 * All callers of this function want to start writeback of all
  	 * dirty pages. Places like vmscan can call this at a very
  	 * high frequency, causing pointless allocations of tons of
  	 * work items and keeping the flusher threads busy retrieving
  	 * that work. Ensure that we only allow one of them pending and
	 * inflight at a time.
  	 */
  	if (test_bit(WB_start_all, &wb->state) ||
  	    test_and_set_bit(WB_start_all, &wb->state))
  		return;
  	wb->start_all_reason = reason;
  	wb_wakeup(wb);
  }

  /**
   * wb_start_background_writeback - start background writeback
 * @wb: bdi_writeback to write from
   *
   * Description:
   *   This makes sure WB_SYNC_NONE background writeback happens. When
   *   this function returns, it is only guaranteed that for given wb
   *   some IO is happening if we are over background dirty threshold.
   *   Caller need not hold sb s_umount semaphore.
   */
  void wb_start_background_writeback(struct bdi_writeback *wb)
  {
  	/*
  	 * We just wake up the flusher thread. It will perform background
  	 * writeback as soon as there is no other work to do.
  	 */
  	trace_writeback_wake_background(wb);
  	wb_wakeup(wb);
  }
  
  /*
   * Remove the inode from the writeback list it is on.
   */
  void inode_io_list_del(struct inode *inode)
  {
  	struct bdi_writeback *wb;

  	wb = inode_to_wb_and_lock_list(inode);
  	inode_io_list_del_locked(inode, wb);
  	spin_unlock(&wb->list_lock);
  }
  /*
   * mark an inode as under writeback on the sb
   */
  void sb_mark_inode_writeback(struct inode *inode)
  {
  	struct super_block *sb = inode->i_sb;
  	unsigned long flags;
  
  	if (list_empty(&inode->i_wb_list)) {
  		spin_lock_irqsave(&sb->s_inode_wblist_lock, flags);
  		if (list_empty(&inode->i_wb_list)) {
  			list_add_tail(&inode->i_wb_list, &sb->s_inodes_wb);
  			trace_sb_mark_inode_writeback(inode);
  		}
  		spin_unlock_irqrestore(&sb->s_inode_wblist_lock, flags);
  	}
  }
  
  /*
   * clear an inode as under writeback on the sb
   */
  void sb_clear_inode_writeback(struct inode *inode)
  {
  	struct super_block *sb = inode->i_sb;
  	unsigned long flags;
  
  	if (!list_empty(&inode->i_wb_list)) {
  		spin_lock_irqsave(&sb->s_inode_wblist_lock, flags);
  		if (!list_empty(&inode->i_wb_list)) {
  			list_del_init(&inode->i_wb_list);
  			trace_sb_clear_inode_writeback(inode);
  		}
  		spin_unlock_irqrestore(&sb->s_inode_wblist_lock, flags);
  	}
  }
  
  /*
   * Redirty an inode: set its when-it-was dirtied timestamp and move it to the
   * furthest end of its superblock's dirty-inode list.
   *
   * Before stamping the inode's ->dirtied_when, we check to see whether it is
   * already the most-recently-dirtied inode on the b_dirty list.  If that is
   * the case then the inode must have been redirtied while it was being written
   * out and we don't reset its dirtied_when.
   */
  static void redirty_tail(struct inode *inode, struct bdi_writeback *wb)
  {
  	if (!list_empty(&wb->b_dirty)) {
66f3b8e2e   Jens Axboe   writeback: move d...
1124
  		struct inode *tail;
6610a0bc8   Andrew Morton   writeback: fix ti...
1125

7ccf19a80   Nick Piggin   fs: inode split I...
1126
  		tail = wb_inode(wb->b_dirty.next);
66f3b8e2e   Jens Axboe   writeback: move d...
1127
  		if (time_before(inode->dirtied_when, tail->dirtied_when))
6610a0bc8   Andrew Morton   writeback: fix ti...
1128
1129
  			inode->dirtied_when = jiffies;
  	}
c7f540849   Dave Chinner   inode: rename i_w...
1130
  	inode_io_list_move_locked(inode, wb, &wb->b_dirty);
6610a0bc8   Andrew Morton   writeback: fix ti...
1131
1132
1133
  }
  
  /*
66f3b8e2e   Jens Axboe   writeback: move d...
1134
   * requeue inode for re-scanning after bdi->b_io list is exhausted.
c986d1e2a   Andrew Morton   writeback: fix ti...
1135
   */
f758eeabe   Christoph Hellwig   writeback: split ...
1136
  static void requeue_io(struct inode *inode, struct bdi_writeback *wb)
c986d1e2a   Andrew Morton   writeback: fix ti...
1137
  {
c7f540849   Dave Chinner   inode: rename i_w...
1138
  	inode_io_list_move_locked(inode, wb, &wb->b_more_io);
c986d1e2a   Andrew Morton   writeback: fix ti...
1139
  }
1c0eeaf56   Joern Engel   introduce I_SYNC
1140
1141
  static void inode_sync_complete(struct inode *inode)
  {
365b94ae6   Jan Kara   writeback: Move c...
1142
  	inode->i_state &= ~I_SYNC;
4eff96dd5   Jan Kara   writeback: put un...
1143
1144
  	/* If inode is clean an unused, put it into LRU now... */
  	inode_add_lru(inode);
365b94ae6   Jan Kara   writeback: Move c...
1145
  	/* Waiters must see I_SYNC cleared before being woken up */
1c0eeaf56   Joern Engel   introduce I_SYNC
1146
1147
1148
  	smp_mb();
  	wake_up_bit(&inode->i_state, __I_SYNC);
  }
d2caa3c54   Jeff Layton   writeback: guard ...
1149
1150
1151
1152
1153
1154
1155
1156
  static bool inode_dirtied_after(struct inode *inode, unsigned long t)
  {
  	bool ret = time_after(inode->dirtied_when, t);
  #ifndef CONFIG_64BIT
  	/*
  	 * For inodes being constantly redirtied, dirtied_when can get stuck.
  	 * It _appears_ to be in the future, but is actually in distant past.
  	 * This test is necessary to prevent such wrapped-around relative times
5b0830cb9   Jens Axboe   writeback: get ri...
1157
  	 * from permanently stopping the whole bdi writeback.
d2caa3c54   Jeff Layton   writeback: guard ...
1158
1159
1160
1161
1162
  	 */
  	ret = ret && time_before_eq(inode->dirtied_when, jiffies);
  #endif
  	return ret;
  }
0ae45f63d   Theodore Ts'o   vfs: add support ...
1163
  #define EXPIRE_DIRTY_ATIME 0x0001
/*
 * Move expired (dirtied before work->older_than_this) dirty inodes from
 * @delaying_queue to @dispatch_queue.
 */
static int move_expired_inodes(struct list_head *delaying_queue,
			       struct list_head *dispatch_queue,
			       int flags,
			       struct wb_writeback_work *work)
{
	unsigned long *older_than_this = NULL;
	unsigned long expire_time;
	LIST_HEAD(tmp);
	struct list_head *pos, *node;
	struct super_block *sb = NULL;
	struct inode *inode;
	int do_sb_sort = 0;
	int moved = 0;

	if ((flags & EXPIRE_DIRTY_ATIME) == 0)
		older_than_this = work->older_than_this;
	else if (!work->for_sync) {
		expire_time = jiffies - (dirtytime_expire_interval * HZ);
		older_than_this = &expire_time;
	}
	while (!list_empty(delaying_queue)) {
		inode = wb_inode(delaying_queue->prev);
		if (older_than_this &&
		    inode_dirtied_after(inode, *older_than_this))
			break;
		list_move(&inode->i_io_list, &tmp);
		moved++;
		if (flags & EXPIRE_DIRTY_ATIME)
			set_bit(__I_DIRTY_TIME_EXPIRED, &inode->i_state);
		if (sb_is_blkdev_sb(inode->i_sb))
			continue;
		if (sb && sb != inode->i_sb)
			do_sb_sort = 1;
		sb = inode->i_sb;
	}

	/* just one sb in list, splice to dispatch_queue and we're done */
	if (!do_sb_sort) {
		list_splice(&tmp, dispatch_queue);
		goto out;
	}

	/* Move inodes from one superblock together */
	while (!list_empty(&tmp)) {
		sb = wb_inode(tmp.prev)->i_sb;
		list_for_each_prev_safe(pos, node, &tmp) {
			inode = wb_inode(pos);
			if (inode->i_sb == sb)
				list_move(&inode->i_io_list, dispatch_queue);
		}
	}
out:
	return moved;
}

/*
 * Queue all expired dirty inodes for io, eldest first.
 * Before
 *         newly dirtied     b_dirty    b_io    b_more_io
 *         =============>    gf         edc     BA
 * After
 *         newly dirtied     b_dirty    b_io    b_more_io
 *         =============>    g          fBAedc
 *                                           |
 *                                           +--> dequeue for IO
 */
static void queue_io(struct bdi_writeback *wb, struct wb_writeback_work *work)
{
	int moved;

	assert_spin_locked(&wb->list_lock);
	list_splice_init(&wb->b_more_io, &wb->b_io);
	moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, 0, work);
	moved += move_expired_inodes(&wb->b_dirty_time, &wb->b_io,
				     EXPIRE_DIRTY_ATIME, work);
	if (moved)
		wb_io_lists_populated(wb);
	trace_writeback_queue_io(wb, work, moved);
}

static int write_inode(struct inode *inode, struct writeback_control *wbc)
{
	int ret;

	if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode)) {
		trace_writeback_write_inode_start(inode, wbc);
		ret = inode->i_sb->s_op->write_inode(inode, wbc);
		trace_writeback_write_inode(inode, wbc);
		return ret;
	}
	return 0;
}

/*
 * Wait for writeback on an inode to complete. Called with i_lock held.
 * Caller must make sure inode cannot go away when we drop i_lock.
 */
static void __inode_wait_for_writeback(struct inode *inode)
	__releases(inode->i_lock)
	__acquires(inode->i_lock)
{
	DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC);
	wait_queue_head_t *wqh;

	wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
	while (inode->i_state & I_SYNC) {
		spin_unlock(&inode->i_lock);
		__wait_on_bit(wqh, &wq, bit_wait,
			      TASK_UNINTERRUPTIBLE);
		spin_lock(&inode->i_lock);
	}
}

/*
 * Wait for writeback on an inode to complete. Caller must have inode pinned.
 */
void inode_wait_for_writeback(struct inode *inode)
{
	spin_lock(&inode->i_lock);
	__inode_wait_for_writeback(inode);
	spin_unlock(&inode->i_lock);
}

/*
 * Sleep until I_SYNC is cleared. This function must be called with i_lock
 * held and drops it. It is aimed for callers not holding any inode reference
 * so once i_lock is dropped, inode can go away.
 */
static void inode_sleep_on_writeback(struct inode *inode)
	__releases(inode->i_lock)
{
	DEFINE_WAIT(wait);
	wait_queue_head_t *wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
	int sleep;

	prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
	sleep = inode->i_state & I_SYNC;
	spin_unlock(&inode->i_lock);
	if (sleep)
		schedule();
	finish_wait(wqh, &wait);
}

/*
 * Find proper writeback list for the inode depending on its current state and
 * possibly also change of its state while we were doing writeback.  Here we
 * handle things such as livelock prevention or fairness of writeback among
 * inodes. This function can be called only by the flusher thread - no one else
 * processes all inodes in writeback lists and requeueing inodes behind the
 * flusher thread's back can have unexpected consequences.
 */
static void requeue_inode(struct inode *inode, struct bdi_writeback *wb,
			  struct writeback_control *wbc)
{
	if (inode->i_state & I_FREEING)
		return;

	/*
	 * Sync livelock prevention. Each inode is tagged and synced in one
	 * shot. If still dirty, it will be redirty_tail()'ed below.  Update
	 * the dirty time to prevent enqueue and sync it again.
	 */
	if ((inode->i_state & I_DIRTY) &&
	    (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages))
		inode->dirtied_when = jiffies;

	if (wbc->pages_skipped) {
		/*
		 * writeback is not making progress due to locked
		 * buffers. Skip this inode for now.
		 */
		redirty_tail(inode, wb);
		return;
	}

	if (mapping_tagged(inode->i_mapping, PAGECACHE_TAG_DIRTY)) {
		/*
		 * We didn't write back all the pages.  nfs_writepages()
		 * sometimes bales out without doing anything.
		 */
		if (wbc->nr_to_write <= 0) {
			/* Slice used up. Queue for next turn. */
			requeue_io(inode, wb);
		} else {
			/*
			 * Writeback blocked by something other than
			 * congestion. Delay the inode for some time to
			 * avoid spinning on the CPU (100% iowait)
			 * retrying writeback of the dirty page/inode
			 * that cannot be performed immediately.
			 */
			redirty_tail(inode, wb);
		}
	} else if (inode->i_state & I_DIRTY) {
		/*
		 * Filesystems can dirty the inode during writeback operations,
		 * such as delayed allocation during submission or metadata
		 * updates after data IO completion.
		 */
		redirty_tail(inode, wb);
	} else if (inode->i_state & I_DIRTY_TIME) {
		inode->dirtied_when = jiffies;
		inode_io_list_move_locked(inode, wb, &wb->b_dirty_time);
	} else {
		/* The inode is clean. Remove from writeback lists. */
		inode_io_list_del_locked(inode, wb);
	}
}

/*
 * Write out an inode and its dirty pages. Do not update the writeback list
 * linkage. That is left to the caller. The caller is also responsible for
 * setting I_SYNC flag and calling inode_sync_complete() to clear it.
 */
static int
__writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct address_space *mapping = inode->i_mapping;
	long nr_to_write = wbc->nr_to_write;
	unsigned dirty;
	int ret;

	WARN_ON(!(inode->i_state & I_SYNC));

	trace_writeback_single_inode_start(inode, wbc, nr_to_write);

	ret = do_writepages(mapping, wbc);

	/*
	 * Make sure to wait on the data before writing out the metadata.
	 * This is important for filesystems that modify metadata on data
	 * I/O completion. We don't do it for sync(2) writeback because it has a
	 * separate, external IO completion path and ->sync_fs for guaranteeing
	 * inode metadata is written back correctly.
	 */
	if (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync) {
		int err = filemap_fdatawait(mapping);
		if (ret == 0)
			ret = err;
	}

	/*
	 * Some filesystems may redirty the inode during the writeback
	 * due to delalloc, clear dirty metadata flags right before
	 * write_inode()
	 */
	spin_lock(&inode->i_lock);

	dirty = inode->i_state & I_DIRTY;
	if (inode->i_state & I_DIRTY_TIME) {
		if ((dirty & I_DIRTY_INODE) ||
		    wbc->sync_mode == WB_SYNC_ALL ||
		    unlikely(inode->i_state & I_DIRTY_TIME_EXPIRED) ||
		    unlikely(time_after(jiffies,
					(inode->dirtied_time_when +
					 dirtytime_expire_interval * HZ)))) {
			dirty |= I_DIRTY_TIME | I_DIRTY_TIME_EXPIRED;
			trace_writeback_lazytime(inode);
		}
	} else
		inode->i_state &= ~I_DIRTY_TIME_EXPIRED;
	inode->i_state &= ~dirty;

	/*
	 * Paired with smp_mb() in __mark_inode_dirty().  This allows
	 * __mark_inode_dirty() to test i_state without grabbing i_lock -
	 * either they see the I_DIRTY bits cleared or we see the dirtied
	 * inode.
	 *
	 * I_DIRTY_PAGES is always cleared together above even if @mapping
	 * still has dirty pages.  The flag is reinstated after smp_mb() if
	 * necessary.  This guarantees that either __mark_inode_dirty()
	 * sees clear I_DIRTY_PAGES or we see PAGECACHE_TAG_DIRTY.
	 */
	smp_mb();

	if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
		inode->i_state |= I_DIRTY_PAGES;
	spin_unlock(&inode->i_lock);

	if (dirty & I_DIRTY_TIME)
		mark_inode_dirty_sync(inode);
	/* Don't write the inode if only I_DIRTY_PAGES was set */
	if (dirty & ~I_DIRTY_PAGES) {
		int err = write_inode(inode, wbc);
		if (ret == 0)
			ret = err;
	}
	trace_writeback_single_inode(inode, wbc, nr_to_write);
	return ret;
}

/*
 * Write out an inode's dirty pages. Either the caller has an active reference
 * on the inode or the inode has I_WILL_FREE set.
 *
 * This function is designed for writing back one inode at a time, e.g. when
 * called directly from a filesystem. The flusher thread instead uses
 * __writeback_single_inode() and does more elaborate writeback list handling
 * in writeback_sb_inodes().
 */
static int writeback_single_inode(struct inode *inode,
				  struct writeback_control *wbc)
{
	struct bdi_writeback *wb;
	int ret = 0;

	spin_lock(&inode->i_lock);
	if (!atomic_read(&inode->i_count))
		WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING)));
	else
		WARN_ON(inode->i_state & I_WILL_FREE);

	if (inode->i_state & I_SYNC) {
		if (wbc->sync_mode != WB_SYNC_ALL)
			goto out;
		/*
		 * It's a data-integrity sync. We must wait. Since callers hold
		 * inode reference or inode has I_WILL_FREE set, it cannot go
		 * away under us.
		 */
		__inode_wait_for_writeback(inode);
	}
	WARN_ON(inode->i_state & I_SYNC);
	/*
	 * Skip inode if it is clean and we have no outstanding writeback in
	 * WB_SYNC_ALL mode. We don't want to mess with writeback lists in this
	 * function since flusher thread may be doing for example sync in
	 * parallel and if we move the inode, it could get skipped. So here we
	 * make sure inode is on some writeback list and leave it there unless
	 * we have completely cleaned the inode.
	 */
	if (!(inode->i_state & I_DIRTY_ALL) &&
	    (wbc->sync_mode != WB_SYNC_ALL ||
	     !mapping_tagged(inode->i_mapping, PAGECACHE_TAG_WRITEBACK)))
		goto out;
	inode->i_state |= I_SYNC;
	wbc_attach_and_unlock_inode(wbc, inode);

	ret = __writeback_single_inode(inode, wbc);

	wbc_detach_inode(wbc);

	wb = inode_to_wb_and_lock_list(inode);
	spin_lock(&inode->i_lock);
	/*
	 * If inode is clean, remove it from writeback lists. Otherwise don't
	 * touch it. See comment above for explanation.
	 */
	if (!(inode->i_state & I_DIRTY_ALL))
		inode_io_list_del_locked(inode, wb);
	spin_unlock(&wb->list_lock);
	inode_sync_complete(inode);
out:
	spin_unlock(&inode->i_lock);
	return ret;
}
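
/*
 * For reference: write_inode_now() and sync_inode(), defined further down in
 * this file, are the callers that funnel into writeback_single_inode().
 */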

static long writeback_chunk_size(struct bdi_writeback *wb,
				 struct wb_writeback_work *work)
{
	long pages;

	/*
	 * WB_SYNC_ALL mode does livelock avoidance by syncing dirty
	 * inodes/pages in one big loop. Setting wbc.nr_to_write=LONG_MAX
	 * here avoids calling into writeback_inodes_wb() more than once.
	 *
	 * The intended call sequence for WB_SYNC_ALL writeback is:
	 *
	 *      wb_writeback()
	 *          writeback_sb_inodes()       <== called only once
	 *              write_cache_pages()     <== called once for each inode
	 *                   (quickly) tag currently dirty pages
	 *                   (maybe slowly) sync all tagged pages
	 */
	if (work->sync_mode == WB_SYNC_ALL || work->tagged_writepages)
		pages = LONG_MAX;
	else {
		pages = min(wb->avg_write_bandwidth / 2,
			    global_wb_domain.dirty_limit / DIRTY_SCOPE);
		pages = min(pages, work->nr_pages);
		pages = round_down(pages + MIN_WRITEBACK_PAGES,
				   MIN_WRITEBACK_PAGES);
	}

	return pages;
}
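
/*
 * Worked example (assuming 4K pages, so MIN_WRITEBACK_PAGES is 1024 pages,
 * i.e. 4MB): if avg_write_bandwidth / 2 evaluates to 5000 pages and the
 * other two limits are larger, the chunk becomes
 * round_down(5000 + 1024, 1024) = 5120 pages, so the result is always a
 * multiple of MIN_WRITEBACK_PAGES and never smaller than it.
 */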

/*
 * Write a portion of b_io inodes which belong to @sb.
 *
 * Return the number of pages and/or inodes written.
 *
 * NOTE! This is called with wb->list_lock held, and will
 * unlock and relock that for each inode it ends up doing
 * IO for.
 */
static long writeback_sb_inodes(struct super_block *sb,
				struct bdi_writeback *wb,
				struct wb_writeback_work *work)
{
	struct writeback_control wbc = {
		.sync_mode		= work->sync_mode,
		.tagged_writepages	= work->tagged_writepages,
		.for_kupdate		= work->for_kupdate,
		.for_background		= work->for_background,
		.for_sync		= work->for_sync,
		.range_cyclic		= work->range_cyclic,
		.range_start		= 0,
		.range_end		= LLONG_MAX,
	};
	unsigned long start_time = jiffies;
	long write_chunk;
	long wrote = 0;  /* count both pages and inodes */

	while (!list_empty(&wb->b_io)) {
		struct inode *inode = wb_inode(wb->b_io.prev);
		struct bdi_writeback *tmp_wb;

		if (inode->i_sb != sb) {
			if (work->sb) {
				/*
				 * We only want to write back data for this
				 * superblock, move all inodes not belonging
				 * to it back onto the dirty list.
				 */
				redirty_tail(inode, wb);
				continue;
			}

			/*
			 * The inode belongs to a different superblock.
			 * Bounce back to the caller to unpin this and
			 * pin the next superblock.
			 */
			break;
		}

		/*
		 * Don't bother with new inodes or inodes being freed, first
		 * kind does not need periodic writeout yet, and for the latter
		 * kind writeout is handled by the freer.
		 */
		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
			spin_unlock(&inode->i_lock);
			redirty_tail(inode, wb);
			continue;
		}
		if ((inode->i_state & I_SYNC) && wbc.sync_mode != WB_SYNC_ALL) {
			/*
			 * If this inode is locked for writeback and we are not
			 * doing writeback-for-data-integrity, move it to
			 * b_more_io so that writeback can proceed with the
			 * other inodes on s_io.
			 *
			 * We'll have another go at writing back this inode
			 * when we completed a full scan of b_io.
			 */
			spin_unlock(&inode->i_lock);
			requeue_io(inode, wb);
			trace_writeback_sb_inodes_requeue(inode);
			continue;
		}
		spin_unlock(&wb->list_lock);

		/*
		 * We already requeued the inode if it had I_SYNC set and we
		 * are doing WB_SYNC_NONE writeback. So this catches only the
		 * WB_SYNC_ALL case.
		 */
		if (inode->i_state & I_SYNC) {
			/* Wait for I_SYNC. This function drops i_lock... */
			inode_sleep_on_writeback(inode);
			/* Inode may be gone, start again */
			spin_lock(&wb->list_lock);
			continue;
		}
		inode->i_state |= I_SYNC;
		wbc_attach_and_unlock_inode(&wbc, inode);

		write_chunk = writeback_chunk_size(wb, work);
		wbc.nr_to_write = write_chunk;
		wbc.pages_skipped = 0;

		/*
		 * We use I_SYNC to pin the inode in memory. While it is set
		 * evict_inode() will wait so the inode cannot be freed.
		 */
		__writeback_single_inode(inode, &wbc);

		wbc_detach_inode(&wbc);
		work->nr_pages -= write_chunk - wbc.nr_to_write;
		wrote += write_chunk - wbc.nr_to_write;

		if (need_resched()) {
			/*
			 * We're trying to balance between building up a nice
			 * long list of IOs to improve our merge rate, and
			 * getting those IOs out quickly for anyone throttling
			 * in balance_dirty_pages().  cond_resched() doesn't
			 * unplug, so get our IOs out the door before we
			 * give up the CPU.
			 */
			blk_flush_plug(current);
			cond_resched();
		}

		/*
		 * Requeue @inode if still dirty.  Be careful as @inode may
		 * have been switched to another wb in the meantime.
		 */
		tmp_wb = inode_to_wb_and_lock_list(inode);
		spin_lock(&inode->i_lock);
		if (!(inode->i_state & I_DIRTY_ALL))
			wrote++;
		requeue_inode(inode, tmp_wb, &wbc);
		inode_sync_complete(inode);
		spin_unlock(&inode->i_lock);

		if (unlikely(tmp_wb != wb)) {
			spin_unlock(&tmp_wb->list_lock);
			spin_lock(&wb->list_lock);
		}

		/*
		 * bail out to wb_writeback() often enough to check
		 * background threshold and other termination conditions.
		 */
		if (wrote) {
			if (time_is_before_jiffies(start_time + HZ / 10UL))
				break;
			if (work->nr_pages <= 0)
				break;
		}
	}
	return wrote;
}

static long __writeback_inodes_wb(struct bdi_writeback *wb,
				  struct wb_writeback_work *work)
{
	unsigned long start_time = jiffies;
	long wrote = 0;

	while (!list_empty(&wb->b_io)) {
		struct inode *inode = wb_inode(wb->b_io.prev);
		struct super_block *sb = inode->i_sb;

		if (!trylock_super(sb)) {
			/*
			 * trylock_super() may fail consistently due to
			 * s_umount being grabbed by someone else. Don't use
			 * requeue_io() to avoid busy retrying the inode/sb.
			 */
			redirty_tail(inode, wb);
			continue;
		}
		wrote += writeback_sb_inodes(sb, wb, work);
		up_read(&sb->s_umount);

		/* refer to the same tests at the end of writeback_sb_inodes */
		if (wrote) {
			if (time_is_before_jiffies(start_time + HZ / 10UL))
				break;
			if (work->nr_pages <= 0)
				break;
		}
	}
	/* Leave any unwritten inodes on b_io */
	return wrote;
}

static long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages,
				enum wb_reason reason)
{
	struct wb_writeback_work work = {
		.nr_pages	= nr_pages,
		.sync_mode	= WB_SYNC_NONE,
		.range_cyclic	= 1,
		.reason		= reason,
	};
	struct blk_plug plug;

	blk_start_plug(&plug);
	spin_lock(&wb->list_lock);
	if (list_empty(&wb->b_io))
		queue_io(wb, &work);
	__writeback_inodes_wb(wb, &work);
	spin_unlock(&wb->list_lock);
	blk_finish_plug(&plug);

	return nr_pages - work.nr_pages;
}

/*
 * Explicit flushing or periodic writeback of "old" data.
 *
 * Define "old": the first time one of an inode's pages is dirtied, we mark the
 * dirtying-time in the inode's address_space.  So this periodic writeback code
 * just walks the superblock inode list, writing back any inodes which are
 * older than a specific point in time.
 *
 * Try to run once per dirty_writeback_interval.  But if a writeback event
 * takes longer than a dirty_writeback_interval interval, then leave a
 * one-second gap.
 *
 * older_than_this takes precedence over nr_to_write.  So we'll only write back
 * all dirty pages if they are all attached to "old" mappings.
 */
static long wb_writeback(struct bdi_writeback *wb,
			 struct wb_writeback_work *work)
{
	unsigned long wb_start = jiffies;
	long nr_pages = work->nr_pages;
	unsigned long oldest_jif;
	struct inode *inode;
	long progress;
	struct blk_plug plug;

	oldest_jif = jiffies;
	work->older_than_this = &oldest_jif;

	blk_start_plug(&plug);
	spin_lock(&wb->list_lock);
	for (;;) {
		/*
		 * Stop writeback when nr_pages has been consumed
		 */
		if (work->nr_pages <= 0)
			break;

		/*
		 * Background writeout and kupdate-style writeback may
		 * run forever. Stop them if there is other work to do
		 * so that e.g. sync can proceed. They'll be restarted
		 * after the other works are all done.
		 */
		if ((work->for_background || work->for_kupdate) &&
		    !list_empty(&wb->work_list))
			break;

		/*
		 * For background writeout, stop when we are below the
		 * background dirty threshold
		 */
		if (work->for_background && !wb_over_bg_thresh(wb))
			break;

		/*
		 * Kupdate and background works are special and we want to
		 * include all inodes that need writing. Livelock avoidance is
		 * handled by these works yielding to any other work so we are
		 * safe.
		 */
		if (work->for_kupdate) {
			oldest_jif = jiffies -
				msecs_to_jiffies(dirty_expire_interval * 10);
		} else if (work->for_background)
			oldest_jif = jiffies;

		trace_writeback_start(wb, work);
		if (list_empty(&wb->b_io))
			queue_io(wb, work);
		if (work->sb)
			progress = writeback_sb_inodes(work->sb, wb, work);
		else
			progress = __writeback_inodes_wb(wb, work);
		trace_writeback_written(wb, work);

		wb_update_bandwidth(wb, wb_start);

		/*
		 * Did we write something? Try for more
		 *
		 * Dirty inodes are moved to b_io for writeback in batches.
		 * The completion of the current batch does not necessarily
		 * mean the overall work is done. So we keep looping as long
		 * as we made some progress on cleaning pages or inodes.
		 */
		if (progress)
			continue;
		/*
		 * No more inodes for IO, bail
		 */
		if (list_empty(&wb->b_more_io))
			break;
		/*
		 * Nothing written. Wait for some inode to
		 * become available for writeback. Otherwise
		 * we'll just busyloop.
		 */
		trace_writeback_wait(wb, work);
		inode = wb_inode(wb->b_more_io.prev);
		spin_lock(&inode->i_lock);
		spin_unlock(&wb->list_lock);
		/* This function drops i_lock... */
		inode_sleep_on_writeback(inode);
		spin_lock(&wb->list_lock);
	}
	spin_unlock(&wb->list_lock);
	blk_finish_plug(&plug);

	return nr_pages - work->nr_pages;
}

/*
 * Return the next wb_writeback_work struct that hasn't been processed yet.
 */
static struct wb_writeback_work *get_next_work_item(struct bdi_writeback *wb)
{
	struct wb_writeback_work *work = NULL;

	spin_lock_bh(&wb->work_lock);
	if (!list_empty(&wb->work_list)) {
		work = list_entry(wb->work_list.next,
				  struct wb_writeback_work, list);
		list_del_init(&work->list);
	}
	spin_unlock_bh(&wb->work_lock);
	return work;
}

static long wb_check_background_flush(struct bdi_writeback *wb)
{
	if (wb_over_bg_thresh(wb)) {

		struct wb_writeback_work work = {
			.nr_pages	= LONG_MAX,
			.sync_mode	= WB_SYNC_NONE,
			.for_background	= 1,
			.range_cyclic	= 1,
			.reason		= WB_REASON_BACKGROUND,
		};

		return wb_writeback(wb, &work);
	}

	return 0;
}

static long wb_check_old_data_flush(struct bdi_writeback *wb)
{
	unsigned long expired;
	long nr_pages;

	/*
	 * When set to zero, disable periodic writeback
	 */
	if (!dirty_writeback_interval)
		return 0;

	expired = wb->last_old_flush +
			msecs_to_jiffies(dirty_writeback_interval * 10);
	if (time_before(jiffies, expired))
		return 0;

	wb->last_old_flush = jiffies;
	nr_pages = get_nr_dirty_pages();

	if (nr_pages) {
		struct wb_writeback_work work = {
			.nr_pages	= nr_pages,
			.sync_mode	= WB_SYNC_NONE,
			.for_kupdate	= 1,
			.range_cyclic	= 1,
			.reason		= WB_REASON_PERIODIC,
		};

		return wb_writeback(wb, &work);
	}

	return 0;
}

static long wb_check_start_all(struct bdi_writeback *wb)
{
	long nr_pages;

	if (!test_bit(WB_start_all, &wb->state))
		return 0;

	nr_pages = get_nr_dirty_pages();
	if (nr_pages) {
		struct wb_writeback_work work = {
			.nr_pages	= wb_split_bdi_pages(wb, nr_pages),
			.sync_mode	= WB_SYNC_NONE,
			.range_cyclic	= 1,
			.reason		= wb->start_all_reason,
		};

		nr_pages = wb_writeback(wb, &work);
	}

	clear_bit(WB_start_all, &wb->state);
	return nr_pages;
}

/*
 * Retrieve work items and do the writeback they describe
 */
static long wb_do_writeback(struct bdi_writeback *wb)
{
	struct wb_writeback_work *work;
	long wrote = 0;

	set_bit(WB_writeback_running, &wb->state);
	while ((work = get_next_work_item(wb)) != NULL) {
		trace_writeback_exec(wb, work);
		wrote += wb_writeback(wb, work);
		finish_writeback_work(wb, work);
	}

	/*
	 * Check for a flush-everything request
	 */
	wrote += wb_check_start_all(wb);

	/*
	 * Check for periodic writeback, kupdated() style
	 */
	wrote += wb_check_old_data_flush(wb);
	wrote += wb_check_background_flush(wb);
	clear_bit(WB_writeback_running, &wb->state);

	return wrote;
}
  
  /*
   * Handle writeback of dirty data for the device backed by this bdi. Also
 * reschedules periodically and does kupdated style flushing.
 */
void wb_workfn(struct work_struct *work)
{
	struct bdi_writeback *wb = container_of(to_delayed_work(work),
						struct bdi_writeback, dwork);
	long pages_written;

	set_worker_desc("flush-%s", bdi_dev_name(wb->bdi));
	current->flags |= PF_SWAPWRITE;

	if (likely(!current_is_workqueue_rescuer() ||
		   !test_bit(WB_registered, &wb->state))) {
		/*
		 * The normal path.  Keep writing back @wb until its
		 * work_list is empty.  Note that this path is also taken
		 * if @wb is shutting down even when we're running off the
		 * rescuer as work_list needs to be drained.
		 */
		do {
			pages_written = wb_do_writeback(wb);
			trace_writeback_pages_written(pages_written);
		} while (!list_empty(&wb->work_list));
	} else {
		/*
		 * bdi_wq can't get enough workers and we're running off
		 * the emergency worker.  Don't hog it.  Hopefully, 1024 is
		 * enough for efficient IO.
		 */
		pages_written = writeback_inodes_wb(wb, 1024,
						    WB_REASON_FORKER_THREAD);
		trace_writeback_pages_written(pages_written);
	}

	if (!list_empty(&wb->work_list))
		wb_wakeup(wb);
	else if (wb_has_dirty_io(wb) && dirty_writeback_interval)
		wb_wakeup_delayed(wb);

	current->flags &= ~PF_SWAPWRITE;
}
  
  /*
 * Start writeback of `nr_pages' pages on this bdi. If `nr_pages' is zero,
 * write back the whole world.
 */
static void __wakeup_flusher_threads_bdi(struct backing_dev_info *bdi,
					 enum wb_reason reason)
{
	struct bdi_writeback *wb;

	if (!bdi_has_dirty_io(bdi))
		return;

	list_for_each_entry_rcu(wb, &bdi->wb_list, bdi_node)
		wb_start_writeback(wb, reason);
}

void wakeup_flusher_threads_bdi(struct backing_dev_info *bdi,
				enum wb_reason reason)
{
	rcu_read_lock();
	__wakeup_flusher_threads_bdi(bdi, reason);
	rcu_read_unlock();
}
  
  /*
 * Wakeup the flusher threads to start writeback of all currently dirty pages
 */
void wakeup_flusher_threads(enum wb_reason reason)
{
	struct backing_dev_info *bdi;

	/*
	 * If we are expecting writeback progress we must submit plugged IO.
	 */
	if (blk_needs_flush_plug(current))
		blk_schedule_flush_plug(current);

	rcu_read_lock();
	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list)
		__wakeup_flusher_threads_bdi(bdi, reason);
	rcu_read_unlock();
}
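
/*
 * For reference: the sync(2) path in fs/sync.c is a typical caller here; it
 * kicks every bdi via wakeup_flusher_threads(WB_REASON_SYNC) before waiting
 * on per-superblock writeback.
 */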

/*
 * Wake up bdi's periodically to make sure dirtytime inodes get
 * written back periodically.  We deliberately do *not* check the
 * b_dirtytime list in wb_has_dirty_io(), since this would cause the
 * kernel to be constantly waking up once there are any dirtytime
 * inodes on the system.  So instead we define a separate delayed work
 * function which gets called much more rarely.  (By default, only
 * once every 12 hours.)
 *
 * If there is any other write activity going on in the file system,
 * this function won't be necessary.  But if the only thing that has
 * happened on the file system is a dirtytime inode caused by an atime
 * update, we need this infrastructure below to make sure that inode
 * eventually gets pushed out to disk.
 */
static void wakeup_dirtytime_writeback(struct work_struct *w);
static DECLARE_DELAYED_WORK(dirtytime_work, wakeup_dirtytime_writeback);

static void wakeup_dirtytime_writeback(struct work_struct *w)
{
	struct backing_dev_info *bdi;

	rcu_read_lock();
	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
		struct bdi_writeback *wb;

		list_for_each_entry_rcu(wb, &bdi->wb_list, bdi_node)
			if (!list_empty(&wb->b_dirty_time))
				wb_wakeup(wb);
	}
	rcu_read_unlock();
	schedule_delayed_work(&dirtytime_work, dirtytime_expire_interval * HZ);
}

static int __init start_dirtytime_writeback(void)
{
	schedule_delayed_work(&dirtytime_work, dirtytime_expire_interval * HZ);
	return 0;
}
__initcall(start_dirtytime_writeback);

int dirtytime_interval_handler(struct ctl_table *table, int write,
			       void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int ret;

	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		mod_delayed_work(system_wq, &dirtytime_work, 0);
	return ret;
}
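
/*
 * This handler backs the vm.dirtytime_expire_seconds sysctl (wired up in
 * kernel/sysctl.c); writing the knob reschedules dirtytime_work immediately
 * so the new interval takes effect without waiting for the old timer.
 */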
03ba3782e   Jens Axboe   writeback: switch...
2104
2105
2106
2107
2108
2109
2110
2111
2112
2113
2114
2115
2116
2117
2118
2119
2120
2121
2122
2123
2124
2125
2126
2127
  static noinline void block_dump___mark_inode_dirty(struct inode *inode)
  {
  	if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev")) {
  		struct dentry *dentry;
  		const char *name = "?";
  
  		dentry = d_find_alias(inode);
  		if (dentry) {
  			spin_lock(&dentry->d_lock);
  			name = (const char *) dentry->d_name.name;
  		}
  		printk(KERN_DEBUG
  		       "%s(%d): dirtied inode %lu (%s) on %s
  ",
  		       current->comm, task_pid_nr(current), inode->i_ino,
  		       name, inode->i_sb->s_id);
  		if (dentry) {
  			spin_unlock(&dentry->d_lock);
  			dput(dentry);
  		}
  	}
  }
  
  /**
0117d4272   Mauro Carvalho Chehab   fs: add a blank l...
2128
2129
2130
2131
2132
2133
2134
   * __mark_inode_dirty -	internal function
   *
   * @inode: inode to mark
   * @flags: what kind of dirty (e.g. I_DIRTY_SYNC)
   *
   * Mark an inode as dirty. Callers should use mark_inode_dirty or
   * mark_inode_dirty_sync.
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2135
   *
03ba3782e   Jens Axboe   writeback: switch...
2136
2137
2138
2139
2140
2141
2142
2143
2144
   * Put the inode on the super block's dirty list.
   *
   * CAREFUL! We mark it dirty unconditionally, but move it onto the
   * dirty list only if it is hashed or if it refers to a blockdev.
   * If it was not hashed, it will never be added to the dirty list
   * even if it is later hashed, as it will have been marked dirty already.
   *
   * In short, make sure you hash any inodes _before_ you start marking
   * them dirty.
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2145
   *
03ba3782e   Jens Axboe   writeback: switch...
2146
2147
2148
2149
2150
2151
   * Note that for blockdevs, inode->dirtied_when represents the dirtying time of
   * the block-special inode (/dev/hda1) itself.  And the ->dirtied_when field of
   * the kernel-internal blockdev inode represents the dirtying time of the
   * blockdev's pages.  This is why for I_DIRTY_PAGES we always use
   * page->mapping->host, so the page-dirtying time is recorded in the internal
   * blockdev inode.
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2152
   */
03ba3782e   Jens Axboe   writeback: switch...
2153
  void __mark_inode_dirty(struct inode *inode, int flags)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2154
  {
03ba3782e   Jens Axboe   writeback: switch...
2155
  	struct super_block *sb = inode->i_sb;
0ae45f63d   Theodore Ts'o   vfs: add support ...
2156
2157
2158
  	int dirtytime;
  
  	trace_writeback_mark_inode_dirty(inode, flags);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2159

03ba3782e   Jens Axboe   writeback: switch...
2160
2161
2162
2163
  	/*
  	 * Don't do this for I_DIRTY_PAGES - that doesn't actually
  	 * dirty the inode itself
  	 */
0e11f6443   Christoph Hellwig   fs: move I_DIRTY_...
2164
  	if (flags & (I_DIRTY_INODE | I_DIRTY_TIME)) {
9fb0a7da0   Tejun Heo   writeback: add mo...
2165
  		trace_writeback_dirty_inode_start(inode, flags);
03ba3782e   Jens Axboe   writeback: switch...
2166
  		if (sb->s_op->dirty_inode)
aa3857295   Christoph Hellwig   fs: pass exact ty...
2167
  			sb->s_op->dirty_inode(inode, flags);
9fb0a7da0   Tejun Heo   writeback: add mo...
2168
2169
  
  		trace_writeback_dirty_inode(inode, flags);
03ba3782e   Jens Axboe   writeback: switch...
2170
  	}
0ae45f63d   Theodore Ts'o   vfs: add support ...
2171
2172
2173
  	if (flags & I_DIRTY_INODE)
  		flags &= ~I_DIRTY_TIME;
  	dirtytime = flags & I_DIRTY_TIME;
03ba3782e   Jens Axboe   writeback: switch...
2174
2175
  
  	/*
9c6ac78eb   Tejun Heo   writeback: fix a ...
2176
2177
  	 * Paired with smp_mb() in __writeback_single_inode() for the
  	 * following lockless i_state test.  See there for details.
03ba3782e   Jens Axboe   writeback: switch...
2178
2179
  	 */
  	smp_mb();
0ae45f63d   Theodore Ts'o   vfs: add support ...
2180
2181
  	if (((inode->i_state & flags) == flags) ||
  	    (dirtytime && (inode->i_state & I_DIRTY_INODE)))
03ba3782e   Jens Axboe   writeback: switch...
2182
2183
2184
2185
  		return;
  
  	if (unlikely(block_dump))
  		block_dump___mark_inode_dirty(inode);
250df6ed2   Dave Chinner   fs: protect inode...
2186
  	spin_lock(&inode->i_lock);
0ae45f63d   Theodore Ts'o   vfs: add support ...
2187
2188
  	if (dirtytime && (inode->i_state & I_DIRTY_INODE))
  		goto out_unlock_inode;
03ba3782e   Jens Axboe   writeback: switch...
2189
2190
  	if ((inode->i_state & flags) != flags) {
  		const int was_dirty = inode->i_state & I_DIRTY;
52ebea749   Tejun Heo   writeback: make b...
2191
  		inode_attach_wb(inode, NULL);
0ae45f63d   Theodore Ts'o   vfs: add support ...
2192
2193
  		if (flags & I_DIRTY_INODE)
  			inode->i_state &= ~I_DIRTY_TIME;
03ba3782e   Jens Axboe   writeback: switch...
2194
2195
2196
2197
2198
2199
2200
2201
  		inode->i_state |= flags;
  
  		/*
  		 * If the inode is being synced, just update its dirty state.
  		 * The unlocker will place the inode on the appropriate
  		 * superblock list, based upon its state.
  		 */
  		if (inode->i_state & I_SYNC)
250df6ed2   Dave Chinner   fs: protect inode...
2202
  			goto out_unlock_inode;
03ba3782e   Jens Axboe   writeback: switch...
2203
2204
2205
2206
2207
2208
  
  		/*
  		 * Only add valid (hashed) inodes to the superblock's
  		 * dirty list.  Add blockdev inodes as well.
  		 */
  		if (!S_ISBLK(inode->i_mode)) {
1d3382cbf   Al Viro   new helper: inode...
2209
  			if (inode_unhashed(inode))
250df6ed2   Dave Chinner   fs: protect inode...
2210
  				goto out_unlock_inode;
03ba3782e   Jens Axboe   writeback: switch...
2211
  		}
a4ffdde6e   Al Viro   simplify checks f...
2212
  		if (inode->i_state & I_FREEING)
250df6ed2   Dave Chinner   fs: protect inode...
2213
  			goto out_unlock_inode;
03ba3782e   Jens Axboe   writeback: switch...
2214
2215
2216
2217
2218
2219
  
  		/*
  		 * If the inode was already on b_dirty/b_io/b_more_io, don't
  		 * reposition it (that would break b_dirty time-ordering).
  		 */
  		if (!was_dirty) {
87e1d789b   Tejun Heo   writeback: implem...
2220
  			struct bdi_writeback *wb;
d6c10f1fc   Tejun Heo   writeback: implem...
2221
  			struct list_head *dirty_list;
a66979aba   Dave Chinner   fs: move i_wb_lis...
2222
  			bool wakeup_bdi = false;
253c34e9b   Artem Bityutskiy   writeback: preven...
2223

87e1d789b   Tejun Heo   writeback: implem...
2224
  			wb = locked_inode_to_wb_and_lock_list(inode);
253c34e9b   Artem Bityutskiy   writeback: preven...
2225

0747259d1   Tejun Heo   writeback: dirty ...
2226
2227
2228
2229
  			WARN(bdi_cap_writeback_dirty(wb->bdi) &&
  			     !test_bit(WB_registered, &wb->state),
  			     "bdi-%s not registered
  ", wb->bdi->name);
03ba3782e   Jens Axboe   writeback: switch...
2230
2231
  
  			inode->dirtied_when = jiffies;
a2f487069   Theodore Ts'o   fs: make sure the...
2232
2233
  			if (dirtytime)
  				inode->dirtied_time_when = jiffies;
d6c10f1fc   Tejun Heo   writeback: implem...
2234

0e11f6443   Christoph Hellwig   fs: move I_DIRTY_...
2235
  			if (inode->i_state & I_DIRTY)
0747259d1   Tejun Heo   writeback: dirty ...
2236
  				dirty_list = &wb->b_dirty;
a2f487069   Theodore Ts'o   fs: make sure the...
2237
  			else
0747259d1   Tejun Heo   writeback: dirty ...
2238
  				dirty_list = &wb->b_dirty_time;
d6c10f1fc   Tejun Heo   writeback: implem...
2239

c7f540849   Dave Chinner   inode: rename i_w...
2240
  			wakeup_bdi = inode_io_list_move_locked(inode, wb,
d6c10f1fc   Tejun Heo   writeback: implem...
2241
  							       dirty_list);
0747259d1   Tejun Heo   writeback: dirty ...
2242
  			spin_unlock(&wb->list_lock);
0ae45f63d   Theodore Ts'o   vfs: add support ...
2243
  			trace_writeback_dirty_inode_enqueue(inode);
a66979aba   Dave Chinner   fs: move i_wb_lis...
2244

d6c10f1fc   Tejun Heo   writeback: implem...
2245
2246
2247
2248
2249
2250
  			/*
  			 * If this is the first dirty inode for this bdi,
  			 * we have to wake up the corresponding bdi thread
  			 * to make sure background writeback happens
  			 * later.
  			 */
0747259d1   Tejun Heo   writeback: dirty ...
2251
2252
  			if (bdi_cap_writeback_dirty(wb->bdi) && wakeup_bdi)
  				wb_wakeup_delayed(wb);
a66979aba   Dave Chinner   fs: move i_wb_lis...
2253
  			return;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2254
  		}
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2255
  	}
250df6ed2   Dave Chinner   fs: protect inode...
2256
2257
  out_unlock_inode:
  	spin_unlock(&inode->i_lock);
03ba3782e   Jens Axboe   writeback: switch...
2258
2259
  }
  EXPORT_SYMBOL(__mark_inode_dirty);
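
  /*
   * Illustrative sketch, not part of the original file: how a filesystem
   * typically marks an inode dirty.  As noted above, callers normally use the
   * mark_inode_dirty()/mark_inode_dirty_sync() wrappers from <linux/fs.h>
   * rather than calling __mark_inode_dirty() directly.  The helper name is
   * hypothetical.
   */
  static void __maybe_unused example_touch_inode(struct inode *inode)
  {
  	inode->i_mtime = current_time(inode);	/* some metadata change */
  	mark_inode_dirty(inode);		/* queue it on the wb dirty list */
  }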
e97fedb9e   Dave Chinner   sync: serialise p...
2260
2261
2262
2263
2264
2265
2266
2267
2268
  /*
   * The @s_sync_lock is used to serialise concurrent sync operations
   * to avoid lock contention problems with concurrent wait_sb_inodes() calls.
   * Concurrent callers will block on the s_sync_lock rather than doing contending
   * walks. The queueing maintains the behaviour required by sync(2): all the
   * IO issued up to the time this function is entered is guaranteed to be
   * completed by the time we have gained the lock and waited for all IO that
   * was in progress, regardless of the order in which callers are granted the
   * lock.
   */
b6e51316d   Jens Axboe   writeback: separa...
2269
  static void wait_sb_inodes(struct super_block *sb)
03ba3782e   Jens Axboe   writeback: switch...
2270
  {
6c60d2b57   Dave Chinner   fs/fs-writeback.c...
2271
  	LIST_HEAD(sync_list);
03ba3782e   Jens Axboe   writeback: switch...
2272
2273
2274
2275
2276
  
  	/*
  	 * We need to be protected against the filesystem going from
  	 * r/o to r/w or vice versa.
  	 */
b6e51316d   Jens Axboe   writeback: separa...
2277
  	WARN_ON(!rwsem_is_locked(&sb->s_umount));
03ba3782e   Jens Axboe   writeback: switch...
2278

e97fedb9e   Dave Chinner   sync: serialise p...
2279
  	mutex_lock(&sb->s_sync_lock);
03ba3782e   Jens Axboe   writeback: switch...
2280
2281
  
  	/*
6c60d2b57   Dave Chinner   fs/fs-writeback.c...
2282
2283
2284
2285
2286
2287
2288
2289
2290
2291
2292
2293
2294
2295
2296
2297
2298
2299
  	 * Splice the writeback list onto a temporary list to avoid waiting on
  	 * inodes that have started writeback after this point.
  	 *
  	 * Use rcu_read_lock() to keep the inodes around until we have a
  	 * reference. s_inode_wblist_lock protects sb->s_inodes_wb as well as
  	 * the local list because inodes can be dropped from either by writeback
  	 * completion.
  	 */
  	rcu_read_lock();
  	spin_lock_irq(&sb->s_inode_wblist_lock);
  	list_splice_init(&sb->s_inodes_wb, &sync_list);
  
  	/*
  	 * Data integrity sync. Must wait for all pages under writeback, because
  	 * there may have been pages dirtied before our sync call whose writeout
  	 * was started before we could write them out ourselves.  In that case,
  	 * the inode may no longer be on the dirty list, but we still have to
  	 * wait for that writeout.
03ba3782e   Jens Axboe   writeback: switch...
2300
  	 */
6c60d2b57   Dave Chinner   fs/fs-writeback.c...
2301
2302
2303
  	while (!list_empty(&sync_list)) {
  		struct inode *inode = list_first_entry(&sync_list, struct inode,
  						       i_wb_list);
250df6ed2   Dave Chinner   fs: protect inode...
2304
  		struct address_space *mapping = inode->i_mapping;
03ba3782e   Jens Axboe   writeback: switch...
2305

6c60d2b57   Dave Chinner   fs/fs-writeback.c...
2306
2307
2308
2309
2310
2311
2312
2313
2314
2315
2316
2317
2318
2319
2320
2321
2322
  		/*
  		 * Move each inode back to the wb list before we drop the lock
  		 * to preserve consistency between i_wb_list and the mapping
  		 * writeback tag. Writeback completion is responsible for removing
  		 * the inode from either list once the writeback tag is cleared.
  		 */
  		list_move_tail(&inode->i_wb_list, &sb->s_inodes_wb);
  
  		/*
  		 * The mapping can appear untagged while still on-list since we
  		 * do not have the mapping lock. Skip it here, wb completion
  		 * will remove it.
  		 */
  		if (!mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK))
  			continue;
  
  		spin_unlock_irq(&sb->s_inode_wblist_lock);
250df6ed2   Dave Chinner   fs: protect inode...
2323
  		spin_lock(&inode->i_lock);
6c60d2b57   Dave Chinner   fs/fs-writeback.c...
2324
  		if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) {
250df6ed2   Dave Chinner   fs: protect inode...
2325
  			spin_unlock(&inode->i_lock);
6c60d2b57   Dave Chinner   fs/fs-writeback.c...
2326
2327
  
  			spin_lock_irq(&sb->s_inode_wblist_lock);
03ba3782e   Jens Axboe   writeback: switch...
2328
  			continue;
250df6ed2   Dave Chinner   fs: protect inode...
2329
  		}
03ba3782e   Jens Axboe   writeback: switch...
2330
  		__iget(inode);
250df6ed2   Dave Chinner   fs: protect inode...
2331
  		spin_unlock(&inode->i_lock);
6c60d2b57   Dave Chinner   fs/fs-writeback.c...
2332
  		rcu_read_unlock();
03ba3782e   Jens Axboe   writeback: switch...
2333

aa750fd71   Junichi Nomura   mm/filemap.c: mak...
2334
2335
2336
2337
2338
2339
  		/*
  		 * We keep the error status of each individual mapping so that
  		 * applications can catch the writeback error using fsync(2).
  		 * See filemap_fdatawait_keep_errors() for details.
  		 */
  		filemap_fdatawait_keep_errors(mapping);
03ba3782e   Jens Axboe   writeback: switch...
2340
2341
  
  		cond_resched();
6c60d2b57   Dave Chinner   fs/fs-writeback.c...
2342
2343
2344
2345
  		iput(inode);
  
  		rcu_read_lock();
  		spin_lock_irq(&sb->s_inode_wblist_lock);
03ba3782e   Jens Axboe   writeback: switch...
2346
  	}
6c60d2b57   Dave Chinner   fs/fs-writeback.c...
2347
2348
  	spin_unlock_irq(&sb->s_inode_wblist_lock);
  	rcu_read_unlock();
e97fedb9e   Dave Chinner   sync: serialise p...
2349
  	mutex_unlock(&sb->s_sync_lock);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2350
  }
f30a7d0cc   Tejun Heo   writeback: restru...
2351
2352
  static void __writeback_inodes_sb_nr(struct super_block *sb, unsigned long nr,
  				     enum wb_reason reason, bool skip_if_busy)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2353
  {
5b9cce4c7   Tejun Heo   writeback: Genera...
2354
2355
  	struct backing_dev_info *bdi = sb->s_bdi;
  	DEFINE_WB_COMPLETION(done, bdi);
83ba7b071   Christoph Hellwig   writeback: simpli...
2356
  	struct wb_writeback_work work = {
6e6938b6d   Wu Fengguang   writeback: introd...
2357
2358
2359
2360
2361
  		.sb			= sb,
  		.sync_mode		= WB_SYNC_NONE,
  		.tagged_writepages	= 1,
  		.done			= &done,
  		.nr_pages		= nr,
0e175a183   Curt Wohlgemuth   writeback: Add a ...
2362
  		.reason			= reason,
3c4d71653   Christoph Hellwig   writeback: queue ...
2363
  	};
d8a8559cd   Jens Axboe   writeback: get ri...
2364

e79729123   Tejun Heo   writeback: don't ...
2365
  	if (!bdi_has_dirty_io(bdi) || bdi == &noop_backing_dev_info)
6eedc7015   Jan Kara   vfs: Move noop_ba...
2366
  		return;
cf37e9724   Christoph Hellwig   writeback: enforc...
2367
  	WARN_ON(!rwsem_is_locked(&sb->s_umount));
f30a7d0cc   Tejun Heo   writeback: restru...
2368

db1253604   Tejun Heo   writeback: make w...
2369
  	bdi_split_work_to_wbs(sb->s_bdi, &work, skip_if_busy);
5b9cce4c7   Tejun Heo   writeback: Genera...
2370
  	wb_wait_for_completion(&done);
e913fc825   Jens Axboe   writeback: fix WB...
2371
  }
f30a7d0cc   Tejun Heo   writeback: restru...
2372
2373
2374
2375
2376
2377
2378
2379
2380
2381
2382
2383
2384
2385
2386
2387
2388
  
  /**
   * writeback_inodes_sb_nr -	writeback dirty inodes from given super_block
   * @sb: the superblock
   * @nr: the number of pages to write
   * @reason: reason why some writeback work was initiated
   *
   * Start writeback on some inodes on this super_block. No guarantees are made
   * on how many (if any) will be written, and this function does not wait
   * for the submitted IO to complete.
   */
  void writeback_inodes_sb_nr(struct super_block *sb,
  			    unsigned long nr,
  			    enum wb_reason reason)
  {
  	__writeback_inodes_sb_nr(sb, nr, reason, false);
  }
3259f8bed   Chris Mason   Add new functions...
2389
2390
2391
2392
2393
  EXPORT_SYMBOL(writeback_inodes_sb_nr);
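
  /*
   * Illustrative sketch, not part of the original file: a hypothetical caller
   * asking for a bounded amount of writeback.  Note the WARN_ON() in
   * __writeback_inodes_sb_nr(): the caller must already hold sb->s_umount.
   * WB_REASON_FS_FREE_SPACE is just an example reason.
   */
  static void __maybe_unused example_writeback_some_pages(struct super_block *sb)
  {
  	down_read(&sb->s_umount);
  	writeback_inodes_sb_nr(sb, 1024, WB_REASON_FS_FREE_SPACE);
  	up_read(&sb->s_umount);
  }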
  
  /**
   * writeback_inodes_sb	-	writeback dirty inodes from given super_block
   * @sb: the superblock
786228ab3   Marcos Paulo de Souza   writeback: Fix is...
2394
   * @reason: reason why some writeback work was initiated
3259f8bed   Chris Mason   Add new functions...
2395
2396
2397
2398
2399
   *
   * Start writeback on some inodes on this super_block. No guarantees are made
   * on how many (if any) will be written, and this function does not wait
   * for the submitted IO to complete.
   */
0e175a183   Curt Wohlgemuth   writeback: Add a ...
2400
  void writeback_inodes_sb(struct super_block *sb, enum wb_reason reason)
3259f8bed   Chris Mason   Add new functions...
2401
  {
0e175a183   Curt Wohlgemuth   writeback: Add a ...
2402
  	return writeback_inodes_sb_nr(sb, get_nr_dirty_pages(), reason);
3259f8bed   Chris Mason   Add new functions...
2403
  }
0e3c9a228   Jens Axboe   Revert "writeback...
2404
  EXPORT_SYMBOL(writeback_inodes_sb);
e913fc825   Jens Axboe   writeback: fix WB...
2405
2406
  
  /**
8264c3214   Rakesh Pandit   writeback: merge ...
2407
   * try_to_writeback_inodes_sb - try to start writeback if none underway
17bd55d03   Eric Sandeen   fs-writeback: Add...
2408
   * @sb: the superblock
8264c3214   Rakesh Pandit   writeback: merge ...
2409
   * @reason: reason why some writeback work was initiated
17bd55d03   Eric Sandeen   fs-writeback: Add...
2410
   *
8264c3214   Rakesh Pandit   writeback: merge ...
2411
   * Invoke __writeback_inodes_sb_nr if no writeback is currently underway.
17bd55d03   Eric Sandeen   fs-writeback: Add...
2412
   */
8264c3214   Rakesh Pandit   writeback: merge ...
2413
  void try_to_writeback_inodes_sb(struct super_block *sb, enum wb_reason reason)
17bd55d03   Eric Sandeen   fs-writeback: Add...
2414
  {
10ee27a06   Miao Xie   vfs: re-implement...
2415
  	if (!down_read_trylock(&sb->s_umount))
8264c3214   Rakesh Pandit   writeback: merge ...
2416
  		return;
10ee27a06   Miao Xie   vfs: re-implement...
2417

8264c3214   Rakesh Pandit   writeback: merge ...
2418
  	__writeback_inodes_sb_nr(sb, get_nr_dirty_pages(), reason, true);
10ee27a06   Miao Xie   vfs: re-implement...
2419
  	up_read(&sb->s_umount);
3259f8bed   Chris Mason   Add new functions...
2420
  }
10ee27a06   Miao Xie   vfs: re-implement...
2421
  EXPORT_SYMBOL(try_to_writeback_inodes_sb);
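
  /*
   * Illustrative sketch, not part of the original file: unlike
   * writeback_inodes_sb_nr() above, this variant takes s_umount itself via
   * down_read_trylock(), so a hypothetical low-space path can call it without
   * any locking of its own.
   */
  static void __maybe_unused example_try_flush_sb(struct super_block *sb)
  {
  	try_to_writeback_inodes_sb(sb, WB_REASON_FS_FREE_SPACE);
  }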
3259f8bed   Chris Mason   Add new functions...
2422
2423
  
  /**
d8a8559cd   Jens Axboe   writeback: get ri...
2424
   * sync_inodes_sb	-	sync sb inode pages
0dc83bd30   Jan Kara   Revert "writeback...
2425
   * @sb: the superblock
d8a8559cd   Jens Axboe   writeback: get ri...
2426
2427
   *
   * This function writes and waits on any dirty inode belonging to this
0dc83bd30   Jan Kara   Revert "writeback...
2428
   * super_block.
d8a8559cd   Jens Axboe   writeback: get ri...
2429
   */
0dc83bd30   Jan Kara   Revert "writeback...
2430
  void sync_inodes_sb(struct super_block *sb)
d8a8559cd   Jens Axboe   writeback: get ri...
2431
  {
5b9cce4c7   Tejun Heo   writeback: Genera...
2432
2433
  	struct backing_dev_info *bdi = sb->s_bdi;
  	DEFINE_WB_COMPLETION(done, bdi);
83ba7b071   Christoph Hellwig   writeback: simpli...
2434
  	struct wb_writeback_work work = {
3c4d71653   Christoph Hellwig   writeback: queue ...
2435
2436
2437
2438
  		.sb		= sb,
  		.sync_mode	= WB_SYNC_ALL,
  		.nr_pages	= LONG_MAX,
  		.range_cyclic	= 0,
83ba7b071   Christoph Hellwig   writeback: simpli...
2439
  		.done		= &done,
0e175a183   Curt Wohlgemuth   writeback: Add a ...
2440
  		.reason		= WB_REASON_SYNC,
7747bd4bc   Dave Chinner   sync: don't block...
2441
  		.for_sync	= 1,
3c4d71653   Christoph Hellwig   writeback: queue ...
2442
  	};
006a0973e   Tejun Heo   writeback: sync_i...
2443
2444
2445
2446
2447
2448
  	/*
  	 * Can't skip on !bdi_has_dirty(): clean inodes may still be under
  	 * writeback and must be waited for, and I_DIRTY_TIME inodes, which
  	 * bdi_has_dirty() ignores, need to be written out too.
  	 */
  	if (bdi == &noop_backing_dev_info)
6eedc7015   Jan Kara   vfs: Move noop_ba...
2449
  		return;
cf37e9724   Christoph Hellwig   writeback: enforc...
2450
  	WARN_ON(!rwsem_is_locked(&sb->s_umount));
7fc5854f8   Tejun Heo   writeback: synchr...
2451
2452
  	/* protect against inode wb switch, see inode_switch_wbs_work_fn() */
  	bdi_down_write_wb_switch_rwsem(bdi);
db1253604   Tejun Heo   writeback: make w...
2453
  	bdi_split_work_to_wbs(bdi, &work, false);
5b9cce4c7   Tejun Heo   writeback: Genera...
2454
  	wb_wait_for_completion(&done);
7fc5854f8   Tejun Heo   writeback: synchr...
2455
  	bdi_up_write_wb_switch_rwsem(bdi);
83ba7b071   Christoph Hellwig   writeback: simpli...
2456

b6e51316d   Jens Axboe   writeback: separa...
2457
  	wait_sb_inodes(sb);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2458
  }
d8a8559cd   Jens Axboe   writeback: get ri...
2459
  EXPORT_SYMBOL(sync_inodes_sb);
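
  /*
   * Illustrative sketch, not part of the original file: sync(2) ends up calling
   * sync_inodes_sb() for every writable superblock, roughly via a callback like
   * the one below passed to iterate_supers() (which holds s_umount around the
   * call).  The name and shape approximate the helper in fs/sync.c.
   */
  static void __maybe_unused example_sync_one_sb(struct super_block *sb, void *arg)
  {
  	if (!sb_rdonly(sb))
  		sync_inodes_sb(sb);
  }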
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2460

1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2461
  /**
7f04c26d7   Andrea Arcangeli   [PATCH] fix nr_un...
2462
2463
2464
2465
2466
2467
   * write_inode_now	-	write an inode to disk
   * @inode: inode to write to disk
   * @sync: whether the write should be synchronous or not
   *
   * This function commits an inode to disk immediately if it is dirty. This is
   * primarily needed by knfsd.
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2468
   *
7f04c26d7   Andrea Arcangeli   [PATCH] fix nr_un...
2469
   * The caller must either have a ref on the inode or must have set I_WILL_FREE.
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2470
   */
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2471
2472
  int write_inode_now(struct inode *inode, int sync)
  {
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2473
2474
  	struct writeback_control wbc = {
  		.nr_to_write = LONG_MAX,
18914b188   Mike Galbraith   write_inode_now()...
2475
  		.sync_mode = sync ? WB_SYNC_ALL : WB_SYNC_NONE,
111ebb6e6   OGAWA Hirofumi   [PATCH] writeback...
2476
2477
  		.range_start = 0,
  		.range_end = LLONG_MAX,
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2478
2479
2480
  	};
  
  	if (!mapping_cap_writeback_dirty(inode->i_mapping))
49364ce25   Andrew Morton   [PATCH] write_ino...
2481
  		wbc.nr_to_write = 0;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2482
2483
  
  	might_sleep();
aaf255933   Tejun Heo   writeback, cgroup...
2484
  	return writeback_single_inode(inode, &wbc);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2485
2486
2487
2488
2489
2490
2491
2492
2493
2494
2495
2496
2497
2498
2499
2500
  }
  EXPORT_SYMBOL(write_inode_now);
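
  /*
   * Illustrative sketch, not part of the original file: a hypothetical caller
   * (something knfsd-like) flushing a single inode and its pages synchronously
   * before reusing or dropping it.
   */
  static int __maybe_unused example_flush_inode(struct inode *inode)
  {
  	return write_inode_now(inode, 1);	/* sync == 1 selects WB_SYNC_ALL */
  }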
  
  /**
   * sync_inode - write an inode and its pages to disk.
   * @inode: the inode to sync
   * @wbc: controls the writeback mode
   *
   * sync_inode() will write an inode and its pages to disk.  It will also
   * correctly update the inode on its superblock's dirty inode lists and will
   * update inode->i_state.
   *
   * The caller must have a ref on the inode.
   */
  int sync_inode(struct inode *inode, struct writeback_control *wbc)
  {
aaf255933   Tejun Heo   writeback, cgroup...
2501
  	return writeback_single_inode(inode, wbc);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2502
2503
  }
  EXPORT_SYMBOL(sync_inode);
c37650161   Christoph Hellwig   fs: add sync_inod...
2504
2505
  
  /**
c691b9d98   Andrew Morton   sync_inode_metada...
2506
   * sync_inode_metadata - write an inode to disk
c37650161   Christoph Hellwig   fs: add sync_inod...
2507
2508
2509
   * @inode: the inode to sync
   * @wait: wait for I/O to complete.
   *
c691b9d98   Andrew Morton   sync_inode_metada...
2510
   * Write an inode to disk and adjust its dirty state after completion.
c37650161   Christoph Hellwig   fs: add sync_inod...
2511
2512
2513
2514
2515
2516
2517
2518
2519
2520
2521
2522
2523
   *
   * Note: only writes the actual inode, no associated data or other metadata.
   */
  int sync_inode_metadata(struct inode *inode, int wait)
  {
  	struct writeback_control wbc = {
  		.sync_mode = wait ? WB_SYNC_ALL : WB_SYNC_NONE,
  		.nr_to_write = 0, /* metadata-only */
  	};
  
  	return sync_inode(inode, &wbc);
  }
  EXPORT_SYMBOL(sync_inode_metadata);
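
  /*
   * Illustrative sketch, not part of the original file: how an fsync
   * implementation might use sync_inode_metadata() once the data pages have
   * already been written, pushing only the inode itself and waiting for it.
   * The helper name is hypothetical.
   */
  static int __maybe_unused example_fsync_inode_only(struct inode *inode)
  {
  	return sync_inode_metadata(inode, 1);	/* wait == 1: synchronous */
  }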