fs/fs-writeback.c
  /*
   * fs/fs-writeback.c
   *
   * Copyright (C) 2002, Linus Torvalds.
   *
   * Contains all the functions related to writing back and waiting
   * upon dirty inodes against superblocks, and writing back dirty
   * pages against inodes.  ie: data writeback.  Writeout of the
   * inode itself is not handled here.
   *
   * 10Apr2002	Andrew Morton
   *		Split out of fs/inode.c
   *		Additions for address_space-based writeback
   */
  
  #include <linux/kernel.h>
  #include <linux/export.h>
  #include <linux/spinlock.h>
  #include <linux/slab.h>
  #include <linux/sched.h>
  #include <linux/fs.h>
  #include <linux/mm.h>
  #include <linux/pagemap.h>
  #include <linux/kthread.h>
  #include <linux/writeback.h>
  #include <linux/blkdev.h>
  #include <linux/backing-dev.h>
  #include <linux/tracepoint.h>
  #include <linux/device.h>
  #include <linux/memcontrol.h>
  #include "internal.h"

  /*
   * 4MB minimal write chunk size
   */
  #define MIN_WRITEBACK_PAGES	(4096UL >> (PAGE_SHIFT - 10))
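
  /*
   * Worked example: with the common PAGE_SHIFT of 12 (4KB pages),
   * PAGE_SHIFT - 10 == 2, so MIN_WRITEBACK_PAGES == 4096UL >> 2 == 1024
   * pages, i.e. 1024 * 4KB == 4MB, matching the "4MB minimal write chunk
   * size" comment above.  With 64KB pages (PAGE_SHIFT == 16) it is
   * 4096 >> 6 == 64 pages, which is still 4MB.
   */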

  struct wb_completion {
  	atomic_t		cnt;
  };
  /*
   * Passed into wb_writeback(), essentially a subset of writeback_control
   */
  struct wb_writeback_work {
  	long nr_pages;
  	struct super_block *sb;
  	unsigned long *older_than_this;
  	enum writeback_sync_modes sync_mode;
  	unsigned int tagged_writepages:1;
  	unsigned int for_kupdate:1;
  	unsigned int range_cyclic:1;
  	unsigned int for_background:1;
  	unsigned int for_sync:1;	/* sync(2) WB_SYNC_ALL writeback */
  	unsigned int auto_free:1;	/* free on completion */
  	enum wb_reason reason;		/* why was writeback initiated? */

  	struct list_head list;		/* pending work list */
  	struct wb_completion *done;	/* set if the caller waits */
  };
  /*
   * If one wants to wait for one or more wb_writeback_works, each work's
   * ->done should be set to a wb_completion defined using the following
   * macro.  Once all work items are issued with wb_queue_work(), the caller
   * can wait for the completion of all using wb_wait_for_completion().  Work
   * items which are waited upon aren't freed automatically on completion.
   */
  #define DEFINE_WB_COMPLETION_ONSTACK(cmpl)				\
  	struct wb_completion cmpl = {					\
  		.cnt		= ATOMIC_INIT(1),			\
  	}
  
  
  /*
   * If an inode is constantly having its pages dirtied, but then the
   * updates stop dirtytime_expire_interval seconds in the past, it's
   * possible for the worst case time between when an inode has its
   * timestamps updated and when they finally get written out to be two
   * dirtytime_expire_intervals.  We set the default to 12 hours (in
   * seconds), which means most of the time inodes will have their
   * timestamps written to disk after 12 hours, but in the worst case a
   * few inodes might not have their timestamps updated for 24 hours.
   */
  unsigned int dirtytime_expire_interval = 12 * 60 * 60;
  static inline struct inode *wb_inode(struct list_head *head)
  {
  	return list_entry(head, struct inode, i_io_list);
  }
  /*
   * Include the creation of the trace points after defining the
   * wb_writeback_work structure and inline functions so that the definition
   * remains local to this file.
   */
  #define CREATE_TRACE_POINTS
  #include <trace/events/writeback.h>
  EXPORT_TRACEPOINT_SYMBOL_GPL(wbc_writepage);
  static bool wb_io_lists_populated(struct bdi_writeback *wb)
  {
  	if (wb_has_dirty_io(wb)) {
  		return false;
  	} else {
  		set_bit(WB_has_dirty_io, &wb->state);
  		WARN_ON_ONCE(!wb->avg_write_bandwidth);
  		atomic_long_add(wb->avg_write_bandwidth,
  				&wb->bdi->tot_write_bandwidth);
  		return true;
  	}
  }
  
  static void wb_io_lists_depopulated(struct bdi_writeback *wb)
  {
  	if (wb_has_dirty_io(wb) && list_empty(&wb->b_dirty) &&
  	    list_empty(&wb->b_io) && list_empty(&wb->b_more_io)) {
  		clear_bit(WB_has_dirty_io, &wb->state);
  		WARN_ON_ONCE(atomic_long_sub_return(wb->avg_write_bandwidth,
  					&wb->bdi->tot_write_bandwidth) < 0);
  	}
  }
  
  /**
   * inode_io_list_move_locked - move an inode onto a bdi_writeback IO list
   * @inode: inode to be moved
   * @wb: target bdi_writeback
   * @head: one of @wb->b_{dirty|io|more_io}
   *
   * Move @inode->i_io_list to @list of @wb and set %WB_has_dirty_io.
   * Returns %true if @inode is the first occupant of the !dirty_time IO
   * lists; otherwise, %false.
   */
  static bool inode_io_list_move_locked(struct inode *inode,
  				      struct bdi_writeback *wb,
  				      struct list_head *head)
  {
  	assert_spin_locked(&wb->list_lock);
  	list_move(&inode->i_io_list, head);
  
  	/* dirty_time doesn't count as dirty_io until expiration */
  	if (head != &wb->b_dirty_time)
  		return wb_io_lists_populated(wb);
  
  	wb_io_lists_depopulated(wb);
  	return false;
  }
  
  /**
   * inode_io_list_del_locked - remove an inode from its bdi_writeback IO list
   * @inode: inode to be removed
   * @wb: bdi_writeback @inode is being removed from
   *
   * Remove @inode which may be on one of @wb->b_{dirty|io|more_io} lists and
   * clear %WB_has_dirty_io if all are empty afterwards.
   */
  static void inode_io_list_del_locked(struct inode *inode,
  				     struct bdi_writeback *wb)
  {
  	assert_spin_locked(&wb->list_lock);
  	list_del_init(&inode->i_io_list);
  	wb_io_lists_depopulated(wb);
  }
  static void wb_wakeup(struct bdi_writeback *wb)
  {
  	spin_lock_bh(&wb->work_lock);
  	if (test_bit(WB_registered, &wb->state))
  		mod_delayed_work(bdi_wq, &wb->dwork, 0);
  	spin_unlock_bh(&wb->work_lock);
  }
  static void wb_queue_work(struct bdi_writeback *wb,
  			  struct wb_writeback_work *work)
  {
  	trace_writeback_queue(wb, work);

  	spin_lock_bh(&wb->work_lock);
  	if (!test_bit(WB_registered, &wb->state))
  		goto out_unlock;
  	if (work->done)
  		atomic_inc(&work->done->cnt);
  	list_add_tail(&work->list, &wb->work_list);
  	mod_delayed_work(bdi_wq, &wb->dwork, 0);
  out_unlock:
  	spin_unlock_bh(&wb->work_lock);
  }
  /**
   * wb_wait_for_completion - wait for completion of bdi_writeback_works
   * @bdi: bdi work items were issued to
   * @done: target wb_completion
   *
   * Wait for one or more work items issued to @bdi with their ->done field
   * set to @done, which should have been defined with
   * DEFINE_WB_COMPLETION_ONSTACK().  This function returns after all such
   * work items are completed.  Work items which are waited upon aren't freed
   * automatically on completion.
   */
  static void wb_wait_for_completion(struct backing_dev_info *bdi,
  				   struct wb_completion *done)
  {
  	atomic_dec(&done->cnt);		/* put down the initial count */
  	wait_event(bdi->wb_waitq, !atomic_read(&done->cnt));
  }
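
  /*
   * Illustrative sketch of how a caller is expected to use the completion
   * machinery above; the helper below is for illustration only and is not a
   * kernel API.  A waited-upon work must not be auto-freed - the on-stack
   * fallback path in bdi_split_work_to_wbs() below follows the same pattern.
   */
  static inline void example_queue_work_and_wait(struct bdi_writeback *wb,
  					       struct wb_writeback_work *work)
  {
  	DEFINE_WB_COMPLETION_ONSTACK(done);

  	work->done = &done;		/* wb_queue_work() bumps done.cnt */
  	work->auto_free = 0;		/* caller keeps ownership of @work */
  	wb_queue_work(wb, work);
  	/* returns once every work with ->done == &done has completed */
  	wb_wait_for_completion(wb->bdi, &done);
  }
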
  #ifdef CONFIG_CGROUP_WRITEBACK
  /* parameters for foreign inode detection, see wb_detach_inode() */
  #define WB_FRN_TIME_SHIFT	13	/* 1s = 2^13, up to 8 secs w/ 16bit */
  #define WB_FRN_TIME_AVG_SHIFT	3	/* avg = avg * 7/8 + new * 1/8 */
  #define WB_FRN_TIME_CUT_DIV	2	/* ignore rounds < avg / 2 */
  #define WB_FRN_TIME_PERIOD	(2 * (1 << WB_FRN_TIME_SHIFT))	/* 2s */
  
  #define WB_FRN_HIST_SLOTS	16	/* inode->i_wb_frn_history is 16bit */
  #define WB_FRN_HIST_UNIT	(WB_FRN_TIME_PERIOD / WB_FRN_HIST_SLOTS)
  					/* each slot's duration is 2s / 16 */
  #define WB_FRN_HIST_THR_SLOTS	(WB_FRN_HIST_SLOTS / 2)
  					/* if foreign slots >= 8, switch */
  #define WB_FRN_HIST_MAX_SLOTS	(WB_FRN_HIST_THR_SLOTS / 2 + 1)
  					/* one round can affect up to 5 slots */
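
  /*
   * Worked numbers for the parameters above: with WB_FRN_TIME_SHIFT == 13,
   * one second is 2^13 == 8192 time units, so a 16-bit value covers roughly
   * 65535 / 8192 ~= 8 seconds.  WB_FRN_TIME_PERIOD is 2 * 8192 == 16384
   * units (2s) and WB_FRN_HIST_UNIT is 16384 / 16 == 1024 units, i.e. one
   * history slot per 125ms of measured writeback time.  A switch verdict
   * additionally requires more than WB_FRN_HIST_THR_SLOTS == 8 of the 16
   * slots to be marked foreign.
   */
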
  static atomic_t isw_nr_in_flight = ATOMIC_INIT(0);
  static struct workqueue_struct *isw_wq;
  void __inode_attach_wb(struct inode *inode, struct page *page)
  {
  	struct backing_dev_info *bdi = inode_to_bdi(inode);
  	struct bdi_writeback *wb = NULL;
  
  	if (inode_cgwb_enabled(inode)) {
  		struct cgroup_subsys_state *memcg_css;
  
  		if (page) {
  			memcg_css = mem_cgroup_css_from_page(page);
  			wb = wb_get_create(bdi, memcg_css, GFP_ATOMIC);
  		} else {
  			/* must pin memcg_css, see wb_get_create() */
  			memcg_css = task_get_css(current, memory_cgrp_id);
  			wb = wb_get_create(bdi, memcg_css, GFP_ATOMIC);
  			css_put(memcg_css);
  		}
  	}
  
  	if (!wb)
  		wb = &bdi->wb;
  
  	/*
  	 * There may be multiple instances of this function racing to
  	 * update the same inode.  Use cmpxchg() to tell the winner.
  	 */
  	if (unlikely(cmpxchg(&inode->i_wb, NULL, wb)))
  		wb_put(wb);
  }
  /**
   * locked_inode_to_wb_and_lock_list - determine a locked inode's wb and lock it
   * @inode: inode of interest with i_lock held
   *
   * Returns @inode's wb with its list_lock held.  @inode->i_lock must be
   * held on entry and is released on return.  The returned wb is guaranteed
   * to stay @inode's associated wb until its list_lock is released.
   */
  static struct bdi_writeback *
  locked_inode_to_wb_and_lock_list(struct inode *inode)
  	__releases(&inode->i_lock)
  	__acquires(&wb->list_lock)
  {
  	while (true) {
  		struct bdi_writeback *wb = inode_to_wb(inode);
  
  		/*
  		 * inode_to_wb() association is protected by both
  		 * @inode->i_lock and @wb->list_lock but list_lock nests
  		 * outside i_lock.  Drop i_lock and verify that the
  		 * association hasn't changed after acquiring list_lock.
  		 */
  		wb_get(wb);
  		spin_unlock(&inode->i_lock);
  		spin_lock(&wb->list_lock);

  		/* i_wb may have changed in between, can't use inode_to_wb() */
  		if (likely(wb == inode->i_wb)) {
  			wb_put(wb);	/* @inode already has ref */
  			return wb;
  		}
  
  		spin_unlock(&wb->list_lock);
  		wb_put(wb);
  		cpu_relax();
  		spin_lock(&inode->i_lock);
  	}
  }
  
  /**
   * inode_to_wb_and_lock_list - determine an inode's wb and lock it
   * @inode: inode of interest
   *
   * Same as locked_inode_to_wb_and_lock_list() but @inode->i_lock isn't held
   * on entry.
   */
  static struct bdi_writeback *inode_to_wb_and_lock_list(struct inode *inode)
  	__acquires(&wb->list_lock)
  {
  	spin_lock(&inode->i_lock);
  	return locked_inode_to_wb_and_lock_list(inode);
  }
  struct inode_switch_wbs_context {
  	struct inode		*inode;
  	struct bdi_writeback	*new_wb;
  
  	struct rcu_head		rcu_head;
  	struct work_struct	work;
  };
  
  static void inode_switch_wbs_work_fn(struct work_struct *work)
  {
  	struct inode_switch_wbs_context *isw =
  		container_of(work, struct inode_switch_wbs_context, work);
  	struct inode *inode = isw->inode;
  	struct address_space *mapping = inode->i_mapping;
  	struct bdi_writeback *old_wb = inode->i_wb;
  	struct bdi_writeback *new_wb = isw->new_wb;
  	struct radix_tree_iter iter;
  	bool switched = false;
  	void **slot;
  
  	/*
  	 * By the time control reaches here, RCU grace period has passed
  	 * since I_WB_SWITCH assertion and all wb stat update transactions
  	 * between unlocked_inode_to_wb_begin/end() are guaranteed to be
  	 * synchronizing against mapping->tree_lock.
  	 *
  	 * Grabbing old_wb->list_lock, inode->i_lock and mapping->tree_lock
  	 * gives us exclusion against all wb related operations on @inode
  	 * including IO list manipulations and stat updates.
  	 */
  	if (old_wb < new_wb) {
  		spin_lock(&old_wb->list_lock);
  		spin_lock_nested(&new_wb->list_lock, SINGLE_DEPTH_NESTING);
  	} else {
  		spin_lock(&new_wb->list_lock);
  		spin_lock_nested(&old_wb->list_lock, SINGLE_DEPTH_NESTING);
  	}
  	spin_lock(&inode->i_lock);
  	spin_lock_irq(&mapping->tree_lock);
  
  	/*
  	 * Once I_FREEING is visible under i_lock, the eviction path owns
  	 * the inode and we shouldn't modify ->i_io_list.
  	 */
  	if (unlikely(inode->i_state & I_FREEING))
  		goto skip_switch;
  
  	/*
  	 * Count and transfer stats.  Note that PAGECACHE_TAG_DIRTY points
  	 * to possibly dirty pages while PAGECACHE_TAG_WRITEBACK points to
  	 * pages actually under writeback.
  	 */
  	radix_tree_for_each_tagged(slot, &mapping->page_tree, &iter, 0,
  				   PAGECACHE_TAG_DIRTY) {
  		struct page *page = radix_tree_deref_slot_protected(slot,
  							&mapping->tree_lock);
  		if (likely(page) && PageDirty(page)) {
  			__dec_wb_stat(old_wb, WB_RECLAIMABLE);
  			__inc_wb_stat(new_wb, WB_RECLAIMABLE);
  		}
  	}
  
  	radix_tree_for_each_tagged(slot, &mapping->page_tree, &iter, 0,
  				   PAGECACHE_TAG_WRITEBACK) {
  		struct page *page = radix_tree_deref_slot_protected(slot,
  							&mapping->tree_lock);
  		if (likely(page)) {
  			WARN_ON_ONCE(!PageWriteback(page));
  			__dec_wb_stat(old_wb, WB_WRITEBACK);
  			__inc_wb_stat(new_wb, WB_WRITEBACK);
  		}
  	}
  
  	wb_get(new_wb);
  
  	/*
  	 * Transfer to @new_wb's IO list if necessary.  The specific list
  	 * @inode was on is ignored and the inode is put on ->b_dirty which
  	 * is always correct including from ->b_dirty_time.  The transfer
  	 * preserves @inode->dirtied_when ordering.
  	 */
  	if (!list_empty(&inode->i_io_list)) {
  		struct inode *pos;
  		inode_io_list_del_locked(inode, old_wb);
  		inode->i_wb = new_wb;
  		list_for_each_entry(pos, &new_wb->b_dirty, i_io_list)
  			if (time_after_eq(inode->dirtied_when,
  					  pos->dirtied_when))
  				break;
  		inode_io_list_move_locked(inode, new_wb, pos->i_io_list.prev);
  	} else {
  		inode->i_wb = new_wb;
  	}

  	/* ->i_wb_frn updates may race wbc_detach_inode() but doesn't matter */
  	inode->i_wb_frn_winner = 0;
  	inode->i_wb_frn_avg_time = 0;
  	inode->i_wb_frn_history = 0;
  	switched = true;
  skip_switch:
  	/*
  	 * Paired with load_acquire in unlocked_inode_to_wb_begin() and
  	 * ensures that the new wb is visible if they see !I_WB_SWITCH.
  	 */
  	smp_store_release(&inode->i_state, inode->i_state & ~I_WB_SWITCH);
  	spin_unlock_irq(&mapping->tree_lock);
  	spin_unlock(&inode->i_lock);
  	spin_unlock(&new_wb->list_lock);
  	spin_unlock(&old_wb->list_lock);

  	if (switched) {
  		wb_wakeup(new_wb);
  		wb_put(old_wb);
  	}
  	wb_put(new_wb);
  
  	iput(inode);
  	kfree(isw);
  
  	atomic_dec(&isw_nr_in_flight);
  }
  
  static void inode_switch_wbs_rcu_fn(struct rcu_head *rcu_head)
  {
  	struct inode_switch_wbs_context *isw = container_of(rcu_head,
  				struct inode_switch_wbs_context, rcu_head);
  
  	/* needs to grab bh-unsafe locks, bounce to work item */
  	INIT_WORK(&isw->work, inode_switch_wbs_work_fn);
  	queue_work(isw_wq, &isw->work);
  }
  
  /**
   * inode_switch_wbs - change the wb association of an inode
   * @inode: target inode
   * @new_wb_id: ID of the new wb
   *
   * Switch @inode's wb association to the wb identified by @new_wb_id.  The
   * switching is performed asynchronously and may fail silently.
   */
  static void inode_switch_wbs(struct inode *inode, int new_wb_id)
  {
  	struct backing_dev_info *bdi = inode_to_bdi(inode);
  	struct cgroup_subsys_state *memcg_css;
  	struct inode_switch_wbs_context *isw;
  
  	/* noop if seems to be already in progress */
  	if (inode->i_state & I_WB_SWITCH)
  		return;
  
  	isw = kzalloc(sizeof(*isw), GFP_ATOMIC);
  	if (!isw)
  		return;
  
  	/* find and pin the new wb */
  	rcu_read_lock();
  	memcg_css = css_from_id(new_wb_id, &memory_cgrp_subsys);
  	if (memcg_css)
  		isw->new_wb = wb_get_create(bdi, memcg_css, GFP_ATOMIC);
  	rcu_read_unlock();
  	if (!isw->new_wb)
  		goto out_free;
  
  	/* while holding I_WB_SWITCH, no one else can update the association */
  	spin_lock(&inode->i_lock);
  	if (!(inode->i_sb->s_flags & MS_ACTIVE) ||
  	    inode->i_state & (I_WB_SWITCH | I_FREEING) ||
  	    inode_to_wb(inode) == isw->new_wb) {
  		spin_unlock(&inode->i_lock);
  		goto out_free;
  	}
  	inode->i_state |= I_WB_SWITCH;
  	__iget(inode);
  	spin_unlock(&inode->i_lock);
  	isw->inode = inode;
  	atomic_inc(&isw_nr_in_flight);
  	/*
  	 * In addition to synchronizing among switchers, I_WB_SWITCH tells
  	 * the RCU protected stat update paths to grab the mapping's
  	 * tree_lock so that stat transfer can synchronize against them.
  	 * Let's continue after I_WB_SWITCH is guaranteed to be visible.
  	 */
  	call_rcu(&isw->rcu_head, inode_switch_wbs_rcu_fn);
  	return;
  
  out_free:
  	if (isw->new_wb)
  		wb_put(isw->new_wb);
  	kfree(isw);
  }
  /**
   * wbc_attach_and_unlock_inode - associate wbc with target inode and unlock it
   * @wbc: writeback_control of interest
   * @inode: target inode
   *
   * @inode is locked and about to be written back under the control of @wbc.
   * Record @inode's writeback context into @wbc and unlock the i_lock.  On
   * writeback completion, wbc_detach_inode() should be called.  This is used
   * to track the cgroup writeback context.
   */
  void wbc_attach_and_unlock_inode(struct writeback_control *wbc,
  				 struct inode *inode)
  {
  	if (!inode_cgwb_enabled(inode)) {
  		spin_unlock(&inode->i_lock);
  		return;
  	}
  	wbc->wb = inode_to_wb(inode);
  	wbc->inode = inode;
  
  	wbc->wb_id = wbc->wb->memcg_css->id;
  	wbc->wb_lcand_id = inode->i_wb_frn_winner;
  	wbc->wb_tcand_id = 0;
  	wbc->wb_bytes = 0;
  	wbc->wb_lcand_bytes = 0;
  	wbc->wb_tcand_bytes = 0;
  	wb_get(wbc->wb);
  	spin_unlock(&inode->i_lock);
  
  	/*
  	 * A dying wb indicates that the memcg-blkcg mapping has changed
  	 * and a new wb is already serving the memcg.  Switch immediately.
  	 */
  	if (unlikely(wb_dying(wbc->wb)))
  		inode_switch_wbs(inode, wbc->wb_id);
  }
  
  /**
   * wbc_detach_inode - disassociate wbc from inode and perform foreign detection
   * @wbc: writeback_control of the just finished writeback
   *
   * To be called after a writeback attempt of an inode finishes and undoes
   * wbc_attach_and_unlock_inode().  Can be called under any context.
   *
   * As concurrent write sharing of an inode is expected to be very rare and
   * memcg only tracks page ownership on a first-use basis, severely confining
   * the usefulness of such sharing, cgroup writeback tracks ownership
   * per-inode.  While the support for concurrent write sharing of an inode
   * is deemed unnecessary, an inode being written to by different cgroups at
   * different points in time is a lot more common, and, more importantly,
   * charging only by first-use can too readily lead to grossly incorrect
   * behaviors (single foreign page can lead to gigabytes of writeback to be
   * incorrectly attributed).
   *
   * To resolve this issue, cgroup writeback detects the majority dirtier of
   * an inode and transfers the ownership to it.  To avoid unnecessary
   * oscillation, the detection mechanism keeps track of history and gives
   * out the switch verdict only if the foreign usage pattern is stable over
   * a certain amount of time and/or writeback attempts.
   *
   * On each writeback attempt, @wbc tries to detect the majority writer
   * using Boyer-Moore majority vote algorithm.  In addition to the byte
   * count from the majority voting, it also counts the bytes written for the
   * current wb and the last round's winner wb (max of last round's current
   * wb, the winner from two rounds ago, and the last round's majority
   * candidate).  Keeping track of the historical winner helps the algorithm
   * to semi-reliably detect the most active writer even when it's not the
   * absolute majority.
   *
   * Once the winner of the round is determined, whether the winner is
   * foreign or not and how much IO time the round consumed is recorded in
   * inode->i_wb_frn_history.  If the amount of recorded foreign IO time is
   * over a certain threshold, the switch verdict is given.
   */
  void wbc_detach_inode(struct writeback_control *wbc)
  {
  	struct bdi_writeback *wb = wbc->wb;
  	struct inode *inode = wbc->inode;
  	unsigned long avg_time, max_bytes, max_time;
  	u16 history;
  	int max_id;
  	if (!wb)
  		return;
  
  	history = inode->i_wb_frn_history;
  	avg_time = inode->i_wb_frn_avg_time;
  	/* pick the winner of this round */
  	if (wbc->wb_bytes >= wbc->wb_lcand_bytes &&
  	    wbc->wb_bytes >= wbc->wb_tcand_bytes) {
  		max_id = wbc->wb_id;
  		max_bytes = wbc->wb_bytes;
  	} else if (wbc->wb_lcand_bytes >= wbc->wb_tcand_bytes) {
  		max_id = wbc->wb_lcand_id;
  		max_bytes = wbc->wb_lcand_bytes;
  	} else {
  		max_id = wbc->wb_tcand_id;
  		max_bytes = wbc->wb_tcand_bytes;
  	}
  
  	/*
  	 * Calculate the amount of IO time the winner consumed and fold it
  	 * into the running average kept per inode.  If the consumed IO
  	 * time is lower than avg_time / WB_FRN_TIME_CUT_DIV, ignore it for
  	 * deciding whether to switch or not.  This is to prevent one-off
  	 * small dirtiers from skewing the verdict.
  	 */
  	max_time = DIV_ROUND_UP((max_bytes >> PAGE_SHIFT) << WB_FRN_TIME_SHIFT,
  				wb->avg_write_bandwidth);
  	if (avg_time)
  		avg_time += (max_time >> WB_FRN_TIME_AVG_SHIFT) -
  			    (avg_time >> WB_FRN_TIME_AVG_SHIFT);
  	else
  		avg_time = max_time;	/* immediate catch up on first run */
  
  	if (max_time >= avg_time / WB_FRN_TIME_CUT_DIV) {
  		int slots;
  
  		/*
  		 * The switch verdict is reached if foreign wb's consume
  		 * more than a certain proportion of IO time in a
  		 * WB_FRN_TIME_PERIOD.  This is loosely tracked by 16 slot
  		 * history mask where each bit represents one sixteenth of
  		 * the period.  Determine the number of slots to shift into
  		 * history from @max_time.
  		 */
  		slots = min(DIV_ROUND_UP(max_time, WB_FRN_HIST_UNIT),
  			    (unsigned long)WB_FRN_HIST_MAX_SLOTS);
  		history <<= slots;
  		if (wbc->wb_id != max_id)
  			history |= (1U << slots) - 1;
  
  		/*
  		 * Switch if the current wb isn't the consistent winner.
  		 * If there are multiple closely competing dirtiers, the
  		 * inode may switch across them repeatedly over time, which
  		 * is okay.  The main goal is avoiding keeping an inode on
  		 * the wrong wb for an extended period of time.
  		 */
  		if (hweight32(history) > WB_FRN_HIST_THR_SLOTS)
  			inode_switch_wbs(inode, max_id);
  	}
  
  	/*
  	 * Multiple instances of this function may race to update the
  	 * following fields but we don't mind occasional inaccuracies.
  	 */
  	inode->i_wb_frn_winner = max_id;
  	inode->i_wb_frn_avg_time = min(avg_time, (unsigned long)U16_MAX);
  	inode->i_wb_frn_history = history;
  	wb_put(wbc->wb);
  	wbc->wb = NULL;
  }
  
  /**
   * wbc_account_io - account IO issued during writeback
   * @wbc: writeback_control of the writeback in progress
   * @page: page being written out
   * @bytes: number of bytes being written out
   *
   * @bytes from @page are about to be written out during the writeback
   * controlled by @wbc.  Keep the book for foreign inode detection.  See
   * wbc_detach_inode().
   */
  void wbc_account_io(struct writeback_control *wbc, struct page *page,
  		    size_t bytes)
  {
  	int id;
  
  	/*
  	 * pageout() path doesn't attach @wbc to the inode being written
  	 * out.  This is intentional as we don't want the function to block
  	 * behind a slow cgroup.  Ultimately, we want pageout() to kick off
  	 * regular writeback instead of writing things out itself.
  	 */
  	if (!wbc->wb)
  		return;
  	id = mem_cgroup_css_from_page(page)->id;
  
  	if (id == wbc->wb_id) {
  		wbc->wb_bytes += bytes;
  		return;
  	}
  
  	if (id == wbc->wb_lcand_id)
  		wbc->wb_lcand_bytes += bytes;
  
  	/* Boyer-Moore majority vote algorithm */
  	if (!wbc->wb_tcand_bytes)
  		wbc->wb_tcand_id = id;
  	if (id == wbc->wb_tcand_id)
  		wbc->wb_tcand_bytes += bytes;
  	else
  		wbc->wb_tcand_bytes -= min(bytes, wbc->wb_tcand_bytes);
  }
  EXPORT_SYMBOL_GPL(wbc_account_io);
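
  /*
   * Worked example of the accounting above: suppose the attached wb belongs
   * to cgroup X and four equally sized pages owned by cgroups A, B, A, A are
   * written out, with neither A nor B being last round's winner
   * (wb_lcand_id).  wb_bytes stays 0, and the Boyer-Moore candidate becomes
   * A(1), is cancelled back to 0 by B, then climbs to A(2); wb_tcand_id ends
   * up as A, which wbc_detach_inode() can then pick as this round's majority
   * writer.
   */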
  
  /**
   * inode_congested - test whether an inode is congested
   * @inode: inode to test for congestion (may be NULL)
   * @cong_bits: mask of WB_[a]sync_congested bits to test
   *
   * Tests whether @inode is congested.  @cong_bits is the mask of congestion
   * bits to test and the return value is the mask of set bits.
   *
   * If cgroup writeback is enabled for @inode, the congestion state is
   * determined by whether the cgwb (cgroup bdi_writeback) for the blkcg
   * associated with @inode is congested; otherwise, the root wb's congestion
   * state is used.
   *
   * @inode is allowed to be NULL as this function is often called on
   * mapping->host which is NULL for the swapper space.
   */
  int inode_congested(struct inode *inode, int cong_bits)
  {
  	/*
  	 * Once set, ->i_wb never becomes NULL while the inode is alive.
  	 * Start transaction iff ->i_wb is visible.
  	 */
  	if (inode && inode_to_wb_is_valid(inode)) {
  		struct bdi_writeback *wb;
  		bool locked, congested;
  
  		wb = unlocked_inode_to_wb_begin(inode, &locked);
  		congested = wb_congested(wb, cong_bits);
  		unlocked_inode_to_wb_end(inode, locked);
  		return congested;
  	}
  
  	return wb_congested(&inode_to_bdi(inode)->wb, cong_bits);
  }
  EXPORT_SYMBOL_GPL(inode_congested);
  /**
   * wb_split_bdi_pages - split nr_pages to write according to bandwidth
   * @wb: target bdi_writeback to split @nr_pages to
   * @nr_pages: number of pages to write for the whole bdi
   *
   * Split @wb's portion of @nr_pages according to @wb's write bandwidth in
   * relation to the total write bandwidth of all wb's w/ dirty inodes on
   * @wb->bdi.
   */
  static long wb_split_bdi_pages(struct bdi_writeback *wb, long nr_pages)
  {
  	unsigned long this_bw = wb->avg_write_bandwidth;
  	unsigned long tot_bw = atomic_long_read(&wb->bdi->tot_write_bandwidth);
  
  	if (nr_pages == LONG_MAX)
  		return LONG_MAX;
  
  	/*
  	 * This may be called on clean wb's and proportional distribution
  	 * may not make sense, just use the original @nr_pages in those
  	 * cases.  In general, we wanna err on the side of writing more.
  	 */
  	if (!tot_bw || this_bw >= tot_bw)
  		return nr_pages;
  	else
  		return DIV_ROUND_UP_ULL((u64)nr_pages * this_bw, tot_bw);
  }
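
  /*
   * Worked example: if this wb accounts for a quarter of the bdi's total
   * write bandwidth and the caller asks for nr_pages == 1024, the
   * proportional share is DIV_ROUND_UP_ULL(1024 * this_bw, tot_bw) == 256
   * pages; a LONG_MAX request is passed through unchanged.
   */
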
  /**
   * bdi_split_work_to_wbs - split a wb_writeback_work to all wb's of a bdi
   * @bdi: target backing_dev_info
   * @base_work: wb_writeback_work to issue
   * @skip_if_busy: skip wb's which already have writeback in progress
   *
   * Split and issue @base_work to all wb's (bdi_writeback's) of @bdi which
   * have dirty inodes.  If @base_work->nr_page isn't %LONG_MAX, it's
   * distributed to the busy wbs according to each wb's proportion in the
   * total active write bandwidth of @bdi.
   */
  static void bdi_split_work_to_wbs(struct backing_dev_info *bdi,
  				  struct wb_writeback_work *base_work,
  				  bool skip_if_busy)
  {
  	struct bdi_writeback *last_wb = NULL;
  	struct bdi_writeback *wb = list_entry(&bdi->wb_list,
  					      struct bdi_writeback, bdi_node);
  
  	might_sleep();
  restart:
  	rcu_read_lock();
  	list_for_each_entry_continue_rcu(wb, &bdi->wb_list, bdi_node) {
  		DEFINE_WB_COMPLETION_ONSTACK(fallback_work_done);
  		struct wb_writeback_work fallback_work;
  		struct wb_writeback_work *work;
  		long nr_pages;
  		if (last_wb) {
  			wb_put(last_wb);
  			last_wb = NULL;
  		}
  		/* SYNC_ALL writes out I_DIRTY_TIME too */
  		if (!wb_has_dirty_io(wb) &&
  		    (base_work->sync_mode == WB_SYNC_NONE ||
  		     list_empty(&wb->b_dirty_time)))
  			continue;
  		if (skip_if_busy && writeback_in_progress(wb))
  			continue;
  		nr_pages = wb_split_bdi_pages(wb, base_work->nr_pages);
  
  		work = kmalloc(sizeof(*work), GFP_ATOMIC);
  		if (work) {
  			*work = *base_work;
  			work->nr_pages = nr_pages;
  			work->auto_free = 1;
  			wb_queue_work(wb, work);
  			continue;
  		}
  
  		/* alloc failed, execute synchronously using on-stack fallback */
  		work = &fallback_work;
  		*work = *base_work;
  		work->nr_pages = nr_pages;
  		work->auto_free = 0;
  		work->done = &fallback_work_done;
  
  		wb_queue_work(wb, work);
  		/*
  		 * Pin @wb so that it stays on @bdi->wb_list.  This allows
  		 * continuing iteration from @wb after dropping and
  		 * regrabbing rcu read lock.
  		 */
  		wb_get(wb);
  		last_wb = wb;
  		rcu_read_unlock();
  		wb_wait_for_completion(bdi, &fallback_work_done);
  		goto restart;
  	}
  	rcu_read_unlock();
  
  	if (last_wb)
  		wb_put(last_wb);
  }
  /**
   * cgroup_writeback_umount - flush inode wb switches for umount
   *
   * This function is called when a super_block is about to be destroyed and
   * flushes in-flight inode wb switches.  An inode wb switch goes through
   * RCU and then workqueue, so the two need to be flushed in order to ensure
   * that all previously scheduled switches are finished.  As wb switches are
   * rare occurrences and synchronize_rcu() can take a while, perform
   * flushing iff wb switches are in flight.
   */
  void cgroup_writeback_umount(void)
  {
  	if (atomic_read(&isw_nr_in_flight)) {
  		synchronize_rcu();
  		flush_workqueue(isw_wq);
  	}
  }
  
  static int __init cgroup_writeback_init(void)
  {
  	isw_wq = alloc_workqueue("inode_switch_wbs", 0, 0);
  	if (!isw_wq)
  		return -ENOMEM;
  	return 0;
  }
  fs_initcall(cgroup_writeback_init);
  #else	/* CONFIG_CGROUP_WRITEBACK */
  static struct bdi_writeback *
  locked_inode_to_wb_and_lock_list(struct inode *inode)
  	__releases(&inode->i_lock)
  	__acquires(&wb->list_lock)
  {
  	struct bdi_writeback *wb = inode_to_wb(inode);
  
  	spin_unlock(&inode->i_lock);
  	spin_lock(&wb->list_lock);
  	return wb;
  }
  
  static struct bdi_writeback *inode_to_wb_and_lock_list(struct inode *inode)
  	__acquires(&wb->list_lock)
  {
  	struct bdi_writeback *wb = inode_to_wb(inode);
  
  	spin_lock(&wb->list_lock);
  	return wb;
  }
  static long wb_split_bdi_pages(struct bdi_writeback *wb, long nr_pages)
  {
  	return nr_pages;
  }
  static void bdi_split_work_to_wbs(struct backing_dev_info *bdi,
  				  struct wb_writeback_work *base_work,
  				  bool skip_if_busy)
  {
  	might_sleep();
  	if (!skip_if_busy || !writeback_in_progress(&bdi->wb)) {
  		base_work->auto_free = 0;
  		wb_queue_work(&bdi->wb, base_work);
  	}
  }
  #endif	/* CONFIG_CGROUP_WRITEBACK */
  void wb_start_writeback(struct bdi_writeback *wb, long nr_pages,
  			bool range_cyclic, enum wb_reason reason)
  {
  	struct wb_writeback_work *work;
  
  	if (!wb_has_dirty_io(wb))
  		return;
  
  	/*
  	 * This is WB_SYNC_NONE writeback, so if allocation fails just
  	 * wakeup the thread for old dirty data writeback
  	 */
  	work = kzalloc(sizeof(*work),
  		       GFP_NOWAIT | __GFP_NOMEMALLOC | __GFP_NOWARN);
  	if (!work) {
  		trace_writeback_nowork(wb);
  		wb_wakeup(wb);
  		return;
  	}
  
  	work->sync_mode	= WB_SYNC_NONE;
  	work->nr_pages	= nr_pages;
  	work->range_cyclic = range_cyclic;
  	work->reason	= reason;
  	work->auto_free	= 1;
  
  	wb_queue_work(wb, work);
  }

  /**
   * wb_start_background_writeback - start background writeback
   * @wb: bdi_writback to write from
   *
   * Description:
   *   This makes sure WB_SYNC_NONE background writeback happens. When
   *   this function returns, it is only guaranteed that for given wb
   *   some IO is happening if we are over background dirty threshold.
   *   Caller need not hold sb s_umount semaphore.
   */
  void wb_start_background_writeback(struct bdi_writeback *wb)
  {
  	/*
  	 * We just wake up the flusher thread. It will perform background
  	 * writeback as soon as there is no other work to do.
  	 */
  	trace_writeback_wake_background(wb);
  	wb_wakeup(wb);
  }
  
  /*
   * Remove the inode from the writeback list it is on.
   */
  void inode_io_list_del(struct inode *inode)
  {
  	struct bdi_writeback *wb;

  	wb = inode_to_wb_and_lock_list(inode);
  	inode_io_list_del_locked(inode, wb);
  	spin_unlock(&wb->list_lock);
  }
  /*
   * mark an inode as under writeback on the sb
   */
  void sb_mark_inode_writeback(struct inode *inode)
  {
  	struct super_block *sb = inode->i_sb;
  	unsigned long flags;
  
  	if (list_empty(&inode->i_wb_list)) {
  		spin_lock_irqsave(&sb->s_inode_wblist_lock, flags);
  		if (list_empty(&inode->i_wb_list)) {
  			list_add_tail(&inode->i_wb_list, &sb->s_inodes_wb);
  			trace_sb_mark_inode_writeback(inode);
  		}
  		spin_unlock_irqrestore(&sb->s_inode_wblist_lock, flags);
  	}
  }
  
  /*
   * clear an inode as under writeback on the sb
   */
  void sb_clear_inode_writeback(struct inode *inode)
  {
  	struct super_block *sb = inode->i_sb;
  	unsigned long flags;
  
  	if (!list_empty(&inode->i_wb_list)) {
  		spin_lock_irqsave(&sb->s_inode_wblist_lock, flags);
  		if (!list_empty(&inode->i_wb_list)) {
  			list_del_init(&inode->i_wb_list);
  			trace_sb_clear_inode_writeback(inode);
  		}
  		spin_unlock_irqrestore(&sb->s_inode_wblist_lock, flags);
  	}
  }
  
  /*
   * Redirty an inode: set its when-it-was dirtied timestamp and move it to the
   * furthest end of its superblock's dirty-inode list.
   *
   * Before stamping the inode's ->dirtied_when, we check to see whether it is
   * already the most-recently-dirtied inode on the b_dirty list.  If that is
   * the case then the inode must have been redirtied while it was being written
   * out and we don't reset its dirtied_when.
   */
  static void redirty_tail(struct inode *inode, struct bdi_writeback *wb)
  {
  	if (!list_empty(&wb->b_dirty)) {
  		struct inode *tail;

  		tail = wb_inode(wb->b_dirty.next);
  		if (time_before(inode->dirtied_when, tail->dirtied_when))
  			inode->dirtied_when = jiffies;
  	}
  	inode_io_list_move_locked(inode, wb, &wb->b_dirty);
  }
  
  /*
   * requeue inode for re-scanning after bdi->b_io list is exhausted.
   */
  static void requeue_io(struct inode *inode, struct bdi_writeback *wb)
  {
  	inode_io_list_move_locked(inode, wb, &wb->b_more_io);
  }
  static void inode_sync_complete(struct inode *inode)
  {
  	inode->i_state &= ~I_SYNC;
  	/* If inode is clean and unused, put it into LRU now... */
  	inode_add_lru(inode);
  	/* Waiters must see I_SYNC cleared before being woken up */
  	smp_mb();
  	wake_up_bit(&inode->i_state, __I_SYNC);
  }
  static bool inode_dirtied_after(struct inode *inode, unsigned long t)
  {
  	bool ret = time_after(inode->dirtied_when, t);
  #ifndef CONFIG_64BIT
  	/*
  	 * For inodes being constantly redirtied, dirtied_when can get stuck.
  	 * It _appears_ to be in the future, but is actually in distant past.
  	 * This test is necessary to prevent such wrapped-around relative times
  	 * from permanently stopping the whole bdi writeback.
  	 */
  	ret = ret && time_before_eq(inode->dirtied_when, jiffies);
  #endif
  	return ret;
  }
  #define EXPIRE_DIRTY_ATIME 0x0001
  /*
   * Move expired (dirtied before work->older_than_this) dirty inodes from
   * @delaying_queue to @dispatch_queue.
   */
  static int move_expired_inodes(struct list_head *delaying_queue,
  			       struct list_head *dispatch_queue,
  			       int flags,
  			       struct wb_writeback_work *work)
  {
  	unsigned long *older_than_this = NULL;
  	unsigned long expire_time;
  	LIST_HEAD(tmp);
  	struct list_head *pos, *node;
  	struct super_block *sb = NULL;
  	struct inode *inode;
  	int do_sb_sort = 0;
  	int moved = 0;

  	if ((flags & EXPIRE_DIRTY_ATIME) == 0)
  		older_than_this = work->older_than_this;
  	else if (!work->for_sync) {
  		expire_time = jiffies - (dirtytime_expire_interval * HZ);
  		older_than_this = &expire_time;
  	}
  	while (!list_empty(delaying_queue)) {
  		inode = wb_inode(delaying_queue->prev);
  		if (older_than_this &&
  		    inode_dirtied_after(inode, *older_than_this))
  			break;
  		list_move(&inode->i_io_list, &tmp);
  		moved++;
  		if (flags & EXPIRE_DIRTY_ATIME)
  			set_bit(__I_DIRTY_TIME_EXPIRED, &inode->i_state);
  		if (sb_is_blkdev_sb(inode->i_sb))
  			continue;
  		if (sb && sb != inode->i_sb)
  			do_sb_sort = 1;
  		sb = inode->i_sb;
  	}
  	/* just one sb in list, splice to dispatch_queue and we're done */
  	if (!do_sb_sort) {
  		list_splice(&tmp, dispatch_queue);
  		goto out;
  	}
  	/* Move inodes from one superblock together */
  	while (!list_empty(&tmp)) {
  		sb = wb_inode(tmp.prev)->i_sb;
  		list_for_each_prev_safe(pos, node, &tmp) {
  			inode = wb_inode(pos);
  			if (inode->i_sb == sb)
  				list_move(&inode->i_io_list, dispatch_queue);
  		}
  	}
  out:
  	return moved;
  }
  
  /*
   * Queue all expired dirty inodes for io, eldest first.
   * Before
   *         newly dirtied     b_dirty    b_io    b_more_io
   *         =============>    gf         edc     BA
   * After
   *         newly dirtied     b_dirty    b_io    b_more_io
   *         =============>    g          fBAedc
   *                                           |
   *                                           +--> dequeue for IO
   */
  static void queue_io(struct bdi_writeback *wb, struct wb_writeback_work *work)
  {
  	int moved;

  	assert_spin_locked(&wb->list_lock);
  	list_splice_init(&wb->b_more_io, &wb->b_io);
  	moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, 0, work);
  	moved += move_expired_inodes(&wb->b_dirty_time, &wb->b_io,
  				     EXPIRE_DIRTY_ATIME, work);
  	if (moved)
  		wb_io_lists_populated(wb);
  	trace_writeback_queue_io(wb, work, moved);
  }
  static int write_inode(struct inode *inode, struct writeback_control *wbc)
  {
  	int ret;
  
  	if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode)) {
  		trace_writeback_write_inode_start(inode, wbc);
  		ret = inode->i_sb->s_op->write_inode(inode, wbc);
  		trace_writeback_write_inode(inode, wbc);
  		return ret;
  	}
  	return 0;
  }

  /*
169ebd901   Jan Kara   writeback: Avoid ...
1121
1122
   * Wait for writeback on an inode to complete. Called with i_lock held.
   * Caller must make sure inode cannot go away when we drop i_lock.
01c031945   Christoph Hellwig   cleanup __writeba...
1123
   */
169ebd901   Jan Kara   writeback: Avoid ...
1124
1125
1126
  static void __inode_wait_for_writeback(struct inode *inode)
  	__releases(inode->i_lock)
  	__acquires(inode->i_lock)
01c031945   Christoph Hellwig   cleanup __writeba...
1127
1128
1129
1130
1131
  {
  	DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC);
  	wait_queue_head_t *wqh;
  
  	wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
250df6ed2   Dave Chinner   fs: protect inode...
1132
1133
  	while (inode->i_state & I_SYNC) {
  		spin_unlock(&inode->i_lock);
743162013   NeilBrown   sched: Remove pro...
1134
1135
  		__wait_on_bit(wqh, &wq, bit_wait,
  			      TASK_UNINTERRUPTIBLE);
250df6ed2   Dave Chinner   fs: protect inode...
1136
  		spin_lock(&inode->i_lock);
58a9d3d8d   Richard Kennedy   fs-writeback: che...
1137
  	}
01c031945   Christoph Hellwig   cleanup __writeba...
1138
1139
1140
  }
  
  /*
169ebd901   Jan Kara   writeback: Avoid ...
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
   * Wait for writeback on an inode to complete. Caller must have inode pinned.
   */
  void inode_wait_for_writeback(struct inode *inode)
  {
  	spin_lock(&inode->i_lock);
  	__inode_wait_for_writeback(inode);
  	spin_unlock(&inode->i_lock);
  }
  
  /*
   * Sleep until I_SYNC is cleared. This function must be called with i_lock
   * held and drops it. It is aimed for callers not holding any inode reference
   * so once i_lock is dropped, inode can go away.
   */
  static void inode_sleep_on_writeback(struct inode *inode)
  	__releases(inode->i_lock)
  {
  	DEFINE_WAIT(wait);
  	wait_queue_head_t *wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
  	int sleep;
  
  	prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
  	sleep = inode->i_state & I_SYNC;
  	spin_unlock(&inode->i_lock);
  	if (sleep)
  		schedule();
  	finish_wait(wqh, &wait);
  }
  
  /*
ccb26b5a6   Jan Kara   writeback: Separa...
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
   * Find proper writeback list for the inode depending on its current state and
   * possibly also change of its state while we were doing writeback.  Here we
   * handle things such as livelock prevention or fairness of writeback among
   * inodes. This function can be called only by the flusher thread - no one
   * else processes all inodes in writeback lists and requeueing inodes behind
   * the flusher thread's back can have unexpected consequences.
   */
  static void requeue_inode(struct inode *inode, struct bdi_writeback *wb,
  			  struct writeback_control *wbc)
  {
  	if (inode->i_state & I_FREEING)
  		return;
  
  	/*
  	 * Sync livelock prevention. Each inode is tagged and synced in one
  	 * shot. If still dirty, it will be redirty_tail()'ed below.  Update
  	 * the dirty time to prevent it from being enqueued and synced again.
  	 */
  	if ((inode->i_state & I_DIRTY) &&
  	    (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages))
  		inode->dirtied_when = jiffies;
4f8ad655d   Jan Kara   writeback: Refact...
1192
1193
1194
1195
1196
1197
1198
1199
  	if (wbc->pages_skipped) {
  		/*
  		 * writeback is not making progress due to locked
  		 * buffers. Skip this inode for now.
  		 */
  		redirty_tail(inode, wb);
  		return;
  	}
ccb26b5a6   Jan Kara   writeback: Separa...
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
  	if (mapping_tagged(inode->i_mapping, PAGECACHE_TAG_DIRTY)) {
  		/*
  		 * We didn't write back all the pages.  nfs_writepages()
  		 * sometimes bails out without doing anything.
  		 */
  		if (wbc->nr_to_write <= 0) {
  			/* Slice used up. Queue for next turn. */
  			requeue_io(inode, wb);
  		} else {
  			/*
  			 * Writeback blocked by something other than
  			 * congestion. Delay the inode for some time to
  			 * avoid spinning on the CPU (100% iowait)
  			 * retrying writeback of the dirty page/inode
  			 * that cannot be performed immediately.
  			 */
  			redirty_tail(inode, wb);
  		}
  	} else if (inode->i_state & I_DIRTY) {
  		/*
  		 * Filesystems can dirty the inode during writeback operations,
  		 * such as delayed allocation during submission or metadata
  		 * updates after data IO completion.
  		 */
  		redirty_tail(inode, wb);
0ae45f63d   Theodore Ts'o   vfs: add support ...
1225
  	} else if (inode->i_state & I_DIRTY_TIME) {
a2f487069   Theodore Ts'o   fs: make sure the...
1226
  		inode->dirtied_when = jiffies;
c7f540849   Dave Chinner   inode: rename i_w...
1227
  		inode_io_list_move_locked(inode, wb, &wb->b_dirty_time);
ccb26b5a6   Jan Kara   writeback: Separa...
1228
1229
  	} else {
  		/* The inode is clean. Remove from writeback lists. */
c7f540849   Dave Chinner   inode: rename i_w...
1230
  		inode_io_list_del_locked(inode, wb);
ccb26b5a6   Jan Kara   writeback: Separa...
1231
1232
1233
1234
  	}
  }
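  /*
   * Summary of the requeue_inode() decisions above:
   *
   *	pages skipped (locked buffers)		-> redirty_tail()
   *	dirty pages left, nr_to_write used up	-> requeue_io(), next slice
   *	dirty pages left, slice not used up	-> redirty_tail(), back off
   *	inode itself still I_DIRTY		-> redirty_tail()
   *	only I_DIRTY_TIME left			-> move to b_dirty_time
   *	clean					-> drop from writeback lists
   */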
  
  /*
4f8ad655d   Jan Kara   writeback: Refact...
1235
1236
1237
   * Write out an inode and its dirty pages. Do not update the writeback list
   * linkage. That is left to the caller. The caller is also responsible for
   * setting I_SYNC flag and calling inode_sync_complete() to clear it.
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1238
1239
   */
  static int
cd8ed2a45   Yan Hong   fs/fs-writeback.c...
1240
  __writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1241
  {
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1242
  	struct address_space *mapping = inode->i_mapping;
251d6a471   Wu Fengguang   writeback: trace ...
1243
  	long nr_to_write = wbc->nr_to_write;
01c031945   Christoph Hellwig   cleanup __writeba...
1244
  	unsigned dirty;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1245
  	int ret;
4f8ad655d   Jan Kara   writeback: Refact...
1246
  	WARN_ON(!(inode->i_state & I_SYNC));
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1247

9fb0a7da0   Tejun Heo   writeback: add mo...
1248
  	trace_writeback_single_inode_start(inode, wbc, nr_to_write);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1249
  	ret = do_writepages(mapping, wbc);
26821ed40   Christoph Hellwig   make sure data is...
1250
1251
1252
  	/*
  	 * Make sure to wait on the data before writing out the metadata.
  	 * This is important for filesystems that modify metadata on data
7747bd4bc   Dave Chinner   sync: don't block...
1253
1254
1255
  	 * I/O completion. We don't do it for sync(2) writeback because it has a
  	 * separate, external IO completion path and ->sync_fs for guaranteeing
  	 * inode metadata is written back correctly.
26821ed40   Christoph Hellwig   make sure data is...
1256
  	 */
7747bd4bc   Dave Chinner   sync: don't block...
1257
  	if (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync) {
26821ed40   Christoph Hellwig   make sure data is...
1258
  		int err = filemap_fdatawait(mapping);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1259
1260
1261
  		if (ret == 0)
  			ret = err;
  	}
5547e8aac   Dmitry Monakhov   writeback: Update...
1262
1263
1264
1265
1266
  	/*
  	 * Some filesystems may redirty the inode during the writeback
  	 * due to delalloc, so clear the dirty metadata flags right before
  	 * write_inode()
  	 */
250df6ed2   Dave Chinner   fs: protect inode...
1267
  	spin_lock(&inode->i_lock);
9c6ac78eb   Tejun Heo   writeback: fix a ...
1268

5547e8aac   Dmitry Monakhov   writeback: Update...
1269
  	dirty = inode->i_state & I_DIRTY;
a2f487069   Theodore Ts'o   fs: make sure the...
1270
1271
  	if (inode->i_state & I_DIRTY_TIME) {
  		if ((dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) ||
dc5ff2b1d   Jan Kara   writeback: Write ...
1272
  		    wbc->sync_mode == WB_SYNC_ALL ||
a2f487069   Theodore Ts'o   fs: make sure the...
1273
1274
1275
1276
1277
1278
1279
1280
1281
  		    unlikely(inode->i_state & I_DIRTY_TIME_EXPIRED) ||
  		    unlikely(time_after(jiffies,
  					(inode->dirtied_time_when +
  					 dirtytime_expire_interval * HZ)))) {
  			dirty |= I_DIRTY_TIME | I_DIRTY_TIME_EXPIRED;
  			trace_writeback_lazytime(inode);
  		}
  	} else
  		inode->i_state &= ~I_DIRTY_TIME_EXPIRED;
0ae45f63d   Theodore Ts'o   vfs: add support ...
1282
  	inode->i_state &= ~dirty;
9c6ac78eb   Tejun Heo   writeback: fix a ...
1283
1284
1285
1286
1287
1288
1289
1290
1291
1292
1293
1294
1295
1296
1297
1298
  
  	/*
  	 * Paired with smp_mb() in __mark_inode_dirty().  This allows
  	 * __mark_inode_dirty() to test i_state without grabbing i_lock -
  	 * either they see the I_DIRTY bits cleared or we see the dirtied
  	 * inode.
  	 *
  	 * I_DIRTY_PAGES is always cleared together above even if @mapping
  	 * still has dirty pages.  The flag is reinstated after smp_mb() if
  	 * necessary.  This guarantees that either __mark_inode_dirty()
  	 * sees clear I_DIRTY_PAGES or we see PAGECACHE_TAG_DIRTY.
  	 */
  	smp_mb();
  
  	if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
  		inode->i_state |= I_DIRTY_PAGES;
250df6ed2   Dave Chinner   fs: protect inode...
1299
  	spin_unlock(&inode->i_lock);
9c6ac78eb   Tejun Heo   writeback: fix a ...
1300

0ae45f63d   Theodore Ts'o   vfs: add support ...
1301
1302
  	if (dirty & I_DIRTY_TIME)
  		mark_inode_dirty_sync(inode);
26821ed40   Christoph Hellwig   make sure data is...
1303
  	/* Don't write the inode if only I_DIRTY_PAGES was set */
0ae45f63d   Theodore Ts'o   vfs: add support ...
1304
  	if (dirty & ~I_DIRTY_PAGES) {
a9185b41a   Christoph Hellwig   pass writeback_co...
1305
  		int err = write_inode(inode, wbc);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1306
1307
1308
  		if (ret == 0)
  			ret = err;
  	}
4f8ad655d   Jan Kara   writeback: Refact...
1309
1310
1311
1312
1313
1314
1315
1316
1317
1318
1319
1320
  	trace_writeback_single_inode(inode, wbc, nr_to_write);
  	return ret;
  }
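  /*
   * The ordering in __writeback_single_inode() is deliberate: data is
   * written (and, for most WB_SYNC_ALL callers, waited for) first, the
   * dirty flags are sampled and cleared under i_lock, and only then is
   * write_inode() called, so metadata dirtied by the data writeout itself
   * is picked up rather than lost.
   */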
  
  /*
   * Write out an inode's dirty pages. Either the caller has an active reference
   * on the inode or the inode has I_WILL_FREE set.
   *
   * This function is designed to be called for writing back one inode at a
   * time, e.g. on behalf of a filesystem. The flusher thread instead uses
   * __writeback_single_inode() and does more elaborate writeback list handling
   * in writeback_sb_inodes().
   */
aaf255933   Tejun Heo   writeback, cgroup...
1321
1322
  static int writeback_single_inode(struct inode *inode,
  				  struct writeback_control *wbc)
4f8ad655d   Jan Kara   writeback: Refact...
1323
  {
aaf255933   Tejun Heo   writeback, cgroup...
1324
  	struct bdi_writeback *wb;
4f8ad655d   Jan Kara   writeback: Refact...
1325
1326
1327
1328
1329
1330
1331
1332
1333
1334
1335
1336
  	int ret = 0;
  
  	spin_lock(&inode->i_lock);
  	if (!atomic_read(&inode->i_count))
  		WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING)));
  	else
  		WARN_ON(inode->i_state & I_WILL_FREE);
  
  	if (inode->i_state & I_SYNC) {
  		if (wbc->sync_mode != WB_SYNC_ALL)
  			goto out;
  		/*
169ebd901   Jan Kara   writeback: Avoid ...
1337
1338
1339
  		 * It's a data-integrity sync. We must wait. Since callers hold
  		 * inode reference or inode has I_WILL_FREE set, it cannot go
  		 * away under us.
4f8ad655d   Jan Kara   writeback: Refact...
1340
  		 */
169ebd901   Jan Kara   writeback: Avoid ...
1341
  		__inode_wait_for_writeback(inode);
4f8ad655d   Jan Kara   writeback: Refact...
1342
1343
1344
  	}
  	WARN_ON(inode->i_state & I_SYNC);
  	/*
f9b0e058c   Jan Kara   writeback: Fix da...
1345
1346
1347
1348
1349
1350
  	 * Skip inode if it is clean and we have no outstanding writeback in
  	 * WB_SYNC_ALL mode. We don't want to mess with writeback lists in this
  	 * function, since the flusher thread may for example be doing sync in
  	 * parallel and if we move the inode, it could get skipped. So here we
  	 * make sure inode is on some writeback list and leave it there unless
  	 * we have completely cleaned the inode.
4f8ad655d   Jan Kara   writeback: Refact...
1351
  	 */
0ae45f63d   Theodore Ts'o   vfs: add support ...
1352
  	if (!(inode->i_state & I_DIRTY_ALL) &&
f9b0e058c   Jan Kara   writeback: Fix da...
1353
1354
  	    (wbc->sync_mode != WB_SYNC_ALL ||
  	     !mapping_tagged(inode->i_mapping, PAGECACHE_TAG_WRITEBACK)))
4f8ad655d   Jan Kara   writeback: Refact...
1355
1356
  		goto out;
  	inode->i_state |= I_SYNC;
b16b1deb5   Tejun Heo   writeback: make w...
1357
  	wbc_attach_and_unlock_inode(wbc, inode);
4f8ad655d   Jan Kara   writeback: Refact...
1358

cd8ed2a45   Yan Hong   fs/fs-writeback.c...
1359
  	ret = __writeback_single_inode(inode, wbc);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1360

b16b1deb5   Tejun Heo   writeback: make w...
1361
  	wbc_detach_inode(wbc);
aaf255933   Tejun Heo   writeback, cgroup...
1362
1363
  
  	wb = inode_to_wb_and_lock_list(inode);
250df6ed2   Dave Chinner   fs: protect inode...
1364
  	spin_lock(&inode->i_lock);
4f8ad655d   Jan Kara   writeback: Refact...
1365
1366
1367
1368
  	/*
  	 * If inode is clean, remove it from writeback lists. Otherwise don't
  	 * touch it. See comment above for explanation.
  	 */
0ae45f63d   Theodore Ts'o   vfs: add support ...
1369
  	if (!(inode->i_state & I_DIRTY_ALL))
c7f540849   Dave Chinner   inode: rename i_w...
1370
  		inode_io_list_del_locked(inode, wb);
4f8ad655d   Jan Kara   writeback: Refact...
1371
  	spin_unlock(&wb->list_lock);
1c0eeaf56   Joern Engel   introduce I_SYNC
1372
  	inode_sync_complete(inode);
4f8ad655d   Jan Kara   writeback: Refact...
1373
1374
  out:
  	spin_unlock(&inode->i_lock);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1375
1376
  	return ret;
  }
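  /*
   * writeback_single_inode() is the entry point used by the single-inode
   * writers elsewhere in this file, e.g. write_inode_now() and sync_inode(),
   * which supply their own writeback_control; the flusher thread goes
   * through writeback_sb_inodes() below instead.
   */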
a88a341a7   Tejun Heo   writeback: move b...
1377
  static long writeback_chunk_size(struct bdi_writeback *wb,
1a12d8bd7   Wu Fengguang   writeback: scale ...
1378
  				 struct wb_writeback_work *work)
d46db3d58   Wu Fengguang   writeback: make w...
1379
1380
1381
1382
1383
1384
1385
1386
1387
1388
1389
1390
1391
1392
1393
1394
1395
1396
  {
  	long pages;
  
  	/*
  	 * WB_SYNC_ALL mode does livelock avoidance by syncing dirty
  	 * inodes/pages in one big loop. Setting wbc.nr_to_write=LONG_MAX
  	 * here avoids calling into writeback_inodes_wb() more than once.
  	 *
  	 * The intended call sequence for WB_SYNC_ALL writeback is:
  	 *
  	 *      wb_writeback()
  	 *          writeback_sb_inodes()       <== called only once
  	 *              write_cache_pages()     <== called once for each inode
  	 *                   (quickly) tag currently dirty pages
  	 *                   (maybe slowly) sync all tagged pages
  	 */
  	if (work->sync_mode == WB_SYNC_ALL || work->tagged_writepages)
  		pages = LONG_MAX;
1a12d8bd7   Wu Fengguang   writeback: scale ...
1397
  	else {
a88a341a7   Tejun Heo   writeback: move b...
1398
  		pages = min(wb->avg_write_bandwidth / 2,
dcc25ae76   Tejun Heo   writeback: move g...
1399
  			    global_wb_domain.dirty_limit / DIRTY_SCOPE);
1a12d8bd7   Wu Fengguang   writeback: scale ...
1400
1401
1402
1403
  		pages = min(pages, work->nr_pages);
  		pages = round_down(pages + MIN_WRITEBACK_PAGES,
  				   MIN_WRITEBACK_PAGES);
  	}
d46db3d58   Wu Fengguang   writeback: make w...
1404
1405
1406
  
  	return pages;
  }
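  /*
   * Worked example for the WB_SYNC_NONE branch above, assuming 4KB pages
   * (so MIN_WRITEBACK_PAGES is 1024 pages, i.e. 4MB): with an
   * avg_write_bandwidth of 25600 pages/s (~100MB/s) and larger values for
   * the dirty limit and work->nr_pages, pages = 25600 / 2 = 12800 and
   * round_down(12800 + 1024, 1024) = 13312 pages (~52MB). The chunk is
   * thus rounded up to a MIN_WRITEBACK_PAGES boundary and never drops
   * below MIN_WRITEBACK_PAGES.
   */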
03ba3782e   Jens Axboe   writeback: switch...
1407
  /*
f11c9c5c2   Edward Shishkin   vfs: improve writ...
1408
   * Write a portion of b_io inodes which belong to @sb.
edadfb10b   Christoph Hellwig   writeback: split ...
1409
   *
d46db3d58   Wu Fengguang   writeback: make w...
1410
   * Return the number of pages and/or inodes written.
0ba13fd19   Linus Torvalds   Revert "writeback...
1411
1412
1413
1414
   *
   * NOTE! This is called with wb->list_lock held, and will
   * unlock and relock that for each inode it ends up doing
   * IO for.
f11c9c5c2   Edward Shishkin   vfs: improve writ...
1415
   */
d46db3d58   Wu Fengguang   writeback: make w...
1416
1417
1418
  static long writeback_sb_inodes(struct super_block *sb,
  				struct bdi_writeback *wb,
  				struct wb_writeback_work *work)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1419
  {
d46db3d58   Wu Fengguang   writeback: make w...
1420
1421
1422
1423
1424
  	struct writeback_control wbc = {
  		.sync_mode		= work->sync_mode,
  		.tagged_writepages	= work->tagged_writepages,
  		.for_kupdate		= work->for_kupdate,
  		.for_background		= work->for_background,
7747bd4bc   Dave Chinner   sync: don't block...
1425
  		.for_sync		= work->for_sync,
d46db3d58   Wu Fengguang   writeback: make w...
1426
1427
1428
1429
1430
1431
1432
  		.range_cyclic		= work->range_cyclic,
  		.range_start		= 0,
  		.range_end		= LLONG_MAX,
  	};
  	unsigned long start_time = jiffies;
  	long write_chunk;
  	long wrote = 0;  /* count both pages and inodes */
03ba3782e   Jens Axboe   writeback: switch...
1433
  	while (!list_empty(&wb->b_io)) {
7ccf19a80   Nick Piggin   fs: inode split I...
1434
  		struct inode *inode = wb_inode(wb->b_io.prev);
aaf255933   Tejun Heo   writeback, cgroup...
1435
  		struct bdi_writeback *tmp_wb;
edadfb10b   Christoph Hellwig   writeback: split ...
1436
1437
  
  		if (inode->i_sb != sb) {
d46db3d58   Wu Fengguang   writeback: make w...
1438
  			if (work->sb) {
edadfb10b   Christoph Hellwig   writeback: split ...
1439
1440
1441
1442
1443
  				/*
  				 * We only want to write back data for this
  				 * superblock; move all inodes not belonging
  				 * to it back onto the dirty list.
  				 */
f758eeabe   Christoph Hellwig   writeback: split ...
1444
  				redirty_tail(inode, wb);
edadfb10b   Christoph Hellwig   writeback: split ...
1445
1446
1447
1448
1449
1450
1451
1452
  				continue;
  			}
  
  			/*
  			 * The inode belongs to a different superblock.
  			 * Bounce back to the caller to unpin this and
  			 * pin the next superblock.
  			 */
d46db3d58   Wu Fengguang   writeback: make w...
1453
  			break;
edadfb10b   Christoph Hellwig   writeback: split ...
1454
  		}
9843b76aa   Christoph Hellwig   fs: skip I_FREEIN...
1455
  		/*
331cbdeed   Wanpeng Li   writeback: Fix so...
1456
1457
  		 * Don't bother with new inodes or inodes being freed; the first
  		 * kind does not need periodic writeout yet, and for the latter
9843b76aa   Christoph Hellwig   fs: skip I_FREEIN...
1458
1459
  		 * kind writeout is handled by the freer.
  		 */
250df6ed2   Dave Chinner   fs: protect inode...
1460
  		spin_lock(&inode->i_lock);
9843b76aa   Christoph Hellwig   fs: skip I_FREEIN...
1461
  		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
250df6ed2   Dave Chinner   fs: protect inode...
1462
  			spin_unlock(&inode->i_lock);
fcc5c2221   Wu Fengguang   writeback: don't ...
1463
  			redirty_tail(inode, wb);
7ef0d7377   Nick Piggin   fs: new inode i_s...
1464
1465
  			continue;
  		}
cc1676d91   Jan Kara   writeback: Move r...
1466
1467
1468
1469
1470
1471
1472
1473
1474
1475
1476
1477
1478
1479
1480
  		if ((inode->i_state & I_SYNC) && wbc.sync_mode != WB_SYNC_ALL) {
  			/*
  			 * If this inode is locked for writeback and we are not
  			 * doing writeback-for-data-integrity, move it to
  			 * b_more_io so that writeback can proceed with the
  			 * other inodes on b_io.
  			 *
  			 * We'll have another go at writing back this inode
  			 * when we have completed a full scan of b_io.
  			 */
  			spin_unlock(&inode->i_lock);
  			requeue_io(inode, wb);
  			trace_writeback_sb_inodes_requeue(inode);
  			continue;
  		}
f0d07b7ff   Jan Kara   writeback: Remove...
1481
  		spin_unlock(&wb->list_lock);
4f8ad655d   Jan Kara   writeback: Refact...
1482
1483
1484
1485
1486
  		/*
  		 * We already requeued the inode if it had I_SYNC set and we
  		 * are doing WB_SYNC_NONE writeback. So this catches only the
  		 * WB_SYNC_ALL case.
  		 */
169ebd901   Jan Kara   writeback: Avoid ...
1487
1488
1489
1490
  		if (inode->i_state & I_SYNC) {
  			/* Wait for I_SYNC. This function drops i_lock... */
  			inode_sleep_on_writeback(inode);
  			/* Inode may be gone, start again */
ead188f9f   Jan Kara   writeback: Fix lo...
1491
  			spin_lock(&wb->list_lock);
169ebd901   Jan Kara   writeback: Avoid ...
1492
1493
  			continue;
  		}
4f8ad655d   Jan Kara   writeback: Refact...
1494
  		inode->i_state |= I_SYNC;
b16b1deb5   Tejun Heo   writeback: make w...
1495
  		wbc_attach_and_unlock_inode(&wbc, inode);
169ebd901   Jan Kara   writeback: Avoid ...
1496

a88a341a7   Tejun Heo   writeback: move b...
1497
  		write_chunk = writeback_chunk_size(wb, work);
d46db3d58   Wu Fengguang   writeback: make w...
1498
1499
  		wbc.nr_to_write = write_chunk;
  		wbc.pages_skipped = 0;
250df6ed2   Dave Chinner   fs: protect inode...
1500

169ebd901   Jan Kara   writeback: Avoid ...
1501
1502
1503
1504
  		/*
  		 * We use I_SYNC to pin the inode in memory. While it is set
  		 * evict_inode() will wait so the inode cannot be freed.
  		 */
cd8ed2a45   Yan Hong   fs/fs-writeback.c...
1505
  		__writeback_single_inode(inode, &wbc);
250df6ed2   Dave Chinner   fs: protect inode...
1506

b16b1deb5   Tejun Heo   writeback: make w...
1507
  		wbc_detach_inode(&wbc);
d46db3d58   Wu Fengguang   writeback: make w...
1508
1509
  		work->nr_pages -= write_chunk - wbc.nr_to_write;
  		wrote += write_chunk - wbc.nr_to_write;
590dca3a7   Chris Mason   fs-writeback: unp...
1510
1511
1512
1513
1514
1515
1516
1517
1518
1519
1520
1521
1522
  
  		if (need_resched()) {
  			/*
  			 * We're trying to balance between building up a nice
  			 * long list of IOs to improve our merge rate, and
  			 * getting those IOs out quickly for anyone throttling
  			 * in balance_dirty_pages().  cond_resched() doesn't
  			 * unplug, so get our IOs out the door before we
  			 * give up the CPU.
  			 */
  			blk_flush_plug(current);
  			cond_resched();
  		}
aaf255933   Tejun Heo   writeback, cgroup...
1523
1524
1525
1526
1527
  		/*
  		 * Requeue @inode if still dirty.  Be careful as @inode may
  		 * have been switched to another wb in the meantime.
  		 */
  		tmp_wb = inode_to_wb_and_lock_list(inode);
4f8ad655d   Jan Kara   writeback: Refact...
1528
  		spin_lock(&inode->i_lock);
0ae45f63d   Theodore Ts'o   vfs: add support ...
1529
  		if (!(inode->i_state & I_DIRTY_ALL))
d46db3d58   Wu Fengguang   writeback: make w...
1530
  			wrote++;
aaf255933   Tejun Heo   writeback, cgroup...
1531
  		requeue_inode(inode, tmp_wb, &wbc);
4f8ad655d   Jan Kara   writeback: Refact...
1532
  		inode_sync_complete(inode);
0f1b1fd86   Dave Chinner   fs: pull inode->i...
1533
  		spin_unlock(&inode->i_lock);
590dca3a7   Chris Mason   fs-writeback: unp...
1534

aaf255933   Tejun Heo   writeback, cgroup...
1535
1536
1537
1538
  		if (unlikely(tmp_wb != wb)) {
  			spin_unlock(&tmp_wb->list_lock);
  			spin_lock(&wb->list_lock);
  		}
d46db3d58   Wu Fengguang   writeback: make w...
1539
1540
1541
1542
1543
1544
1545
1546
1547
  		/*
  		 * bail out to wb_writeback() often enough to check
  		 * background threshold and other termination conditions.
  		 */
  		if (wrote) {
  			if (time_is_before_jiffies(start_time + HZ / 10UL))
  				break;
  			if (work->nr_pages <= 0)
  				break;
8bc3be275   Fengguang Wu   writeback: speed ...
1548
  		}
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1549
  	}
d46db3d58   Wu Fengguang   writeback: make w...
1550
  	return wrote;
f11c9c5c2   Edward Shishkin   vfs: improve writ...
1551
  }
d46db3d58   Wu Fengguang   writeback: make w...
1552
1553
  static long __writeback_inodes_wb(struct bdi_writeback *wb,
  				  struct wb_writeback_work *work)
f11c9c5c2   Edward Shishkin   vfs: improve writ...
1554
  {
d46db3d58   Wu Fengguang   writeback: make w...
1555
1556
  	unsigned long start_time = jiffies;
  	long wrote = 0;
38f219776   Nick Piggin   fs: sync_sb_inode...
1557

f11c9c5c2   Edward Shishkin   vfs: improve writ...
1558
  	while (!list_empty(&wb->b_io)) {
7ccf19a80   Nick Piggin   fs: inode split I...
1559
  		struct inode *inode = wb_inode(wb->b_io.prev);
f11c9c5c2   Edward Shishkin   vfs: improve writ...
1560
  		struct super_block *sb = inode->i_sb;
9ecc2738a   Jens Axboe   writeback: make t...
1561

eb6ef3df4   Konstantin Khlebnikov   trylock_super(): ...
1562
  		if (!trylock_super(sb)) {
0e995816f   Wu Fengguang   don't busy retry ...
1563
  			/*
eb6ef3df4   Konstantin Khlebnikov   trylock_super(): ...
1564
  			 * trylock_super() may fail consistently due to
0e995816f   Wu Fengguang   don't busy retry ...
1565
1566
1567
1568
  			 * s_umount being grabbed by someone else. Don't use
  			 * requeue_io() to avoid busy retrying the inode/sb.
  			 */
  			redirty_tail(inode, wb);
edadfb10b   Christoph Hellwig   writeback: split ...
1569
  			continue;
f11c9c5c2   Edward Shishkin   vfs: improve writ...
1570
  		}
d46db3d58   Wu Fengguang   writeback: make w...
1571
  		wrote += writeback_sb_inodes(sb, wb, work);
eb6ef3df4   Konstantin Khlebnikov   trylock_super(): ...
1572
  		up_read(&sb->s_umount);
f11c9c5c2   Edward Shishkin   vfs: improve writ...
1573

d46db3d58   Wu Fengguang   writeback: make w...
1574
1575
1576
1577
1578
1579
1580
  		/* refer to the same tests at the end of writeback_sb_inodes */
  		if (wrote) {
  			if (time_is_before_jiffies(start_time + HZ / 10UL))
  				break;
  			if (work->nr_pages <= 0)
  				break;
  		}
f11c9c5c2   Edward Shishkin   vfs: improve writ...
1581
  	}
66f3b8e2e   Jens Axboe   writeback: move d...
1582
  	/* Leave any unwritten inodes on b_io */
d46db3d58   Wu Fengguang   writeback: make w...
1583
  	return wrote;
66f3b8e2e   Jens Axboe   writeback: move d...
1584
  }
7d9f073b8   Wanpeng Li   mm/writeback: mak...
1585
  static long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages,
0e175a183   Curt Wohlgemuth   writeback: Add a ...
1586
  				enum wb_reason reason)
edadfb10b   Christoph Hellwig   writeback: split ...
1587
  {
d46db3d58   Wu Fengguang   writeback: make w...
1588
1589
1590
1591
  	struct wb_writeback_work work = {
  		.nr_pages	= nr_pages,
  		.sync_mode	= WB_SYNC_NONE,
  		.range_cyclic	= 1,
0e175a183   Curt Wohlgemuth   writeback: Add a ...
1592
  		.reason		= reason,
d46db3d58   Wu Fengguang   writeback: make w...
1593
  	};
505a666ee   Linus Torvalds   writeback: plug w...
1594
  	struct blk_plug plug;
edadfb10b   Christoph Hellwig   writeback: split ...
1595

505a666ee   Linus Torvalds   writeback: plug w...
1596
  	blk_start_plug(&plug);
f758eeabe   Christoph Hellwig   writeback: split ...
1597
  	spin_lock(&wb->list_lock);
424b351fe   Wu Fengguang   writeback: refill...
1598
  	if (list_empty(&wb->b_io))
ad4e38dd6   Curt Wohlgemuth   writeback: send w...
1599
  		queue_io(wb, &work);
d46db3d58   Wu Fengguang   writeback: make w...
1600
  	__writeback_inodes_wb(wb, &work);
f758eeabe   Christoph Hellwig   writeback: split ...
1601
  	spin_unlock(&wb->list_lock);
505a666ee   Linus Torvalds   writeback: plug w...
1602
  	blk_finish_plug(&plug);
edadfb10b   Christoph Hellwig   writeback: split ...
1603

d46db3d58   Wu Fengguang   writeback: make w...
1604
1605
  	return nr_pages - work.nr_pages;
  }
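  /*
   * writeback_inodes_wb() is the lightweight, caller-synchronous path: it
   * builds a WB_SYNC_NONE work item on the stack and aims to write at most
   * nr_pages pages before returning. wb_workfn() below uses it from the
   * workqueue rescuer with nr_pages = 1024 so the emergency worker is not
   * hogged.
   */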
03ba3782e   Jens Axboe   writeback: switch...
1606

03ba3782e   Jens Axboe   writeback: switch...
1607
1608
  /*
   * Explicit flushing or periodic writeback of "old" data.
66f3b8e2e   Jens Axboe   writeback: move d...
1609
   *
03ba3782e   Jens Axboe   writeback: switch...
1610
1611
1612
1613
   * Define "old": the first time one of an inode's pages is dirtied, we mark the
   * dirtying-time in the inode's address_space.  So this periodic writeback code
   * just walks the superblock inode list, writing back any inodes which are
   * older than a specific point in time.
66f3b8e2e   Jens Axboe   writeback: move d...
1614
   *
03ba3782e   Jens Axboe   writeback: switch...
1615
1616
1617
   * Try to run once per dirty_writeback_interval.  But if a writeback event
   * takes longer than a dirty_writeback_interval period, then leave a
   * one-second gap.
66f3b8e2e   Jens Axboe   writeback: move d...
1618
   *
03ba3782e   Jens Axboe   writeback: switch...
1619
1620
   * older_than_this takes precedence over nr_to_write.  So we'll only write back
   * all dirty pages if they are all attached to "old" mappings.
66f3b8e2e   Jens Axboe   writeback: move d...
1621
   */
c4a77a6c7   Jens Axboe   writeback: make w...
1622
  static long wb_writeback(struct bdi_writeback *wb,
83ba7b071   Christoph Hellwig   writeback: simpli...
1623
  			 struct wb_writeback_work *work)
66f3b8e2e   Jens Axboe   writeback: move d...
1624
  {
e98be2d59   Wu Fengguang   writeback: bdi wr...
1625
  	unsigned long wb_start = jiffies;
d46db3d58   Wu Fengguang   writeback: make w...
1626
  	long nr_pages = work->nr_pages;
0dc83bd30   Jan Kara   Revert "writeback...
1627
  	unsigned long oldest_jif;
a5989bdc9   Jan Kara   fs: Fix busyloop ...
1628
  	struct inode *inode;
d46db3d58   Wu Fengguang   writeback: make w...
1629
  	long progress;
505a666ee   Linus Torvalds   writeback: plug w...
1630
  	struct blk_plug plug;
66f3b8e2e   Jens Axboe   writeback: move d...
1631

0dc83bd30   Jan Kara   Revert "writeback...
1632
1633
  	oldest_jif = jiffies;
  	work->older_than_this = &oldest_jif;
38f219776   Nick Piggin   fs: sync_sb_inode...
1634

505a666ee   Linus Torvalds   writeback: plug w...
1635
  	blk_start_plug(&plug);
e8dfc3058   Wu Fengguang   writeback: elevat...
1636
  	spin_lock(&wb->list_lock);
03ba3782e   Jens Axboe   writeback: switch...
1637
1638
  	for (;;) {
  		/*
d3ddec763   Wu Fengguang   writeback: stop b...
1639
  		 * Stop writeback when nr_pages has been consumed
03ba3782e   Jens Axboe   writeback: switch...
1640
  		 */
83ba7b071   Christoph Hellwig   writeback: simpli...
1641
  		if (work->nr_pages <= 0)
03ba3782e   Jens Axboe   writeback: switch...
1642
  			break;
66f3b8e2e   Jens Axboe   writeback: move d...
1643

38f219776   Nick Piggin   fs: sync_sb_inode...
1644
  		/*
aa373cf55   Jan Kara   writeback: stop b...
1645
1646
1647
1648
1649
1650
  		 * Background writeout and kupdate-style writeback may
  		 * run forever. Stop them if there is other work to do
  		 * so that e.g. sync can proceed. They'll be restarted
  		 * after the other work items are all done.
  		 */
  		if ((work->for_background || work->for_kupdate) &&
f0054bb1e   Tejun Heo   writeback: move b...
1651
  		    !list_empty(&wb->work_list))
aa373cf55   Jan Kara   writeback: stop b...
1652
1653
1654
  			break;
  
  		/*
d3ddec763   Wu Fengguang   writeback: stop b...
1655
1656
  		 * For background writeout, stop when we are below the
  		 * background dirty threshold
38f219776   Nick Piggin   fs: sync_sb_inode...
1657
  		 */
aa661bbe1   Tejun Heo   writeback: move o...
1658
  		if (work->for_background && !wb_over_bg_thresh(wb))
03ba3782e   Jens Axboe   writeback: switch...
1659
  			break;
38f219776   Nick Piggin   fs: sync_sb_inode...
1660

1bc36b642   Jan Kara   writeback: Includ...
1661
1662
1663
1664
1665
1666
  		/*
  		 * Kupdate and background works are special and we want to
  		 * include all inodes that need writing. Livelock avoidance is
  		 * handled by these works yielding to any other work so we are
  		 * safe.
  		 */
ba9aa8399   Wu Fengguang   writeback: the ku...
1667
  		if (work->for_kupdate) {
0dc83bd30   Jan Kara   Revert "writeback...
1668
  			oldest_jif = jiffies -
ba9aa8399   Wu Fengguang   writeback: the ku...
1669
  				msecs_to_jiffies(dirty_expire_interval * 10);
1bc36b642   Jan Kara   writeback: Includ...
1670
  		} else if (work->for_background)
0dc83bd30   Jan Kara   Revert "writeback...
1671
  			oldest_jif = jiffies;
028c2dd18   Dave Chinner   writeback: Add tr...
1672

5634cc2aa   Tejun Heo   writeback: update...
1673
  		trace_writeback_start(wb, work);
e8dfc3058   Wu Fengguang   writeback: elevat...
1674
  		if (list_empty(&wb->b_io))
ad4e38dd6   Curt Wohlgemuth   writeback: send w...
1675
  			queue_io(wb, work);
83ba7b071   Christoph Hellwig   writeback: simpli...
1676
  		if (work->sb)
d46db3d58   Wu Fengguang   writeback: make w...
1677
  			progress = writeback_sb_inodes(work->sb, wb, work);
edadfb10b   Christoph Hellwig   writeback: split ...
1678
  		else
d46db3d58   Wu Fengguang   writeback: make w...
1679
  			progress = __writeback_inodes_wb(wb, work);
5634cc2aa   Tejun Heo   writeback: update...
1680
  		trace_writeback_written(wb, work);
028c2dd18   Dave Chinner   writeback: Add tr...
1681

e98be2d59   Wu Fengguang   writeback: bdi wr...
1682
  		wb_update_bandwidth(wb, wb_start);
03ba3782e   Jens Axboe   writeback: switch...
1683
1684
  
  		/*
e6fb6da2e   Wu Fengguang   writeback: try mo...
1685
1686
1687
1688
1689
1690
  		 * Did we write something? Try for more
  		 *
  		 * Dirty inodes are moved to b_io for writeback in batches.
  		 * The completion of the current batch does not necessarily
  		 * mean the overall work is done. So we keep looping as long
  		 * as we have made some progress on cleaning pages or inodes.
03ba3782e   Jens Axboe   writeback: switch...
1691
  		 */
d46db3d58   Wu Fengguang   writeback: make w...
1692
  		if (progress)
71fd05a88   Jens Axboe   writeback: improv...
1693
1694
  			continue;
  		/*
e6fb6da2e   Wu Fengguang   writeback: try mo...
1695
  		 * No more inodes for IO, bail
71fd05a88   Jens Axboe   writeback: improv...
1696
  		 */
b7a2441f9   Wu Fengguang   writeback: remove...
1697
  		if (list_empty(&wb->b_more_io))
03ba3782e   Jens Axboe   writeback: switch...
1698
  			break;
71fd05a88   Jens Axboe   writeback: improv...
1699
  		/*
71fd05a88   Jens Axboe   writeback: improv...
1700
1701
1702
1703
  		 * Nothing written. Wait for some inode to
  		 * become available for writeback. Otherwise
  		 * we'll just busyloop.
  		 */
71fd05a88   Jens Axboe   writeback: improv...
1704
  		if (!list_empty(&wb->b_more_io))  {
5634cc2aa   Tejun Heo   writeback: update...
1705
  			trace_writeback_wait(wb, work);
7ccf19a80   Nick Piggin   fs: inode split I...
1706
  			inode = wb_inode(wb->b_more_io.prev);
250df6ed2   Dave Chinner   fs: protect inode...
1707
  			spin_lock(&inode->i_lock);
f0d07b7ff   Jan Kara   writeback: Remove...
1708
  			spin_unlock(&wb->list_lock);
169ebd901   Jan Kara   writeback: Avoid ...
1709
1710
  			/* This function drops i_lock... */
  			inode_sleep_on_writeback(inode);
f0d07b7ff   Jan Kara   writeback: Remove...
1711
  			spin_lock(&wb->list_lock);
03ba3782e   Jens Axboe   writeback: switch...
1712
1713
  		}
  	}
e8dfc3058   Wu Fengguang   writeback: elevat...
1714
  	spin_unlock(&wb->list_lock);
505a666ee   Linus Torvalds   writeback: plug w...
1715
  	blk_finish_plug(&plug);
03ba3782e   Jens Axboe   writeback: switch...
1716

d46db3d58   Wu Fengguang   writeback: make w...
1717
  	return nr_pages - work->nr_pages;
03ba3782e   Jens Axboe   writeback: switch...
1718
1719
1720
  }
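  /*
   * Each pass through the loop above therefore either makes progress and
   * keeps going, runs out of candidates (b_more_io empty) and stops, or
   * parks on one inode that is currently under writeback; the return
   * value is the number of pages the work item actually consumed.
   */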
  
  /*
83ba7b071   Christoph Hellwig   writeback: simpli...
1721
   * Return the next wb_writeback_work struct that hasn't been processed yet.
03ba3782e   Jens Axboe   writeback: switch...
1722
   */
f0054bb1e   Tejun Heo   writeback: move b...
1723
  static struct wb_writeback_work *get_next_work_item(struct bdi_writeback *wb)
03ba3782e   Jens Axboe   writeback: switch...
1724
  {
83ba7b071   Christoph Hellwig   writeback: simpli...
1725
  	struct wb_writeback_work *work = NULL;
03ba3782e   Jens Axboe   writeback: switch...
1726

f0054bb1e   Tejun Heo   writeback: move b...
1727
1728
1729
  	spin_lock_bh(&wb->work_lock);
  	if (!list_empty(&wb->work_list)) {
  		work = list_entry(wb->work_list.next,
83ba7b071   Christoph Hellwig   writeback: simpli...
1730
1731
  				  struct wb_writeback_work, list);
  		list_del_init(&work->list);
03ba3782e   Jens Axboe   writeback: switch...
1732
  	}
f0054bb1e   Tejun Heo   writeback: move b...
1733
  	spin_unlock_bh(&wb->work_lock);
83ba7b071   Christoph Hellwig   writeback: simpli...
1734
  	return work;
03ba3782e   Jens Axboe   writeback: switch...
1735
  }
cdf01dd54   Linus Torvalds   fs-writeback.c: u...
1736
1737
1738
1739
1740
1741
  /*
   * Add in the number of potentially dirty inodes, because each inode
   * write can dirty pagecache in the underlying blockdev.
   */
  static unsigned long get_nr_dirty_pages(void)
  {
11fb99898   Mel Gorman   mm: move most fil...
1742
1743
  	return global_node_page_state(NR_FILE_DIRTY) +
  		global_node_page_state(NR_UNSTABLE_NFS) +
cdf01dd54   Linus Torvalds   fs-writeback.c: u...
1744
1745
  		get_nr_dirty_inodes();
  }
6585027a5   Jan Kara   writeback: integr...
1746
1747
  static long wb_check_background_flush(struct bdi_writeback *wb)
  {
aa661bbe1   Tejun Heo   writeback: move o...
1748
  	if (wb_over_bg_thresh(wb)) {
6585027a5   Jan Kara   writeback: integr...
1749
1750
1751
1752
1753
1754
  
  		struct wb_writeback_work work = {
  			.nr_pages	= LONG_MAX,
  			.sync_mode	= WB_SYNC_NONE,
  			.for_background	= 1,
  			.range_cyclic	= 1,
0e175a183   Curt Wohlgemuth   writeback: Add a ...
1755
  			.reason		= WB_REASON_BACKGROUND,
6585027a5   Jan Kara   writeback: integr...
1756
1757
1758
1759
1760
1761
1762
  		};
  
  		return wb_writeback(wb, &work);
  	}
  
  	return 0;
  }
03ba3782e   Jens Axboe   writeback: switch...
1763
1764
1765
1766
  static long wb_check_old_data_flush(struct bdi_writeback *wb)
  {
  	unsigned long expired;
  	long nr_pages;
69b62d01e   Jens Axboe   writeback: disabl...
1767
1768
1769
1770
1771
  	/*
  	 * When set to zero, disable periodic writeback
  	 */
  	if (!dirty_writeback_interval)
  		return 0;
03ba3782e   Jens Axboe   writeback: switch...
1772
1773
1774
1775
1776
1777
  	expired = wb->last_old_flush +
  			msecs_to_jiffies(dirty_writeback_interval * 10);
  	if (time_before(jiffies, expired))
  		return 0;
  
  	wb->last_old_flush = jiffies;
cdf01dd54   Linus Torvalds   fs-writeback.c: u...
1778
  	nr_pages = get_nr_dirty_pages();
03ba3782e   Jens Axboe   writeback: switch...
1779

c4a77a6c7   Jens Axboe   writeback: make w...
1780
  	if (nr_pages) {
83ba7b071   Christoph Hellwig   writeback: simpli...
1781
  		struct wb_writeback_work work = {
c4a77a6c7   Jens Axboe   writeback: make w...
1782
1783
1784
1785
  			.nr_pages	= nr_pages,
  			.sync_mode	= WB_SYNC_NONE,
  			.for_kupdate	= 1,
  			.range_cyclic	= 1,
0e175a183   Curt Wohlgemuth   writeback: Add a ...
1786
  			.reason		= WB_REASON_PERIODIC,
c4a77a6c7   Jens Axboe   writeback: make w...
1787
  		};
83ba7b071   Christoph Hellwig   writeback: simpli...
1788
  		return wb_writeback(wb, &work);
c4a77a6c7   Jens Axboe   writeback: make w...
1789
  	}
03ba3782e   Jens Axboe   writeback: switch...
1790
1791
1792
1793
1794
1795
1796
  
  	return 0;
  }
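  /*
   * dirty_writeback_interval is in centiseconds, so with the default of
   * 500 the msecs_to_jiffies(dirty_writeback_interval * 10) above works
   * out to one periodic (kupdate-style) flush at most every 5 seconds.
   */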
  
  /*
   * Retrieve work items and do the writeback they describe
   */
25d130ba2   Wanpeng Li   mm/writeback: don...
1797
  static long wb_do_writeback(struct bdi_writeback *wb)
03ba3782e   Jens Axboe   writeback: switch...
1798
  {
83ba7b071   Christoph Hellwig   writeback: simpli...
1799
  	struct wb_writeback_work *work;
c4a77a6c7   Jens Axboe   writeback: make w...
1800
  	long wrote = 0;
03ba3782e   Jens Axboe   writeback: switch...
1801

4452226ea   Tejun Heo   writeback: move b...
1802
  	set_bit(WB_writeback_running, &wb->state);
f0054bb1e   Tejun Heo   writeback: move b...
1803
  	while ((work = get_next_work_item(wb)) != NULL) {
cc395d7f1   Tejun Heo   writeback: implem...
1804
  		struct wb_completion *done = work->done;
03ba3782e   Jens Axboe   writeback: switch...
1805

5634cc2aa   Tejun Heo   writeback: update...
1806
  		trace_writeback_exec(wb, work);
455b28646   Dave Chinner   writeback: Initia...
1807

83ba7b071   Christoph Hellwig   writeback: simpli...
1808
  		wrote += wb_writeback(wb, work);
03ba3782e   Jens Axboe   writeback: switch...
1809

8a1270cda   Tejun Heo   writeback: remove...
1810
  		if (work->auto_free)
83ba7b071   Christoph Hellwig   writeback: simpli...
1811
  			kfree(work);
cc395d7f1   Tejun Heo   writeback: implem...
1812
1813
  		if (done && atomic_dec_and_test(&done->cnt))
  			wake_up_all(&wb->bdi->wb_waitq);
03ba3782e   Jens Axboe   writeback: switch...
1814
1815
1816
1817
1818
1819
  	}
  
  	/*
  	 * Check for periodic writeback, kupdated() style
  	 */
  	wrote += wb_check_old_data_flush(wb);
6585027a5   Jan Kara   writeback: integr...
1820
  	wrote += wb_check_background_flush(wb);
4452226ea   Tejun Heo   writeback: move b...
1821
  	clear_bit(WB_writeback_running, &wb->state);
03ba3782e   Jens Axboe   writeback: switch...
1822
1823
1824
1825
1826
1827
  
  	return wrote;
  }
  
  /*
   * Handle writeback of dirty data for the device backed by this bdi. Also
839a8e866   Tejun Heo   writeback: replac...
1828
   * reschedules periodically and does kupdated style flushing.
03ba3782e   Jens Axboe   writeback: switch...
1829
   */
f0054bb1e   Tejun Heo   writeback: move b...
1830
  void wb_workfn(struct work_struct *work)
03ba3782e   Jens Axboe   writeback: switch...
1831
  {
839a8e866   Tejun Heo   writeback: replac...
1832
1833
  	struct bdi_writeback *wb = container_of(to_delayed_work(work),
  						struct bdi_writeback, dwork);
03ba3782e   Jens Axboe   writeback: switch...
1834
  	long pages_written;
f0054bb1e   Tejun Heo   writeback: move b...
1835
  	set_worker_desc("flush-%s", dev_name(wb->bdi->dev));
766f91641   Peter Zijlstra   kernel: remove PF...
1836
  	current->flags |= PF_SWAPWRITE;
455b28646   Dave Chinner   writeback: Initia...
1837

839a8e866   Tejun Heo   writeback: replac...
1838
  	if (likely(!current_is_workqueue_rescuer() ||
4452226ea   Tejun Heo   writeback: move b...
1839
  		   !test_bit(WB_registered, &wb->state))) {
6467716a3   Artem Bityutskiy   writeback: optimi...
1840
  		/*
f0054bb1e   Tejun Heo   writeback: move b...
1841
  		 * The normal path.  Keep writing back @wb until its
839a8e866   Tejun Heo   writeback: replac...
1842
  		 * work_list is empty.  Note that this path is also taken
f0054bb1e   Tejun Heo   writeback: move b...
1843
  		 * if @wb is shutting down even when we're running off the
839a8e866   Tejun Heo   writeback: replac...
1844
  		 * rescuer as work_list needs to be drained.
6467716a3   Artem Bityutskiy   writeback: optimi...
1845
  		 */
839a8e866   Tejun Heo   writeback: replac...
1846
  		do {
25d130ba2   Wanpeng Li   mm/writeback: don...
1847
  			pages_written = wb_do_writeback(wb);
839a8e866   Tejun Heo   writeback: replac...
1848
  			trace_writeback_pages_written(pages_written);
f0054bb1e   Tejun Heo   writeback: move b...
1849
  		} while (!list_empty(&wb->work_list));
839a8e866   Tejun Heo   writeback: replac...
1850
1851
1852
1853
1854
1855
  	} else {
  		/*
  		 * bdi_wq can't get enough workers and we're running off
  		 * the emergency worker.  Don't hog it.  Hopefully, 1024 is
  		 * enough for efficient IO.
  		 */
f0054bb1e   Tejun Heo   writeback: move b...
1856
  		pages_written = writeback_inodes_wb(wb, 1024,
839a8e866   Tejun Heo   writeback: replac...
1857
  						    WB_REASON_FORKER_THREAD);
455b28646   Dave Chinner   writeback: Initia...
1858
  		trace_writeback_pages_written(pages_written);
03ba3782e   Jens Axboe   writeback: switch...
1859
  	}
f0054bb1e   Tejun Heo   writeback: move b...
1860
  	if (!list_empty(&wb->work_list))
6ca738d60   Derek Basehore   backing_dev: fix ...
1861
1862
  		mod_delayed_work(bdi_wq, &wb->dwork, 0);
  	else if (wb_has_dirty_io(wb) && dirty_writeback_interval)
f0054bb1e   Tejun Heo   writeback: move b...
1863
  		wb_wakeup_delayed(wb);
455b28646   Dave Chinner   writeback: Initia...
1864

839a8e866   Tejun Heo   writeback: replac...
1865
  	current->flags &= ~PF_SWAPWRITE;
03ba3782e   Jens Axboe   writeback: switch...
1866
1867
1868
  }
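  /*
   * wb_workfn() keeps itself scheduled on bdi_wq: it requeues immediately
   * via mod_delayed_work() when more work is already pending, or after a
   * delay via wb_wakeup_delayed() when the wb still has dirty IO, so a
   * busy bdi_writeback keeps rearming itself.
   */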
  
  /*
b8c2f3474   Christoph Hellwig   writeback: simpli...
1869
1870
   * Start writeback of `nr_pages' pages.  If `nr_pages' is zero, write back
   * the whole world.
03ba3782e   Jens Axboe   writeback: switch...
1871
   */
0e175a183   Curt Wohlgemuth   writeback: Add a ...
1872
  void wakeup_flusher_threads(long nr_pages, enum wb_reason reason)
03ba3782e   Jens Axboe   writeback: switch...
1873
  {
b8c2f3474   Christoph Hellwig   writeback: simpli...
1874
  	struct backing_dev_info *bdi;
03ba3782e   Jens Axboe   writeback: switch...
1875

51350ea0d   Konstantin Khlebnikov   mm, writeback: fl...
1876
1877
1878
1879
1880
  	/*
  	 * If we are expecting writeback progress we must submit plugged IO.
  	 */
  	if (blk_needs_flush_plug(current))
  		blk_schedule_flush_plug(current);
47df3dded   Jan Kara   writeback: fix oc...
1881
1882
  	if (!nr_pages)
  		nr_pages = get_nr_dirty_pages();
03ba3782e   Jens Axboe   writeback: switch...
1883

b8c2f3474   Christoph Hellwig   writeback: simpli...
1884
  	rcu_read_lock();
f2b651216   Tejun Heo   writeback: make w...
1885
1886
  	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
  		struct bdi_writeback *wb;
f2b651216   Tejun Heo   writeback: make w...
1887
1888
1889
  
  		if (!bdi_has_dirty_io(bdi))
  			continue;
b817525a4   Tejun Heo   writeback: bdi_wr...
1890
  		list_for_each_entry_rcu(wb, &bdi->wb_list, bdi_node)
f2b651216   Tejun Heo   writeback: make w...
1891
1892
1893
  			wb_start_writeback(wb, wb_split_bdi_pages(wb, nr_pages),
  					   false, reason);
  	}
cfc4ba536   Jens Axboe   writeback: use RC...
1894
  	rcu_read_unlock();
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1895
  }
a2f487069   Theodore Ts'o   fs: make sure the...
1896
1897
1898
1899
1900
1901
1902
1903
1904
1905
1906
1907
1908
1909
1910
1911
1912
1913
1914
1915
1916
1917
1918
1919
  /*
   * Wake up bdi's periodically to make sure dirtytime inodes get
   * written back periodically.  We deliberately do *not* check the
   * b_dirtytime list in wb_has_dirty_io(), since this would cause the
   * kernel to be constantly waking up once there are any dirtytime
   * inodes on the system.  So instead we define a separate delayed work
   * function which gets called much more rarely.  (By default, only
   * once every 12 hours.)
   *
   * If there is any other write activity going on in the file system,
   * this function won't be necessary.  But if the only thing that has
   * happened on the file system is a dirtytime inode caused by an atime
   * update, we need this infrastructure below to make sure that inode
   * eventually gets pushed out to disk.
   */
  static void wakeup_dirtytime_writeback(struct work_struct *w);
  static DECLARE_DELAYED_WORK(dirtytime_work, wakeup_dirtytime_writeback);
  
  static void wakeup_dirtytime_writeback(struct work_struct *w)
  {
  	struct backing_dev_info *bdi;
  
  	rcu_read_lock();
  	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
001fe6f61   Tejun Heo   writeback: make w...
1920
  		struct bdi_writeback *wb;
001fe6f61   Tejun Heo   writeback: make w...
1921

b817525a4   Tejun Heo   writeback: bdi_wr...
1922
  		list_for_each_entry_rcu(wb, &bdi->wb_list, bdi_node)
6fdf860f1   Tejun Heo   writeback: fix bd...
1923
1924
  			if (!list_empty(&wb->b_dirty_time))
  				wb_wakeup(wb);
a2f487069   Theodore Ts'o   fs: make sure the...
1925
1926
1927
1928
1929
1930
1931
1932
1933
1934
1935
  	}
  	rcu_read_unlock();
  	schedule_delayed_work(&dirtytime_work, dirtytime_expire_interval * HZ);
  }
  
  static int __init start_dirtytime_writeback(void)
  {
  	schedule_delayed_work(&dirtytime_work, dirtytime_expire_interval * HZ);
  	return 0;
  }
  __initcall(start_dirtytime_writeback);
1efff914a   Theodore Ts'o   fs: add dirtytime...
1936
1937
1938
1939
1940
1941
1942
1943
1944
1945
  int dirtytime_interval_handler(struct ctl_table *table, int write,
  			       void __user *buffer, size_t *lenp, loff_t *ppos)
  {
  	int ret;
  
  	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
  	if (ret == 0 && write)
  		mod_delayed_work(system_wq, &dirtytime_work, 0);
  	return ret;
  }
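  /*
   * dirtytime_interval_handler() is assumed to be wired up as the sysctl
   * handler for dirtytime_expire_interval (vm.dirtytime_expire_seconds);
   * any successful write to it kicks dirtytime_work immediately so the
   * new interval takes effect without waiting out the previous period.
   */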
03ba3782e   Jens Axboe   writeback: switch...
1946
1947
1948
1949
1950
1951
1952
1953
1954
1955
1956
1957
1958
1959
1960
1961
1962
1963
1964
1965
1966
1967
1968
1969
1970
1971
1972
1973
1974
  static noinline void block_dump___mark_inode_dirty(struct inode *inode)
  {
  	if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev")) {
  		struct dentry *dentry;
  		const char *name = "?";
  
  		dentry = d_find_alias(inode);
  		if (dentry) {
  			spin_lock(&dentry->d_lock);
  			name = (const char *) dentry->d_name.name;
  		}
  		printk(KERN_DEBUG
  		       "%s(%d): dirtied inode %lu (%s) on %s\n",
  		       current->comm, task_pid_nr(current), inode->i_ino,
  		       name, inode->i_sb->s_id);
  		if (dentry) {
  			spin_unlock(&dentry->d_lock);
  			dput(dentry);
  		}
  	}
  }
  
  /**
   *	__mark_inode_dirty -	internal function
   *	@inode: inode to mark
   *	@flags: what kind of dirty (i.e. I_DIRTY_SYNC)
   *	Mark an inode as dirty. Callers should use mark_inode_dirty or
   *  	mark_inode_dirty_sync.
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1975
   *
03ba3782e   Jens Axboe   writeback: switch...
1976
1977
1978
1979
1980
1981
1982
1983
1984
   * Put the inode on the super block's dirty list.
   *
   * CAREFUL! We mark it dirty unconditionally, but move it onto the
   * dirty list only if it is hashed or if it refers to a blockdev.
   * If it was not hashed, it will never be added to the dirty list
   * even if it is later hashed, as it will have been marked dirty already.
   *
   * In short, make sure you hash any inodes _before_ you start marking
   * them dirty.
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1985
   *
03ba3782e   Jens Axboe   writeback: switch...
1986
1987
1988
1989
1990
1991
   * Note that for blockdevs, inode->dirtied_when represents the dirtying time of
   * the block-special inode (/dev/hda1) itself.  And the ->dirtied_when field of
   * the kernel-internal blockdev inode represents the dirtying time of the
   * blockdev's pages.  This is why for I_DIRTY_PAGES we always use
   * page->mapping->host, so the page-dirtying time is recorded in the internal
   * blockdev inode.
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1992
   */
03ba3782e   Jens Axboe   writeback: switch...
1993
  void __mark_inode_dirty(struct inode *inode, int flags)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1994
  {
dbce03b9e   Randy Dunlap   fs/writeback.c: f...
1995
  #define I_DIRTY_INODE (I_DIRTY_SYNC | I_DIRTY_DATASYNC)
03ba3782e   Jens Axboe   writeback: switch...
1996
  	struct super_block *sb = inode->i_sb;
0ae45f63d   Theodore Ts'o   vfs: add support ...
1997
1998
1999
  	int dirtytime;
  
  	trace_writeback_mark_inode_dirty(inode, flags);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2000

03ba3782e   Jens Axboe   writeback: switch...
2001
2002
2003
2004
  	/*
  	 * Don't do this for I_DIRTY_PAGES - that doesn't actually
  	 * dirty the inode itself
  	 */
0ae45f63d   Theodore Ts'o   vfs: add support ...
2005
  	if (flags & (I_DIRTY_SYNC | I_DIRTY_DATASYNC | I_DIRTY_TIME)) {
9fb0a7da0   Tejun Heo   writeback: add mo...
2006
  		trace_writeback_dirty_inode_start(inode, flags);
03ba3782e   Jens Axboe   writeback: switch...
2007
  		if (sb->s_op->dirty_inode)
aa3857295   Christoph Hellwig   fs: pass exact ty...
2008
  			sb->s_op->dirty_inode(inode, flags);
9fb0a7da0   Tejun Heo   writeback: add mo...
2009
2010
  
  		trace_writeback_dirty_inode(inode, flags);
03ba3782e   Jens Axboe   writeback: switch...
2011
  	}
0ae45f63d   Theodore Ts'o   vfs: add support ...
2012
2013
2014
  	if (flags & I_DIRTY_INODE)
  		flags &= ~I_DIRTY_TIME;
  	dirtytime = flags & I_DIRTY_TIME;
03ba3782e   Jens Axboe   writeback: switch...
2015
2016
  
  	/*
9c6ac78eb   Tejun Heo   writeback: fix a ...
2017
2018
  	 * Paired with smp_mb() in __writeback_single_inode() for the
  	 * following lockless i_state test.  See there for details.
03ba3782e   Jens Axboe   writeback: switch...
2019
2020
  	 */
  	smp_mb();
0ae45f63d   Theodore Ts'o   vfs: add support ...
2021
2022
  	if (((inode->i_state & flags) == flags) ||
  	    (dirtytime && (inode->i_state & I_DIRTY_INODE)))
03ba3782e   Jens Axboe   writeback: switch...
2023
2024
2025
2026
  		return;
  
  	if (unlikely(block_dump))
  		block_dump___mark_inode_dirty(inode);
250df6ed2   Dave Chinner   fs: protect inode...
2027
  	spin_lock(&inode->i_lock);
0ae45f63d   Theodore Ts'o   vfs: add support ...
2028
2029
  	if (dirtytime && (inode->i_state & I_DIRTY_INODE))
  		goto out_unlock_inode;
03ba3782e   Jens Axboe   writeback: switch...
2030
2031
  	if ((inode->i_state & flags) != flags) {
  		const int was_dirty = inode->i_state & I_DIRTY;
52ebea749   Tejun Heo   writeback: make b...
2032
  		inode_attach_wb(inode, NULL);
0ae45f63d   Theodore Ts'o   vfs: add support ...
2033
2034
  		if (flags & I_DIRTY_INODE)
  			inode->i_state &= ~I_DIRTY_TIME;
03ba3782e   Jens Axboe   writeback: switch...
2035
2036
2037
2038
2039
2040
2041
2042
  		inode->i_state |= flags;
  
  		/*
  		 * If the inode is being synced, just update its dirty state.
  		 * The unlocker will place the inode on the appropriate
  		 * superblock list, based upon its state.
  		 */
  		if (inode->i_state & I_SYNC)
250df6ed2   Dave Chinner   fs: protect inode...
2043
  			goto out_unlock_inode;
03ba3782e   Jens Axboe   writeback: switch...
2044
2045
2046
2047
2048
2049
  
  		/*
  		 * Only add valid (hashed) inodes to the superblock's
  		 * dirty list.  Add blockdev inodes as well.
  		 */
  		if (!S_ISBLK(inode->i_mode)) {
1d3382cbf   Al Viro   new helper: inode...
2050
  			if (inode_unhashed(inode))
250df6ed2   Dave Chinner   fs: protect inode...
2051
  				goto out_unlock_inode;
03ba3782e   Jens Axboe   writeback: switch...
2052
  		}
a4ffdde6e   Al Viro   simplify checks f...
2053
  		if (inode->i_state & I_FREEING)
250df6ed2   Dave Chinner   fs: protect inode...
2054
  			goto out_unlock_inode;
03ba3782e   Jens Axboe   writeback: switch...
2055
2056
2057
2058
2059
2060
  
  		/*
  		 * If the inode was already on b_dirty/b_io/b_more_io, don't
  		 * reposition it (that would break b_dirty time-ordering).
  		 */
  		if (!was_dirty) {
87e1d789b   Tejun Heo   writeback: implem...
2061
  			struct bdi_writeback *wb;
d6c10f1fc   Tejun Heo   writeback: implem...
2062
  			struct list_head *dirty_list;
a66979aba   Dave Chinner   fs: move i_wb_lis...
2063
  			bool wakeup_bdi = false;
253c34e9b   Artem Bityutskiy   writeback: preven...
2064

87e1d789b   Tejun Heo   writeback: implem...
2065
  			wb = locked_inode_to_wb_and_lock_list(inode);
253c34e9b   Artem Bityutskiy   writeback: preven...
2066

0747259d1   Tejun Heo   writeback: dirty ...
2067
2068
2069
2070
  			WARN(bdi_cap_writeback_dirty(wb->bdi) &&
  			     !test_bit(WB_registered, &wb->state),
  			     "bdi-%s not registered\n", wb->bdi->name);
03ba3782e   Jens Axboe   writeback: switch...
2071
2072
  
  			inode->dirtied_when = jiffies;
a2f487069   Theodore Ts'o   fs: make sure the...
2073
2074
  			if (dirtytime)
  				inode->dirtied_time_when = jiffies;
d6c10f1fc   Tejun Heo   writeback: implem...
2075

a2f487069   Theodore Ts'o   fs: make sure the...
2076
  			if (inode->i_state & (I_DIRTY_INODE | I_DIRTY_PAGES))
0747259d1   Tejun Heo   writeback: dirty ...
2077
  				dirty_list = &wb->b_dirty;
a2f487069   Theodore Ts'o   fs: make sure the...
2078
  			else
0747259d1   Tejun Heo   writeback: dirty ...
2079
  				dirty_list = &wb->b_dirty_time;
d6c10f1fc   Tejun Heo   writeback: implem...
2080

c7f540849   Dave Chinner   inode: rename i_w...
2081
  			wakeup_bdi = inode_io_list_move_locked(inode, wb,
d6c10f1fc   Tejun Heo   writeback: implem...
2082
  							       dirty_list);
0747259d1   Tejun Heo   writeback: dirty ...
2083
  			spin_unlock(&wb->list_lock);
0ae45f63d   Theodore Ts'o   vfs: add support ...
2084
  			trace_writeback_dirty_inode_enqueue(inode);
a66979aba   Dave Chinner   fs: move i_wb_lis...
2085

d6c10f1fc   Tejun Heo   writeback: implem...
2086
2087
2088
2089
2090
2091
  			/*
  			 * If this is the first dirty inode for this bdi,
  			 * we have to wake up the corresponding bdi thread
  			 * to make sure background write-back happens
  			 * later.
  			 */
0747259d1   Tejun Heo   writeback: dirty ...
2092
2093
  			if (bdi_cap_writeback_dirty(wb->bdi) && wakeup_bdi)
  				wb_wakeup_delayed(wb);
a66979aba   Dave Chinner   fs: move i_wb_lis...
2094
  			return;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2095
  		}
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2096
  	}
250df6ed2   Dave Chinner   fs: protect inode...
2097
2098
  out_unlock_inode:
  	spin_unlock(&inode->i_lock);
253c34e9b   Artem Bityutskiy   writeback: preven...
2099

dbce03b9e   Randy Dunlap   fs/writeback.c: f...
2100
  #undef I_DIRTY_INODE
03ba3782e   Jens Axboe   writeback: switch...
2101
2102
  }
  EXPORT_SYMBOL(__mark_inode_dirty);
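  /*
   * A minimal sketch of how filesystem code typically reaches
   * __mark_inode_dirty(): the mark_inode_dirty() and mark_inode_dirty_sync()
   * wrappers in <linux/fs.h> pass I_DIRTY and I_DIRTY_SYNC respectively,
   * e.g.
   *
   *	inode->i_size = new_size;
   *	mark_inode_dirty(inode);   becomes __mark_inode_dirty(inode, I_DIRTY)
   */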
e97fedb9e   Dave Chinner   sync: serialise p...
2103
2104
2105
2106
2107
2108
2109
2110
2111
  /*
   * The @s_sync_lock is used to serialise concurrent sync operations
   * to avoid lock contention problems with concurrent wait_sb_inodes() calls.
   * Concurrent callers will block on the s_sync_lock rather than doing contending
   * walks. The queueing maintains the behaviour required by sync(2): all the IO
   * that has been issued up to the time this function is entered is guaranteed
   * to be completed by the time we have gained the lock and waited for all IO
   * that is in progress, regardless of the order in which callers are granted
   * the lock.
   */
b6e51316d   Jens Axboe   writeback: separa...
2112
  static void wait_sb_inodes(struct super_block *sb)
03ba3782e   Jens Axboe   writeback: switch...
2113
  {
6c60d2b57   Dave Chinner   fs/fs-writeback.c...
2114
  	LIST_HEAD(sync_list);
03ba3782e   Jens Axboe   writeback: switch...
2115
2116
2117
2118
2119
  
  	/*
  	 * We need to be protected against the filesystem going from
  	 * r/o to r/w or vice versa.
  	 */
b6e51316d   Jens Axboe   writeback: separa...
2120
  	WARN_ON(!rwsem_is_locked(&sb->s_umount));
03ba3782e   Jens Axboe   writeback: switch...
2121

e97fedb9e   Dave Chinner   sync: serialise p...
2122
  	mutex_lock(&sb->s_sync_lock);
03ba3782e   Jens Axboe   writeback: switch...
2123
2124
  
  	/*
6c60d2b57   Dave Chinner   fs/fs-writeback.c...
2125
2126
2127
2128
2129
2130
2131
2132
2133
2134
2135
2136
2137
2138
2139
2140
2141
2142
  	 * Splice the writeback list onto a temporary list to avoid waiting on
  	 * inodes that have started writeback after this point.
  	 *
  	 * Use rcu_read_lock() to keep the inodes around until we have a
  	 * reference. s_inode_wblist_lock protects sb->s_inodes_wb as well as
  	 * the local list because inodes can be dropped from either by writeback
  	 * completion.
  	 */
  	rcu_read_lock();
  	spin_lock_irq(&sb->s_inode_wblist_lock);
  	list_splice_init(&sb->s_inodes_wb, &sync_list);
  
  	/*
  	 * Data integrity sync. Must wait for all pages under writeback, because
  	 * there may be pages that were dirtied before our sync call but had
  	 * writeout started before we got to them.  In that case the inode may
  	 * no longer be on the dirty list, but we still have to wait for that
  	 * writeout.
03ba3782e   Jens Axboe   writeback: switch...
2143
  	 */
6c60d2b57   Dave Chinner   fs/fs-writeback.c...
2144
2145
2146
  	while (!list_empty(&sync_list)) {
  		struct inode *inode = list_first_entry(&sync_list, struct inode,
  						       i_wb_list);
250df6ed2   Dave Chinner   fs: protect inode...
2147
  		struct address_space *mapping = inode->i_mapping;
03ba3782e   Jens Axboe   writeback: switch...
2148

6c60d2b57   Dave Chinner   fs/fs-writeback.c...
2149
2150
2151
2152
2153
2154
2155
2156
2157
2158
2159
2160
2161
2162
2163
2164
2165
  		/*
  		 * Move each inode back to the wb list before we drop the lock
  		 * to preserve consistency between i_wb_list and the mapping
  		 * writeback tag. Writeback completion is responsible for removing
  		 * the inode from either list once the writeback tag is cleared.
  		 */
  		list_move_tail(&inode->i_wb_list, &sb->s_inodes_wb);
  
  		/*
  		 * The mapping can appear untagged while still on-list since we
  		 * do not have the mapping lock. Skip it here, wb completion
  		 * will remove it.
  		 */
  		if (!mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK))
  			continue;
  
  		spin_unlock_irq(&sb->s_inode_wblist_lock);
250df6ed2   Dave Chinner   fs: protect inode...
2166
  		spin_lock(&inode->i_lock);
6c60d2b57   Dave Chinner   fs/fs-writeback.c...
2167
  		if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) {
250df6ed2   Dave Chinner   fs: protect inode...
2168
  			spin_unlock(&inode->i_lock);
6c60d2b57   Dave Chinner   fs/fs-writeback.c...
2169
2170
  
  			spin_lock_irq(&sb->s_inode_wblist_lock);
03ba3782e   Jens Axboe   writeback: switch...
2171
  			continue;
250df6ed2   Dave Chinner   fs: protect inode...
2172
  		}
03ba3782e   Jens Axboe   writeback: switch...
2173
  		__iget(inode);
250df6ed2   Dave Chinner   fs: protect inode...
2174
  		spin_unlock(&inode->i_lock);
6c60d2b57   Dave Chinner   fs/fs-writeback.c...
2175
  		rcu_read_unlock();
03ba3782e   Jens Axboe   writeback: switch...
2176

aa750fd71   Junichi Nomura   mm/filemap.c: mak...
2177
2178
2179
2180
2181
2182
  		/*
  		 * We keep the error status of each individual mapping so that
  		 * applications can catch the writeback error using fsync(2).
  		 * See filemap_fdatawait_keep_errors() for details.
  		 */
  		filemap_fdatawait_keep_errors(mapping);
03ba3782e   Jens Axboe   writeback: switch...
2183
2184
  
  		cond_resched();
6c60d2b57   Dave Chinner   fs/fs-writeback.c...
2185
2186
2187
2188
  		iput(inode);
  
  		rcu_read_lock();
  		spin_lock_irq(&sb->s_inode_wblist_lock);
03ba3782e   Jens Axboe   writeback: switch...
2189
  	}
6c60d2b57   Dave Chinner   fs/fs-writeback.c...
2190
2191
  	spin_unlock_irq(&sb->s_inode_wblist_lock);
  	rcu_read_unlock();
e97fedb9e   Dave Chinner   sync: serialise p...
2192
  	mutex_unlock(&sb->s_sync_lock);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2193
  }
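  
  /*
   * Illustrative sketch (not part of this file) of the walk pattern used by
   * wait_sb_inodes() above: splice the shared list onto a private one under
   * the lock so that entries queued later are not waited on, and move each
   * entry back to the shared list before dropping the lock so the completion
   * path can still find and remove it.  All demo_* names are hypothetical.
   */
  struct demo_item {
  	struct list_head	link;
  };
  
  static LIST_HEAD(demo_pending);
  static DEFINE_SPINLOCK(demo_lock);
  
  static void demo_wait_all(void)
  {
  	LIST_HEAD(sync_list);
  
  	spin_lock_irq(&demo_lock);
  	list_splice_init(&demo_pending, &sync_list);
  	while (!list_empty(&sync_list)) {
  		struct demo_item *item = list_first_entry(&sync_list,
  							  struct demo_item, link);
  
  		/* keep the item visible to the completion path */
  		list_move_tail(&item->link, &demo_pending);
  		spin_unlock_irq(&demo_lock);
  
  		/* ... wait on the item here without holding the lock ... */
  		cond_resched();
  
  		spin_lock_irq(&demo_lock);
  	}
  	spin_unlock_irq(&demo_lock);
  }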
f30a7d0cc   Tejun Heo   writeback: restru...
2194
2195
  static void __writeback_inodes_sb_nr(struct super_block *sb, unsigned long nr,
  				     enum wb_reason reason, bool skip_if_busy)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2196
  {
cc395d7f1   Tejun Heo   writeback: implem...
2197
  	DEFINE_WB_COMPLETION_ONSTACK(done);
83ba7b071   Christoph Hellwig   writeback: simpli...
2198
  	struct wb_writeback_work work = {
6e6938b6d   Wu Fengguang   writeback: introd...
2199
2200
2201
2202
2203
  		.sb			= sb,
  		.sync_mode		= WB_SYNC_NONE,
  		.tagged_writepages	= 1,
  		.done			= &done,
  		.nr_pages		= nr,
0e175a183   Curt Wohlgemuth   writeback: Add a ...
2204
  		.reason			= reason,
3c4d71653   Christoph Hellwig   writeback: queue ...
2205
  	};
e79729123   Tejun Heo   writeback: don't ...
2206
  	struct backing_dev_info *bdi = sb->s_bdi;
d8a8559cd   Jens Axboe   writeback: get ri...
2207

e79729123   Tejun Heo   writeback: don't ...
2208
  	if (!bdi_has_dirty_io(bdi) || bdi == &noop_backing_dev_info)
6eedc7015   Jan Kara   vfs: Move noop_ba...
2209
  		return;
cf37e9724   Christoph Hellwig   writeback: enforc...
2210
  	WARN_ON(!rwsem_is_locked(&sb->s_umount));
f30a7d0cc   Tejun Heo   writeback: restru...
2211

db1253604   Tejun Heo   writeback: make w...
2212
  	bdi_split_work_to_wbs(sb->s_bdi, &work, skip_if_busy);
cc395d7f1   Tejun Heo   writeback: implem...
2213
  	wb_wait_for_completion(bdi, &done);
e913fc825   Jens Axboe   writeback: fix WB...
2214
  }
f30a7d0cc   Tejun Heo   writeback: restru...
2215
2216
2217
2218
2219
2220
2221
2222
2223
2224
2225
2226
2227
2228
2229
2230
2231
  
  /**
   * writeback_inodes_sb_nr -	writeback dirty inodes from given super_block
   * @sb: the superblock
   * @nr: the number of pages to write
   * @reason: reason why some writeback work was initiated
   *
   * Start writeback on some inodes on this super_block. No guarantees are made
   * on how many (if any) will be written, and this function does not wait
   * for IO completion of submitted IO.
   */
  void writeback_inodes_sb_nr(struct super_block *sb,
  			    unsigned long nr,
  			    enum wb_reason reason)
  {
  	__writeback_inodes_sb_nr(sb, nr, reason, false);
  }
3259f8bed   Chris Mason   Add new functions...
2232
2233
2234
2235
2236
  EXPORT_SYMBOL(writeback_inodes_sb_nr);
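  
  /*
   * Illustrative, hypothetical caller (not part of this file): a filesystem
   * that is short on space may push out a bounded number of dirty pages on
   * its own superblock.  This assumes the caller already holds sb->s_umount,
   * as asserted in __writeback_inodes_sb_nr(); WB_REASON_FS_FREE_SPACE is one
   * plausible reason to pass.
   */
  static void demo_flush_for_space(struct super_block *sb, unsigned long nr_pages)
  {
  	writeback_inodes_sb_nr(sb, nr_pages, WB_REASON_FS_FREE_SPACE);
  }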
  
  /**
   * writeback_inodes_sb	-	writeback dirty inodes from given super_block
   * @sb: the superblock
786228ab3   Marcos Paulo de Souza   writeback: Fix is...
2237
   * @reason: reason why some writeback work was initiated
3259f8bed   Chris Mason   Add new functions...
2238
2239
2240
2241
2242
   *
   * Start writeback on some inodes on this super_block. No guarantees are made
   * on how many (if any) will be written, and this function does not wait
   * for IO completion of submitted IO.
   */
0e175a183   Curt Wohlgemuth   writeback: Add a ...
2243
  void writeback_inodes_sb(struct super_block *sb, enum wb_reason reason)
3259f8bed   Chris Mason   Add new functions...
2244
  {
0e175a183   Curt Wohlgemuth   writeback: Add a ...
2245
  	return writeback_inodes_sb_nr(sb, get_nr_dirty_pages(), reason);
3259f8bed   Chris Mason   Add new functions...
2246
  }
0e3c9a228   Jens Axboe   Revert "writeback...
2247
  EXPORT_SYMBOL(writeback_inodes_sb);
e913fc825   Jens Axboe   writeback: fix WB...
2248
2249
  
  /**
10ee27a06   Miao Xie   vfs: re-implement...
2250
   * try_to_writeback_inodes_sb_nr - try to start writeback if none underway
17bd55d03   Eric Sandeen   fs-writeback: Add...
2251
   * @sb: the superblock
10ee27a06   Miao Xie   vfs: re-implement...
2252
2253
   * @nr: the number of pages to write
   * @reason: the reason for writeback
17bd55d03   Eric Sandeen   fs-writeback: Add...
2254
   *
10ee27a06   Miao Xie   vfs: re-implement...
2255
   * Invoke writeback_inodes_sb_nr if no writeback is currently underway.
17bd55d03   Eric Sandeen   fs-writeback: Add...
2256
2257
   * Returns true if writeback was started, false if not.
   */
f30a7d0cc   Tejun Heo   writeback: restru...
2258
2259
  bool try_to_writeback_inodes_sb_nr(struct super_block *sb, unsigned long nr,
  				   enum wb_reason reason)
17bd55d03   Eric Sandeen   fs-writeback: Add...
2260
  {
10ee27a06   Miao Xie   vfs: re-implement...
2261
  	if (!down_read_trylock(&sb->s_umount))
f30a7d0cc   Tejun Heo   writeback: restru...
2262
  		return false;
10ee27a06   Miao Xie   vfs: re-implement...
2263

f30a7d0cc   Tejun Heo   writeback: restru...
2264
  	__writeback_inodes_sb_nr(sb, nr, reason, true);
10ee27a06   Miao Xie   vfs: re-implement...
2265
  	up_read(&sb->s_umount);
f30a7d0cc   Tejun Heo   writeback: restru...
2266
  	return true;
17bd55d03   Eric Sandeen   fs-writeback: Add...
2267
  }
10ee27a06   Miao Xie   vfs: re-implement...
2268
  EXPORT_SYMBOL(try_to_writeback_inodes_sb_nr);
17bd55d03   Eric Sandeen   fs-writeback: Add...
2269
2270
  
  /**
10ee27a06   Miao Xie   vfs: re-implement...
2271
   * try_to_writeback_inodes_sb - try to start writeback if none underway
3259f8bed   Chris Mason   Add new functions...
2272
   * @sb: the superblock
786228ab3   Marcos Paulo de Souza   writeback: Fix is...
2273
   * @reason: reason why some writeback work was initiated
3259f8bed   Chris Mason   Add new functions...
2274
   *
10ee27a06   Miao Xie   vfs: re-implement...
2275
   * Implemented by calling try_to_writeback_inodes_sb_nr().
3259f8bed   Chris Mason   Add new functions...
2276
2277
   * Returns true if writeback was started, false if not.
   */
f30a7d0cc   Tejun Heo   writeback: restru...
2278
  bool try_to_writeback_inodes_sb(struct super_block *sb, enum wb_reason reason)
3259f8bed   Chris Mason   Add new functions...
2279
  {
10ee27a06   Miao Xie   vfs: re-implement...
2280
  	return try_to_writeback_inodes_sb_nr(sb, get_nr_dirty_pages(), reason);
3259f8bed   Chris Mason   Add new functions...
2281
  }
10ee27a06   Miao Xie   vfs: re-implement...
2282
  EXPORT_SYMBOL(try_to_writeback_inodes_sb);
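  
  /*
   * Illustrative, hypothetical caller (not part of this file): the try_to_*
   * variants only trylock sb->s_umount, so they suit callers that must not
   * block behind a remount or unmount in progress.  The boolean return value
   * says whether writeback was actually started.
   */
  static void demo_try_flush(struct super_block *sb)
  {
  	if (!try_to_writeback_inodes_sb(sb, WB_REASON_FS_FREE_SPACE))
  		pr_debug("s_umount contended, writeback not started\n");
  }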
3259f8bed   Chris Mason   Add new functions...
2283
2284
  
  /**
d8a8559cd   Jens Axboe   writeback: get ri...
2285
   * sync_inodes_sb	-	sync sb inode pages
0dc83bd30   Jan Kara   Revert "writeback...
2286
   * @sb: the superblock
d8a8559cd   Jens Axboe   writeback: get ri...
2287
2288
   *
   * This function writes and waits on any dirty inode belonging to this
0dc83bd30   Jan Kara   Revert "writeback...
2289
   * super_block.
d8a8559cd   Jens Axboe   writeback: get ri...
2290
   */
0dc83bd30   Jan Kara   Revert "writeback...
2291
  void sync_inodes_sb(struct super_block *sb)
d8a8559cd   Jens Axboe   writeback: get ri...
2292
  {
cc395d7f1   Tejun Heo   writeback: implem...
2293
  	DEFINE_WB_COMPLETION_ONSTACK(done);
83ba7b071   Christoph Hellwig   writeback: simpli...
2294
  	struct wb_writeback_work work = {
3c4d71653   Christoph Hellwig   writeback: queue ...
2295
2296
2297
2298
  		.sb		= sb,
  		.sync_mode	= WB_SYNC_ALL,
  		.nr_pages	= LONG_MAX,
  		.range_cyclic	= 0,
83ba7b071   Christoph Hellwig   writeback: simpli...
2299
  		.done		= &done,
0e175a183   Curt Wohlgemuth   writeback: Add a ...
2300
  		.reason		= WB_REASON_SYNC,
7747bd4bc   Dave Chinner   sync: don't block...
2301
  		.for_sync	= 1,
3c4d71653   Christoph Hellwig   writeback: queue ...
2302
  	};
e79729123   Tejun Heo   writeback: don't ...
2303
  	struct backing_dev_info *bdi = sb->s_bdi;
3c4d71653   Christoph Hellwig   writeback: queue ...
2304

006a0973e   Tejun Heo   writeback: sync_i...
2305
2306
2307
2308
2309
2310
  	/*
  	 * Can't skip on !bdi_has_dirty() because we should wait for !dirty
  	 * inodes that are under writeback, and I_DIRTY_TIME inodes ignored by
  	 * bdi_has_dirty() need to be written out too.
  	 */
  	if (bdi == &noop_backing_dev_info)
6eedc7015   Jan Kara   vfs: Move noop_ba...
2311
  		return;
cf37e9724   Christoph Hellwig   writeback: enforc...
2312
  	WARN_ON(!rwsem_is_locked(&sb->s_umount));
db1253604   Tejun Heo   writeback: make w...
2313
  	bdi_split_work_to_wbs(bdi, &work, false);
cc395d7f1   Tejun Heo   writeback: implem...
2314
  	wb_wait_for_completion(bdi, &done);
83ba7b071   Christoph Hellwig   writeback: simpli...
2315

b6e51316d   Jens Axboe   writeback: separa...
2316
  	wait_sb_inodes(sb);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2317
  }
d8a8559cd   Jens Axboe   writeback: get ri...
2318
  EXPORT_SYMBOL(sync_inodes_sb);
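  
  /*
   * Illustrative, hypothetical sequence (not part of this file): a data
   * integrity sync of a single superblock, roughly the shape a syncfs(2)-style
   * path needs.  sync_inodes_sb() writes and waits on the inodes; ->sync_fs
   * then lets the filesystem flush its own metadata or journal.  Assumes the
   * caller may take s_umount for read.
   */
  static int demo_sync_one_sb(struct super_block *sb)
  {
  	int ret = 0;
  
  	down_read(&sb->s_umount);
  	sync_inodes_sb(sb);
  	if (sb->s_op->sync_fs)
  		ret = sb->s_op->sync_fs(sb, 1);	/* wait == 1 */
  	up_read(&sb->s_umount);
  	return ret;
  }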
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2319

1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2320
  /**
7f04c26d7   Andrea Arcangeli   [PATCH] fix nr_un...
2321
2322
2323
2324
2325
2326
   * write_inode_now	-	write an inode to disk
   * @inode: inode to write to disk
   * @sync: whether the write should be synchronous or not
   *
   * This function commits an inode to disk immediately if it is dirty. This is
   * primarily needed by knfsd.
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2327
   *
7f04c26d7   Andrea Arcangeli   [PATCH] fix nr_un...
2328
   * The caller must either have a ref on the inode or must have set I_WILL_FREE.
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2329
   */
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2330
2331
  int write_inode_now(struct inode *inode, int sync)
  {
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2332
2333
  	struct writeback_control wbc = {
  		.nr_to_write = LONG_MAX,
18914b188   Mike Galbraith   write_inode_now()...
2334
  		.sync_mode = sync ? WB_SYNC_ALL : WB_SYNC_NONE,
111ebb6e6   OGAWA Hirofumi   [PATCH] writeback...
2335
2336
  		.range_start = 0,
  		.range_end = LLONG_MAX,
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2337
2338
2339
  	};
  
  	if (!mapping_cap_writeback_dirty(inode->i_mapping))
49364ce25   Andrew Morton   [PATCH] write_ino...
2340
  		wbc.nr_to_write = 0;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2341
2342
  
  	might_sleep();
aaf255933   Tejun Heo   writeback, cgroup...
2343
  	return writeback_single_inode(inode, &wbc);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2344
2345
2346
2347
2348
2349
2350
2351
2352
2353
2354
2355
2356
2357
2358
2359
  }
  EXPORT_SYMBOL(write_inode_now);
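  
  /*
   * Illustrative, hypothetical caller (not part of this file): flush a single
   * inode synchronously, the kind of use the "needed by knfsd" note above is
   * about.  sync == 1 selects WB_SYNC_ALL.
   */
  static int demo_flush_inode(struct inode *inode)
  {
  	return write_inode_now(inode, 1);
  }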
  
  /**
   * sync_inode - write an inode and its pages to disk.
   * @inode: the inode to sync
   * @wbc: controls the writeback mode
   *
   * sync_inode() will write an inode and its pages to disk.  It will also
   * correctly update the inode on its superblock's dirty inode lists and will
   * update inode->i_state.
   *
   * The caller must have a ref on the inode.
   */
  int sync_inode(struct inode *inode, struct writeback_control *wbc)
  {
aaf255933   Tejun Heo   writeback, cgroup...
2360
  	return writeback_single_inode(inode, wbc);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
2361
2362
  }
  EXPORT_SYMBOL(sync_inode);
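  
  /*
   * Illustrative, hypothetical caller (not part of this file): nudge out up to
   * 16 pages of one inode without waiting, by handing sync_inode() a
   * caller-built writeback_control.
   */
  static int demo_nudge_inode(struct inode *inode)
  {
  	struct writeback_control wbc = {
  		.sync_mode	= WB_SYNC_NONE,
  		.nr_to_write	= 16,
  		.range_start	= 0,
  		.range_end	= LLONG_MAX,
  	};
  
  	return sync_inode(inode, &wbc);
  }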
c37650161   Christoph Hellwig   fs: add sync_inod...
2363
2364
  
  /**
c691b9d98   Andrew Morton   sync_inode_metada...
2365
   * sync_inode_metadata - write an inode to disk
c37650161   Christoph Hellwig   fs: add sync_inod...
2366
2367
2368
   * @inode: the inode to sync
   * @wait: wait for I/O to complete.
   *
c691b9d98   Andrew Morton   sync_inode_metada...
2369
   * Write an inode to disk and adjust its dirty state after completion.
c37650161   Christoph Hellwig   fs: add sync_inod...
2370
2371
2372
2373
2374
2375
2376
2377
2378
2379
2380
2381
2382
   *
   * Note: only writes the actual inode, no associated data or other metadata.
   */
  int sync_inode_metadata(struct inode *inode, int wait)
  {
  	struct writeback_control wbc = {
  		.sync_mode = wait ? WB_SYNC_ALL : WB_SYNC_NONE,
  		.nr_to_write = 0, /* metadata-only */
  	};
  
  	return sync_inode(inode, &wbc);
  }
  EXPORT_SYMBOL(sync_inode_metadata);
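  
  /*
   * Illustrative, hypothetical caller (not part of this file): an ->fsync
   * implementation that has already written the data pages and only needs the
   * inode itself to become durable.  The unlocked i_state test mirrors what
   * generic fsync helpers do; for datasync the inode write can be skipped when
   * no datasync-relevant fields are dirty.
   */
  static int demo_fsync_metadata_only(struct inode *inode, int datasync)
  {
  	if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
  		return 0;
  
  	return sync_inode_metadata(inode, 1);
  }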