Blame view

fs/ext4/page-io.c 12.9 KB
bd2d0210c   Theodore Ts'o   ext4: use bio lay...
1
2
3
4
5
6
7
  /*
   * linux/fs/ext4/page-io.c
   *
   * This contains the new page_io functions for ext4
   *
   * Written by Theodore Ts'o, 2010.
   */
bd2d0210c   Theodore Ts'o   ext4: use bio lay...
8
9
10
11
12
13
14
15
16
17
18
19
  #include <linux/fs.h>
  #include <linux/time.h>
  #include <linux/jbd2.h>
  #include <linux/highuid.h>
  #include <linux/pagemap.h>
  #include <linux/quotaops.h>
  #include <linux/string.h>
  #include <linux/buffer_head.h>
  #include <linux/writeback.h>
  #include <linux/pagevec.h>
  #include <linux/mpage.h>
  #include <linux/namei.h>
a27bb332c   Kent Overstreet   aio: don't includ...
20
  #include <linux/aio.h>
bd2d0210c   Theodore Ts'o   ext4: use bio lay...
21
22
23
24
25
  #include <linux/uio.h>
  #include <linux/bio.h>
  #include <linux/workqueue.h>
  #include <linux/kernel.h>
  #include <linux/slab.h>
1ae48a635   Jan Kara   ext4: use redirty...
26
  #include <linux/mm.h>
e8974c393   Anatol Pomozov   ext4: rate limit ...
27
  #include <linux/ratelimit.h>
bd2d0210c   Theodore Ts'o   ext4: use bio lay...
28
29
30
31
  
  #include "ext4_jbd2.h"
  #include "xattr.h"
  #include "acl.h"
bd2d0210c   Theodore Ts'o   ext4: use bio lay...
32

0058f9658   Jan Kara   ext4: make ext4_b...
33
  static struct kmem_cache *io_end_cachep;
bd2d0210c   Theodore Ts'o   ext4: use bio lay...
34

5dabfc78d   Theodore Ts'o   ext4: rename {exi...
35
  int __init ext4_init_pageio(void)
bd2d0210c   Theodore Ts'o   ext4: use bio lay...
36
  {
bd2d0210c   Theodore Ts'o   ext4: use bio lay...
37
  	io_end_cachep = KMEM_CACHE(ext4_io_end, SLAB_RECLAIM_ACCOUNT);
0058f9658   Jan Kara   ext4: make ext4_b...
38
  	if (io_end_cachep == NULL)
bd2d0210c   Theodore Ts'o   ext4: use bio lay...
39
  		return -ENOMEM;
bd2d0210c   Theodore Ts'o   ext4: use bio lay...
40
41
  	return 0;
  }
5dabfc78d   Theodore Ts'o   ext4: rename {exi...
42
/* Tear down the io_end slab cache at module exit. */
void ext4_exit_pageio(void)
{
	kmem_cache_destroy(io_end_cachep);
}
1ada47d94   Theodore Ts'o   ext4: fix ext4_ev...
46
/*
 * Print a buffer I/O error compatible with the fs/buffer.c.  This
 * provides compatibility with dmesg scrapers that look for a specific
 * buffer I/O error message.  We really need a unified error reporting
 * structure to userspace ala Digital Unix's uerf system, but it's
 * probably not going to happen in my lifetime, due to LKML politics...
 */
static void buffer_io_error(struct buffer_head *bh)
{
	char b[BDEVNAME_SIZE];

	/* Rate limited so a dying device cannot flood the log. */
	printk_ratelimited(KERN_ERR "Buffer I/O error on device %s, logical block %llu\n",
			bdevname(bh->b_bdev, b),
			(unsigned long long)bh->b_blocknr);
}
  
/*
 * Finish writeback for every page segment covered by @bio: clear the
 * async_write flag on each buffer the bio wrote, and once no buffer in
 * a page remains under async writeback, end writeback on that page.
 */
static void ext4_finish_bio(struct bio *bio)
{
	int i;
	/* Non-zero iff the bio completed with an I/O error. */
	int error = !test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;
		struct buffer_head *bh, *head;
		/* Byte range within the page covered by this bio_vec. */
		unsigned bio_start = bvec->bv_offset;
		unsigned bio_end = bio_start + bvec->bv_len;
		unsigned under_io = 0;	/* buffers still under async write */
		unsigned long flags;

		if (!page)
			continue;

		if (error) {
			SetPageError(page);
			set_bit(AS_EIO, &page->mapping->flags);
		}
		bh = head = page_buffers(page);
		/*
		 * We check all buffers in the page under BH_Uptodate_Lock
		 * to avoid races with other end io clearing async_write flags
		 */
		local_irq_save(flags);
		bit_spin_lock(BH_Uptodate_Lock, &head->b_state);
		do {
			/*
			 * Buffers outside the range this bio wrote: only
			 * note whether they are still under writeback.
			 */
			if (bh_offset(bh) < bio_start ||
			    bh_offset(bh) + bh->b_size > bio_end) {
				if (buffer_async_write(bh))
					under_io++;
				continue;
			}
			clear_buffer_async_write(bh);
			if (error)
				buffer_io_error(bh);
		} while ((bh = bh->b_this_page) != head);
		bit_spin_unlock(BH_Uptodate_Lock, &head->b_state);
		local_irq_restore(flags);
		/* Last async writer for this page: finish page writeback. */
		if (!under_io)
			end_page_writeback(page);
	}
}
97a851ed7   Jan Kara   ext4: use io_end ...
107
/*
 * Free an io_end: finish and release every bio chained on it, drop the
 * per-inode ioend count (waking waiters when it reaches zero), and
 * return the structure to the slab cache.  The io_end must already be
 * off all lists with its unwritten-extent work completed.
 */
static void ext4_release_io_end(ext4_io_end_t *io_end)
{
	struct bio *bio, *next_bio;

	BUG_ON(!list_empty(&io_end->list));
	BUG_ON(io_end->flag & EXT4_IO_END_UNWRITTEN);
	WARN_ON(io_end->handle);

	if (atomic_dec_and_test(&EXT4_I(io_end->inode)->i_ioend_count))
		wake_up_all(ext4_ioend_wq(io_end->inode));

	/* Completed bios are chained through bi_private (see ext4_end_bio). */
	for (bio = io_end->bio; bio; bio = next_bio) {
		next_bio = bio->bi_private;
		ext4_finish_bio(bio);
		bio_put(bio);
	}
	kmem_cache_free(io_end_cachep, io_end);
}
  
/*
 * Clear the unwritten flag on @io_end and, if this was the inode's last
 * pending unwritten-extent conversion, wake up anyone waiting for it.
 */
static void ext4_clear_io_unwritten_flag(ext4_io_end_t *io_end)
{
	struct inode *inode = io_end->inode;

	io_end->flag &= ~EXT4_IO_END_UNWRITTEN;
	/* Wake up anyone waiting on unwritten extent conversion */
	if (atomic_dec_and_test(&EXT4_I(inode)->i_unwritten))
		wake_up_all(ext4_ioend_wq(inode));
}
a115f749c   Jan Kara   ext4: remove wait...
134
135
136
137
138
139
140
141
/*
 * Check a range of space and convert unwritten extents to written. Note that
 * we are protected from truncate touching same part of extent tree by the
 * fact that truncate code waits for all DIO to finish (thus exclusion from
 * direct IO is achieved) and also waits for PageWriteback bits. Thus we
 * cannot get to ext4_ext_truncate() before all IOs overlapping that range are
 * completed (happens from ext4_free_ioend()).
 */
static int ext4_end_io(ext4_io_end_t *io)
{
	struct inode *inode = io->inode;
	loff_t offset = io->offset;
	ssize_t size = io->size;
	handle_t *handle = io->handle;
	int ret = 0;

	ext4_debug("ext4_end_io_nolock: io 0x%p from inode %lu,list->next 0x%p,"
		   "list->prev 0x%p\n",
		   io, inode->i_ino, io->list.next, io->list.prev);

	io->handle = NULL;	/* Following call will use up the handle */
	ret = ext4_convert_unwritten_extents(handle, inode, offset, size);
	if (ret < 0) {
		/* Failed conversion can mean user data loss -- shout. */
		ext4_msg(inode->i_sb, KERN_EMERG,
			 "failed to convert unwritten extents to written "
			 "extents -- potential data loss!  "
			 "(inode %lu, offset %llu, size %zd, error %d)",
			 inode->i_ino, offset, size, ret);
	}
	ext4_clear_io_unwritten_flag(io);
	ext4_release_io_end(io);
	return ret;
}
2e8fa54e3   Jan Kara   ext4: split exten...
167
/*
 * Debug helper: dump @inode's completed io_end list.  Compiles to a
 * no-op unless EXT4FS_DEBUG is defined.
 */
static void dump_completed_IO(struct inode *inode, struct list_head *head)
{
#ifdef	EXT4FS_DEBUG
	struct list_head *cur, *before, *after;
	ext4_io_end_t *io, *io0, *io1;

	if (list_empty(head))
		return;

	ext4_debug("Dump inode %lu completed io list\n", inode->i_ino);
	list_for_each_entry(io, head, list) {
		cur = &io->list;
		before = cur->prev;
		io0 = container_of(before, ext4_io_end_t, list);
		after = cur->next;
		io1 = container_of(after, ext4_io_end_t, list);

		ext4_debug("io 0x%p from inode %lu,prev 0x%p,next 0x%p\n",
			    io, inode->i_ino, io0, io1);
	}
#endif
}
  
/* Add the io_end to per-inode completed end_io list. */
static void ext4_add_complete_io(ext4_io_end_t *io_end)
{
	struct ext4_inode_info *ei = EXT4_I(io_end->inode);
	struct ext4_sb_info *sbi = EXT4_SB(io_end->inode->i_sb);
	struct workqueue_struct *wq;
	unsigned long flags;

	/* Only reserved conversions from writeback should enter here */
	WARN_ON(!(io_end->flag & EXT4_IO_END_UNWRITTEN));
	WARN_ON(!io_end->handle && sbi->s_journal);
	spin_lock_irqsave(&ei->i_completed_io_lock, flags);
	wq = sbi->rsv_conversion_wq;
	/* Kick the worker only on the empty -> non-empty transition. */
	if (list_empty(&ei->i_rsv_conversion_list))
		queue_work(wq, &ei->i_rsv_conversion_work);
	list_add_tail(&io_end->list, &ei->i_rsv_conversion_list);
	spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
}
d73d5046a   Tao Ma   ext4: Use correct...
209

2e8fa54e3   Jan Kara   ext4: split exten...
210
211
/*
 * Detach all completed io_ends from @head and run unwritten-extent
 * conversion on each.  Returns the first error encountered (if any)
 * while still processing the remainder of the list.
 */
static int ext4_do_flush_completed_IO(struct inode *inode,
				      struct list_head *head)
{
	ext4_io_end_t *io;
	struct list_head unwritten;
	unsigned long flags;
	struct ext4_inode_info *ei = EXT4_I(inode);
	int err, ret = 0;

	/* Detach the whole list under the lock, then work on it unlocked. */
	spin_lock_irqsave(&ei->i_completed_io_lock, flags);
	dump_completed_IO(inode, head);
	list_replace_init(head, &unwritten);
	spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);

	while (!list_empty(&unwritten)) {
		io = list_entry(unwritten.next, ext4_io_end_t, list);
		BUG_ON(!(io->flag & EXT4_IO_END_UNWRITTEN));
		list_del_init(&io->list);

		err = ext4_end_io(io);
		/* Remember the first error but keep converting the rest. */
		if (unlikely(!ret && err))
			ret = err;
	}
	return ret;
}
  
  /*
2e8fa54e3   Jan Kara   ext4: split exten...
236
   * work on completed IO, to convert unwritten extents to extents
28a535f9a   Dmitry Monakhov   ext4: completed_i...
237
   */
2e8fa54e3   Jan Kara   ext4: split exten...
238
239
240
241
242
243
  void ext4_end_io_rsv_work(struct work_struct *work)
  {
  	struct ext4_inode_info *ei = container_of(work, struct ext4_inode_info,
  						  i_rsv_conversion_work);
  	ext4_do_flush_completed_IO(&ei->vfs_inode, &ei->i_rsv_conversion_list);
  }
bd2d0210c   Theodore Ts'o   ext4: use bio lay...
244
245
  ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags)
  {
b17b35ec1   Jesper Juhl   ext4: use kmem_ca...
246
  	ext4_io_end_t *io = kmem_cache_zalloc(io_end_cachep, flags);
bd2d0210c   Theodore Ts'o   ext4: use bio lay...
247
  	if (io) {
f7ad6d2e9   Theodore Ts'o   ext4: handle writ...
248
249
  		atomic_inc(&EXT4_I(inode)->i_ioend_count);
  		io->inode = inode;
bd2d0210c   Theodore Ts'o   ext4: use bio lay...
250
  		INIT_LIST_HEAD(&io->list);
97a851ed7   Jan Kara   ext4: use io_end ...
251
  		atomic_set(&io->count, 1);
bd2d0210c   Theodore Ts'o   ext4: use bio lay...
252
253
254
  	}
  	return io;
  }
97a851ed7   Jan Kara   ext4: use io_end ...
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
/*
 * Drop a reference to @io_end.  On the final put, either release it
 * immediately (no unwritten extents, or nothing written) or queue it on
 * the per-inode list for deferred unwritten-extent conversion.
 */
void ext4_put_io_end_defer(ext4_io_end_t *io_end)
{
	if (atomic_dec_and_test(&io_end->count)) {
		if (!(io_end->flag & EXT4_IO_END_UNWRITTEN) || !io_end->size) {
			ext4_release_io_end(io_end);
			return;
		}
		ext4_add_complete_io(io_end);
	}
}
  
/*
 * Drop a reference to @io_end.  On the final put, perform any pending
 * unwritten-extent conversion synchronously and free the structure.
 * Returns the conversion result (0 when no conversion was needed).
 */
int ext4_put_io_end(ext4_io_end_t *io_end)
{
	int err = 0;

	if (atomic_dec_and_test(&io_end->count)) {
		if (io_end->flag & EXT4_IO_END_UNWRITTEN) {
			err = ext4_convert_unwritten_extents(io_end->handle,
						io_end->inode, io_end->offset,
						io_end->size);
			io_end->handle = NULL;	/* consumed by the call above */
			ext4_clear_io_unwritten_flag(io_end);
		}
		ext4_release_io_end(io_end);
	}
	return err;
}
  
/* Take an extra reference to @io_end and return it. */
ext4_io_end_t *ext4_get_io_end(ext4_io_end_t *io_end)
{
	atomic_inc(&io_end->count);
	return io_end;
}
822dbba33   Jan Kara   ext4: fix warning...
288
/* BIO completion function for page writeback */
static void ext4_end_bio(struct bio *bio, int error)
{
	ext4_io_end_t *io_end = bio->bi_private;
	sector_t bi_sector = bio->bi_iter.bi_sector;

	BUG_ON(!io_end);
	bio->bi_end_io = NULL;
	/* An up-to-date bio completed fine regardless of @error. */
	if (test_bit(BIO_UPTODATE, &bio->bi_flags))
		error = 0;

	if (error) {
		struct inode *inode = io_end->inode;

		ext4_warning(inode->i_sb, "I/O error %d writing to inode %lu "
			     "(offset %llu size %ld starting block %llu)",
			     error, inode->i_ino,
			     (unsigned long long) io_end->offset,
			     (long) io_end->size,
			     (unsigned long long)
			     bi_sector >> (inode->i_blkbits - 9));
		mapping_set_error(inode->i_mapping, error);
	}

	if (io_end->flag & EXT4_IO_END_UNWRITTEN) {
		/*
		 * Link bio into list hanging from io_end. We have to do it
		 * atomically as bio completions can be racing against each
		 * other.
		 */
		bio->bi_private = xchg(&io_end->bio, bio);
		ext4_put_io_end_defer(io_end);
	} else {
		/*
		 * Drop io_end reference early. Inode can get freed once
		 * we finish the bio.
		 */
		ext4_put_io_end_defer(io_end);
		ext4_finish_bio(bio);
		bio_put(bio);
	}
}
  
/*
 * Submit the bio accumulated in @io (if any) and clear io->io_bio so
 * that a fresh bio can be started by the next add.
 */
void ext4_io_submit(struct ext4_io_submit *io)
{
	struct bio *bio = io->io_bio;

	if (bio) {
		/*
		 * Hold our own reference across submit_bio() so the flag
		 * check below is safe even if the bio completes immediately.
		 */
		bio_get(io->io_bio);
		submit_bio(io->io_op, io->io_bio);
		BUG_ON(bio_flagged(io->io_bio, BIO_EOPNOTSUPP));
		bio_put(io->io_bio);
	}
	io->io_bio = NULL;
}
  
  void ext4_io_submit_init(struct ext4_io_submit *io,
  			 struct writeback_control *wbc)
  {
  	io->io_op = (wbc->sync_mode == WB_SYNC_ALL ?  WRITE_SYNC : WRITE);
  	io->io_bio = NULL;
7dc576158   Peter Huewe   ext4: Fix sparse ...
348
  	io->io_end = NULL;
bd2d0210c   Theodore Ts'o   ext4: use bio lay...
349
  }
97a851ed7   Jan Kara   ext4: use io_end ...
350
351
/*
 * Allocate a new bio for @bh's device, point it at @bh's block and wire
 * it to the current io_end.  Returns 0 on success or -ENOMEM.
 */
static int io_submit_init_bio(struct ext4_io_submit *io,
			      struct buffer_head *bh)
{
	int nvecs = bio_get_nr_vecs(bh->b_bdev);
	struct bio *bio;

	bio = bio_alloc(GFP_NOIO, min(nvecs, BIO_MAX_PAGES));
	if (!bio)
		return -ENOMEM;
	/* Translate the block number into a 512-byte sector address. */
	bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
	bio->bi_bdev = bh->b_bdev;
	bio->bi_end_io = ext4_end_bio;
	/* The bio holds its own reference to the io_end. */
	bio->bi_private = ext4_get_io_end(io->io_end);
	io->io_bio = bio;
	io->io_next_block = bh->b_blocknr;
	return 0;
}
  
/*
 * Add @bh to the bio being built in @io, starting a new bio when the
 * buffer is not contiguous with the previous one or the current bio is
 * full.  Returns 0 on success or -ENOMEM.
 */
static int io_submit_add_bh(struct ext4_io_submit *io,
			    struct inode *inode,
			    struct buffer_head *bh)
{
	int ret;

	if (io->io_bio && bh->b_blocknr != io->io_next_block) {
submit_and_retry:
		ext4_io_submit(io);
	}
	if (io->io_bio == NULL) {
		ret = io_submit_init_bio(io, bh);
		if (ret)
			return ret;
	}
	/*
	 * A short add means the bio is full: submit it and retry this
	 * buffer with a fresh bio (jump clears io_bio via ext4_io_submit).
	 */
	ret = bio_add_page(io->io_bio, bh->b_page, bh->b_size, bh_offset(bh));
	if (ret != bh->b_size)
		goto submit_and_retry;
	io->io_next_block++;
	return 0;
}
  
/*
 * Write out up to @len bytes of @page through the bio layer, batching
 * contiguous buffers into the bio accumulated in @io.  The page must be
 * locked and not already under writeback; writeback is started here and
 * ended from bio completion (or below, if nothing was submitted).
 * Returns 0 on success or -ENOMEM.
 */
int ext4_bio_write_page(struct ext4_io_submit *io,
			struct page *page,
			int len,
			struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	unsigned block_start, blocksize;
	struct buffer_head *bh, *head;
	int ret = 0;
	int nr_submitted = 0;	/* buffers actually handed to a bio */

	blocksize = 1 << inode->i_blkbits;
	BUG_ON(!PageLocked(page));
	BUG_ON(PageWriteback(page));

	set_page_writeback(page);
	ClearPageError(page);

	/*
	 * In the first loop we prepare and mark buffers to submit. We have to
	 * mark all buffers in the page before submitting so that
	 * end_page_writeback() cannot be called from ext4_bio_end_io() when IO
	 * on the first buffer finishes and we are still working on submitting
	 * the second buffer.
	 */
	bh = head = page_buffers(page);
	do {
		block_start = bh_offset(bh);
		if (block_start >= len) {
			/*
			 * Comments copied from block_write_full_page_endio:
			 *
			 * The page straddles i_size.  It must be zeroed out on
			 * each and every writepage invocation because it may
			 * be mmapped.  "A file is mapped in multiples of the
			 * page size.  For a file that is not a multiple of
			 * the  page size, the remaining memory is zeroed when
			 * mapped, and writes to that region are not written
			 * out to the file."
			 */
			zero_user_segment(page, block_start,
					  block_start + blocksize);
			clear_buffer_dirty(bh);
			set_buffer_uptodate(bh);
			continue;
		}
		if (!buffer_dirty(bh) || buffer_delay(bh) ||
		    !buffer_mapped(bh) || buffer_unwritten(bh)) {
			/* A hole? We can safely clear the dirty bit */
			if (!buffer_mapped(bh))
				clear_buffer_dirty(bh);
			/* Flush any pending bio before skipping this buffer. */
			if (io->io_bio)
				ext4_io_submit(io);
			continue;
		}
		if (buffer_new(bh)) {
			clear_buffer_new(bh);
			unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
		}
		set_buffer_async_write(bh);
	} while ((bh = bh->b_this_page) != head);

	/* Now submit buffers to write */
	bh = head = page_buffers(page);
	do {
		if (!buffer_async_write(bh))
			continue;
		ret = io_submit_add_bh(io, inode, bh);
		if (ret) {
			/*
			 * We only get here on ENOMEM.  Not much else
			 * we can do but mark the page as dirty, and
			 * better luck next time.
			 */
			redirty_page_for_writepage(wbc, page);
			break;
		}
		nr_submitted++;
		clear_buffer_dirty(bh);
	} while ((bh = bh->b_this_page) != head);

	/* Error stopped previous loop? Clean up buffers... */
	if (ret) {
		do {
			clear_buffer_async_write(bh);
			bh = bh->b_this_page;
		} while (bh != head);
	}
	unlock_page(page);
	/* Nothing submitted - we have to end page writeback */
	if (!nr_submitted)
		end_page_writeback(page);
	return ret;
}