Commit 29a814d2ee0e43c2980f33f91c1311ec06c0aa35
Committed by Theodore Ts'o
1 parent 87c89c232c
Exists in master and in 39 other branches
vfs: add hooks for ext4's delayed allocation support
Export mpage_bio_submit() and __mpage_writepage() for the benefit of ext4's delayed allocation support. Also change __block_write_full_page() so that if a buffer has the BH_Delay flag set, it calls get_block() to get the physical block allocated, just as in the !BH_Mapped case.

Signed-off-by: Alex Tomas <alex@clusterfs.com>
Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
Showing 3 changed files with 20 additions and 11 deletions.
fs/buffer.c
@@ -1691,11 +1691,13 @@
 			 */
 			clear_buffer_dirty(bh);
 			set_buffer_uptodate(bh);
-		} else if (!buffer_mapped(bh) && buffer_dirty(bh)) {
+		} else if ((!buffer_mapped(bh) || buffer_delay(bh)) &&
+			   buffer_dirty(bh)) {
 			WARN_ON(bh->b_size != blocksize);
 			err = get_block(inode, block, bh, 1);
 			if (err)
 				goto recover;
+			clear_buffer_delay(bh);
 			if (buffer_new(bh)) {
 				/* blockdev mappings never come here */
 				clear_buffer_new(bh);
@@ -1774,7 +1776,8 @@
 	bh = head;
 	/* Recovery: lock and submit the mapped buffers */
 	do {
-		if (buffer_mapped(bh) && buffer_dirty(bh)) {
+		if (buffer_mapped(bh) && buffer_dirty(bh) &&
+		    !buffer_delay(bh)) {
			lock_buffer(bh);
			mark_buffer_async_write(bh);
		} else {
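With this change, __block_write_full_page() treats a dirty buffer carrying BH_Delay like an unmapped one: the physical block is only allocated when get_block() is called at writeback time. Below is a minimal sketch of the write-side half of that scheme, assuming a hypothetical filesystem helper (fs_da_get_block_prep is an invented name, not ext4's actual implementation):

#include <linux/fs.h>
#include <linux/buffer_head.h>

/*
 * Hypothetical get_block_t used on the buffered-write path by a
 * delayed-allocation filesystem.  It does not allocate a block;
 * it only tags the buffer BH_Delay (and maps it to "block 0" so
 * b_bdev is valid), so that __block_write_full_page() will call
 * get_block() again at writeback time to do the real allocation.
 */
static int fs_da_get_block_prep(struct inode *inode, sector_t iblock,
				struct buffer_head *bh_result, int create)
{
	/*
	 * A real filesystem would first check whether the block is
	 * already allocated and reserve space/quota for the delayed
	 * write here.
	 */
	map_bh(bh_result, inode->i_sb, 0);	/* sets b_bdev, no real block yet */
	set_buffer_new(bh_result);
	set_buffer_delay(bh_result);
	return 0;
}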
fs/mpage.c
@@ -82,7 +82,7 @@
 	bio_put(bio);
 }
 
-static struct bio *mpage_bio_submit(int rw, struct bio *bio)
+struct bio *mpage_bio_submit(int rw, struct bio *bio)
 {
 	bio->bi_end_io = mpage_end_io_read;
 	if (rw == WRITE)
@@ -90,6 +90,7 @@
 	submit_bio(rw, bio);
 	return NULL;
 }
+EXPORT_SYMBOL(mpage_bio_submit);
 
 static struct bio *
 mpage_alloc(struct block_device *bdev,
@@ -435,15 +436,9 @@
  * written, so it can intelligently allocate a suitably-sized BIO. For now,
  * just allocate full-size (16-page) BIOs.
  */
-struct mpage_data {
-	struct bio *bio;
-	sector_t last_block_in_bio;
-	get_block_t *get_block;
-	unsigned use_writepage;
-};
 
-static int __mpage_writepage(struct page *page, struct writeback_control *wbc,
-			void *data)
+int __mpage_writepage(struct page *page, struct writeback_control *wbc,
+		      void *data)
 {
 	struct mpage_data *mpd = data;
 	struct bio *bio = mpd->bio;
@@ -651,6 +646,7 @@
 	mpd->bio = bio;
 	return ret;
 }
+EXPORT_SYMBOL(__mpage_writepage);
 
 /**
  * mpage_writepages - walk the list of dirty pages of the given address space & writepage() all of them
include/linux/mpage.h
@@ -11,11 +11,21 @@
  */
 #ifdef CONFIG_BLOCK
 
+struct mpage_data {
+	struct bio *bio;
+	sector_t last_block_in_bio;
+	get_block_t *get_block;
+	unsigned use_writepage;
+};
+
 struct writeback_control;
 
+struct bio *mpage_bio_submit(int rw, struct bio *bio);
 int mpage_readpages(struct address_space *mapping, struct list_head *pages,
 				unsigned nr_pages, get_block_t get_block);
 int mpage_readpage(struct page *page, get_block_t get_block);
+int __mpage_writepage(struct page *page, struct writeback_control *wbc,
+		      void *data);
 int mpage_writepages(struct address_space *mapping,
 		struct writeback_control *wbc, get_block_t get_block);
 int mpage_writepage(struct page *page, get_block_t *get_block,
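With struct mpage_data and the two helpers now exported, a filesystem can drive the mpage writeback machinery from its own ->writepages method instead of going through mpage_writepages(). A minimal sketch, assuming a hypothetical filesystem (fs_writepages and fs_da_get_block are invented names; the structure mirrors what mpage_writepages() itself does, not ext4's actual code):

#include <linux/fs.h>
#include <linux/mpage.h>
#include <linux/writeback.h>

/* Assumed filesystem-specific get_block that understands delayed allocation. */
static int fs_da_get_block(struct inode *inode, sector_t iblock,
			   struct buffer_head *bh_result, int create);

static int fs_writepages(struct address_space *mapping,
			 struct writeback_control *wbc)
{
	struct mpage_data mpd = {
		.bio = NULL,
		.last_block_in_bio = 0,
		.get_block = fs_da_get_block,
		.use_writepage = 1,
	};
	int ret;

	/* Pack runs of contiguous dirty pages into large BIOs. */
	ret = write_cache_pages(mapping, wbc, __mpage_writepage, &mpd);

	/* Submit whatever is left in the last, partially built BIO. */
	if (mpd.bio)
		mpage_bio_submit(WRITE, mpd.bio);
	return ret;
}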