Commit 090d2b185d8680fc26a2eaf4245d4171dcf4baf1

Authored by Pekka Enberg
Committed by Linus Torvalds
1 parent c330dda908

[PATCH] read_mapping_page for address space

Add read_mapping_page(), a helper for callers that pass
mapping->a_ops->readpage as the filler to read_cache_page().  This removes
some duplication from filesystem code.

Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
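
In effect, the helper folds the readpage-as-filler idiom into one place: ->readpage takes a struct file * as its first argument while filler_t is declared with a void * cookie, so every call site needed a (filler_t *) cast. A minimal sketch of the conversion pattern (illustrative caller; the real helper definition is in the include/linux/pagemap.h hunk below):

        /* Before: each call site casts ->readpage to filler_t by hand. */
        page = read_cache_page(mapping, index,
                               (filler_t *)mapping->a_ops->readpage, NULL);

        /* After: the cast and the filler plumbing live in the helper. */
        page = read_mapping_page(mapping, index, NULL);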

Showing 22 changed files with 38 additions and 56 deletions

@@ -185,9 +185,7 @@
 
         _enter("{%lu},%lu", dir->i_ino, index);
 
-        page = read_cache_page(dir->i_mapping,index,
-                               (filler_t *) dir->i_mapping->a_ops->readpage,
-                               NULL);
+        page = read_mapping_page(dir->i_mapping, index, NULL);
         if (!IS_ERR(page)) {
                 wait_on_page_locked(page);
                 kmap(page);
@@ -63,7 +63,6 @@
 int afs_mntpt_check_symlink(struct afs_vnode *vnode)
 {
         struct page *page;
-        filler_t *filler;
         size_t size;
         char *buf;
         int ret;
@@ -71,10 +70,7 @@
         _enter("{%u,%u}", vnode->fid.vnode, vnode->fid.unique);
 
         /* read the contents of the symlink into the pagecache */
-        filler = (filler_t *) AFS_VNODE_TO_I(vnode)->i_mapping->a_ops->readpage;
-
-        page = read_cache_page(AFS_VNODE_TO_I(vnode)->i_mapping, 0,
-                               filler, NULL);
+        page = read_mapping_page(AFS_VNODE_TO_I(vnode)->i_mapping, 0, NULL);
         if (IS_ERR(page)) {
                 ret = PTR_ERR(page);
                 goto out;
@@ -160,7 +156,6 @@
         struct page *page = NULL;
         size_t size;
         char *buf, *devname = NULL, *options = NULL;
-        filler_t *filler;
         int ret;
 
         kenter("{%s}", mntpt->d_name.name);
@@ -182,9 +177,7 @@
                 goto error;
 
         /* read the contents of the AFS special symlink */
-        filler = (filler_t *)mntpt->d_inode->i_mapping->a_ops->readpage;
-
-        page = read_cache_page(mntpt->d_inode->i_mapping, 0, filler, NULL);
+        page = read_mapping_page(mntpt->d_inode->i_mapping, 0, NULL);
         if (IS_ERR(page)) {
                 ret = PTR_ERR(page);
                 goto error;
@@ -181,9 +181,7 @@
                 struct page *page = NULL;
 
                 if (blocknr + i < devsize) {
-                        page = read_cache_page(mapping, blocknr + i,
-                                (filler_t *)mapping->a_ops->readpage,
-                                NULL);
+                        page = read_mapping_page(mapping, blocknr + i, NULL);
                         /* synchronous error? */
                         if (IS_ERR(page))
                                 page = NULL;
@@ -159,8 +159,7 @@
 static struct page * ext2_get_page(struct inode *dir, unsigned long n)
 {
         struct address_space *mapping = dir->i_mapping;
-        struct page *page = read_cache_page(mapping, n,
-                                (filler_t*)mapping->a_ops->readpage, NULL);
+        struct page *page = read_mapping_page(mapping, n, NULL);
         if (!IS_ERR(page)) {
                 wait_on_page_locked(page);
                 kmap(page);
fs/freevxfs/vxfs_subr.c
@@ -71,8 +71,7 @@
 {
         struct page * pp;
 
-        pp = read_cache_page(mapping, n,
-                        (filler_t*)mapping->a_ops->readpage, NULL);
+        pp = read_mapping_page(mapping, n, NULL);
 
         if (!IS_ERR(pp)) {
                 wait_on_page_locked(pp);
@@ -280,7 +280,7 @@
         block = off >> PAGE_CACHE_SHIFT;
         node->page_offset = off & ~PAGE_CACHE_MASK;
         for (i = 0; i < tree->pages_per_bnode; i++) {
-                page = read_cache_page(mapping, block++, (filler_t *)mapping->a_ops->readpage, NULL);
+                page = read_mapping_page(mapping, block++, NULL);
                 if (IS_ERR(page))
                         goto fail;
                 if (PageError(page)) {
@@ -59,7 +59,7 @@
         unlock_new_inode(tree->inode);
 
         mapping = tree->inode->i_mapping;
-        page = read_cache_page(mapping, 0, (filler_t *)mapping->a_ops->readpage, NULL);
+        page = read_mapping_page(mapping, 0, NULL);
         if (IS_ERR(page))
                 goto free_tree;
 
@@ -31,8 +31,7 @@
         dprint(DBG_BITMAP, "block_allocate: %u,%u,%u\n", size, offset, len);
         mutex_lock(&HFSPLUS_SB(sb).alloc_file->i_mutex);
         mapping = HFSPLUS_SB(sb).alloc_file->i_mapping;
-        page = read_cache_page(mapping, offset / PAGE_CACHE_BITS,
-                               (filler_t *)mapping->a_ops->readpage, NULL);
+        page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS, NULL);
         pptr = kmap(page);
         curr = pptr + (offset & (PAGE_CACHE_BITS - 1)) / 32;
         i = offset % 32;
@@ -72,8 +71,8 @@
                 offset += PAGE_CACHE_BITS;
                 if (offset >= size)
                         break;
-                page = read_cache_page(mapping, offset / PAGE_CACHE_BITS,
-                                       (filler_t *)mapping->a_ops->readpage, NULL);
+                page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS,
+                                         NULL);
                 curr = pptr = kmap(page);
                 if ((size ^ offset) / PAGE_CACHE_BITS)
                         end = pptr + PAGE_CACHE_BITS / 32;
@@ -119,8 +118,8 @@
         set_page_dirty(page);
         kunmap(page);
         offset += PAGE_CACHE_BITS;
-        page = read_cache_page(mapping, offset / PAGE_CACHE_BITS,
-                               (filler_t *)mapping->a_ops->readpage, NULL);
+        page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS,
+                                 NULL);
         pptr = kmap(page);
         curr = pptr;
         end = pptr + PAGE_CACHE_BITS / 32;
@@ -167,7 +166,7 @@
         mutex_lock(&HFSPLUS_SB(sb).alloc_file->i_mutex);
         mapping = HFSPLUS_SB(sb).alloc_file->i_mapping;
         pnr = offset / PAGE_CACHE_BITS;
-        page = read_cache_page(mapping, pnr, (filler_t *)mapping->a_ops->readpage, NULL);
+        page = read_mapping_page(mapping, pnr, NULL);
         pptr = kmap(page);
         curr = pptr + (offset & (PAGE_CACHE_BITS - 1)) / 32;
         end = pptr + PAGE_CACHE_BITS / 32;
@@ -199,7 +198,7 @@
                         break;
                 set_page_dirty(page);
                 kunmap(page);
-                page = read_cache_page(mapping, ++pnr, (filler_t *)mapping->a_ops->readpage, NULL);
+                page = read_mapping_page(mapping, ++pnr, NULL);
                 pptr = kmap(page);
                 curr = pptr;
                 end = pptr + PAGE_CACHE_BITS / 32;
@@ -440,7 +440,7 @@
         block = off >> PAGE_CACHE_SHIFT;
         node->page_offset = off & ~PAGE_CACHE_MASK;
         for (i = 0; i < tree->pages_per_bnode; block++, i++) {
-                page = read_cache_page(mapping, block, (filler_t *)mapping->a_ops->readpage, NULL);
+                page = read_mapping_page(mapping, block, NULL);
                 if (IS_ERR(page))
                         goto fail;
                 if (PageError(page)) {
@@ -38,7 +38,7 @@
                 goto free_tree;
 
         mapping = tree->inode->i_mapping;
-        page = read_cache_page(mapping, 0, (filler_t *)mapping->a_ops->readpage, NULL);
+        page = read_mapping_page(mapping, 0, NULL);
         if (IS_ERR(page))
                 goto free_tree;
 
fs/jfs/jfs_metapage.c
@@ -632,10 +632,9 @@
                 }
                 SetPageUptodate(page);
         } else {
-                page = read_cache_page(mapping, page_index,
-                                       (filler_t *)mapping->a_ops->readpage, NULL);
+                page = read_mapping_page(mapping, page_index, NULL);
                 if (IS_ERR(page) || !PageUptodate(page)) {
-                        jfs_err("read_cache_page failed!");
+                        jfs_err("read_mapping_page failed!");
                         return NULL;
                 }
                 lock_page(page);
@@ -60,8 +60,7 @@
 static struct page * dir_get_page(struct inode *dir, unsigned long n)
 {
         struct address_space *mapping = dir->i_mapping;
-        struct page *page = read_cache_page(mapping, n,
-                                (filler_t*)mapping->a_ops->readpage, NULL);
+        struct page *page = read_mapping_page(mapping, n, NULL);
         if (!IS_ERR(page)) {
                 wait_on_page_locked(page);
                 kmap(page);
@@ -2577,8 +2577,7 @@
 {
         struct page * page;
         struct address_space *mapping = dentry->d_inode->i_mapping;
-        page = read_cache_page(mapping, 0, (filler_t *)mapping->a_ops->readpage,
-                               NULL);
+        page = read_mapping_page(mapping, 0, NULL);
         if (IS_ERR(page))
                 goto sync_fail;
         wait_on_page_locked(page);
@@ -86,8 +86,7 @@
 static inline struct page *ntfs_map_page(struct address_space *mapping,
                 unsigned long index)
 {
-        struct page *page = read_cache_page(mapping, index,
-                        (filler_t*)mapping->a_ops->readpage, NULL);
+        struct page *page = read_mapping_page(mapping, index, NULL);
 
         if (!IS_ERR(page)) {
                 wait_on_page_locked(page);
@@ -2529,8 +2529,7 @@
         end >>= PAGE_CACHE_SHIFT;
         /* If there is a first partial page, need to do it the slow way. */
         if (start_ofs) {
-                page = read_cache_page(mapping, idx,
-                                (filler_t*)mapping->a_ops->readpage, NULL);
+                page = read_mapping_page(mapping, idx, NULL);
                 if (IS_ERR(page)) {
                         ntfs_error(vol->sb, "Failed to read first partial "
                                         "page (sync error, index 0x%lx).", idx);
@@ -2600,8 +2599,7 @@
         }
         /* If there is a last partial page, need to do it the slow way. */
         if (end_ofs) {
-                page = read_cache_page(mapping, idx,
-                                (filler_t*)mapping->a_ops->readpage, NULL);
+                page = read_mapping_page(mapping, idx, NULL);
                 if (IS_ERR(page)) {
                         ntfs_error(vol->sb, "Failed to read last partial page "
                                         "(sync error, index 0x%lx).", idx);
@@ -231,8 +231,7 @@
          * Read the page. If the page is not present, this will zero
          * the uninitialized regions for us.
          */
-        page = read_cache_page(mapping, index,
-                        (filler_t*)mapping->a_ops->readpage, NULL);
+        page = read_mapping_page(mapping, index, NULL);
         if (IS_ERR(page)) {
                 err = PTR_ERR(page);
                 goto init_err_out;
@@ -64,8 +64,7 @@
 {
         struct page * page;
         struct address_space *mapping = dentry->d_inode->i_mapping;
-        page = read_cache_page(mapping, 0,
-                               (filler_t *)mapping->a_ops->readpage, NULL);
+        page = read_mapping_page(mapping, 0, NULL);
         if (IS_ERR(page))
                 goto sync_fail;
         wait_on_page_locked(page);
fs/partitions/check.c
@@ -499,8 +499,8 @@
         struct address_space *mapping = bdev->bd_inode->i_mapping;
         struct page *page;
 
-        page = read_cache_page(mapping, (pgoff_t)(n >> (PAGE_CACHE_SHIFT-9)),
-                               (filler_t *)mapping->a_ops->readpage, NULL);
+        page = read_mapping_page(mapping, (pgoff_t)(n >> (PAGE_CACHE_SHIFT-9)),
+                                 NULL);
         if (!IS_ERR(page)) {
                 wait_on_page_locked(page);
                 if (!PageUptodate(page))
@@ -452,8 +452,7 @@
         /* We can deadlock if we try to free dentries,
            and an unlink/rmdir has just occured - GFP_NOFS avoids this */
         mapping_set_gfp_mask(mapping, GFP_NOFS);
-        page = read_cache_page(mapping, n,
-                               (filler_t *) mapping->a_ops->readpage, NULL);
+        page = read_mapping_page(mapping, n, NULL);
         if (!IS_ERR(page)) {
                 wait_on_page_locked(page);
                 kmap(page);
@@ -53,8 +53,7 @@
 static struct page * dir_get_page(struct inode *dir, unsigned long n)
 {
         struct address_space *mapping = dir->i_mapping;
-        struct page *page = read_cache_page(mapping, n,
-                                (filler_t*)mapping->a_ops->readpage, NULL);
+        struct page *page = read_mapping_page(mapping, n, NULL);
         if (!IS_ERR(page)) {
                 wait_on_page_locked(page);
                 kmap(page);
include/linux/pagemap.h
@@ -99,6 +99,13 @@
 extern int read_cache_pages(struct address_space *mapping,
                 struct list_head *pages, filler_t *filler, void *data);
 
+static inline struct page *read_mapping_page(struct address_space *mapping,
+                                             unsigned long index, void *data)
+{
+        filler_t *filler = (filler_t *)mapping->a_ops->readpage;
+        return read_cache_page(mapping, index, filler, data);
+}
+
 int add_to_page_cache(struct page *page, struct address_space *mapping,
                 unsigned long index, gfp_t gfp_mask);
 int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
@@ -1477,8 +1477,7 @@
                 error = -EINVAL;
                 goto bad_swap;
         }
-        page = read_cache_page(mapping, 0,
-                        (filler_t *)mapping->a_ops->readpage, swap_file);
+        page = read_mapping_page(mapping, 0, swap_file);
         if (IS_ERR(page)) {
                 error = PTR_ERR(page);
                 goto bad_swap;