Commit 155130a4f7848b1aac439cab6bda1a175507c71c

Authored by Christoph Hellwig
Committed by Al Viro
1 parent 6e1db88d53

get rid of block_write_begin_newtrunc

Move the call to vmtruncate that gets rid of excess blocks beyond i_size
out into the callers, in preparation for the new truncate sequence, and
rename the non-truncating version to block_write_begin.

While we're at it, also remove several unused arguments to block_write_begin.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
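
For reference, the caller-side pattern this commit establishes looks roughly like the sketch below. It is a minimal illustration only: the foo_* names and foo_get_block are placeholders rather than identifiers from this commit, and the error path mirrors what the per-filesystem hunks in the diff actually do.

static int foo_write_begin(struct file *file, struct address_space *mapping,
			   loff_t pos, unsigned len, unsigned flags,
			   struct page **pagep, void **fsdata)
{
	int ret;

	/* No more *pagep = NULL priming, and no file/fsdata arguments. */
	ret = block_write_begin(mapping, pos, len, flags, pagep,
				foo_get_block);
	if (unlikely(ret)) {
		/*
		 * block_write_begin() no longer calls vmtruncate on failure,
		 * so the caller trims blocks instantiated beyond i_size.
		 */
		loff_t isize = mapping->host->i_size;

		if (pos + len > isize)
			vmtruncate(mapping->host, isize);
	}

	return ret;
}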

Showing 13 changed files with 103 additions and 91 deletions

fs/bfs/file.c
... ... @@ -168,9 +168,17 @@
168 168 loff_t pos, unsigned len, unsigned flags,
169 169 struct page **pagep, void **fsdata)
170 170 {
171   - *pagep = NULL;
172   - return block_write_begin(file, mapping, pos, len, flags,
173   - pagep, fsdata, bfs_get_block);
  171 + int ret;
  172 +
  173 + ret = block_write_begin(mapping, pos, len, flags, pagep,
  174 + bfs_get_block);
  175 + if (unlikely(ret)) {
  176 + loff_t isize = mapping->host->i_size;
  177 + if (pos + len > isize)
  178 + vmtruncate(mapping->host, isize);
  179 + }
  180 +
  181 + return ret;
174 182 }
175 183  
176 184 static sector_t bfs_bmap(struct address_space *mapping, sector_t block)
fs/block_dev.c
... ... @@ -308,9 +308,8 @@
308 308 loff_t pos, unsigned len, unsigned flags,
309 309 struct page **pagep, void **fsdata)
310 310 {
311   - *pagep = NULL;
312   - return block_write_begin_newtrunc(file, mapping, pos, len, flags,
313   - pagep, fsdata, blkdev_get_block);
  311 + return block_write_begin(mapping, pos, len, flags, pagep,
  312 + blkdev_get_block);
314 313 }
315 314  
316 315 static int blkdev_write_end(struct file *file, struct address_space *mapping,
fs/buffer.c
... ... @@ -1962,14 +1962,13 @@
1962 1962 EXPORT_SYMBOL(__block_write_begin);
1963 1963  
1964 1964 /*
1965   - * Filesystems implementing the new truncate sequence should use the
1966   - * _newtrunc postfix variant which won't incorrectly call vmtruncate.
  1965 + * block_write_begin takes care of the basic task of block allocation and
  1966 + * bringing partial write blocks uptodate first.
  1967 + *
1967 1968 * The filesystem needs to handle block truncation upon failure.
1968 1969 */
1969   -int block_write_begin_newtrunc(struct file *file, struct address_space *mapping,
1970   - loff_t pos, unsigned len, unsigned flags,
1971   - struct page **pagep, void **fsdata,
1972   - get_block_t *get_block)
  1970 +int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
  1971 + unsigned flags, struct page **pagep, get_block_t *get_block)
1973 1972 {
1974 1973 pgoff_t index = pos >> PAGE_CACHE_SHIFT;
1975 1974 struct page *page;
... ... @@ -1989,44 +1988,6 @@
1989 1988 *pagep = page;
1990 1989 return status;
1991 1990 }
1992   -EXPORT_SYMBOL(block_write_begin_newtrunc);
1993   -
1994   -/*
1995   - * block_write_begin takes care of the basic task of block allocation and
1996   - * bringing partial write blocks uptodate first.
1997   - *
1998   - * If *pagep is not NULL, then block_write_begin uses the locked page
1999   - * at *pagep rather than allocating its own. In this case, the page will
2000   - * not be unlocked or deallocated on failure.
2001   - */
2002   -int block_write_begin(struct file *file, struct address_space *mapping,
2003   - loff_t pos, unsigned len, unsigned flags,
2004   - struct page **pagep, void **fsdata,
2005   - get_block_t *get_block)
2006   -{
2007   - int ret;
2008   -
2009   - ret = block_write_begin_newtrunc(file, mapping, pos, len, flags,
2010   - pagep, fsdata, get_block);
2011   -
2012   - /*
2013   - * prepare_write() may have instantiated a few blocks
2014   - * outside i_size. Trim these off again. Don't need
2015   - * i_size_read because we hold i_mutex.
2016   - *
2017   - * Filesystems which pass down their own page also cannot
2018   - * call into vmtruncate here because it would lead to lock
2019   - * inversion problems (*pagep is locked). This is a further
2020   - * example of where the old truncate sequence is inadequate.
2021   - */
2022   - if (unlikely(ret) && *pagep == NULL) {
2023   - loff_t isize = mapping->host->i_size;
2024   - if (pos + len > isize)
2025   - vmtruncate(mapping->host, isize);
2026   - }
2027   -
2028   - return ret;
2029   -}
2030 1991 EXPORT_SYMBOL(block_write_begin);
2031 1992  
2032 1993 int block_write_end(struct file *file, struct address_space *mapping,
... ... @@ -2357,7 +2318,7 @@
2357 2318  
2358 2319 err = cont_expand_zero(file, mapping, pos, bytes);
2359 2320 if (err)
2360   - goto out;
  2321 + return err;
2361 2322  
2362 2323 zerofrom = *bytes & ~PAGE_CACHE_MASK;
2363 2324 if (pos+len > *bytes && zerofrom & (blocksize-1)) {
... ... @@ -2365,11 +2326,7 @@
2365 2326 (*bytes)++;
2366 2327 }
2367 2328  
2368   - *pagep = NULL;
2369   - err = block_write_begin_newtrunc(file, mapping, pos, len,
2370   - flags, pagep, fsdata, get_block);
2371   -out:
2372   - return err;
  2329 + return block_write_begin(mapping, pos, len, flags, pagep, get_block);
2373 2330 }
2374 2331 EXPORT_SYMBOL(cont_write_begin);
2375 2332  
... ... @@ -2511,8 +2468,8 @@
2511 2468 unlock_page(page);
2512 2469 page_cache_release(page);
2513 2470 *pagep = NULL;
2514   - return block_write_begin_newtrunc(NULL, mapping, pos, len,
2515   - flags, pagep, fsdata, get_block);
  2471 + return block_write_begin(mapping, pos, len, flags, pagep,
  2472 + get_block);
2516 2473 }
2517 2474  
2518 2475 if (PageMappedToDisk(page))
fs/ext2/inode.c
... ... @@ -772,9 +772,8 @@
772 772 {
773 773 int ret;
774 774  
775   - *pagep = NULL;
776   - ret = block_write_begin_newtrunc(file, mapping, pos, len, flags,
777   - pagep, fsdata, ext2_get_block);
  775 + ret = block_write_begin(mapping, pos, len, flags, pagep,
  776 + ext2_get_block);
778 777 if (ret < 0)
779 778 ext2_write_failed(mapping, pos + len);
780 779 return ret;
fs/minix/inode.c
... ... @@ -366,9 +366,17 @@
366 366 loff_t pos, unsigned len, unsigned flags,
367 367 struct page **pagep, void **fsdata)
368 368 {
369   - *pagep = NULL;
370   - return block_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
  369 + int ret;
  370 +
  371 + ret = block_write_begin(mapping, pos, len, flags, pagep,
371 372 minix_get_block);
  373 + if (unlikely(ret)) {
  374 + loff_t isize = mapping->host->i_size;
  375 + if (pos + len > isize)
  376 + vmtruncate(mapping->host, isize);
  377 + }
  378 +
  379 + return ret;
372 380 }
373 381  
374 382 static sector_t minix_bmap(struct address_space *mapping, sector_t block)
fs/nilfs2/inode.c
... ... @@ -197,11 +197,15 @@
197 197 if (unlikely(err))
198 198 return err;
199 199  
200   - *pagep = NULL;
201   - err = block_write_begin(file, mapping, pos, len, flags, pagep,
202   - fsdata, nilfs_get_block);
203   - if (unlikely(err))
  200 + err = block_write_begin(mapping, pos, len, flags, pagep,
  201 + nilfs_get_block);
  202 + if (unlikely(err)) {
  203 + loff_t isize = mapping->host->i_size;
  204 + if (pos + len > isize)
  205 + vmtruncate(mapping->host, isize);
  206 +
204 207 nilfs_transaction_abort(inode->i_sb);
  208 + }
205 209 return err;
206 210 }
207 211  
fs/nilfs2/recovery.c
... ... @@ -505,11 +505,14 @@
505 505 }
506 506  
507 507 pos = rb->blkoff << inode->i_blkbits;
508   - page = NULL;
509   - err = block_write_begin(NULL, inode->i_mapping, pos, blocksize,
510   - 0, &page, NULL, nilfs_get_block);
511   - if (unlikely(err))
  508 + err = block_write_begin(inode->i_mapping, pos, blocksize,
  509 + 0, &page, nilfs_get_block);
  510 + if (unlikely(err)) {
  511 + loff_t isize = inode->i_size;
  512 + if (pos + blocksize > isize)
  513 + vmtruncate(inode, isize);
512 514 goto failed_inode;
  515 + }
513 516  
514 517 err = nilfs_recovery_copy_block(sbi, rb, page);
515 518 if (unlikely(err))
fs/omfs/file.c
... ... @@ -312,9 +312,17 @@
312 312 loff_t pos, unsigned len, unsigned flags,
313 313 struct page **pagep, void **fsdata)
314 314 {
315   - *pagep = NULL;
316   - return block_write_begin(file, mapping, pos, len, flags,
317   - pagep, fsdata, omfs_get_block);
  315 + int ret;
  316 +
  317 + ret = block_write_begin(mapping, pos, len, flags, pagep,
  318 + omfs_get_block);
  319 + if (unlikely(ret)) {
  320 + loff_t isize = mapping->host->i_size;
  321 + if (pos + len > isize)
  322 + vmtruncate(mapping->host, isize);
  323 + }
  324 +
  325 + return ret;
318 326 }
319 327  
320 328 static sector_t omfs_bmap(struct address_space *mapping, sector_t block)
fs/sysv/itree.c
... ... @@ -468,9 +468,16 @@
468 468 loff_t pos, unsigned len, unsigned flags,
469 469 struct page **pagep, void **fsdata)
470 470 {
471   - *pagep = NULL;
472   - return block_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
473   - get_block);
  471 + int ret;
  472 +
  473 + ret = block_write_begin(mapping, pos, len, flags, pagep, get_block);
  474 + if (unlikely(ret)) {
  475 + loff_t isize = mapping->host->i_size;
  476 + if (pos + len > isize)
  477 + vmtruncate(mapping->host, isize);
  478 + }
  479 +
  480 + return ret;
474 481 }
475 482  
476 483 static sector_t sysv_bmap(struct address_space *mapping, sector_t block)
fs/udf/inode.c
... ... @@ -127,9 +127,16 @@
127 127 loff_t pos, unsigned len, unsigned flags,
128 128 struct page **pagep, void **fsdata)
129 129 {
130   - *pagep = NULL;
131   - return block_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
132   - udf_get_block);
  130 + int ret;
  131 +
  132 + ret = block_write_begin(mapping, pos, len, flags, pagep, udf_get_block);
  133 + if (unlikely(ret)) {
  134 + loff_t isize = mapping->host->i_size;
  135 + if (pos + len > isize)
  136 + vmtruncate(mapping->host, isize);
  137 + }
  138 +
  139 + return ret;
133 140 }
134 141  
135 142 static sector_t udf_bmap(struct address_space *mapping, sector_t block)
fs/ufs/inode.c
... ... @@ -567,9 +567,17 @@
567 567 loff_t pos, unsigned len, unsigned flags,
568 568 struct page **pagep, void **fsdata)
569 569 {
570   - *pagep = NULL;
571   - return block_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
  570 + int ret;
  571 +
  572 + ret = block_write_begin(mapping, pos, len, flags, pagep,
572 573 ufs_getfrag_block);
  574 + if (unlikely(ret)) {
  575 + loff_t isize = mapping->host->i_size;
  576 + if (pos + len > isize)
  577 + vmtruncate(mapping->host, isize);
  578 + }
  579 +
  580 + return ret;
573 581 }
574 582  
575 583 static sector_t ufs_bmap(struct address_space *mapping, sector_t block)
fs/xfs/linux-2.6/xfs_aops.c
... ... @@ -1504,9 +1504,17 @@
1504 1504 struct page **pagep,
1505 1505 void **fsdata)
1506 1506 {
1507   - *pagep = NULL;
1508   - return block_write_begin(file, mapping, pos, len, flags | AOP_FLAG_NOFS,
1509   - pagep, fsdata, xfs_get_blocks);
  1507 + int ret;
  1508 +
  1509 + ret = block_write_begin(mapping, pos, len, flags | AOP_FLAG_NOFS,
  1510 + pagep, xfs_get_blocks);
  1511 + if (unlikely(ret)) {
  1512 + loff_t isize = mapping->host->i_size;
  1513 + if (pos + len > isize)
  1514 + vmtruncate(mapping->host, isize);
  1515 + }
  1516 +
  1517 + return ret;
1510 1518 }
1511 1519  
1512 1520 STATIC sector_t
include/linux/buffer_head.h
... ... @@ -203,12 +203,8 @@
203 203 int block_read_full_page(struct page*, get_block_t*);
204 204 int block_is_partially_uptodate(struct page *page, read_descriptor_t *desc,
205 205 unsigned long from);
206   -int block_write_begin_newtrunc(struct file *, struct address_space *,
207   - loff_t, unsigned, unsigned,
208   - struct page **, void **, get_block_t*);
209   -int block_write_begin(struct file *, struct address_space *,
210   - loff_t, unsigned, unsigned,
211   - struct page **, void **, get_block_t*);
  206 +int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
  207 + unsigned flags, struct page **pagep, get_block_t *get_block);
212 208 int __block_write_begin(struct page *page, loff_t pos, unsigned len,
213 209 get_block_t *get_block);
214 210 int block_write_end(struct file *, struct address_space *,