Commit cf2271e781cb16e1ca22be920010c2b64d90c338
1 parent
61e0f2d0a5
Exists in
ti-lsk-linux-4.1.y
and in
10 other branches
f2fs: avoid retrying wrong recovery routine when an error has occurred
This patch eliminates the propagation of recovery errors to the next mount. Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
Showing 4 changed files with 23 additions and 7 deletions Side-by-side Diff
fs/f2fs/checkpoint.c
... | ... | @@ -796,6 +796,7 @@ |
796 | 796 | static void do_checkpoint(struct f2fs_sb_info *sbi, bool is_umount) |
797 | 797 | { |
798 | 798 | struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); |
799 | + struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_WARM_NODE); | |
799 | 800 | nid_t last_nid = 0; |
800 | 801 | block_t start_blk; |
801 | 802 | struct page *cp_page; |
... | ... | @@ -809,7 +810,7 @@ |
809 | 810 | * This avoids to conduct wrong roll-forward operations and uses |
810 | 811 | * metapages, so should be called prior to sync_meta_pages below. |
811 | 812 | */ |
812 | - discard_next_dnode(sbi); | |
813 | + discard_next_dnode(sbi, NEXT_FREE_BLKADDR(sbi, curseg)); | |
813 | 814 | |
814 | 815 | /* Flush all the NAT/SIT pages */ |
815 | 816 | while (get_pages(sbi, F2FS_DIRTY_META)) |
fs/f2fs/f2fs.h
... | ... | @@ -1225,7 +1225,7 @@ |
1225 | 1225 | void invalidate_blocks(struct f2fs_sb_info *, block_t); |
1226 | 1226 | void refresh_sit_entry(struct f2fs_sb_info *, block_t, block_t); |
1227 | 1227 | void clear_prefree_segments(struct f2fs_sb_info *); |
1228 | -void discard_next_dnode(struct f2fs_sb_info *); | |
1228 | +void discard_next_dnode(struct f2fs_sb_info *, block_t); | |
1229 | 1229 | int npages_for_summary_flush(struct f2fs_sb_info *); |
1230 | 1230 | void allocate_new_segments(struct f2fs_sb_info *); |
1231 | 1231 | struct page *get_sum_page(struct f2fs_sb_info *, unsigned int); |
fs/f2fs/recovery.c
... | ... | @@ -434,7 +434,9 @@ |
434 | 434 | |
435 | 435 | int recover_fsync_data(struct f2fs_sb_info *sbi) |
436 | 436 | { |
437 | + struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_WARM_NODE); | |
437 | 438 | struct list_head inode_list; |
439 | + block_t blkaddr; | |
438 | 440 | int err; |
439 | 441 | bool need_writecp = false; |
440 | 442 | |
... | ... | @@ -447,6 +449,9 @@ |
447 | 449 | |
448 | 450 | /* step #1: find fsynced inode numbers */ |
449 | 451 | sbi->por_doing = true; |
452 | + | |
453 | + blkaddr = NEXT_FREE_BLKADDR(sbi, curseg); | |
454 | + | |
450 | 455 | err = find_fsync_dnodes(sbi, &inode_list); |
451 | 456 | if (err) |
452 | 457 | goto out; |
453 | 458 | |
454 | 459 | |
... | ... | @@ -462,9 +467,22 @@ |
462 | 467 | out: |
463 | 468 | destroy_fsync_dnodes(&inode_list); |
464 | 469 | kmem_cache_destroy(fsync_entry_slab); |
470 | + | |
471 | + if (err) { | |
472 | + truncate_inode_pages_final(NODE_MAPPING(sbi)); | |
473 | + truncate_inode_pages_final(META_MAPPING(sbi)); | |
474 | + } | |
475 | + | |
465 | 476 | sbi->por_doing = false; |
466 | - if (!err && need_writecp) | |
477 | + if (err) { | |
478 | + discard_next_dnode(sbi, blkaddr); | |
479 | + | |
480 | + /* Flush all the NAT/SIT pages */ | |
481 | + while (get_pages(sbi, F2FS_DIRTY_META)) | |
482 | + sync_meta_pages(sbi, META, LONG_MAX); | |
483 | + } else if (need_writecp) { | |
467 | 484 | write_checkpoint(sbi, false); |
485 | + } | |
468 | 486 | return err; |
469 | 487 | } |
fs/f2fs/segment.c
... | ... | @@ -379,11 +379,8 @@ |
379 | 379 | return blkdev_issue_discard(sbi->sb->s_bdev, start, len, GFP_NOFS, 0); |
380 | 380 | } |
381 | 381 | |
382 | -void discard_next_dnode(struct f2fs_sb_info *sbi) | |
382 | +void discard_next_dnode(struct f2fs_sb_info *sbi, block_t blkaddr) | |
383 | 383 | { |
384 | - struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_WARM_NODE); | |
385 | - block_t blkaddr = NEXT_FREE_BLKADDR(sbi, curseg); | |
386 | - | |
387 | 384 | if (f2fs_issue_discard(sbi, blkaddr, 1)) { |
388 | 385 | struct page *page = grab_meta_page(sbi, blkaddr); |
389 | 386 | /* zero-filled page */ |