Commit ac5d156c78a68b39955ee9b09498ba93831c77d7 (1 parent: b743ba78ae)
Exists in smarc-l5.0.0_1.0.0-ga and in 5 other branches
f2fs: modify the number of issued pages to merge IOs
When testing f2fs on an SSD, I found that some 128-page IOs followed by a 1-page IO were issued by f2fs_write_node_pages. This means that there were some mishandled flows, which degrade performance.

The previous f2fs_write_node_pages determined the number of pages to be written, nr_to_write, as follows.

1. bio_get_nr_vecs returns 129 pages.
2. bio_alloc makes room for 128 pages.
3. The initial 128 pages go into one bio.
4. The existing bio is submitted, and a new bio is prepared for the last 1 page.
5. Finally, sync_node_pages submits the last 1-page bio.

The problem comes from the use of bio_get_nr_vecs, so this patch replaces it with max_hw_blocks, which uses queue_max_sectors.

Signed-off-by: Jaegeuk Kim <jaegeuk.kim@samsung.com>
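To make the arithmetic concrete, here is a minimal user-space sketch of the max_hw_blocks() calculation, assuming 512-byte sectors, 4 KiB f2fs blocks, and a queue_max_sectors() of 1024; these numbers are illustrative only and are not taken from the commit.

#include <stdio.h>

#define F2FS_LOG_SECTOR_SIZE	9	/* 512-byte sectors */

int main(void)
{
	unsigned int log_blocksize = 12;	/* 4 KiB blocks (assumed)         */
	unsigned int max_sectors = 1024;	/* queue_max_sectors(q) (assumed) */

	/* Same shift as the SECTOR_TO_BLOCK() macro added by this patch. */
	unsigned int max_hw_blocks = max_sectors >> (log_blocksize - F2FS_LOG_SECTOR_SIZE);

	/* 1024 >> 3 == 128: nr_to_write now matches the bio capacity, so the
	 * whole batch fits in a single bio. */
	printf("max_hw_blocks = %u\n", max_hw_blocks);
	return 0;
}

Under these assumptions the write budget equals the 128-page bio capacity, avoiding the 128+1 split described in the message, where bio_get_nr_vecs returned one page more than the bio could hold.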
Showing 3 changed files with 14 additions and 5 deletions
fs/f2fs/node.c
@@ -1171,7 +1171,6 @@
 				struct writeback_control *wbc)
 {
 	struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);
-	struct block_device *bdev = sbi->sb->s_bdev;
 	long nr_to_write = wbc->nr_to_write;
 
 	/* First check balancing cached NAT entries */
@@ -1185,10 +1184,9 @@
 		return 0;
 
 	/* if mounting is failed, skip writing node pages */
-	wbc->nr_to_write = bio_get_nr_vecs(bdev);
+	wbc->nr_to_write = max_hw_blocks(sbi);
 	sync_node_pages(sbi, 0, wbc);
-	wbc->nr_to_write = nr_to_write -
-			(bio_get_nr_vecs(bdev) - wbc->nr_to_write);
+	wbc->nr_to_write = nr_to_write - (max_hw_blocks(sbi) - wbc->nr_to_write);
 	return 0;
 }
 
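The nr_to_write bookkeeping in the second hunk may be easier to follow with concrete numbers. A minimal user-space sketch with assumed values (not taken from the commit): sync_node_pages() decrements wbc->nr_to_write as it writes, so the caller's remaining budget is its original budget minus the pages actually written in this pass.

#include <stdio.h>

int main(void)
{
	long nr_to_write   = 1024;	/* caller's budget on entry (assumed)       */
	long batch         = 128;	/* max_hw_blocks(sbi) (assumed)             */
	long left_in_batch = 0;		/* wbc->nr_to_write after sync_node_pages() */

	/* Pages written in this pass = batch - left_in_batch; subtract that
	 * from the original budget, mirroring the updated line above. */
	long remaining = nr_to_write - (batch - left_in_batch);

	printf("remaining budget = %ld\n", remaining);	/* 896 */
	return 0;
}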
fs/f2fs/segment.c
@@ -734,7 +734,7 @@
 		do_submit_bio(sbi, type, false);
 alloc_new:
 	if (sbi->bio[type] == NULL) {
-		sbi->bio[type] = f2fs_bio_alloc(bdev, bio_get_nr_vecs(bdev));
+		sbi->bio[type] = f2fs_bio_alloc(bdev, max_hw_blocks(sbi));
 		sbi->bio[type]->bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr);
 		/*
 		 * The end_io will be assigned at the sumbission phase.
fs/f2fs/segment.h
@@ -8,6 +8,8 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
+#include <linux/blkdev.h>
+
 /* constant macro */
 #define NULL_SEGNO		((unsigned int)(~0))
 #define NULL_SECNO		((unsigned int)(~0))
@@ -86,6 +88,8 @@
 
 #define SECTOR_FROM_BLOCK(sbi, blk_addr)				\
 	(blk_addr << ((sbi)->log_blocksize - F2FS_LOG_SECTOR_SIZE))
+#define SECTOR_TO_BLOCK(sbi, sectors)					\
+	(sectors >> ((sbi)->log_blocksize - F2FS_LOG_SECTOR_SIZE))
 
 /* during checkpoint, bio_private is used to synchronize the last bio */
 struct bio_private {
@@ -623,5 +627,12 @@
 	if (IS_CURSEC(sbi, secno) || (sbi->cur_victim_sec == secno))
 		return true;
 	return false;
+}
+
+static inline unsigned int max_hw_blocks(struct f2fs_sb_info *sbi)
+{
+	struct block_device *bdev = sbi->sb->s_bdev;
+	struct request_queue *q = bdev_get_queue(bdev);
+	return SECTOR_TO_BLOCK(sbi, queue_max_sectors(q));
 }
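For reference, a standalone user-space sketch of the two conversion macros under an assumed geometry (4 KiB blocks, 512-byte sectors); the sbi argument is dropped here, so these names are illustrative rather than the in-tree definitions.

#include <assert.h>
#include <stdio.h>

#define LOG_BLOCKSIZE		12	/* 4 KiB blocks (assumed)  */
#define F2FS_LOG_SECTOR_SIZE	9	/* 512-byte sectors        */

#define SECTOR_FROM_BLOCK(blk)	((blk) << (LOG_BLOCKSIZE - F2FS_LOG_SECTOR_SIZE))
#define SECTOR_TO_BLOCK(sec)	((sec) >> (LOG_BLOCKSIZE - F2FS_LOG_SECTOR_SIZE))

int main(void)
{
	/* Block -> sector -> block round-trips losslessly. */
	assert(SECTOR_TO_BLOCK(SECTOR_FROM_BLOCK(12345u)) == 12345u);

	/* Sector -> block truncates any partial block, which is the safe
	 * direction when sizing a bio from queue_max_sectors(). */
	printf("1023 sectors -> %u blocks\n", SECTOR_TO_BLOCK(1023u));	/* 127 */
	return 0;
}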