fs/f2fs/data.c

/*
 * fs/f2fs/data.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/aio.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/prefetch.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include <trace/events/f2fs.h>
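
/*
 * Completion callback for read bios built below: mark each page up-to-date
 * on success (or flag it with an error), then unlock it and drop the bio.
 */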
static void f2fs_read_end_io(struct bio *bio, int err)
{
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;
		if (!err) {
			SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		unlock_page(page);
	}
	bio_put(bio);
}

static void f2fs_write_end_io(struct bio *bio, int err)
{
	struct f2fs_sb_info *sbi = bio->bi_private;
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;
		if (unlikely(err)) {
			SetPageError(page);
			set_bit(AS_EIO, &page->mapping->flags);
			f2fs_stop_checkpoint(sbi);
		}
		end_page_writeback(page);
		dec_page_count(sbi, F2FS_WRITEBACK);
	}

	if (sbi->wait_io) {
		complete(sbi->wait_io);
		sbi->wait_io = NULL;
	}
	if (!get_pages(sbi, F2FS_WRITEBACK) &&
			!list_empty(&sbi->cp_wait.task_list))
		wake_up(&sbi->cp_wait);

	bio_put(bio);
}

/*
 * Low-level block read/write IO operations.
 */
static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr,
				int npages, bool is_read)
{
	struct bio *bio;

	/* No failure on bio allocation */
	bio = bio_alloc(GFP_NOIO, npages);

	bio->bi_bdev = sbi->sb->s_bdev;
	bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr);
	bio->bi_end_io = is_read ? f2fs_read_end_io : f2fs_write_end_io;
	bio->bi_private = sbi;

	return bio;
}
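
/*
 * Submit the bio that has been merged so far, if any.  For META_FLUSH the
 * submitter waits for write completion to keep the checkpoint consistent.
 */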
static void __submit_merged_bio(struct f2fs_bio_info *io)
{
	struct f2fs_io_info *fio = &io->fio;
	int rw;

	if (!io->bio)
		return;

	rw = fio->rw;

	if (is_read_io(rw)) {
		trace_f2fs_submit_read_bio(io->sbi->sb, rw,
						fio->type, io->bio);
		submit_bio(rw, io->bio);
	} else {
		trace_f2fs_submit_write_bio(io->sbi->sb, rw,
						fio->type, io->bio);
		/*
		 * META_FLUSH is only issued by the checkpoint procedure, and
		 * we should wait for this metadata bio for FS consistency.
		 */
		if (fio->type == META_FLUSH) {
			DECLARE_COMPLETION_ONSTACK(wait);
			io->sbi->wait_io = &wait;
			submit_bio(rw, io->bio);
			wait_for_completion(&wait);
		} else {
			submit_bio(rw, io->bio);
		}
	}

	io->bio = NULL;
}

void f2fs_submit_merged_bio(struct f2fs_sb_info *sbi,
				enum page_type type, int rw)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(type);
	struct f2fs_bio_info *io;

	io = is_read_io(rw) ? &sbi->read_io : &sbi->write_io[btype];

	down_write(&io->io_rwsem);

	/* change META to META_FLUSH in the checkpoint procedure */
	if (type >= META_FLUSH) {
		io->fio.type = META_FLUSH;
		io->fio.rw = WRITE_FLUSH_FUA | REQ_META | REQ_PRIO;
	}
	__submit_merged_bio(io);
	up_write(&io->io_rwsem);
}

/*
 * Fill the locked page with data located in the block address.
 * Return unlocked page.
 */
int f2fs_submit_page_bio(struct f2fs_sb_info *sbi, struct page *page,
					block_t blk_addr, int rw)
{
	struct bio *bio;

	trace_f2fs_submit_page_bio(page, blk_addr, rw);

	/* Allocate a new bio */
	bio = __bio_alloc(sbi, blk_addr, 1, is_read_io(rw));

	if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
		bio_put(bio);
		f2fs_put_page(page, 1);
		return -EFAULT;
	}

	submit_bio(rw, bio);
	return 0;
}

void f2fs_submit_page_mbio(struct f2fs_sb_info *sbi, struct page *page,
			block_t blk_addr, struct f2fs_io_info *fio)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
	struct f2fs_bio_info *io;
	bool is_read = is_read_io(fio->rw);

	io = is_read ? &sbi->read_io : &sbi->write_io[btype];

	verify_block_addr(sbi, blk_addr);
	down_write(&io->io_rwsem);

	if (!is_read)
		inc_page_count(sbi, F2FS_WRITEBACK);

	if (io->bio && (io->last_block_in_bio != blk_addr - 1 ||
						io->fio.rw != fio->rw))
		__submit_merged_bio(io);
alloc_new:
	if (io->bio == NULL) {
		int bio_blocks = MAX_BIO_BLOCKS(max_hw_blocks(sbi));

		io->bio = __bio_alloc(sbi, blk_addr, bio_blocks, is_read);
		io->fio = *fio;
	}

	if (bio_add_page(io->bio, page, PAGE_CACHE_SIZE, 0) <
							PAGE_CACHE_SIZE) {
		__submit_merged_bio(io);
		goto alloc_new;
	}

	io->last_block_in_bio = blk_addr;
	up_write(&io->io_rwsem);
	trace_f2fs_submit_page_mbio(page, fio->rw, fio->type, blk_addr);
}

/*
 * Lock ordering for the change of data block address:
 * ->data_page
 *  ->node_page
 *    update block addresses in the node page
 */
static void __set_data_blkaddr(struct dnode_of_data *dn, block_t new_addr)
{
	struct f2fs_node *rn;
	__le32 *addr_array;
	struct page *node_page = dn->node_page;
	unsigned int ofs_in_node = dn->ofs_in_node;

	f2fs_wait_on_page_writeback(node_page, NODE);

	rn = F2FS_NODE(node_page);

	/* Get physical address of data block */
	addr_array = blkaddr_in_node(rn);
	addr_array[ofs_in_node] = cpu_to_le32(new_addr);
	set_page_dirty(node_page);
}

int reserve_new_block(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);

	if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
		return -EPERM;
	if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1)))
		return -ENOSPC;

	trace_f2fs_reserve_new_block(dn->inode, dn->nid, dn->ofs_in_node);

	__set_data_blkaddr(dn, NEW_ADDR);
	dn->data_blkaddr = NEW_ADDR;
	mark_inode_dirty(dn->inode);
	sync_inode_page(dn);
	return 0;
}
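
/*
 * Reserve a block at the given index: look up the dnode and, if the slot is
 * still a hole (NULL_ADDR), mark it NEW_ADDR via reserve_new_block().  The
 * dnode is put again on error, or when the caller did not pass in its own
 * inode page.
 */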
int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
{
	bool need_put = dn->inode_page ? false : true;
	int err;

	/* if inode_page exists, index should be zero */
	f2fs_bug_on(!need_put && index);

	err = get_dnode_of_data(dn, index, ALLOC_NODE);
	if (err)
		return err;

	if (dn->data_blkaddr == NULL_ADDR)
		err = reserve_new_block(dn);
	if (err || need_put)
		f2fs_put_dnode(dn);
	return err;
}
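
/*
 * Check whether the page offset is covered by the inode's single-extent
 * cache.  On a hit the buffer_head is mapped to the cached block address and
 * sized to the remaining length of the extent; returns 1 on hit, 0 on miss.
 */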
static int check_extent_cache(struct inode *inode, pgoff_t pgofs,
					struct buffer_head *bh_result)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	pgoff_t start_fofs, end_fofs;
	block_t start_blkaddr;

	if (is_inode_flag_set(fi, FI_NO_EXTENT))
		return 0;

	read_lock(&fi->ext.ext_lock);
	if (fi->ext.len == 0) {
		read_unlock(&fi->ext.ext_lock);
		return 0;
	}

	stat_inc_total_hit(inode->i_sb);

	start_fofs = fi->ext.fofs;
	end_fofs = fi->ext.fofs + fi->ext.len - 1;
	start_blkaddr = fi->ext.blk_addr;

	if (pgofs >= start_fofs && pgofs <= end_fofs) {
		unsigned int blkbits = inode->i_sb->s_blocksize_bits;
		size_t count;

		clear_buffer_new(bh_result);
		map_bh(bh_result, inode->i_sb,
				start_blkaddr + pgofs - start_fofs);
		count = end_fofs - pgofs + 1;
		if (count < (UINT_MAX >> blkbits))
			bh_result->b_size = (count << blkbits);
		else
			bh_result->b_size = UINT_MAX;

		stat_inc_read_hit(inode->i_sb);
		read_unlock(&fi->ext.ext_lock);
		return 1;
	}
	read_unlock(&fi->ext.ext_lock);
	return 0;
}

void update_extent_cache(block_t blk_addr, struct dnode_of_data *dn)
{
	struct f2fs_inode_info *fi = F2FS_I(dn->inode);
	pgoff_t fofs, start_fofs, end_fofs;
	block_t start_blkaddr, end_blkaddr;
	int need_update = true;

	f2fs_bug_on(blk_addr == NEW_ADDR);
	fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) +
							dn->ofs_in_node;

	/* Update the page address in the parent node */
	__set_data_blkaddr(dn, blk_addr);

	if (is_inode_flag_set(fi, FI_NO_EXTENT))
		return;

	write_lock(&fi->ext.ext_lock);

	start_fofs = fi->ext.fofs;
	end_fofs = fi->ext.fofs + fi->ext.len - 1;
	start_blkaddr = fi->ext.blk_addr;
	end_blkaddr = fi->ext.blk_addr + fi->ext.len - 1;

	/* Drop and initialize the matched extent */
	if (fi->ext.len == 1 && fofs == start_fofs)
		fi->ext.len = 0;

	/* Initial extent */
	if (fi->ext.len == 0) {
		if (blk_addr != NULL_ADDR) {
			fi->ext.fofs = fofs;
			fi->ext.blk_addr = blk_addr;
			fi->ext.len = 1;
		}
		goto end_update;
	}

	/* Front merge */
	if (fofs == start_fofs - 1 && blk_addr == start_blkaddr - 1) {
		fi->ext.fofs--;
		fi->ext.blk_addr--;
		fi->ext.len++;
		goto end_update;
	}

	/* Back merge */
	if (fofs == end_fofs + 1 && blk_addr == end_blkaddr + 1) {
		fi->ext.len++;
		goto end_update;
	}

	/* Split the existing extent */
	if (fi->ext.len > 1 &&
			fofs >= start_fofs && fofs <= end_fofs) {
		if ((end_fofs - fofs) < (fi->ext.len >> 1)) {
			fi->ext.len = fofs - start_fofs;
		} else {
			fi->ext.fofs = fofs + 1;
			fi->ext.blk_addr = start_blkaddr +
					fofs - start_fofs + 1;
			fi->ext.len -= fofs - start_fofs + 1;
		}
	} else {
		need_update = false;
	}

	/* Finally, if the extent is very fragmented, let's drop the cache. */
	if (fi->ext.len < F2FS_MIN_EXTENT_LEN) {
		fi->ext.len = 0;
		set_inode_flag(fi, FI_NO_EXTENT);
		need_update = true;
	}
end_update:
	write_unlock(&fi->ext.ext_lock);
	if (need_update)
		sync_inode_page(dn);
	return;
}
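
/*
 * Find the data page for the given index in the page cache, or read it from
 * the block recorded in the dnode.  With sync the read is issued as
 * READ_SYNC and waited for; otherwise it is submitted as readahead (READA).
 */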
struct page *find_data_page(struct inode *inode, pgoff_t index, bool sync)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	int err;

	page = find_get_page(mapping, index);
	if (page && PageUptodate(page))
		return page;
	f2fs_put_page(page, 0);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err)
		return ERR_PTR(err);
	f2fs_put_dnode(&dn);

	if (dn.data_blkaddr == NULL_ADDR)
		return ERR_PTR(-ENOENT);

	/* By fallocate(), there is no cached page, but with NEW_ADDR */
	if (unlikely(dn.data_blkaddr == NEW_ADDR))
		return ERR_PTR(-EINVAL);

	page = grab_cache_page_write_begin(mapping, index, AOP_FLAG_NOFS);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (PageUptodate(page)) {
		unlock_page(page);
		return page;
	}

	err = f2fs_submit_page_bio(sbi, page, dn.data_blkaddr,
					sync ? READ_SYNC : READA);
	if (err)
		return ERR_PTR(err);

	if (sync) {
		wait_on_page_locked(page);
		if (unlikely(!PageUptodate(page))) {
			f2fs_put_page(page, 0);
			return ERR_PTR(-EIO);
		}
	}
	return page;
}

/*
 * If it tries to access a hole, return an error.
 * Because, the callers, functions in dir.c and GC, should be able to know
 * whether this page exists or not.
 */
struct page *get_lock_data_page(struct inode *inode, pgoff_t index)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	int err;
repeat:
	page = grab_cache_page_write_begin(mapping, index, AOP_FLAG_NOFS);
	if (!page)
		return ERR_PTR(-ENOMEM);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err) {
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	}
	f2fs_put_dnode(&dn);

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-ENOENT);
	}

	if (PageUptodate(page))
		return page;

	/*
	 * A new dentry page is allocated but not able to be written, since its
	 * new inode page couldn't be allocated due to -ENOSPC.
	 * In that case, its blkaddr stays NEW_ADDR.
	 * see, f2fs_add_link -> get_new_data_page -> init_inode_metadata.
	 */
	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		SetPageUptodate(page);
		return page;
	}

	err = f2fs_submit_page_bio(sbi, page, dn.data_blkaddr, READ_SYNC);
	if (err)
		return ERR_PTR(err);

	lock_page(page);
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	if (unlikely(page->mapping != mapping)) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
	return page;
}

/*
 * Caller ensures that this data page is never allocated.
 * A new zero-filled data page is allocated in the page cache.
 *
 * Also, caller should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op().
 * Note that, ipage is set only by make_empty_dir.
 */
struct page *get_new_data_page(struct inode *inode,
		struct page *ipage, pgoff_t index, bool new_i_size)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	struct dnode_of_data dn;
	int err;

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = f2fs_reserve_block(&dn, index);
	if (err)
		return ERR_PTR(err);
repeat:
	page = grab_cache_page(mapping, index);
	if (!page) {
		err = -ENOMEM;
		goto put_err;
	}

	if (PageUptodate(page))
		return page;

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		SetPageUptodate(page);
	} else {
		err = f2fs_submit_page_bio(sbi, page, dn.data_blkaddr,
								READ_SYNC);
		if (err)
			goto put_err;

		lock_page(page);
		if (unlikely(!PageUptodate(page))) {
			f2fs_put_page(page, 1);
			err = -EIO;
			goto put_err;
		}
		if (unlikely(page->mapping != mapping)) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
	}

	if (new_i_size &&
		i_size_read(inode) < ((index + 1) << PAGE_CACHE_SHIFT)) {
		i_size_write(inode, ((index + 1) << PAGE_CACHE_SHIFT));
		/* Only the directory inode sets new_i_size */
		set_inode_flag(F2FS_I(inode), FI_UPDATE_DIR);
	}
	return page;

put_err:
	f2fs_put_dnode(&dn);
	return ERR_PTR(err);
}
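
/*
 * Allocate an on-disk block for the current dnode slot (used by
 * get_data_block() below when it has to create a block): charge one valid
 * block, grab a new address from CURSEG_WARM_DATA and record it in the node
 * page and in dn->data_blkaddr.
 */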
static int __allocate_data_block(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct f2fs_summary sum;
	block_t new_blkaddr;
	struct node_info ni;
	int type;

	if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
		return -EPERM;
	if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1)))
		return -ENOSPC;

	__set_data_blkaddr(dn, NEW_ADDR);
	dn->data_blkaddr = NEW_ADDR;

	get_node_info(sbi, dn->nid, &ni);
	set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);

	type = CURSEG_WARM_DATA;

	allocate_data_block(sbi, NULL, NULL_ADDR, &new_blkaddr, &sum, type);

	/* direct IO doesn't use extent cache to maximize the performance */
	set_inode_flag(F2FS_I(dn->inode), FI_NO_EXTENT);
	update_extent_cache(new_blkaddr, dn);
	clear_inode_flag(F2FS_I(dn->inode), FI_NO_EXTENT);

	dn->data_blkaddr = new_blkaddr;
	return 0;
}

/*
 * get_data_block() now supports readahead/bmap/rw direct_IO with mapped bh.
 * If original data blocks are allocated, then give them to blockdev.
 * Otherwise,
 *     a. preallocate requested block addresses
 *     b. do not use extent cache for better performance
 *     c. give the block addresses to blockdev
 */
static int get_data_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	unsigned int blkbits = inode->i_sb->s_blocksize_bits;
	unsigned maxblocks = bh_result->b_size >> blkbits;
	struct dnode_of_data dn;
	int mode = create ? ALLOC_NODE : LOOKUP_NODE_RA;
	pgoff_t pgofs, end_offset;
	int err = 0, ofs = 1;
	bool allocated = false;

	/* Get the page offset from the block offset(iblock) */
	pgofs = (pgoff_t)(iblock >> (PAGE_CACHE_SHIFT - blkbits));

	if (check_extent_cache(inode, pgofs, bh_result))
		goto out;

	if (create)
		f2fs_lock_op(sbi);

	/* When reading holes, we need its node page */
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, pgofs, mode);
	if (err) {
		if (err == -ENOENT)
			err = 0;
		goto unlock_out;
	}
	if (dn.data_blkaddr == NEW_ADDR)
		goto put_out;

	if (dn.data_blkaddr != NULL_ADDR) {
		map_bh(bh_result, inode->i_sb, dn.data_blkaddr);
	} else if (create) {
		err = __allocate_data_block(&dn);
		if (err)
			goto put_out;
		allocated = true;
		map_bh(bh_result, inode->i_sb, dn.data_blkaddr);
	} else {
		goto put_out;
	}

	end_offset = IS_INODE(dn.node_page) ?
			ADDRS_PER_INODE(F2FS_I(inode)) : ADDRS_PER_BLOCK;
	bh_result->b_size = (((size_t)1) << blkbits);
	dn.ofs_in_node++;
	pgofs++;

get_next:
	if (dn.ofs_in_node >= end_offset) {
		if (allocated)
			sync_inode_page(&dn);
		allocated = false;
		f2fs_put_dnode(&dn);

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = get_dnode_of_data(&dn, pgofs, mode);
		if (err) {
			if (err == -ENOENT)
				err = 0;
			goto unlock_out;
		}
		if (dn.data_blkaddr == NEW_ADDR)
			goto put_out;

		end_offset = IS_INODE(dn.node_page) ?
			ADDRS_PER_INODE(F2FS_I(inode)) : ADDRS_PER_BLOCK;
	}

	if (maxblocks > (bh_result->b_size >> blkbits)) {
		block_t blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);
		if (blkaddr == NULL_ADDR && create) {
			err = __allocate_data_block(&dn);
			if (err)
				goto sync_out;
			allocated = true;
			blkaddr = dn.data_blkaddr;
		}
		/* Give more consecutive addresses for the read ahead */
		if (blkaddr == (bh_result->b_blocknr + ofs)) {
			ofs++;
			dn.ofs_in_node++;
			pgofs++;
			bh_result->b_size += (((size_t)1) << blkbits);
			goto get_next;
		}
	}
sync_out:
	if (allocated)
		sync_inode_page(&dn);
put_out:
	f2fs_put_dnode(&dn);
unlock_out:
	if (create)
		f2fs_unlock_op(sbi);
out:
	trace_f2fs_get_data_block(inode, iblock, bh_result, err);
	return err;
}

static int f2fs_read_data_page(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	int ret;

	/* If the file has inline data, try to read it directly */
	if (f2fs_has_inline_data(inode))
		ret = f2fs_read_inline_data(inode, page);
	else
		ret = mpage_readpage(page, get_data_block);

	return ret;
}

static int f2fs_read_data_pages(struct file *file,
			struct address_space *mapping,
			struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = file->f_mapping->host;

	/* If the file has inline data, skip readpages */
	if (f2fs_has_inline_data(inode))
		return 0;

	return mpage_readpages(mapping, pages, nr_pages, get_data_block);
}
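
/*
 * Write one dirty data page: if the block already exists and an in-place
 * update is preferred, rewrite it at the old address; otherwise allocate a
 * new address via write_data_page() and update the extent cache.
 */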
int do_write_data_page(struct page *page, struct f2fs_io_info *fio)
{
	struct inode *inode = page->mapping->host;
	block_t old_blkaddr, new_blkaddr;
	struct dnode_of_data dn;
	int err = 0;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
	if (err)
		return err;

	old_blkaddr = dn.data_blkaddr;

	/* This page is already truncated */
	if (old_blkaddr == NULL_ADDR)
		goto out_writepage;

	set_page_writeback(page);

	/*
	 * If the current allocation needs SSR,
	 * it is better to do in-place writes for updated data.
	 */
	if (unlikely(old_blkaddr != NEW_ADDR &&
			!is_cold_data(page) &&
			need_inplace_update(inode))) {
		rewrite_data_page(page, old_blkaddr, fio);
	} else {
		write_data_page(page, &dn, &new_blkaddr, fio);
		update_extent_cache(new_blkaddr, &dn);
	}
out_writepage:
	f2fs_put_dnode(&dn);
	return err;
}

static int f2fs_write_data_page(struct page *page,
					struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = ((unsigned long long) i_size)
							>> PAGE_CACHE_SHIFT;
	unsigned offset = 0;
	bool need_balance_fs = false;
	int err = 0;
	struct f2fs_io_info fio = {
		.type = DATA,
		.rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE,
	};

	if (page->index < end_index)
		goto write;

	/*
	 * If the offset is out-of-range of file size,
	 * this page does not have to be written to disk.
	 */
	offset = i_size & (PAGE_CACHE_SIZE - 1);
	if ((page->index >= end_index + 1) || !offset) {
		inode_dec_dirty_dents(inode);
		goto out;
	}

	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
write:
	if (unlikely(sbi->por_doing))
		goto redirty_out;

	/* Dentry blocks are controlled by checkpoint */
	if (S_ISDIR(inode->i_mode)) {
		inode_dec_dirty_dents(inode);
		err = do_write_data_page(page, &fio);
		goto done;
	}

	if (!wbc->for_reclaim)
		need_balance_fs = true;
	else if (has_not_enough_free_secs(sbi, 0))
		goto redirty_out;

	f2fs_lock_op(sbi);
	if (f2fs_has_inline_data(inode) || f2fs_may_inline(inode))
		err = f2fs_write_inline_data(inode, page, offset);
	else
		err = do_write_data_page(page, &fio);
	f2fs_unlock_op(sbi);
done:
	if (err && err != -ENOENT)
		goto redirty_out;

	clear_cold_data(page);
out:
	unlock_page(page);
	if (need_balance_fs)
		f2fs_balance_fs(sbi);
	return 0;

redirty_out:
	wbc->pages_skipped++;
	account_page_redirty(page);
	set_page_dirty(page);
	return AOP_WRITEPAGE_ACTIVATE;
}
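
/*
 * Per-page callback handed to write_cache_pages() by f2fs_write_data_pages().
 */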
static int __f2fs_writepage(struct page *page, struct writeback_control *wbc,
			void *data)
{
	struct address_space *mapping = data;
	int ret = mapping->a_ops->writepage(page, wbc);
	mapping_set_error(mapping, ret);
	return ret;
}

static int f2fs_write_data_pages(struct address_space *mapping,
			struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	bool locked = false;
	int ret;
	long diff;

	/* deal with chardevs and other special files */
	if (!mapping->a_ops->writepage)
		return 0;

	if (S_ISDIR(inode->i_mode) && wbc->sync_mode == WB_SYNC_NONE &&
			get_dirty_dents(inode) < nr_pages_to_skip(sbi, DATA))
		goto skip_write;

	diff = nr_pages_to_write(sbi, DATA, wbc);

	if (!S_ISDIR(inode->i_mode)) {
		mutex_lock(&sbi->writepages);
		locked = true;
	}
	ret = write_cache_pages(mapping, wbc, __f2fs_writepage, mapping);
	if (locked)
		mutex_unlock(&sbi->writepages);

	f2fs_submit_merged_bio(sbi, DATA, WRITE);

	remove_dirty_dir_inode(inode);

	wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);
	return ret;

skip_write:
	wbc->pages_skipped += get_dirty_dents(inode);
	return 0;
}

static int f2fs_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct page *page;
	pgoff_t index = ((unsigned long long) pos) >> PAGE_CACHE_SHIFT;
	struct dnode_of_data dn;
	int err = 0;

	f2fs_balance_fs(sbi);
repeat:
	err = f2fs_convert_inline_data(inode, pos + len);
	if (err)
		return err;

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;
	*pagep = page;

	if (f2fs_has_inline_data(inode) && (pos + len) <= MAX_INLINE_DATA)
		goto inline_data;

	f2fs_lock_op(sbi);
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_reserve_block(&dn, index);
	f2fs_unlock_op(sbi);

	if (err) {
		f2fs_put_page(page, 1);
		return err;
	}
inline_data:
	if ((len == PAGE_CACHE_SIZE) || PageUptodate(page))
		return 0;

	if ((pos & PAGE_CACHE_MASK) >= i_size_read(inode)) {
		unsigned start = pos & (PAGE_CACHE_SIZE - 1);
		unsigned end = start + len;

		/* Reading beyond i_size is simple: memset to zero */
		zero_user_segments(page, 0, start, end, PAGE_CACHE_SIZE);
		goto out;
	}

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
	} else {
		if (f2fs_has_inline_data(inode)) {
			err = f2fs_read_inline_data(inode, page);
			if (err) {
				page_cache_release(page);
				return err;
			}
		} else {
			err = f2fs_submit_page_bio(sbi, page, dn.data_blkaddr,
							READ_SYNC);
			if (err)
				return err;
		}

		lock_page(page);
		if (unlikely(!PageUptodate(page))) {
			f2fs_put_page(page, 1);
			return -EIO;
		}
		if (unlikely(page->mapping != mapping)) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
	}
out:
	SetPageUptodate(page);
	clear_cold_data(page);
	return 0;
}
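
/*
 * ->write_end: mark the page up-to-date and dirty, extend i_size if the copy
 * went past it, and drop the page reference taken in f2fs_write_begin().
 */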
static int f2fs_write_end(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;

	SetPageUptodate(page);
	set_page_dirty(page);

	if (pos + copied > i_size_read(inode)) {
		i_size_write(inode, pos + copied);
		mark_inode_dirty(inode);
		update_inode_page(inode);
	}

	f2fs_put_page(page, 1);
	return copied;
}
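
/*
 * Direct I/O writes must be block-aligned: both the file offset and every
 * iovec length need to be a multiple of the block size.  Reads are not
 * checked here.
 */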
static int check_direct_IO(struct inode *inode, int rw,
		const struct iovec *iov, loff_t offset, unsigned long nr_segs)
{
	unsigned blocksize_mask = inode->i_sb->s_blocksize - 1;
	int i;

	if (rw == READ)
		return 0;

	if (offset & blocksize_mask)
		return -EINVAL;

	for (i = 0; i < nr_segs; i++)
		if (iov[i].iov_len & blocksize_mask)
			return -EINVAL;
	return 0;
}

static ssize_t f2fs_direct_IO(int rw, struct kiocb *iocb,
		const struct iovec *iov, loff_t offset, unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;

	/* Let buffer I/O handle the inline data case. */
	if (f2fs_has_inline_data(inode))
		return 0;

	if (check_direct_IO(inode, rw, iov, offset, nr_segs))
		return 0;

	return blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
							get_data_block);
}

static void f2fs_invalidate_data_page(struct page *page, unsigned int offset,
				      unsigned int length)
{
	struct inode *inode = page->mapping->host;

	if (PageDirty(page))
		inode_dec_dirty_dents(inode);
	ClearPagePrivate(page);
}

static int f2fs_release_data_page(struct page *page, gfp_t wait)
{
	ClearPagePrivate(page);
	return 1;
}

static int f2fs_set_data_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;

	trace_f2fs_set_page_dirty(page, DATA);

	SetPageUptodate(page);
	mark_inode_dirty(inode);

	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		set_dirty_dir_page(inode, page);
		return 1;
	}
	return 0;
}

static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, get_data_block);
}
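
/* Address space operations for f2fs data pages */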
const struct address_space_operations f2fs_dblock_aops = {
	.readpage = f2fs_read_data_page,
	.readpages = f2fs_read_data_pages,
	.writepage = f2fs_write_data_page,
	.writepages = f2fs_write_data_pages,
	.write_begin = f2fs_write_begin,
	.write_end = f2fs_write_end,
	.set_page_dirty = f2fs_set_data_page_dirty,
	.invalidatepage = f2fs_invalidate_data_page,
	.releasepage = f2fs_release_data_page,
	.direct_IO = f2fs_direct_IO,
	.bmap = f2fs_bmap,
};