Blame view
fs/ext4/inode.c
135 KB
ac27a0ec1 [PATCH] ext4: ini... |
1 |
/* |
617ba13b3 [PATCH] ext4: ren... |
2 |
* linux/fs/ext4/inode.c |
ac27a0ec1 [PATCH] ext4: ini... |
3 4 5 6 7 8 9 10 11 12 13 14 |
* * Copyright (C) 1992, 1993, 1994, 1995 * Remy Card (card@masi.ibp.fr) * Laboratoire MASI - Institut Blaise Pascal * Universite Pierre et Marie Curie (Paris VI) * * from * * linux/fs/minix/inode.c * * Copyright (C) 1991, 1992 Linus Torvalds * |
ac27a0ec1 [PATCH] ext4: ini... |
15 16 17 |
* 64-bit file support on 64-bit platforms by Jakub Jelinek * (jj@sunsite.ms.mff.cuni.cz) * |
617ba13b3 [PATCH] ext4: ren... |
18 |
* Assorted race fixes, rewrite of ext4_get_block() by Al Viro, 2000 |
ac27a0ec1 [PATCH] ext4: ini... |
19 |
*/ |
ac27a0ec1 [PATCH] ext4: ini... |
20 21 |
#include <linux/fs.h> #include <linux/time.h> |
dab291af8 [PATCH] jbd2: ena... |
22 |
#include <linux/jbd2.h> |
ac27a0ec1 [PATCH] ext4: ini... |
23 24 25 26 27 28 |
#include <linux/highuid.h> #include <linux/pagemap.h> #include <linux/quotaops.h> #include <linux/string.h> #include <linux/buffer_head.h> #include <linux/writeback.h> |
64769240b ext4: Add delayed... |
29 |
#include <linux/pagevec.h> |
ac27a0ec1 [PATCH] ext4: ini... |
30 |
#include <linux/mpage.h> |
e83c1397c ext4: ensure fast... |
31 |
#include <linux/namei.h> |
ac27a0ec1 [PATCH] ext4: ini... |
32 33 |
#include <linux/uio.h> #include <linux/bio.h> |
4c0425ff6 ext4: Use end_io ... |
34 |
#include <linux/workqueue.h> |
744692dc0 ext4: use ext4_ge... |
35 |
#include <linux/kernel.h> |
6db26ffc9 fs/ext4/inode.c: ... |
36 |
#include <linux/printk.h> |
5a0e3ad6a include cleanup: ... |
37 |
#include <linux/slab.h> |
a8901d348 ext4: Use pr_warn... |
38 |
#include <linux/ratelimit.h> |
9bffad1ed ext4: convert ins... |
39 |
|
3dcf54515 ext4: move header... |
40 |
#include "ext4_jbd2.h" |
ac27a0ec1 [PATCH] ext4: ini... |
41 42 |
#include "xattr.h" #include "acl.h" |
9f125d641 ext4: move common... |
43 |
#include "truncate.h" |
ac27a0ec1 [PATCH] ext4: ini... |
44 |
|
9bffad1ed ext4: convert ins... |
45 |
#include <trace/events/ext4.h> |
a1d6cc563 ext4: Rework the ... |
46 |
#define MPAGE_DA_EXTENT_TAIL 0x01 |
678aaf481 ext4: Use new fra... |
47 48 49 |
static inline int ext4_begin_ordered_truncate(struct inode *inode, loff_t new_size) { |
7ff9c073d ext4: Add new ext... |
50 |
trace_ext4_begin_ordered_truncate(inode, new_size); |
8aefcd557 ext4: dynamically... |
51 52 53 54 55 56 57 58 59 60 61 |
/* * If jinode is zero, then we never opened the file for * writing, so there's no need to call * jbd2_journal_begin_ordered_truncate() since there's no * outstanding writes we need to flush. */ if (!EXT4_I(inode)->jinode) return 0; return jbd2_journal_begin_ordered_truncate(EXT4_JOURNAL(inode), EXT4_I(inode)->jinode, new_size); |
678aaf481 ext4: Use new fra... |
62 |
} |
64769240b ext4: Add delayed... |
63 |
static void ext4_invalidatepage(struct page *page, unsigned long offset); |
cb20d5188 ext4: inline ext4... |
64 65 66 67 68 69 |
static int noalloc_get_block_write(struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create); static int ext4_set_bh_endio(struct buffer_head *bh, struct inode *inode); static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate); static int __ext4_journalled_writepage(struct page *page, unsigned int len); static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh); |
5f163cc75 ext4: make more s... |
70 71 72 |
static int ext4_discard_partial_page_buffers_no_lock(handle_t *handle, struct inode *inode, struct page *page, loff_t from, loff_t length, int flags); |
64769240b ext4: Add delayed... |
73 |
|
ac27a0ec1 [PATCH] ext4: ini... |
74 75 76 |
/* * Test whether an inode is a fast symlink. */ |
617ba13b3 [PATCH] ext4: ren... |
77 |
static int ext4_inode_is_fast_symlink(struct inode *inode) |
ac27a0ec1 [PATCH] ext4: ini... |
78 |
{ |
617ba13b3 [PATCH] ext4: ren... |
79 |
int ea_blocks = EXT4_I(inode)->i_file_acl ? |
ac27a0ec1 [PATCH] ext4: ini... |
80 81 82 83 84 85 |
(inode->i_sb->s_blocksize >> 9) : 0; return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0); } /* |
ac27a0ec1 [PATCH] ext4: ini... |
86 87 88 89 |
* Restart the transaction associated with *handle. This does a commit, * so before we call here everything must be consistently dirtied against * this transaction. */ |
int ext4_truncate_restart_trans(handle_t *handle, struct inode *inode,
				 int nblocks)
{
	int ret;

	/*
	 * Drop i_data_sem to avoid deadlock with ext4_map_blocks.  At this
	 * moment, get_block can be called only for blocks inside i_size since
	 * page cache has been already dropped and writes are blocked by
	 * i_mutex. So we can safely drop the i_data_sem here.
	 */
	BUG_ON(EXT4_JOURNAL(inode) == NULL);
	jbd_debug(2, "restarting handle %p\n", handle);
	up_write(&EXT4_I(inode)->i_data_sem);
	ret = ext4_journal_restart(handle, nblocks);
	down_write(&EXT4_I(inode)->i_data_sem);
	/*
	 * Another task may have allocated from the inode's preallocation
	 * window while i_data_sem was released above; drop any remaining
	 * preallocations now that we hold the lock again.
	 */
	ext4_discard_preallocations(inode);

	return ret;
}

/*
 * Called at the last iput() if i_nlink is zero.
 */
void ext4_evict_inode(struct inode *inode)
{
	handle_t *handle;
	int err;

	trace_ext4_evict_inode(inode);

	/* Wait for any in-flight unwritten-extent conversion I/O. */
	ext4_ioend_wait(inode);

	if (inode->i_nlink) {
		/*
		 * When journalling data dirty buffers are tracked only in the
		 * journal. So although mm thinks everything is clean and
		 * ready for reaping the inode might still have some pages to
		 * write in the running transaction or waiting to be
		 * checkpointed. Thus calling jbd2_journal_invalidatepage()
		 * (via truncate_inode_pages()) to discard these buffers can
		 * cause data loss. Also even if we did not discard these
		 * buffers, we would have no way to find them after the inode
		 * is reaped and thus user could see stale data if he tries to
		 * read them before the transaction is checkpointed. So be
		 * careful and force everything to disk here... We use
		 * ei->i_datasync_tid to store the newest transaction
		 * containing inode's data.
		 *
		 * Note that directories do not have this problem because they
		 * don't use page cache.
		 */
		if (ext4_should_journal_data(inode) &&
		    (S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode))) {
			journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
			tid_t commit_tid = EXT4_I(inode)->i_datasync_tid;

			jbd2_log_start_commit(journal, commit_tid);
			jbd2_log_wait_commit(journal, commit_tid);
			filemap_write_and_wait(&inode->i_data);
		}
		truncate_inode_pages(&inode->i_data, 0);
		goto no_delete;
	}

	if (!is_bad_inode(inode))
		dquot_initialize(inode);

	if (ext4_should_order_data(inode))
		ext4_begin_ordered_truncate(inode, 0);
	truncate_inode_pages(&inode->i_data, 0);

	if (is_bad_inode(inode))
		goto no_delete;

	handle = ext4_journal_start(inode, ext4_blocks_for_truncate(inode)+3);
	if (IS_ERR(handle)) {
		ext4_std_error(inode->i_sb, PTR_ERR(handle));
		/*
		 * If we're going to skip the normal cleanup, we still need to
		 * make sure that the in-core orphan linked list is properly
		 * cleaned up.
		 */
		ext4_orphan_del(NULL, inode);
		goto no_delete;
	}

	if (IS_SYNC(inode))
		ext4_handle_sync(handle);
	inode->i_size = 0;
	err = ext4_mark_inode_dirty(handle, inode);
	if (err) {
		ext4_warning(inode->i_sb,
			     "couldn't mark inode dirty (err %d)", err);
		goto stop_handle;
	}
	if (inode->i_blocks)
		ext4_truncate(inode);

	/*
	 * ext4_ext_truncate() doesn't reserve any slop when it
	 * restarts journal transactions; therefore there may not be
	 * enough credits left in the handle to remove the inode from
	 * the orphan list and set the dtime field.
	 */
	if (!ext4_handle_has_enough_credits(handle, 3)) {
		err = ext4_journal_extend(handle, 3);
		if (err > 0)
			err = ext4_journal_restart(handle, 3);
		if (err != 0) {
			ext4_warning(inode->i_sb,
				     "couldn't extend journal (err %d)", err);
			/*
			 * stop_handle is also the bail-out target for a
			 * failed mark_inode_dirty above: stop the handle,
			 * unhook the in-core orphan entry, and give up.
			 */
		stop_handle:
			ext4_journal_stop(handle);
			ext4_orphan_del(NULL, inode);
			goto no_delete;
		}
	}

	/*
	 * Kill off the orphan record which ext4_truncate created.
	 * AKPM: I think this can be inside the above `if'.
	 * Note that ext4_orphan_del() has to be able to cope with the
	 * deletion of a non-existent orphan - this is because we don't
	 * know if ext4_truncate() actually created an orphan record.
	 * (Well, we could do this if we need to, but heck - it works)
	 */
	ext4_orphan_del(handle, inode);
	EXT4_I(inode)->i_dtime	= get_seconds();

	/*
	 * One subtle ordering requirement: if anything has gone wrong
	 * (transaction abort, IO errors, whatever), then we can still
	 * do these next steps (the fs will already have been marked as
	 * having errors), but we can't free the inode if the mark_dirty
	 * fails.
	 */
	if (ext4_mark_inode_dirty(handle, inode))
		/* If that failed, just do the required in-core inode clear. */
		ext4_clear_inode(inode);
	else
		ext4_free_inode(handle, inode);
	ext4_journal_stop(handle);
	return;
no_delete:
	ext4_clear_inode(inode);	/* We must guarantee clearing of inode... */
}
#ifdef CONFIG_QUOTA
/* Expose the inode's delalloc quota reservation counter to the quota core. */
qsize_t *ext4_get_reserved_space(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);

	return &ei->i_reserved_quota;
}
#endif
9d0be5023 ext4: Calculate m... |
240 |
|
12219aea6 ext4: Cleanup the... |
241 242 |
/* * Calculate the number of metadata blocks need to reserve |
9d0be5023 ext4: Calculate m... |
243 |
* to allocate a block located at @lblock |
12219aea6 ext4: Cleanup the... |
244 |
*/ |
01f49d0b9 ext4: use ext4_lb... |
245 |
static int ext4_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock) |
12219aea6 ext4: Cleanup the... |
246 |
{ |
12e9b8920 ext4: Use bitops ... |
247 |
if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) |
9d0be5023 ext4: Calculate m... |
248 |
return ext4_ext_calc_metadata_amount(inode, lblock); |
12219aea6 ext4: Cleanup the... |
249 |
|
8bb2b2471 ext4: rename ext4... |
250 |
return ext4_ind_calc_metadata_amount(inode, lblock); |
12219aea6 ext4: Cleanup the... |
251 |
} |
/*
 * Called with i_data_sem down, which is important since we can call
 * ext4_discard_preallocations() from here.
 */
void ext4_da_update_reserve_space(struct inode *inode,
					int used, int quota_claim)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);

	spin_lock(&ei->i_block_reservation_lock);
	trace_ext4_da_update_reserve_space(inode, used, quota_claim);
	if (unlikely(used > ei->i_reserved_data_blocks)) {
		/* Accounting bug: clamp and warn rather than underflow. */
		ext4_msg(inode->i_sb, KERN_NOTICE, "%s: ino %lu, used %d "
			 "with only %d reserved data blocks\n",
			 __func__, inode->i_ino, used,
			 ei->i_reserved_data_blocks);
		WARN_ON(1);
		used = ei->i_reserved_data_blocks;
	}

	/* Update per-inode reservations */
	ei->i_reserved_data_blocks -= used;
	ei->i_reserved_meta_blocks -= ei->i_allocated_meta_blocks;
	percpu_counter_sub(&sbi->s_dirtyclusters_counter,
			   used + ei->i_allocated_meta_blocks);
	ei->i_allocated_meta_blocks = 0;

	if (ei->i_reserved_data_blocks == 0) {
		/*
		 * We can release all of the reserved metadata blocks
		 * only when we have written all of the delayed
		 * allocation blocks.
		 */
		percpu_counter_sub(&sbi->s_dirtyclusters_counter,
				   ei->i_reserved_meta_blocks);
		ei->i_reserved_meta_blocks = 0;
		ei->i_da_metadata_calc_len = 0;
	}
	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);

	/* Update quota subsystem for data blocks */
	if (quota_claim)
		dquot_claim_block(inode, EXT4_C2B(sbi, used));
	else {
		/*
		 * We did fallocate with an offset that is already delayed
		 * allocated. So on delayed allocated writeback we should
		 * not re-claim the quota for fallocated blocks.
		 */
		dquot_release_reservation_block(inode, EXT4_C2B(sbi, used));
	}

	/*
	 * If we have done all the pending block allocations and if
	 * there aren't any writers on the inode, we can discard the
	 * inode's preallocations.
	 */
	if ((ei->i_reserved_data_blocks == 0) &&
	    (atomic_read(&inode->i_writecount) == 0))
		ext4_discard_preallocations(inode);
}
e29136f80 ext4: Enhance ext... |
315 |
static int __check_block_validity(struct inode *inode, const char *func, |
c398eda0e ext4: Pass line n... |
316 317 |
unsigned int line, struct ext4_map_blocks *map) |
6fd058f77 ext4: Add a compr... |
318 |
{ |
24676da46 ext4: Convert cal... |
319 320 |
if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), map->m_pblk, map->m_len)) { |
c398eda0e ext4: Pass line n... |
321 322 323 324 |
ext4_error_inode(inode, func, line, map->m_pblk, "lblock %lu mapped to illegal pblock " "(length %d)", (unsigned long) map->m_lblk, map->m_len); |
6fd058f77 ext4: Add a compr... |
325 326 327 328 |
return -EIO; } return 0; } |
e29136f80 ext4: Enhance ext... |
329 |
#define check_block_validity(inode, map) \ |
c398eda0e ext4: Pass line n... |
330 |
__check_block_validity((inode), __func__, __LINE__, (map)) |
e29136f80 ext4: Enhance ext... |
331 |
|
/*
 * Return the number of contiguous dirty pages in a given inode
 * starting at page frame idx.
 */
static pgoff_t ext4_num_dirty_pages(struct inode *inode, pgoff_t idx,
				    unsigned int max_pages)
{
	struct address_space *mapping = inode->i_mapping;
	pgoff_t	index;
	struct pagevec pvec;
	pgoff_t num = 0;
	int i, nr_pages, done = 0;

	if (max_pages == 0)
		return 0;
	pagevec_init(&pvec, 0);
	while (!done) {
		index = idx;
		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
					      PAGECACHE_TAG_DIRTY,
					      (pgoff_t)PAGEVEC_SIZE);
		if (nr_pages == 0)
			break;
		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			struct buffer_head *bh, *head;

			lock_page(page);
			/*
			 * Stop at the first page that is not dirty, is under
			 * writeback, changed mapping, or breaks contiguity
			 * (index != expected idx).
			 */
			if (unlikely(page->mapping != mapping) ||
			    !PageDirty(page) ||
			    PageWriteback(page) ||
			    page->index != idx) {
				done = 1;
				unlock_page(page);
				break;
			}
			if (page_has_buffers(page)) {
				bh = head = page_buffers(page);
				do {
					/*
					 * Only delayed/unwritten buffers
					 * count; anything else terminates
					 * the scan after this page.
					 */
					if (!buffer_delay(bh) &&
					    !buffer_unwritten(bh))
						done = 1;
					bh = bh->b_this_page;
				} while (!done && (bh != head));
			}
			unlock_page(page);
			if (done)
				break;
			idx++;
			num++;
			if (num >= max_pages) {
				done = 1;
				break;
			}
		}
		pagevec_release(&pvec);
	}
	return num;
}

/*
 * Sets the BH_Da_Mapped bit on the buffer heads corresponding to the given map.
 */
static void set_buffers_da_mapped(struct inode *inode,
				  struct ext4_map_blocks *map)
{
	struct address_space *mapping = inode->i_mapping;
	struct pagevec pvec;
	int i, nr_pages;
	pgoff_t index, end;

	/* Convert the logical block range into page-cache indices. */
	index = map->m_lblk >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
	end = (map->m_lblk + map->m_len - 1) >>
		(PAGE_CACHE_SHIFT - inode->i_blkbits);

	pagevec_init(&pvec, 0);
	while (index <= end) {
		nr_pages = pagevec_lookup(&pvec, mapping, index,
					  min(end - index + 1,
					      (pgoff_t)PAGEVEC_SIZE));
		if (nr_pages == 0)
			break;
		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			struct buffer_head *bh, *head;

			/* Page recycled or cleaned under us: stop this batch. */
			if (unlikely(page->mapping != mapping) ||
			    !PageDirty(page))
				break;

			if (page_has_buffers(page)) {
				bh = head = page_buffers(page);
				do {
					set_buffer_da_mapped(bh);
					bh = bh->b_this_page;
				} while (bh != head);
			}
			index++;
		}
		pagevec_release(&pvec);
	}
}

/*
 * The ext4_map_blocks() function tries to look up the requested blocks,
 * and returns if the blocks are already mapped.
 *
 * Otherwise it takes the write lock of the i_data_sem and allocate blocks
 * and store the allocated blocks in the result buffer head and mark it
 * mapped.
 *
 * If file type is extents based, it will call ext4_ext_map_blocks(),
 * Otherwise, call with ext4_ind_map_blocks() to handle indirect mapping
 * based files
 *
 * On success, it returns the number of blocks being mapped or allocate.
 * if create==0 and the blocks are pre-allocated and uninitialized block,
 * the result buffer head is unmapped. If the create ==1, it will make sure
 * the buffer head is mapped.
 *
 * It returns 0 if plain look up failed (blocks have not been allocated), in
 * that case, buffer head is unmapped
 *
 * It returns the error in case of allocation failure.
 */
int ext4_map_blocks(handle_t *handle, struct inode *inode,
		    struct ext4_map_blocks *map, int flags)
{
	int retval;

	map->m_flags = 0;
	ext_debug("ext4_map_blocks(): inode %lu, flag %d, max_blocks %u,"
		  "logical block %lu\n", inode->i_ino, flags, map->m_len,
		  (unsigned long) map->m_lblk);
	/*
	 * Try to see if we can get the block without requesting a new
	 * file system block.
	 */
	down_read((&EXT4_I(inode)->i_data_sem));
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		retval = ext4_ext_map_blocks(handle, inode, map, flags &
					     EXT4_GET_BLOCKS_KEEP_SIZE);
	} else {
		retval = ext4_ind_map_blocks(handle, inode, map, flags &
					     EXT4_GET_BLOCKS_KEEP_SIZE);
	}
	up_read((&EXT4_I(inode)->i_data_sem));

	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
		int ret = check_block_validity(inode, map);
		if (ret != 0)
			return ret;
	}

	/* If it is only a block(s) look up */
	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0)
		return retval;

	/*
	 * Returns if the blocks have already allocated
	 *
	 * Note that if blocks have been preallocated
	 * ext4_ext_get_block() returns the create = 0
	 * with buffer head unmapped.
	 */
	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED)
		return retval;

	/*
	 * When we call get_blocks without the create flag, the
	 * BH_Unwritten flag could have gotten set if the blocks
	 * requested were part of a uninitialized extent.  We need to
	 * clear this flag now that we are committed to convert all or
	 * part of the uninitialized extent to be an initialized
	 * extent.  This is because we need to avoid the combination
	 * of BH_Unwritten and BH_Mapped flags being simultaneously
	 * set on the buffer_head.
	 */
	map->m_flags &= ~EXT4_MAP_UNWRITTEN;

	/*
	 * New blocks allocate and/or writing to uninitialized extent
	 * will possibly result in updating i_data, so we take
	 * the write lock of i_data_sem, and call get_blocks()
	 * with create == 1 flag.
	 */
	down_write((&EXT4_I(inode)->i_data_sem));

	/*
	 * if the caller is from delayed allocation writeout path
	 * we have already reserved fs blocks for allocation
	 * let the underlying get_block() function know to
	 * avoid double accounting
	 */
	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
		ext4_set_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED);
	/*
	 * We need to check for EXT4 here because migrate
	 * could have changed the inode type in between
	 */
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		retval = ext4_ext_map_blocks(handle, inode, map, flags);
	} else {
		retval = ext4_ind_map_blocks(handle, inode, map, flags);

		if (retval > 0 && map->m_flags & EXT4_MAP_NEW) {
			/*
			 * We allocated new blocks which will result in
			 * i_data's format changing.  Force the migrate
			 * to fail by clearing migrate flags
			 */
			ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
		}

		/*
		 * Update reserved blocks/metadata blocks after successful
		 * block allocation which had been deferred till now. We don't
		 * support fallocate for non extent files. So we can update
		 * reserve space here.
		 */
		if ((retval > 0) &&
			(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE))
			ext4_da_update_reserve_space(inode, retval, 1);
	}
	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) {
		ext4_clear_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED);

		/* If we have successfully mapped the delayed allocated blocks,
		 * set the BH_Da_Mapped bit on them. Its important to do this
		 * under the protection of i_data_sem.
		 */
		if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED)
			set_buffers_da_mapped(inode, map);
	}

	up_write((&EXT4_I(inode)->i_data_sem));
	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
		/* Re-validate: the mapping may have changed while allocating. */
		int ret = check_block_validity(inode, map);
		if (ret != 0)
			return ret;
	}
	return retval;
}
/* Maximum number of blocks we map for direct IO at once. */
#define DIO_MAX_BLOCKS 4096

/*
 * Common get_block worker: translate (iblock, bh->b_size) into an
 * ext4_map_blocks request.  If @flags requests allocation and there is no
 * running handle (the direct-IO write path), a transaction is started here
 * and stopped before returning.
 */
static int _ext4_get_block(struct inode *inode, sector_t iblock,
			   struct buffer_head *bh, int flags)
{
	handle_t *handle = ext4_journal_current_handle();
	struct ext4_map_blocks map;
	int ret = 0, started = 0;
	int dio_credits;

	map.m_lblk = iblock;
	map.m_len = bh->b_size >> inode->i_blkbits;

	if (flags && !handle) {
		/* Direct IO write... */
		if (map.m_len > DIO_MAX_BLOCKS)
			map.m_len = DIO_MAX_BLOCKS;
		dio_credits = ext4_chunk_trans_blocks(inode, map.m_len);
		handle = ext4_journal_start(inode, dio_credits);
		if (IS_ERR(handle)) {
			ret = PTR_ERR(handle);
			return ret;
		}
		started = 1;
	}

	ret = ext4_map_blocks(handle, inode, &map, flags);
	if (ret > 0) {
		/* Mirror the mapping result into the caller's buffer_head. */
		map_bh(bh, inode->i_sb, map.m_pblk);
		bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags;
		bh->b_size = inode->i_sb->s_blocksize * map.m_len;
		ret = 0;
	}
	/* Only stop the handle we started ourselves. */
	if (started)
		ext4_journal_stop(handle);
	return ret;
}
2ed886852 ext4: Convert cal... |
610 611 612 613 614 615 |
int ext4_get_block(struct inode *inode, sector_t iblock, struct buffer_head *bh, int create) { return _ext4_get_block(inode, iblock, bh, create ? EXT4_GET_BLOCKS_CREATE : 0); } |
/*
 * `handle' can be NULL if create is zero
 */
struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
				ext4_lblk_t block, int create, int *errp)
{
	struct ext4_map_blocks map;
	struct buffer_head *bh;
	int fatal = 0, err;

	J_ASSERT(handle != NULL || create == 0);

	map.m_lblk = block;
	map.m_len = 1;
	err = ext4_map_blocks(handle, inode, &map,
			      create ? EXT4_GET_BLOCKS_CREATE : 0);

	/* err == 0 means not mapped and not created: return NULL, *errp = 0. */
	if (err < 0)
		*errp = err;
	if (err <= 0)
		return NULL;
	*errp = 0;

	bh = sb_getblk(inode->i_sb, map.m_pblk);
	if (!bh) {
		*errp = -EIO;
		return NULL;
	}
	if (map.m_flags & EXT4_MAP_NEW) {
		J_ASSERT(create != 0);
		J_ASSERT(handle != NULL);

		/*
		 * Now that we do not always journal data, we should
		 * keep in mind whether this should always journal the
		 * new buffer as metadata.  For now, regular file
		 * writes use ext4_get_block instead, so it's not a
		 * problem.
		 */
		lock_buffer(bh);
		BUFFER_TRACE(bh, "call get_create_access");
		fatal = ext4_journal_get_create_access(handle, bh);
		if (!fatal && !buffer_uptodate(bh)) {
			/* Freshly allocated block: zero-fill before use. */
			memset(bh->b_data, 0, inode->i_sb->s_blocksize);
			set_buffer_uptodate(bh);
		}
		unlock_buffer(bh);
		BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, inode, bh);
		if (!fatal)
			fatal = err;
	} else {
		BUFFER_TRACE(bh, "not a new buffer");
	}
	if (fatal) {
		*errp = fatal;
		brelse(bh);
		bh = NULL;
	}
	return bh;
}
617ba13b3 [PATCH] ext4: ren... |
676 |
struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode, |
725d26d3f ext4: Introduce e... |
677 |
ext4_lblk_t block, int create, int *err) |
ac27a0ec1 [PATCH] ext4: ini... |
678 |
{ |
af5bc92dd ext4: Fix whitesp... |
679 |
struct buffer_head *bh; |
ac27a0ec1 [PATCH] ext4: ini... |
680 |
|
617ba13b3 [PATCH] ext4: ren... |
681 |
bh = ext4_getblk(handle, inode, block, create, err); |
ac27a0ec1 [PATCH] ext4: ini... |
682 683 684 685 |
if (!bh) return bh; if (buffer_uptodate(bh)) return bh; |
65299a3b7 block: separate p... |
686 |
ll_rw_block(READ | REQ_META | REQ_PRIO, 1, &bh); |
ac27a0ec1 [PATCH] ext4: ini... |
687 688 689 690 691 692 693 |
wait_on_buffer(bh); if (buffer_uptodate(bh)) return bh; put_bh(bh); *err = -EIO; return NULL; } |
/*
 * walk_page_buffers - apply @fn to every buffer_head of a page that
 * overlaps the byte range [@from, @to).
 *
 * @handle:  journal handle passed through to @fn
 * @head:    first buffer_head of the page's circular buffer list
 * @from:    start offset within the page (inclusive)
 * @to:      end offset within the page (exclusive)
 * @partial: if non-NULL, set to 1 when some buffer outside the range
 *           is not uptodate (i.e. the page cannot be marked uptodate)
 * @fn:      callback invoked for each in-range buffer
 *
 * Stops early as soon as a callback returns non-zero and returns that
 * first error (0 on success).
 */
static int walk_page_buffers(handle_t *handle,
			     struct buffer_head *head,
			     unsigned from,
			     unsigned to,
			     int *partial,
			     int (*fn)(handle_t *handle,
				       struct buffer_head *bh))
{
	struct buffer_head *bh;
	unsigned block_start, block_end;
	unsigned blocksize = head->b_size;
	int err, ret = 0;
	struct buffer_head *next;

	/* Walk the circular list once; stop when back at head. */
	for (bh = head, block_start = 0;
	     ret == 0 && (bh != head || !block_start);
	     block_start = block_end, bh = next) {
		next = bh->b_this_page;
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			/* Buffer entirely outside [from, to). */
			if (partial && !buffer_uptodate(bh))
				*partial = 1;
			continue;
		}
		err = (*fn)(handle, bh);
		if (!ret)
			ret = err;
	}
	return ret;
}

/*
 * To preserve ordering, it is essential that the hole instantiation and
 * the data write be encapsulated in a single transaction.  We cannot
 * close off a transaction and start a new one between the ext4_get_block()
 * and the commit_write().  So doing the jbd2_journal_start at the start of
 * prepare_write() is the right place.
 *
 * Also, this function can nest inside ext4_writepage() ->
 * block_write_full_page(). In that case, we *know* that ext4_writepage()
 * has generated enough buffer credits to do the whole page.  So we won't
 * block on the journal in that case, which is good, because the caller may
 * be PF_MEMALLOC.
 *
 * By accident, ext4 can be reentered when a transaction is open via
 * quota file writes.  If we were to commit the transaction while thus
 * reentered, there can be a deadlock - we would be holding a quota
 * lock, and the commit would never complete if another thread had a
 * transaction open and was blocking on the quota lock - a ranking
 * violation.
 *
 * So what we do is to rely on the fact that jbd2_journal_stop/journal_start
 * will _not_ run commit under these circumstances because handle->h_ref
 * is elevated.  We'll still have enough credits for the tiny quotafile
 * write.
 */
/*
 * do_journal_get_write_access - walk_page_buffers() callback that gets
 * journal write access for one buffer, preserving its dirty state.
 *
 * The dirty bit is sampled and cleared *before* asking jbd2 for write
 * access (jbd2 would otherwise complain about integrity), then the
 * buffer is re-dirtied as journal metadata afterwards.  Order matters.
 */
static int do_journal_get_write_access(handle_t *handle,
				       struct buffer_head *bh)
{
	int dirty = buffer_dirty(bh);
	int ret;

	if (!buffer_mapped(bh) || buffer_freed(bh))
		return 0;
	/*
	 * __block_write_begin() could have dirtied some buffers. Clean
	 * the dirty bit as jbd2_journal_get_write_access() could complain
	 * otherwise about fs integrity issues. Setting of the dirty bit
	 * by __block_write_begin() isn't a real problem here as we clear
	 * the bit before releasing a page lock and thus writeback cannot
	 * ever write the buffer.
	 */
	if (dirty)
		clear_buffer_dirty(bh);
	ret = ext4_journal_get_write_access(handle, bh);
	if (!ret && dirty)
		ret = ext4_handle_dirty_metadata(handle, NULL, bh);
	return ret;
}
744692dc0 ext4: use ext4_ge... |
771 772 |
static int ext4_get_block_write(struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create); |
/*
 * ext4_write_begin - ->write_begin address_space operation.
 *
 * Starts a journal transaction sized for a full page write plus one
 * extra credit for a possible orphan-list addition, grabs/creates the
 * page, and instantiates buffers for [pos, pos+len).  On failure any
 * blocks instantiated beyond i_size are truncated away (via the orphan
 * list so a crash mid-way is also cleaned up).  ENOSPC is retried.
 *
 * NOTE: the transaction is deliberately started *before* the page is
 * locked (see the ordering comment above do_journal_get_write_access).
 */
static int ext4_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	int ret, needed_blocks;
	handle_t *handle;
	int retries = 0;
	struct page *page;
	pgoff_t index;
	unsigned from, to;

	trace_ext4_write_begin(inode, pos, len, flags);
	/*
	 * Reserve one block more for addition to orphan list in case
	 * we allocate blocks but write fails for some reason
	 */
	needed_blocks = ext4_writepage_trans_blocks(inode) + 1;
	index = pos >> PAGE_CACHE_SHIFT;
	from = pos & (PAGE_CACHE_SIZE - 1);
	to = from + len;

retry:
	handle = ext4_journal_start(inode, needed_blocks);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto out;
	}

	/* We cannot recurse into the filesystem as the transaction is already
	 * started */
	flags |= AOP_FLAG_NOFS;

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		ext4_journal_stop(handle);
		ret = -ENOMEM;
		goto out;
	}
	*pagep = page;

	/* dioread_nolock needs unwritten extents, hence its own get_block. */
	if (ext4_should_dioread_nolock(inode))
		ret = __block_write_begin(page, pos, len, ext4_get_block_write);
	else
		ret = __block_write_begin(page, pos, len, ext4_get_block);

	if (!ret && ext4_should_journal_data(inode)) {
		/* data=journal: buffers must join the transaction too. */
		ret = walk_page_buffers(handle, page_buffers(page),
				from, to, NULL, do_journal_get_write_access);
	}

	if (ret) {
		unlock_page(page);
		page_cache_release(page);
		/*
		 * __block_write_begin may have instantiated a few blocks
		 * outside i_size.  Trim these off again. Don't need
		 * i_size_read because we hold i_mutex.
		 *
		 * Add inode to orphan list in case we crash before
		 * truncate finishes
		 */
		if (pos + len > inode->i_size && ext4_can_truncate(inode))
			ext4_orphan_add(handle, inode);

		ext4_journal_stop(handle);
		if (pos + len > inode->i_size) {
			ext4_truncate_failed_write(inode);
			/*
			 * If truncate failed early the inode might
			 * still be on the orphan list; we need to
			 * make sure the inode is removed from the
			 * orphan list in that case.
			 */
			if (inode->i_nlink)
				ext4_orphan_del(NULL, inode);
		}
	}

	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
		goto retry;
out:
	return ret;
}
bfc1af650 ext4: convert to ... |
854 855 |
/* For write_end() in data=journal mode */ static int write_end_fn(handle_t *handle, struct buffer_head *bh) |
ac27a0ec1 [PATCH] ext4: ini... |
856 857 858 859 |
{ if (!buffer_mapped(bh) || buffer_freed(bh)) return 0; set_buffer_uptodate(bh); |
0390131ba ext4: Allow ext4 ... |
860 |
return ext4_handle_dirty_metadata(handle, NULL, bh); |
ac27a0ec1 [PATCH] ext4: ini... |
861 |
} |
/*
 * ext4_generic_write_end - common tail of the ->write_end handlers.
 *
 * Copies the written data into place via block_write_end(), updates
 * i_size and i_disksize while the page is still locked (so writeout
 * cannot zero beyond i_size underneath us), then unlocks/releases the
 * page and, only afterwards, marks the inode dirty.
 *
 * Returns the number of bytes actually copied.
 */
static int ext4_generic_write_end(struct file *file,
				  struct address_space *mapping,
				  loff_t pos, unsigned len, unsigned copied,
				  struct page *page, void *fsdata)
{
	int i_size_changed = 0;
	struct inode *inode = mapping->host;
	handle_t *handle = ext4_journal_current_handle();

	copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);

	/*
	 * No need to use i_size_read() here, the i_size
	 * cannot change under us because we hold i_mutex.
	 *
	 * But it's important to update i_size while still holding page lock:
	 * page writeout could otherwise come in and zero beyond i_size.
	 */
	if (pos + copied > inode->i_size) {
		i_size_write(inode, pos + copied);
		i_size_changed = 1;
	}

	if (pos + copied >  EXT4_I(inode)->i_disksize) {
		/* We need to mark inode dirty even if
		 * new_i_size is less than inode->i_size
		 * but greater than i_disksize. (hint: delalloc)
		 */
		ext4_update_i_disksize(inode, (pos + copied));
		i_size_changed = 1;
	}
	unlock_page(page);
	page_cache_release(page);

	/*
	 * Don't mark the inode dirty under page lock. First, it unnecessarily
	 * makes the holding time of page lock longer. Second, it forces lock
	 * ordering of page lock and transaction start for journaling
	 * filesystems.
	 */
	if (i_size_changed)
		ext4_mark_inode_dirty(handle, inode);

	return copied;
}
/*
 * We need to pick up the new inode size which generic_commit_write gave us
 * `file' can be NULL - eg, when called from page_symlink().
 *
 * ext4 never places buffers on inode->i_mapping->private_list.  metadata
 * buffers are managed internally.
 */
/*
 * ext4_ordered_write_end - ->write_end for data=ordered mode.
 *
 * Files the inode's data into the current transaction (so data hits
 * disk before the metadata that references it), finishes the write via
 * ext4_generic_write_end(), and cleans up any blocks allocated beyond
 * i_size on a short/failed copy using the orphan-list protocol.
 */
static int ext4_ordered_write_end(struct file *file,
				  struct address_space *mapping,
				  loff_t pos, unsigned len, unsigned copied,
				  struct page *page, void *fsdata)
{
	handle_t *handle = ext4_journal_current_handle();
	struct inode *inode = mapping->host;
	int ret = 0, ret2;

	trace_ext4_ordered_write_end(inode, pos, len, copied);
	ret = ext4_jbd2_file_inode(handle, inode);

	if (ret == 0) {
		ret2 = ext4_generic_write_end(file, mapping, pos, len, copied,
							page, fsdata);
		copied = ret2;
		if (pos + len > inode->i_size && ext4_can_truncate(inode))
			/* if we have allocated more blocks and copied
			 * less. We will have blocks allocated outside
			 * inode->i_size. So truncate them
			 */
			ext4_orphan_add(handle, inode);
		if (ret2 < 0)
			ret = ret2;
	} else {
		/* generic_write_end was skipped; drop the page ourselves. */
		unlock_page(page);
		page_cache_release(page);
	}

	ret2 = ext4_journal_stop(handle);
	if (!ret)
		ret = ret2;

	/* Truncate must happen after the transaction is closed. */
	if (pos + len > inode->i_size) {
		ext4_truncate_failed_write(inode);
		/*
		 * If truncate failed early the inode might still be
		 * on the orphan list; we need to make sure the inode
		 * is removed from the orphan list in that case.
		 */
		if (inode->i_nlink)
			ext4_orphan_del(NULL, inode);
	}

	return ret ? ret : copied;
}
/*
 * ext4_writeback_write_end - ->write_end for data=writeback mode.
 *
 * Same as ext4_ordered_write_end() but without filing the inode's data
 * into the journal transaction (writeback mode gives no data/metadata
 * ordering guarantee).  Blocks allocated beyond i_size on a short or
 * failed copy are trimmed via the orphan-list protocol.
 */
static int ext4_writeback_write_end(struct file *file,
				    struct address_space *mapping,
				    loff_t pos, unsigned len, unsigned copied,
				    struct page *page, void *fsdata)
{
	handle_t *handle = ext4_journal_current_handle();
	struct inode *inode = mapping->host;
	int ret = 0, ret2;

	trace_ext4_writeback_write_end(inode, pos, len, copied);
	ret2 = ext4_generic_write_end(file, mapping, pos, len, copied,
							page, fsdata);
	copied = ret2;
	if (pos + len > inode->i_size && ext4_can_truncate(inode))
		/* if we have allocated more blocks and copied
		 * less. We will have blocks allocated outside
		 * inode->i_size. So truncate them
		 */
		ext4_orphan_add(handle, inode);

	if (ret2 < 0)
		ret = ret2;

	ret2 = ext4_journal_stop(handle);
	if (!ret)
		ret = ret2;

	/* Truncate must happen after the transaction is closed. */
	if (pos + len > inode->i_size) {
		ext4_truncate_failed_write(inode);
		/*
		 * If truncate failed early the inode might still be
		 * on the orphan list; we need to make sure the inode
		 * is removed from the orphan list in that case.
		 */
		if (inode->i_nlink)
			ext4_orphan_del(NULL, inode);
	}

	return ret ? ret : copied;
}
/*
 * ext4_journalled_write_end - ->write_end for data=journal mode.
 *
 * Unlike the other modes the data buffers themselves are dirtied as
 * journalled metadata (via write_end_fn), the page is zero-filled for
 * short copies, and the inode is flagged EXT4_STATE_JDATA with the
 * committing transaction's tid recorded for later fsync/datasync.
 * i_size/i_disksize are updated while the page is still locked.
 */
static int ext4_journalled_write_end(struct file *file,
				     struct address_space *mapping,
				     loff_t pos, unsigned len, unsigned copied,
				     struct page *page, void *fsdata)
{
	handle_t *handle = ext4_journal_current_handle();
	struct inode *inode = mapping->host;
	int ret = 0, ret2;
	int partial = 0;
	unsigned from, to;
	loff_t new_i_size;

	trace_ext4_journalled_write_end(inode, pos, len, copied);
	from = pos & (PAGE_CACHE_SIZE - 1);
	to = from + len;

	/* data=journal always has a journal handle. */
	BUG_ON(!ext4_handle_valid(handle));

	if (copied < len) {
		/* Short copy: never expose uninitialized data. */
		if (!PageUptodate(page))
			copied = 0;
		page_zero_new_buffers(page, from+copied, to);
	}

	ret = walk_page_buffers(handle, page_buffers(page), from,
				to, &partial, write_end_fn);
	if (!partial)
		SetPageUptodate(page);
	new_i_size = pos + copied;
	if (new_i_size > inode->i_size)
		i_size_write(inode, pos+copied);
	ext4_set_inode_state(inode, EXT4_STATE_JDATA);
	/* Remember which transaction carries this data for datasync. */
	EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
	if (new_i_size > EXT4_I(inode)->i_disksize) {
		ext4_update_i_disksize(inode, new_i_size);
		ret2 = ext4_mark_inode_dirty(handle, inode);
		if (!ret)
			ret = ret2;
	}

	unlock_page(page);
	page_cache_release(page);
	if (pos + len > inode->i_size && ext4_can_truncate(inode))
		/* if we have allocated more blocks and copied
		 * less. We will have blocks allocated outside
		 * inode->i_size. So truncate them
		 */
		ext4_orphan_add(handle, inode);
	ret2 = ext4_journal_stop(handle);
	if (!ret)
		ret = ret2;
	if (pos + len > inode->i_size) {
		ext4_truncate_failed_write(inode);
		/*
		 * If truncate failed early the inode might still be
		 * on the orphan list; we need to make sure the inode
		 * is removed from the orphan list in that case.
		 */
		if (inode->i_nlink)
			ext4_orphan_del(NULL, inode);
	}

	return ret ? ret : copied;
}
/*
 * Reserve a single cluster located at lblock
 *
 * Reserves quota for one data cluster and claims (data + estimated
 * metadata) clusters from the superblock free-space counters for a
 * delayed-allocation write.  Retries on transient ENOSPC.
 *
 * Returns 0 on success, -ENOSPC if space cannot be claimed, or a
 * quota error from dquot_reserve_block().
 */
static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock)
{
	int retries = 0;
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);
	unsigned int md_needed;
	int ret;

	/*
	 * recalculate the amount of metadata blocks to reserve
	 * in order to allocate nrblocks
	 * worse case is one extent per block
	 */
repeat:
	spin_lock(&ei->i_block_reservation_lock);
	md_needed = EXT4_NUM_B2C(sbi,
				 ext4_calc_metadata_amount(inode, lblock));
	trace_ext4_da_reserve_space(inode, md_needed);
	/*
	 * NOTE(review): md_needed is computed under the reservation lock
	 * but consumed after it is dropped below; the estimate may be
	 * stale by the time it is claimed — confirm this is tolerated.
	 */
	spin_unlock(&ei->i_block_reservation_lock);

	/*
	 * We will charge metadata quota at writeout time; this saves
	 * us from metadata over-estimation, though we may go over by
	 * a small amount in the end.  Here we just reserve for data.
	 */
	ret = dquot_reserve_block(inode, EXT4_C2B(sbi, 1));
	if (ret)
		return ret;
	/*
	 * We do still charge estimated metadata to the sb though;
	 * we cannot afford to run out of free blocks.
	 */
	if (ext4_claim_free_clusters(sbi, md_needed + 1, 0)) {
		/* Claim failed: undo the quota reservation, maybe retry. */
		dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1));
		if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
			yield();
			goto repeat;
		}
		return -ENOSPC;
	}
	spin_lock(&ei->i_block_reservation_lock);
	ei->i_reserved_data_blocks++;
	ei->i_reserved_meta_blocks += md_needed;
	spin_unlock(&ei->i_block_reservation_lock);

	return 0;       /* success */
}
/*
 * ext4_da_release_space - give back @to_free previously reserved
 * delayed-allocation clusters for @inode.
 *
 * Decrements the per-inode reserved-data counter, releases all
 * reserved metadata once no reserved data remains, updates the
 * superblock dirty-clusters counter, and releases the quota
 * reservation.  A request exceeding the current reservation is
 * clamped with a warning rather than corrupting the counters.
 */
static void ext4_da_release_space(struct inode *inode, int to_free)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);

	if (!to_free)
		return;		/* Nothing to release, exit */

	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);

	trace_ext4_da_release_space(inode, to_free);
	if (unlikely(to_free > ei->i_reserved_data_blocks)) {
		/*
		 * if there aren't enough reserved blocks, then the
		 * counter is messed up somewhere.  Since this
		 * function is called from invalidate page, it's
		 * harmless to return without any action.
		 */
		ext4_msg(inode->i_sb, KERN_NOTICE, "ext4_da_release_space: "
			 "ino %lu, to_free %d with only %d reserved "
			 "data blocks", inode->i_ino, to_free,
			 ei->i_reserved_data_blocks);
		WARN_ON(1);
		to_free = ei->i_reserved_data_blocks;
	}
	ei->i_reserved_data_blocks -= to_free;

	if (ei->i_reserved_data_blocks == 0) {
		/*
		 * We can release all of the reserved metadata blocks
		 * only when we have written all of the delayed
		 * allocation blocks.
		 * Note that in case of bigalloc, i_reserved_meta_blocks,
		 * i_reserved_data_blocks, etc. refer to number of clusters.
		 */
		percpu_counter_sub(&sbi->s_dirtyclusters_counter,
				   ei->i_reserved_meta_blocks);
		ei->i_reserved_meta_blocks = 0;
		ei->i_da_metadata_calc_len = 0;
	}

	/* update fs dirty data blocks counter */
	percpu_counter_sub(&sbi->s_dirtyclusters_counter, to_free);

	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);

	/* Quota release must happen outside the reservation lock. */
	dquot_release_reservation_block(inode, EXT4_C2B(sbi, to_free));
}

/*
 * ext4_da_page_release_reservation - drop delayed-allocation
 * reservations for the buffers of @page at or beyond @offset
 * (called from invalidatepage).
 *
 * Clears the delay/da_mapped bits on affected buffers, then releases
 * the reservation cluster-by-cluster, skipping clusters that still
 * have other delalloc blocks outstanding (bigalloc case).
 */
static void ext4_da_page_release_reservation(struct page *page,
					     unsigned long offset)
{
	int to_release = 0;
	struct buffer_head *head, *bh;
	unsigned int curr_off = 0;
	struct inode *inode = page->mapping->host;
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	int num_clusters;

	head = page_buffers(page);
	bh = head;
	do {
		unsigned int next_off = curr_off + bh->b_size;

		if ((offset <= curr_off) && (buffer_delay(bh))) {
			to_release++;
			clear_buffer_delay(bh);
			clear_buffer_da_mapped(bh);
		}
		curr_off = next_off;
	} while ((bh = bh->b_this_page) != head);

	/* If we have released all the blocks belonging to a cluster, then we
	 * need to release the reserved space for that cluster. */
	num_clusters = EXT4_NUM_B2C(sbi, to_release);
	while (num_clusters > 0) {
		ext4_fsblk_t lblk;
		lblk = (page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits)) +
			((num_clusters - 1) << sbi->s_cluster_bits);
		if (sbi->s_cluster_ratio == 1 ||
		    !ext4_find_delalloc_cluster(inode, lblk, 1))
			ext4_da_release_space(inode, 1);

		num_clusters--;
	}
}
/*
 * Delayed allocation stuff
 */

/*
 * mpage_da_submit_io - walks through extent of pages and try to write
 * them with writepage() call back
 *
 * @mpd->inode: inode
 * @mpd->first_page: first page of the extent
 * @mpd->next_page: page after the last page of the extent
 * @map: block mapping for the extent, or NULL if no new mapping was made
 *
 * By the time mpage_da_submit_io() is called we expect all blocks
 * to be allocated. this may be wrong if allocation failed.
 *
 * As pages are already locked by write_cache_pages(), we can't use it
 *
 * For each page the buffers are stamped with the newly-allocated block
 * numbers from @map, then the page is written out via the journalled,
 * bio-based or buffer-based path depending on mount options.  Pages
 * whose allocation is still undone are unlocked and skipped.  Returns
 * the first error seen; later pages are still processed because they
 * remain locked otherwise.
 */
static int mpage_da_submit_io(struct mpage_da_data *mpd,
			      struct ext4_map_blocks *map)
{
	struct pagevec pvec;
	unsigned long index, end;
	int ret = 0, err, nr_pages, i;
	struct inode *inode = mpd->inode;
	struct address_space *mapping = inode->i_mapping;
	loff_t size = i_size_read(inode);
	unsigned int len, block_start;
	struct buffer_head *bh, *page_bufs = NULL;
	int journal_data = ext4_should_journal_data(inode);
	sector_t pblock = 0, cur_logical = 0;
	struct ext4_io_submit io_submit;

	BUG_ON(mpd->next_page <= mpd->first_page);
	memset(&io_submit, 0, sizeof(io_submit));
	/*
	 * We need to start from the first_page to the next_page - 1
	 * to make sure we also write the mapped dirty buffer_heads.
	 * If we look at mpd->b_blocknr we would only be looking
	 * at the currently mapped buffer_heads.
	 */
	index = mpd->first_page;
	end = mpd->next_page - 1;

	pagevec_init(&pvec, 0);
	while (index <= end) {
		nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
		if (nr_pages == 0)
			break;
		for (i = 0; i < nr_pages; i++) {
			int commit_write = 0, skip_page = 0;
			struct page *page = pvec.pages[i];

			index = page->index;
			if (index > end)
				break;

			/* Last page of the file may be partial. */
			if (index == size >> PAGE_CACHE_SHIFT)
				len = size & ~PAGE_CACHE_MASK;
			else
				len = PAGE_CACHE_SIZE;
			if (map) {
				/* Physical block for this page's first bh. */
				cur_logical = index << (PAGE_CACHE_SHIFT -
							inode->i_blkbits);
				pblock = map->m_pblk + (cur_logical -
							map->m_lblk);
			}
			index++;

			BUG_ON(!PageLocked(page));
			BUG_ON(PageWriteback(page));

			/*
			 * If the page does not have buffers (for
			 * whatever reason), try to create them using
			 * __block_write_begin.  If this fails,
			 * skip the page and move on.
			 */
			if (!page_has_buffers(page)) {
				if (__block_write_begin(page, 0, len,
						noalloc_get_block_write)) {
				skip_page:
					unlock_page(page);
					continue;
				}
				commit_write = 1;
			}

			bh = page_bufs = page_buffers(page);
			block_start = 0;
			do {
				if (!bh)
					goto skip_page;
				if (map && (cur_logical >= map->m_lblk) &&
				    (cur_logical <= (map->m_lblk +
						     (map->m_len - 1)))) {
					/* Stamp newly-allocated block nr. */
					if (buffer_delay(bh)) {
						clear_buffer_delay(bh);
						bh->b_blocknr = pblock;
					}
					if (buffer_da_mapped(bh))
						clear_buffer_da_mapped(bh);
					if (buffer_unwritten(bh) ||
					    buffer_mapped(bh))
						BUG_ON(bh->b_blocknr != pblock);
					if (map->m_flags & EXT4_MAP_UNINIT)
						set_buffer_uninit(bh);
					clear_buffer_unwritten(bh);
				}

				/*
				 * skip page if block allocation undone and
				 * block is dirty
				 */
				if (ext4_bh_delay_or_unwritten(NULL, bh))
					skip_page = 1;
				bh = bh->b_this_page;
				block_start += bh->b_size;
				cur_logical++;
				pblock++;
			} while (bh != page_bufs);

			if (skip_page)
				goto skip_page;

			if (commit_write)
				/* mark the buffer_heads as dirty & uptodate */
				block_commit_write(page, 0, len);

			clear_page_dirty_for_io(page);
			/*
			 * Delalloc doesn't support data journalling,
			 * but eventually maybe we'll lift this
			 * restriction.
			 */
			if (unlikely(journal_data && PageChecked(page)))
				err = __ext4_journalled_writepage(page, len);
			else if (test_opt(inode->i_sb, MBLK_IO_SUBMIT))
				err = ext4_bio_write_page(&io_submit, page,
							  len, mpd->wbc);
			else if (buffer_uninit(page_bufs)) {
				/* Unwritten extents need endio conversion. */
				ext4_set_bh_endio(page_bufs, inode);
				err = block_write_full_page_endio(page,
					noalloc_get_block_write,
					mpd->wbc, ext4_end_io_buffer_write);
			} else
				err = block_write_full_page(page,
					noalloc_get_block_write, mpd->wbc);

			if (!err)
				mpd->pages_written++;
			/*
			 * In error case, we have to continue because
			 * remaining pages are still locked
			 */
			if (ret == 0)
				ret = err;
		}
		pagevec_release(&pvec);
	}
	ext4_io_submit(&io_submit);
	return ret;
}
c7f5938ad ext4: fix ext4_da... |
1352 |
/*
 * ext4_da_block_invalidatepages - throw away the pages of a failed
 * delayed-allocation writeback.
 *
 * Walks the range [mpd->first_page, mpd->next_page) and invalidates
 * each page so stale, never-written data does not linger in the page
 * cache.  Callers guarantee every page in the range is still locked
 * and not under writeback (BUG_ON otherwise); each page is
 * invalidated, marked !uptodate and unlocked here.
 */
static void ext4_da_block_invalidatepages(struct mpage_da_data *mpd)
{
	int nr_pages, i;
	pgoff_t index, end;
	struct pagevec pvec;
	struct inode *inode = mpd->inode;
	struct address_space *mapping = inode->i_mapping;

	index = mpd->first_page;
	end   = mpd->next_page - 1;	/* next_page is exclusive */
	while (index <= end) {
		nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
		if (nr_pages == 0)
			break;
		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			/* lookup may return pages past the range's end */
			if (page->index > end)
				break;
			BUG_ON(!PageLocked(page));
			BUG_ON(PageWriteback(page));
			block_invalidatepage(page, 0);
			ClearPageUptodate(page);
			unlock_page(page);
		}
		/* resume the scan after the last page we saw */
		index = pvec.pages[nr_pages - 1]->index + 1;
		pagevec_release(&pvec);
	}
	return;
}
df22291ff ext4: Retry block... |
1380 1381 1382 |
/*
 * Dump the filesystem's free/dirty/reserved block accounting to the
 * console.  Called from the delalloc writeback path when block
 * allocation unexpectedly fails with -ENOSPC, to help diagnose
 * "delayed block allocation failed" reports.
 *
 * Fix: the function computed @sbi but then redundantly re-evaluated
 * EXT4_SB(inode->i_sb) in two of the calls below; use @sbi
 * consistently throughout.
 */
static void ext4_print_free_blocks(struct inode *inode)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

	printk(KERN_CRIT "Total free blocks count %lld\n",
	       EXT4_C2B(sbi, ext4_count_free_clusters(inode->i_sb)));
	printk(KERN_CRIT "Free/Dirty block details\n");
	printk(KERN_CRIT "free_blocks=%lld\n",
	       (long long) EXT4_C2B(sbi,
		percpu_counter_sum(&sbi->s_freeclusters_counter)));
	printk(KERN_CRIT "dirty_blocks=%lld\n",
	       (long long) EXT4_C2B(sbi,
		percpu_counter_sum(&sbi->s_dirtyclusters_counter)));
	printk(KERN_CRIT "Block reservation details\n");
	printk(KERN_CRIT "i_reserved_data_blocks=%u\n",
	       EXT4_I(inode)->i_reserved_data_blocks);
	printk(KERN_CRIT "i_reserved_meta_blocks=%u\n",
	       EXT4_I(inode)->i_reserved_meta_blocks);
	return;
}
b920c7550 ext4: Add documen... |
1407 |
/*
 * mpage_da_map_and_submit - go through given space, map them
 * if necessary, and then submit them for I/O
 *
 * @mpd - bh describing space
 *
 * The function skips space we know is already mapped to disk blocks.
 * On success the accumulated extent is handed to mpage_da_submit_io();
 * on allocation failure the affected pages are invalidated instead.
 * Always marks mpd->io_done before returning.
 */
static void mpage_da_map_and_submit(struct mpage_da_data *mpd)
{
	int err, blks, get_blocks_flags;
	struct ext4_map_blocks map, *mapp = NULL;
	sector_t next = mpd->b_blocknr;
	unsigned max_blocks = mpd->b_size >> mpd->inode->i_blkbits;
	loff_t disksize = EXT4_I(mpd->inode)->i_disksize;
	handle_t *handle = NULL;

	/*
	 * If the blocks are mapped already, or we couldn't accumulate
	 * any blocks, then proceed immediately to the submission stage.
	 */
	if ((mpd->b_size == 0) ||
	    ((mpd->b_state & (1 << BH_Mapped)) &&
	     !(mpd->b_state & (1 << BH_Delay)) &&
	     !(mpd->b_state & (1 << BH_Unwritten))))
		goto submit_io;

	/* ext4_da_writepages() started the handle for us */
	handle = ext4_journal_current_handle();
	BUG_ON(!handle);

	/*
	 * Call ext4_map_blocks() to allocate any delayed allocation
	 * blocks, or to convert an uninitialized extent to be
	 * initialized (in the case where we have written into
	 * one or more preallocated blocks).
	 *
	 * We pass in the magic EXT4_GET_BLOCKS_DELALLOC_RESERVE to
	 * indicate that we are on the delayed allocation path.  This
	 * affects functions in many different parts of the allocation
	 * call path.  This flag exists primarily because we don't
	 * want to change *many* call functions, so ext4_map_blocks()
	 * will set the EXT4_STATE_DELALLOC_RESERVED flag once the
	 * inode's allocation semaphore is taken.
	 *
	 * If the blocks in question were delalloc blocks, set
	 * EXT4_GET_BLOCKS_DELALLOC_RESERVE so the delalloc accounting
	 * variables are updated after the blocks have been allocated.
	 */
	map.m_lblk = next;
	map.m_len = max_blocks;
	get_blocks_flags = EXT4_GET_BLOCKS_CREATE;
	if (ext4_should_dioread_nolock(mpd->inode))
		get_blocks_flags |= EXT4_GET_BLOCKS_IO_CREATE_EXT;
	if (mpd->b_state & (1 << BH_Delay))
		get_blocks_flags |= EXT4_GET_BLOCKS_DELALLOC_RESERVE;

	blks = ext4_map_blocks(handle, mpd->inode, &map, get_blocks_flags);
	if (blks < 0) {
		struct super_block *sb = mpd->inode->i_sb;

		err = blks;
		/*
		 * If get block returns EAGAIN or ENOSPC and there
		 * appears to be free blocks we will just let
		 * mpage_da_submit_io() unlock all of the pages.
		 */
		if (err == -EAGAIN)
			goto submit_io;

		if (err == -ENOSPC && ext4_count_free_clusters(sb)) {
			mpd->retval = err;
			goto submit_io;
		}
		/*
		 * get block failure will cause us to loop in
		 * writepages, because a_ops->writepage won't be able
		 * to make progress. The page will be redirtied by
		 * writepage and writepages will again try to write
		 * the same.
		 */
		if (!(EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED)) {
			ext4_msg(sb, KERN_CRIT,
				 "delayed block allocation failed for inode %lu "
				 "at logical offset %llu with max blocks %zd "
				 "with error %d", mpd->inode->i_ino,
				 (unsigned long long) next,
				 mpd->b_size >> mpd->inode->i_blkbits, err);
			ext4_msg(sb, KERN_CRIT,
				"This should not happen!! Data will be lost\n");
			if (err == -ENOSPC)
				ext4_print_free_blocks(mpd->inode);
		}
		/* invalidate all the pages */
		ext4_da_block_invalidatepages(mpd);

		/* Mark this page range as having been completed */
		mpd->io_done = 1;
		return;
	}
	BUG_ON(blks == 0);

	mapp = &map;
	if (map.m_flags & EXT4_MAP_NEW) {
		struct block_device *bdev = mpd->inode->i_sb->s_bdev;
		int i;

		/* drop any stale metadata aliases of the new blocks */
		for (i = 0; i < map.m_len; i++)
			unmap_underlying_metadata(bdev, map.m_pblk + i);

		if (ext4_should_order_data(mpd->inode)) {
			err = ext4_jbd2_file_inode(handle, mpd->inode);
			if (err) {
				/* Only if the journal is aborted */
				mpd->retval = err;
				goto submit_io;
			}
		}
	}

	/*
	 * Update on-disk size along with block allocation.
	 */
	disksize = ((loff_t) next + blks) << mpd->inode->i_blkbits;
	if (disksize > i_size_read(mpd->inode))
		disksize = i_size_read(mpd->inode);
	if (disksize > EXT4_I(mpd->inode)->i_disksize) {
		ext4_update_i_disksize(mpd->inode, disksize);
		err = ext4_mark_inode_dirty(handle, mpd->inode);
		if (err)
			ext4_error(mpd->inode->i_sb,
				   "Failed to mark inode %lu dirty",
				   mpd->inode->i_ino);
	}

submit_io:
	mpage_da_submit_io(mpd, mapp);
	mpd->io_done = 1;
}
bf068ee26 ext4: Handle unwr... |
1542 1543 |
/* Buffer-state bits we track while accumulating an extent. */
#define BH_FLAGS ((1 << BH_Uptodate) | (1 << BH_Mapped) | \
		  (1 << BH_Delay) | (1 << BH_Unwritten))

/*
 * mpage_add_bh_to_extent - try to add one more block to extent of blocks
 *
 * @mpd - extent of blocks accumulated so far
 * @logical - logical number of the block in the file
 * @b_size - size of the buffer being added, in bytes
 * @b_state - state bits of the buffer (only BH_FLAGS bits are kept)
 *
 * the function is used to collect contig. blocks in same state.
 * If the block cannot be merged, the current extent is flushed via
 * mpage_da_map_and_submit() and a new one is started.
 */
static void mpage_add_bh_to_extent(struct mpage_da_data *mpd,
				   sector_t logical, size_t b_size,
				   unsigned long b_state)
{
	sector_t next;
	int nrblocks = mpd->b_size >> mpd->inode->i_blkbits;

	/*
	 * XXX Don't go larger than mballoc is willing to allocate
	 * This is a stopgap solution. We eventually need to fold
	 * mpage_da_submit_io() into this function and then call
	 * ext4_map_blocks() multiple times in a loop
	 */
	if (nrblocks >= 8*1024*1024/mpd->inode->i_sb->s_blocksize)
		goto flush_it;

	/* check if the reserved journal credits might overflow */
	if (!(ext4_test_inode_flag(mpd->inode, EXT4_INODE_EXTENTS))) {
		if (nrblocks >= EXT4_MAX_TRANS_DATA) {
			/*
			 * With non-extent format we are limited by the journal
			 * credit available. Total credit needed to insert
			 * nrblocks contiguous blocks is dependent on the
			 * nrblocks. So limit nrblocks.
			 */
			goto flush_it;
		} else if ((nrblocks + (b_size >> mpd->inode->i_blkbits)) >
				EXT4_MAX_TRANS_DATA) {
			/*
			 * Adding the new buffer_head would make it cross the
			 * allowed limit for which we have journal credit
			 * reserved. So limit the new bh->b_size
			 */
			b_size = (EXT4_MAX_TRANS_DATA - nrblocks) <<
						mpd->inode->i_blkbits;
			/* we will do mpage_da_submit_io in the next loop */
		}
	}
	/*
	 * First block in the extent
	 */
	if (mpd->b_size == 0) {
		mpd->b_blocknr = logical;
		mpd->b_size = b_size;
		mpd->b_state = b_state & BH_FLAGS;
		return;
	}

	next = mpd->b_blocknr + nrblocks;
	/*
	 * Can we merge the block to our big extent?
	 */
	if (logical == next && (b_state & BH_FLAGS) == mpd->b_state) {
		mpd->b_size += b_size;
		return;
	}

flush_it:
	/*
	 * We couldn't merge the block to our extent, so we
	 * need to flush current extent and start new one
	 */
	mpage_da_map_and_submit(mpd);
	return;
}
c364b22c9 ext4: Fix mmap/tr... |
1616 |
/*
 * walk_page_buffers() callback: non-zero for a dirty buffer that still
 * needs block allocation, i.e. one marked delayed or unwritten.
 */
static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh)
{
	int needs_alloc = buffer_delay(bh) || buffer_unwritten(bh);

	return needs_alloc && buffer_dirty(bh);
}
64769240b ext4: Add delayed... |
1620 |
/*
 * This function grabs code from the very beginning of
 * ext4_map_blocks, but assumes that the caller is from delayed write
 * time. This function looks up the requested blocks and sets the
 * buffer delay bit under the protection of i_data_sem.
 *
 * Returns the lookup result from ext4_{ext,ind}_map_blocks (0 if the
 * block is not yet allocated, in which case a delalloc reservation is
 * taken), or a negative error.
 */
static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
			      struct ext4_map_blocks *map,
			      struct buffer_head *bh)
{
	int retval;
	/*
	 * Fake "physical" block number used for delayed buffers; must
	 * lie beyond the filesystem so it can never alias a real block.
	 */
	sector_t invalid_block = ~((sector_t) 0xffff);

	if (invalid_block < ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es))
		invalid_block = ~0;

	map->m_flags = 0;
	ext_debug("ext4_da_map_blocks(): inode %lu, max_blocks %u,"
		  "logical block %lu\n", inode->i_ino, map->m_len,
		  (unsigned long) map->m_lblk);
	/*
	 * Try to see if we can get the block without requesting a new
	 * file system block.
	 */
	down_read((&EXT4_I(inode)->i_data_sem));
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
		retval = ext4_ext_map_blocks(NULL, inode, map, 0);
	else
		retval = ext4_ind_map_blocks(NULL, inode, map, 0);

	if (retval == 0) {
		/*
		 * XXX: __block_prepare_write() unmaps passed block,
		 * is it OK?
		 */
		/* If the block was allocated from previously allocated cluster,
		 * then we dont need to reserve it again. */
		if (!(map->m_flags & EXT4_MAP_FROM_CLUSTER)) {
			retval = ext4_da_reserve_space(inode, iblock);
			if (retval)
				/* not enough space to reserve */
				goto out_unlock;
		}

		/* Clear EXT4_MAP_FROM_CLUSTER flag since its purpose is served
		 * and it should not appear on the bh->b_state.
		 */
		map->m_flags &= ~EXT4_MAP_FROM_CLUSTER;

		/* mark the bh delayed, pointing at the sentinel block */
		map_bh(bh, inode->i_sb, invalid_block);
		set_buffer_new(bh);
		set_buffer_delay(bh);
	}

out_unlock:
	up_read((&EXT4_I(inode)->i_data_sem));

	return retval;
}

/*
b920c7550 ext4: Add documen... |
1682 1683 1684 |
 * This is a special get_blocks_t callback which is used by
 * ext4_da_write_begin(). It will either return mapped block or
 * reserve space for a single block.
 *
 * For delayed buffer_head we have BH_Mapped, BH_New, BH_Delay set.
 * We also have b_blocknr = -1 and b_bdev initialized properly
 *
 * For unwritten buffer_head we have BH_Mapped, BH_New, BH_Unwritten set.
 * We also have b_blocknr = physicalblock mapping unwritten extent and b_bdev
 * initialized properly.
 */
static int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
				  struct buffer_head *bh, int create)
{
	struct ext4_map_blocks map;
	int ret = 0;

	/* only ever called from the write path, one block at a time */
	BUG_ON(create == 0);
	BUG_ON(bh->b_size != inode->i_sb->s_blocksize);

	map.m_lblk = iblock;
	map.m_len = 1;

	/*
	 * first, we need to know whether the block is allocated already
	 * preallocated blocks are unmapped but should treated
	 * the same as allocated blocks.
	 */
	ret = ext4_da_map_blocks(inode, iblock, &map, bh);
	if (ret <= 0)
		/* delayed (0) or error (<0): bh already set up, or fail */
		return ret;

	/* block is mapped: copy the mapping into the bh */
	map_bh(bh, inode->i_sb, map.m_pblk);
	bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags;

	if (buffer_unwritten(bh)) {
		/* A delayed write to unwritten bh should be marked
		 * new and mapped.  Mapped ensures that we don't do
		 * get_block multiple times when we write to the same
		 * offset and new ensures that we do proper zero out
		 * for partial write.
		 */
		set_buffer_new(bh);
		set_buffer_mapped(bh);
	}
	return 0;
}
61628a3f3 ext4: Invert lock... |
1729 |
|
b920c7550 ext4: Add documen... |
1730 1731 1732 |
/*
 * This function is used as a standard get_block_t callback function
 * when there is no desire to allocate any blocks.  It is used as a
 * callback function for block_write_begin() and block_write_full_page().
 * These functions should only try to map a single block at a time.
 *
 * Since this function doesn't do block allocations even if the caller
 * requests it by passing in create=1, it is critically important that
 * any caller checks to make sure that any buffer heads are returned
 * by this function are either all already mapped or marked for
 * delayed allocation before calling  block_write_full_page().  Otherwise,
 * b_blocknr could be left uninitialized, and the page write functions will
 * be taken by surprise.
 */
static int noalloc_get_block_write(struct inode *inode, sector_t iblock,
				   struct buffer_head *bh_result, int create)
{
	BUG_ON(bh_result->b_size != inode->i_sb->s_blocksize);
	/* create is deliberately forced to 0: never allocate here */
	return _ext4_get_block(inode, iblock, bh_result, 0);
}
62e086be5 ext4: Move __ext4... |
1750 1751 1752 1753 1754 1755 1756 1757 1758 1759 1760 1761 1762 |
/* walk_page_buffers() callback: take an extra reference on a bh. */
static int bget_one(handle_t *handle, struct buffer_head *bh)
{
	get_bh(bh);
	return 0;
}

/* walk_page_buffers() callback: drop the reference taken by bget_one(). */
static int bput_one(handle_t *handle, struct buffer_head *bh)
{
	put_bh(bh);
	return 0;
}

/*
 * Write @len bytes of @page through the journal (data=journal style):
 * get write access to each buffer, copy the data into the journal via
 * write_end_fn, and tag the inode with EXT4_STATE_JDATA so fsync knows
 * a journal commit is needed.  The page is unlocked early; the bget/bput
 * pair keeps the buffers alive across the journalled write.
 *
 * Returns 0 on success or the first error encountered.
 */
static int __ext4_journalled_writepage(struct page *page,
				       unsigned int len)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;
	struct buffer_head *page_bufs;
	handle_t *handle = NULL;
	int ret = 0;
	int err;

	ClearPageChecked(page);
	page_bufs = page_buffers(page);
	BUG_ON(!page_bufs);
	walk_page_buffers(handle, page_bufs, 0, len, NULL, bget_one);
	/* As soon as we unlock the page, it can go away, but we have
	 * references to buffers so we are safe */
	unlock_page(page);

	handle = ext4_journal_start(inode, ext4_writepage_trans_blocks(inode));
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto out;
	}

	/* data=journal requires a real journal handle */
	BUG_ON(!ext4_handle_valid(handle));

	ret = walk_page_buffers(handle, page_bufs, 0, len, NULL,
				do_journal_get_write_access);

	err = walk_page_buffers(handle, page_bufs, 0, len, NULL,
				write_end_fn);
	if (ret == 0)
		ret = err;
	/* remember which transaction carries this data, for fdatasync */
	EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
	err = ext4_journal_stop(handle);
	if (!ret)
		ret = err;

	walk_page_buffers(handle, page_bufs, 0, len, NULL, bput_one);
	ext4_set_inode_state(inode, EXT4_STATE_JDATA);
out:
	return ret;
}
744692dc0 ext4: use ext4_ge... |
1802 1803 |
static int ext4_set_bh_endio(struct buffer_head *bh, struct inode *inode); static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate); |
61628a3f3 ext4: Invert lock... |
1804 |
/*
 * Note that we don't need to start a transaction unless we're journaling data
 * because we should have holes filled from ext4_page_mkwrite(). We even don't
 * need to file the inode to the transaction's list in ordered mode because if
 * we are writing back data added by write(), the inode is already there and if
 * we are writing back data modified via mmap(), no one guarantees in which
 * transaction the data will hit the disk. In case we are journaling data, we
 * cannot start transaction directly because transaction start ranks above page
 * lock so we have to do some magic.
 *
 * This function can get called via...
 *   - ext4_da_writepages after taking page lock (have journal handle)
 *   - journal_submit_inode_data_buffers (no journal handle)
 *   - shrink_page_list via pdflush (no journal handle)
 *   - grab_page_cache when doing write_begin (have journal handle)
 *
 * We don't do any block allocation in this function. If we have page with
 * multiple blocks we need to write those buffer_heads that are mapped. This
 * is important for mmaped based write. So if we do with blocksize 1K
 * truncate(f, 1024);
 * a = mmap(f, 0, 4096);
 * a[0] = 'a';
 * truncate(f, 4096);
 * we have in the page first buffer_head mapped via page_mkwrite call back
 * but other buffer_heads would be unmapped but dirty (dirty done via the
 * do_wp_page). So writepage should write the first block. If we modify
 * the mmap area beyond 1024 we will again get a page_fault and the
 * page_mkwrite callback will do the block allocation and mark the
 * buffer_heads mapped.
 *
 * We redirty the page if we have any buffer_heads that is either delay or
 * unwritten in the page.
 *
 * We can get recursively called as shown below.
 *
 *	ext4_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() ->
 *		ext4_writepage()
 *
 * But since we don't do any block allocation we should not deadlock.
 * Page also have the dirty flag cleared so we don't get recursive page_lock.
 */
static int ext4_writepage(struct page *page,
			  struct writeback_control *wbc)
{
	int ret = 0, commit_write = 0;
	loff_t size;
	unsigned int len;
	struct buffer_head *page_bufs = NULL;
	struct inode *inode = page->mapping->host;

	trace_ext4_writepage(page);
	/* the last page may be only partially inside i_size */
	size = i_size_read(inode);
	if (page->index == size >> PAGE_CACHE_SHIFT)
		len = size & ~PAGE_CACHE_MASK;
	else
		len = PAGE_CACHE_SIZE;

	/*
	 * If the page does not have buffers (for whatever reason),
	 * try to create them using __block_write_begin.  If this
	 * fails, redirty the page and move on.
	 */
	if (!page_has_buffers(page)) {
		if (__block_write_begin(page, 0, len,
					noalloc_get_block_write)) {
		redirty_page:
			redirty_page_for_writepage(wbc, page);
			unlock_page(page);
			return 0;
		}
		commit_write = 1;
	}
	page_bufs = page_buffers(page);
	if (walk_page_buffers(NULL, page_bufs, 0, len, NULL,
			      ext4_bh_delay_or_unwritten)) {
		/*
		 * We don't want to do block allocation, so redirty
		 * the page and return.  We may reach here when we do
		 * a journal commit via journal_submit_inode_data_buffers.
		 * We can also reach here via shrink_page_list but it
		 * should never be for direct reclaim so warn if that
		 * happens
		 */
		WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) ==
								PF_MEMALLOC);
		goto redirty_page;
	}
	if (commit_write)
		/* now mark the buffer_heads as dirty and uptodate */
		block_commit_write(page, 0, len);

	if (PageChecked(page) && ext4_should_journal_data(inode))
		/*
		 * It's mmapped pagecache.  Add buffers and journal it.  There
		 * doesn't seem much point in redirtying the page here.
		 */
		return __ext4_journalled_writepage(page, len);

	if (buffer_uninit(page_bufs)) {
		/* unwritten extents need the end_io conversion path */
		ext4_set_bh_endio(page_bufs, inode);
		ret = block_write_full_page_endio(page, noalloc_get_block_write,
					    wbc, ext4_end_io_buffer_write);
	} else
		ret = block_write_full_page(page, noalloc_get_block_write,
					    wbc);

	return ret;
}
61628a3f3 ext4: Invert lock... |
1911 |
/* |
525f4ed8d ext4: journal cre... |
1912 |
* This is called via ext4_da_writepages() to |
25985edce Fix common misspe... |
1913 |
* calculate the total number of credits to reserve to fit |
525f4ed8d ext4: journal cre... |
1914 1915 1916 |
* a single extent allocation into a single transaction, * ext4_da_writpeages() will loop calling this before * the block allocation. |
61628a3f3 ext4: Invert lock... |
1917 |
*/ |
525f4ed8d ext4: journal cre... |
1918 1919 1920 1921 1922 1923 1924 1925 1926 1927 1928 |
static int ext4_da_writepages_trans_blocks(struct inode *inode) { int max_blocks = EXT4_I(inode)->i_reserved_data_blocks; /* * With non-extent format the journal credit needed to * insert nrblocks contiguous block is dependent on * number of contiguous block. So we will limit * number of contiguous block to a sane value */ |
12e9b8920 ext4: Use bitops ... |
1929 |
if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) && |
525f4ed8d ext4: journal cre... |
1930 1931 1932 1933 1934 |
(max_blocks > EXT4_MAX_TRANS_DATA)) max_blocks = EXT4_MAX_TRANS_DATA; return ext4_chunk_trans_blocks(inode, max_blocks); } |
61628a3f3 ext4: Invert lock... |
1935 |
|
/*
 * write_cache_pages_da - walk the list of dirty pages of the given
 * address space and accumulate pages that need writing, and call
 * mpage_da_map_and_submit to map a single contiguous memory region
 * and then write them.
 *
 * Returns 0 when the whole range was scanned, or MPAGE_DA_EXTENT_TAIL
 * once an extent has been accumulated and submitted (via the
 * ret_extent_tail label); the caller loops until the range is done.
 * *done_index is advanced past every page examined so the caller can
 * resume from there.
 */
static int write_cache_pages_da(struct address_space *mapping,
				struct writeback_control *wbc,
				struct mpage_da_data *mpd,
				pgoff_t *done_index)
{
	struct buffer_head	*bh, *head;
	struct inode		*inode = mapping->host;
	struct pagevec		pvec;
	unsigned int		nr_pages;
	sector_t		logical;
	pgoff_t			index, end;
	long			nr_to_write = wbc->nr_to_write;
	int			i, tag, ret = 0;

	/* Start each call with a clean accumulation state. */
	memset(mpd, 0, sizeof(struct mpage_da_data));
	mpd->wbc = wbc;
	mpd->inode = inode;
	pagevec_init(&pvec, 0);
	index = wbc->range_start >> PAGE_CACHE_SHIFT;
	end = wbc->range_end >> PAGE_CACHE_SHIFT;

	/*
	 * For integrity sync (or tagged writepages) only visit pages the
	 * caller pre-tagged TOWRITE, so concurrently-dirtied pages don't
	 * make us loop forever.
	 */
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag = PAGECACHE_TAG_TOWRITE;
	else
		tag = PAGECACHE_TAG_DIRTY;

	*done_index = index;
	while (index <= end) {
		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
			      min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
		if (nr_pages == 0)
			return 0;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/*
			 * At this point, the page may be truncated or
			 * invalidated (changing page->mapping to NULL), or
			 * even swizzled back from swapper_space to tmpfs file
			 * mapping. However, page->index will not change
			 * because we have a reference on the page.
			 */
			if (page->index > end)
				goto out;

			*done_index = page->index + 1;

			/*
			 * If we can't merge this page, and we have
			 * accumulated a contiguous region, write it
			 */
			if ((mpd->next_page != page->index) &&
			    (mpd->next_page != mpd->first_page)) {
				mpage_da_map_and_submit(mpd);
				goto ret_extent_tail;
			}

			lock_page(page);

			/*
			 * If the page is no longer dirty, or its
			 * mapping no longer corresponds to inode we
			 * are writing (which means it has been
			 * truncated or invalidated), or the page is
			 * already under writeback and we are not
			 * doing a data integrity writeback, skip the page
			 */
			if (!PageDirty(page) ||
			    (PageWriteback(page) &&
			     (wbc->sync_mode == WB_SYNC_NONE)) ||
			    unlikely(page->mapping != mapping)) {
				unlock_page(page);
				continue;
			}

			wait_on_page_writeback(page);
			BUG_ON(PageWriteback(page));

			/* First page of a new extent resets first_page. */
			if (mpd->next_page != page->index)
				mpd->first_page = page->index;
			mpd->next_page = page->index + 1;
			logical = (sector_t) page->index <<
				(PAGE_CACHE_SHIFT - inode->i_blkbits);

			if (!page_has_buffers(page)) {
				/* Whole page is one dirty, uptodate chunk. */
				mpage_add_bh_to_extent(mpd, logical,
						       PAGE_CACHE_SIZE,
						       (1 << BH_Dirty) | (1 << BH_Uptodate));
				if (mpd->io_done)
					goto ret_extent_tail;
			} else {
				/*
				 * Page with regular buffer heads,
				 * just add all dirty ones
				 */
				head = page_buffers(page);
				bh = head;
				do {
					BUG_ON(buffer_locked(bh));
					/*
					 * We need to try to allocate
					 * unmapped blocks in the same page.
					 * Otherwise we won't make progress
					 * with the page in ext4_writepage
					 */
					if (ext4_bh_delay_or_unwritten(NULL, bh)) {
						mpage_add_bh_to_extent(mpd, logical,
								       bh->b_size,
								       bh->b_state);
						if (mpd->io_done)
							goto ret_extent_tail;
					} else if (buffer_dirty(bh) && (buffer_mapped(bh))) {
						/*
						 * mapped dirty buffer. We need
						 * to update the b_state
						 * because we look at b_state
						 * in mpage_da_map_blocks.  We
						 * don't update b_size because
						 * if we find an unmapped
						 * buffer_head later we need to
						 * use the b_state flag of that
						 * buffer_head.
						 */
						if (mpd->b_size == 0)
							mpd->b_state = bh->b_state & BH_FLAGS;
					}
					logical++;
				} while ((bh = bh->b_this_page) != head);
			}

			if (nr_to_write > 0) {
				nr_to_write--;
				if (nr_to_write == 0 &&
				    wbc->sync_mode == WB_SYNC_NONE)
					/*
					 * We stop writing back only if we are
					 * not doing integrity sync. In case of
					 * integrity sync we have to keep going
					 * because someone may be concurrently
					 * dirtying pages, and we might have
					 * synced a lot of newly appeared dirty
					 * pages, but have not synced all of the
					 * old dirty pages.
					 */
					goto out;
			}
		}
		pagevec_release(&pvec);
		cond_resched();
	}
	return 0;
ret_extent_tail:
	ret = MPAGE_DA_EXTENT_TAIL;
out:
	pagevec_release(&pvec);
	cond_resched();
	return ret;
}
/*
 * Delayed-allocation writepages: repeatedly start a transaction sized
 * for one extent allocation, let write_cache_pages_da() accumulate and
 * submit one contiguous extent, then stop the transaction and loop
 * until nr_to_write is exhausted or the scan completes.
 */
static int ext4_da_writepages(struct address_space *mapping,
			      struct writeback_control *wbc)
{
	pgoff_t	index;
	int range_whole = 0;
	handle_t *handle = NULL;
	struct mpage_da_data mpd;
	struct inode *inode = mapping->host;
	int pages_written = 0;
	unsigned int max_pages;
	int range_cyclic, cycled = 1, io_done = 0;
	int needed_blocks, ret = 0;
	long desired_nr_to_write, nr_to_writebump = 0;
	loff_t range_start = wbc->range_start;	/* restored before return */
	struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb);
	pgoff_t done_index = 0;
	pgoff_t end;
	struct blk_plug plug;

	trace_ext4_da_writepages(inode, wbc);

	/*
	 * No pages to write? This is mainly a kludge to avoid starting
	 * a transaction for special inodes like journal inode on last iput()
	 * because that could violate lock ordering on umount
	 */
	if (!mapping->nrpages || !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
		return 0;

	/*
	 * If the filesystem has aborted, it is read-only, so return
	 * right away instead of dumping stack traces later on that
	 * will obscure the real source of the problem.  We test
	 * EXT4_MF_FS_ABORTED instead of sb->s_flag's MS_RDONLY because
	 * the latter could be true if the filesystem is mounted
	 * read-only, and in that case, ext4_da_writepages should
	 * *never* be called, so if that ever happens, we would want
	 * the stack trace.
	 */
	if (unlikely(sbi->s_mount_flags & EXT4_MF_FS_ABORTED))
		return -EROFS;

	if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
		range_whole = 1;

	range_cyclic = wbc->range_cyclic;
	if (wbc->range_cyclic) {
		/*
		 * Convert a cyclic request into an explicit range from the
		 * last writeback position to EOF; if we started mid-file,
		 * remember (cycled = 0) so we can wrap around later.
		 */
		index = mapping->writeback_index;
		if (index)
			cycled = 0;
		wbc->range_start = index << PAGE_CACHE_SHIFT;
		wbc->range_end  = LLONG_MAX;
		wbc->range_cyclic = 0;
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_CACHE_SHIFT;
		end = wbc->range_end >> PAGE_CACHE_SHIFT;
	}

	/*
	 * This works around two forms of stupidity.  The first is in
	 * the writeback code, which caps the maximum number of pages
	 * written to be 1024 pages.  This is wrong on multiple
	 * levels; different architectures have a different page size,
	 * which changes the maximum amount of data which gets
	 * written.  Secondly, 4 megabytes is way too small.  XFS
	 * forces this value to be 16 megabytes by multiplying
	 * nr_to_write parameter by four, and then relies on its
	 * allocator to allocate larger extents to make them
	 * contiguous.  Unfortunately this brings us to the second
	 * stupidity, which is that ext4's mballoc code only allocates
	 * at most 2048 blocks.  So we force contiguous writes up to
	 * the number of dirty blocks in the inode, or
	 * sbi->max_writeback_mb_bump whichever is smaller.
	 */
	max_pages = sbi->s_max_writeback_mb_bump << (20 - PAGE_CACHE_SHIFT);
	if (!range_cyclic && range_whole) {
		if (wbc->nr_to_write == LONG_MAX)
			desired_nr_to_write = wbc->nr_to_write;
		else
			desired_nr_to_write = wbc->nr_to_write * 8;
	} else
		desired_nr_to_write = ext4_num_dirty_pages(inode, index,
							   max_pages);
	if (desired_nr_to_write > max_pages)
		desired_nr_to_write = max_pages;

	if (wbc->nr_to_write < desired_nr_to_write) {
		/* remember the bump so it can be subtracted on exit */
		nr_to_writebump = desired_nr_to_write - wbc->nr_to_write;
		wbc->nr_to_write = desired_nr_to_write;
	}

retry:
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag_pages_for_writeback(mapping, index, end);

	blk_start_plug(&plug);
	while (!ret && wbc->nr_to_write > 0) {

		/*
		 * we insert one extent at a time. So we need
		 * credit needed for single extent allocation.
		 * journalled mode is currently not supported
		 * by delalloc
		 */
		BUG_ON(ext4_should_journal_data(inode));
		needed_blocks = ext4_da_writepages_trans_blocks(inode);

		/* start a new transaction */
		handle = ext4_journal_start(inode, needed_blocks);
		if (IS_ERR(handle)) {
			ret = PTR_ERR(handle);
			ext4_msg(inode->i_sb, KERN_CRIT, "%s: jbd2_start: "
			       "%ld pages, ino %lu; err %d", __func__,
				wbc->nr_to_write, inode->i_ino, ret);
			blk_finish_plug(&plug);
			goto out_writepages;
		}

		/*
		 * Now call write_cache_pages_da() to find the next
		 * contiguous region of logical blocks that need
		 * blocks to be allocated by ext4 and submit them.
		 */
		ret = write_cache_pages_da(mapping, wbc, &mpd, &done_index);
		/*
		 * If we have a contiguous extent of pages and we
		 * haven't done the I/O yet, map the blocks and submit
		 * them for I/O.
		 */
		if (!mpd.io_done && mpd.next_page != mpd.first_page) {
			mpage_da_map_and_submit(&mpd);
			ret = MPAGE_DA_EXTENT_TAIL;
		}
		trace_ext4_da_write_pages(inode, &mpd);
		wbc->nr_to_write -= mpd.pages_written;

		ext4_journal_stop(handle);

		if ((mpd.retval == -ENOSPC) && sbi->s_journal) {
			/* commit the transaction which would
			 * free blocks released in the transaction
			 * and try again */
			jbd2_journal_force_commit_nested(sbi->s_journal);
			ret = 0;
		} else if (ret == MPAGE_DA_EXTENT_TAIL) {
			/*
			 * Got one extent now try with rest of the pages.
			 * If mpd.retval is set -EIO, journal is aborted.
			 * So we don't need to write any more.
			 */
			pages_written += mpd.pages_written;
			ret = mpd.retval;
			io_done = 1;
		} else if (wbc->nr_to_write)
			/*
			 * There is no more writeout needed
			 * or we requested for a noblocking writeout
			 * and we found the device congested
			 */
			break;
	}
	blk_finish_plug(&plug);
	if (!io_done && !cycled) {
		/* Cyclic scan started mid-file: wrap and do the head. */
		cycled = 1;
		index = 0;
		wbc->range_start = index << PAGE_CACHE_SHIFT;
		wbc->range_end = mapping->writeback_index - 1;
		goto retry;
	}

	/* Update index */
	wbc->range_cyclic = range_cyclic;
	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		/*
		 * set the writeback_index so that range_cyclic
		 * mode will write it back later
		 */
		mapping->writeback_index = done_index;

out_writepages:
	wbc->nr_to_write -= nr_to_writebump;
	wbc->range_start = range_start;
	trace_ext4_da_writepages_result(inode, wbc, ret, pages_written);
	return ret;
}
#define FALL_BACK_TO_NONDELALLOC 1
/*
 * Decide whether a write should fall back from delayed allocation to
 * immediate (non-delalloc) allocation.  Returns 1 to fall back, 0 to
 * keep using delalloc.
 */
static int ext4_nonda_switch(struct super_block *sb)
{
	s64 free_blocks, dirty_blocks;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	/*
	 * switch to non delalloc mode if we are running low
	 * on free blocks. The free block accounting via percpu
	 * counters can get slightly wrong with percpu_counter_batch getting
	 * accumulated on each CPU without updating global counters.
	 * Delalloc needs an accurate free block accounting. So switch
	 * to non delalloc when we are near the error range.
	 */
	free_blocks  = EXT4_C2B(sbi,
		percpu_counter_read_positive(&sbi->s_freeclusters_counter));
	dirty_blocks = percpu_counter_read_positive(&sbi->s_dirtyclusters_counter);
	if (2 * free_blocks < 3 * dirty_blocks ||
		free_blocks < (dirty_blocks + EXT4_FREECLUSTERS_WATERMARK)) {
		/*
		 * free block count is less than 150% of dirty blocks
		 * or free blocks is less than watermark
		 */
		return 1;
	}
	/*
	 * Even if we don't switch but are nearing capacity,
	 * start pushing delalloc when 1/2 of free blocks are dirty.
	 */
	if (free_blocks < 2 * dirty_blocks)
		writeback_inodes_sb_if_idle(sb, WB_REASON_FS_FREE_SPACE);

	return 0;
}
/*
 * write_begin for delayed allocation: reserve a 1-credit transaction
 * (only the i_disksize update may need journalling here), grab the
 * page, and prepare buffers via ext4_da_get_block_prep.  Falls back to
 * ext4_write_begin() when ext4_nonda_switch() says we are low on
 * space, signalling the fallback to write_end through *fsdata.
 */
static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
			       loff_t pos, unsigned len, unsigned flags,
			       struct page **pagep, void **fsdata)
{
	int ret, retries = 0;
	struct page *page;
	pgoff_t index;
	struct inode *inode = mapping->host;
	handle_t *handle;

	index = pos >> PAGE_CACHE_SHIFT;

	if (ext4_nonda_switch(inode->i_sb)) {
		*fsdata = (void *)FALL_BACK_TO_NONDELALLOC;
		return ext4_write_begin(file, mapping, pos,
					len, flags, pagep, fsdata);
	}
	*fsdata = (void *)0;
	trace_ext4_da_write_begin(inode, pos, len, flags);
retry:
	/*
	 * With delayed allocation, we don't log the i_disksize update
	 * if there is delayed block allocation. But we still need
	 * to journal the i_disksize update if writes to the end
	 * of file hit an already mapped buffer.
	 */
	handle = ext4_journal_start(inode, 1);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto out;
	}
	/* We cannot recurse into the filesystem as the transaction is already
	 * started */
	flags |= AOP_FLAG_NOFS;

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		ext4_journal_stop(handle);
		ret = -ENOMEM;
		goto out;
	}
	*pagep = page;

	ret = __block_write_begin(page, pos, len, ext4_da_get_block_prep);
	if (ret < 0) {
		unlock_page(page);
		ext4_journal_stop(handle);
		page_cache_release(page);
		/*
		 * block_write_begin may have instantiated a few blocks
		 * outside i_size.  Trim these off again. Don't need
		 * i_size_read because we hold i_mutex.
		 */
		if (pos + len > inode->i_size)
			ext4_truncate_failed_write(inode);
	}

	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
		goto retry;
out:
	return ret;
}
632eaeab1 ext4: fix delallo... |
2374 2375 2376 2377 2378 |
/* * Check if we should update i_disksize * when write to the end of file but not require block allocation */ static int ext4_da_should_update_i_disksize(struct page *page, |
de9a55b84 ext4: Fix up whit... |
2379 |
unsigned long offset) |
632eaeab1 ext4: fix delallo... |
2380 2381 2382 2383 2384 2385 2386 2387 |
{ struct buffer_head *bh; struct inode *inode = page->mapping->host; unsigned int idx; int i; bh = page_buffers(page); idx = offset >> inode->i_blkbits; |
af5bc92dd ext4: Fix whitesp... |
2388 |
for (i = 0; i < idx; i++) |
632eaeab1 ext4: fix delallo... |
2389 |
bh = bh->b_this_page; |
29fa89d08 ext4: Mark the un... |
2390 |
if (!buffer_mapped(bh) || (buffer_delay(bh)) || buffer_unwritten(bh)) |
632eaeab1 ext4: fix delallo... |
2391 2392 2393 |
return 0; return 1; } |
/*
 * write_end for delayed allocation: may update i_disksize (under
 * i_data_sem) when the write extends the file through an already
 * mapped buffer, then finishes via generic_write_end() and stops the
 * transaction started in ext4_da_write_begin().
 */
static int ext4_da_write_end(struct file *file,
			     struct address_space *mapping,
			     loff_t pos, unsigned len, unsigned copied,
			     struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;
	int ret = 0, ret2;
	handle_t *handle = ext4_journal_current_handle();
	loff_t new_i_size;
	unsigned long start, end;
	int write_mode = (int)(unsigned long)fsdata;

	/* Honor the fallback chosen by ext4_da_write_begin(). */
	if (write_mode == FALL_BACK_TO_NONDELALLOC) {
		if (ext4_should_order_data(inode)) {
			return ext4_ordered_write_end(file, mapping, pos,
					len, copied, page, fsdata);
		} else if (ext4_should_writeback_data(inode)) {
			return ext4_writeback_write_end(file, mapping, pos,
					len, copied, page, fsdata);
		} else {
			BUG();
		}
	}

	trace_ext4_da_write_end(inode, pos, len, copied);
	start = pos & (PAGE_CACHE_SIZE - 1);
	end = start + copied - 1;

	/*
	 * generic_write_end() will run mark_inode_dirty() if i_size
	 * changes.  So let's piggyback the i_disksize mark_inode_dirty
	 * into that.
	 */
	new_i_size = pos + copied;
	if (copied && new_i_size > EXT4_I(inode)->i_disksize) {
		if (ext4_da_should_update_i_disksize(page, end)) {
			down_write(&EXT4_I(inode)->i_data_sem);
			/* recheck under the lock: it may have grown */
			if (new_i_size > EXT4_I(inode)->i_disksize) {
				/*
				 * Updating i_disksize when extending file
				 * without needing block allocation
				 */
				if (ext4_should_order_data(inode))
					ret = ext4_jbd2_file_inode(handle,
								   inode);

				EXT4_I(inode)->i_disksize = new_i_size;
			}
			up_write(&EXT4_I(inode)->i_data_sem);
			/* We need to mark inode dirty even if
			 * new_i_size is less than inode->i_size
			 * but greater than i_disksize. (hint: delalloc)
			 */
			ext4_mark_inode_dirty(handle, inode);
		}
	}
	ret2 = generic_write_end(file, mapping, pos, len, copied,
							page, fsdata);
	copied = ret2;
	if (ret2 < 0)
		ret = ret2;
	ret2 = ext4_journal_stop(handle);
	if (!ret)
		ret = ret2;

	return ret ? ret : copied;
}

/*
 * invalidatepage for delalloc: release any block reservations held by
 * the page's buffers before doing the normal invalidation.
 */
static void ext4_da_invalidatepage(struct page *page, unsigned long offset)
{
	/*
	 * Drop reserved blocks
	 */
	BUG_ON(!PageLocked(page));
	if (!page_has_buffers(page))
		goto out;

	ext4_da_page_release_reservation(page, offset);

out:
	ext4_invalidatepage(page, offset);

	return;
}
/*
 * Force all delayed allocation blocks to be allocated for a given inode.
 * Returns 0 if there is nothing reserved, otherwise the result of
 * filemap_flush() (which starts, but does not wait for, the I/O).
 */
int ext4_alloc_da_blocks(struct inode *inode)
{
	trace_ext4_alloc_da_blocks(inode);

	/* Nothing reserved -> nothing to allocate. */
	if (!EXT4_I(inode)->i_reserved_data_blocks &&
	    !EXT4_I(inode)->i_reserved_meta_blocks)
		return 0;

	/*
	 * We do something simple for now.  The filemap_flush() will
	 * also start triggering a write of the data blocks, which is
	 * not strictly speaking necessary (and for users of
	 * laptop_mode, not even desirable).  However, to do otherwise
	 * would require replicating code paths in:
	 *
	 * ext4_da_writepages() ->
	 *    write_cache_pages() ---> (via passed in callback function)
	 *       __mpage_da_writepage() -->
	 *          mpage_add_bh_to_extent()
	 *          mpage_da_map_blocks()
	 *
	 * The problem is that write_cache_pages(), located in
	 * mm/page-writeback.c, marks pages clean in preparation for
	 * doing I/O, which is not desirable if we're not planning on
	 * doing I/O at all.
	 *
	 * We could call write_cache_pages(), and then redirty all of
	 * the pages by calling redirty_page_for_writepage() but that
	 * would be ugly in the extreme.  So instead we would need to
	 * replicate parts of the code in the above functions,
	 * simplifying them because we wouldn't actually intend to
	 * write out the pages, but rather only collect contiguous
	 * logical block extents, call the multi-block allocator, and
	 * then update the buffer heads with the block allocations.
	 *
	 * For now, though, we'll cheat by calling filemap_flush(),
	 * which will map the blocks, and start the I/O, but not
	 * actually wait for the I/O to complete.
	 */
	return filemap_flush(inode->i_mapping);
}
/*
 * bmap() is special.  It gets used by applications such as lilo and by
 * the swapper to find the on-disk block of a specific piece of data.
 *
 * Naturally, this is dangerous if the block concerned is still in the
 * journal.  If somebody makes a swapfile on an ext4 data-journaling
 * filesystem and enables swap, then they may get a nasty shock when the
 * data getting swapped to that swapfile suddenly gets overwritten by
 * the original zero's written out previously to the journal and
 * awaiting writeback in the kernel's buffer cache.
 *
 * So, if we see any bmap calls here on a modified, data-journaled file,
 * take extra steps to flush any blocks which might be in the cache.
 */
static sector_t ext4_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;
	journal_t *journal;
	int err;

	if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) &&
			test_opt(inode->i_sb, DELALLOC)) {
		/*
		 * With delalloc we want to sync the file
		 * so that we can make sure we allocate
		 * blocks for the file
		 */
		filemap_write_and_wait(mapping);
	}

	if (EXT4_JOURNAL(inode) &&
	    ext4_test_inode_state(inode, EXT4_STATE_JDATA)) {
		/*
		 * This is a REALLY heavyweight approach, but the use of
		 * bmap on dirty files is expected to be extremely rare:
		 * only if we run lilo or swapon on a freshly made file
		 * do we expect this to happen.
		 *
		 * (bmap requires CAP_SYS_RAWIO so this does not
		 * represent an unprivileged user DOS attack --- we'd be
		 * in trouble if mortal users could trigger this path at
		 * will.)
		 *
		 * NB. EXT4_STATE_JDATA is not set on files other than
		 * regular files.  If somebody wants to bmap a directory
		 * or symlink and gets confused because the buffer
		 * hasn't yet been flushed to disk, they deserve
		 * everything they get.
		 */
		ext4_clear_inode_state(inode, EXT4_STATE_JDATA);
		journal = EXT4_JOURNAL(inode);
		jbd2_journal_lock_updates(journal);
		err = jbd2_journal_flush(journal);
		jbd2_journal_unlock_updates(journal);

		if (err)
			return 0;
	}

	return generic_block_bmap(mapping, block, ext4_get_block);
}
617ba13b3 [PATCH] ext4: ren... |
2580 |
static int ext4_readpage(struct file *file, struct page *page) |
ac27a0ec1 [PATCH] ext4: ini... |
2581 |
{ |
0562e0bad ext4: add more tr... |
2582 |
trace_ext4_readpage(page); |
617ba13b3 [PATCH] ext4: ren... |
2583 |
return mpage_readpage(page, ext4_get_block); |
ac27a0ec1 [PATCH] ext4: ini... |
2584 2585 2586 |
} static int |
617ba13b3 [PATCH] ext4: ren... |
2587 |
ext4_readpages(struct file *file, struct address_space *mapping, |
ac27a0ec1 [PATCH] ext4: ini... |
2588 2589 |
struct list_head *pages, unsigned nr_pages) { |
617ba13b3 [PATCH] ext4: ren... |
2590 |
return mpage_readpages(mapping, pages, nr_pages, ext4_get_block); |
ac27a0ec1 [PATCH] ext4: ini... |
2591 |
} |
744692dc0 ext4: use ext4_ge... |
2592 2593 2594 2595 2596 2597 2598 2599 2600 2601 2602 2603 2604 2605 2606 2607 2608 2609 2610 |
static void ext4_invalidatepage_free_endio(struct page *page, unsigned long offset) { struct buffer_head *head, *bh; unsigned int curr_off = 0; if (!page_has_buffers(page)) return; head = bh = page_buffers(page); do { if (offset <= curr_off && test_clear_buffer_uninit(bh) && bh->b_private) { ext4_free_io_end(bh->b_private); bh->b_private = NULL; bh->b_end_io = NULL; } curr_off = curr_off + bh->b_size; bh = bh->b_this_page; } while (bh != head); } |
/*
 * Invalidate (part of) a page: drop any pending io_end state for
 * dioread_nolock inodes, forget journal dirtying on a full-page
 * invalidate, then hand off to jbd2 or the generic block layer.
 */
static void ext4_invalidatepage(struct page *page, unsigned long offset)
{
	journal_t *journal = EXT4_JOURNAL(page->mapping->host);

	trace_ext4_invalidatepage(page, offset);

	/*
	 * free any io_end structure allocated for buffers to be discarded
	 */
	if (ext4_should_dioread_nolock(page->mapping->host))
		ext4_invalidatepage_free_endio(page, offset);
	/*
	 * If it's a full truncate we just forget about the pending dirtying
	 */
	if (offset == 0)
		ClearPageChecked(page);

	if (journal)
		jbd2_journal_invalidatepage(journal, page, offset);
	else
		block_invalidatepage(page, offset);
}
/*
 * Attempt to free the buffers attached to @page so the page itself can be
 * released.  Returns non-zero on success.  A page marked Checked still has
 * pending journalled dirtying, so it should never reach here (WARN_ON).
 */
static int ext4_releasepage(struct page *page, gfp_t wait)
{
	journal_t *journal = EXT4_JOURNAL(page->mapping->host);

	trace_ext4_releasepage(page);

	WARN_ON(PageChecked(page));
	if (!page_has_buffers(page))
		return 0;
	if (journal)
		return jbd2_journal_try_to_free_buffers(journal, page, wait);
	else
		return try_to_free_buffers(page);
}
/*
 * ext4_get_block used when preparing for a DIO write or buffer write.
 * We allocate an uninitialized extent if blocks haven't been allocated.
 * The extent will be converted to initialized after the IO is complete.
 */
static int ext4_get_block_write(struct inode *inode, sector_t iblock,
		   struct buffer_head *bh_result, int create)
{
	ext4_debug("ext4_get_block_write: inode %lu, create flag %d\n",
		   inode->i_ino, create);
	/* IO_CREATE_EXT makes freshly allocated blocks uninitialized extents */
	return _ext4_get_block(inode, iblock, bh_result,
			       EXT4_GET_BLOCKS_IO_CREATE_EXT);
}
/*
 * Completion callback for direct I/O.  For writes into uninitialized
 * extents (EXT4_IO_END_UNWRITTEN) the extent conversion is deferred to
 * the per-sb dio_unwritten workqueue; for everything else the io_end is
 * freed immediately.  @is_async distinguishes AIO completion (must call
 * aio_complete) from synchronous DIO.
 *
 * NOTE the "goto out" jumps *into* the non-unwritten branch below so
 * that aio_complete()/inode_dio_done() run exactly once on every early
 * return path — do not restructure casually.
 */
static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
			    ssize_t size, void *private, int ret,
			    bool is_async)
{
	struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;
	ext4_io_end_t *io_end = iocb->private;
	struct workqueue_struct *wq;
	unsigned long flags;
	struct ext4_inode_info *ei;

	/* if not async direct IO or dio with 0 bytes write, just return */
	if (!io_end || !size)
		goto out;

	ext_debug("ext4_end_io_dio(): io_end 0x%p "
		  "for inode %lu, iocb 0x%p, offset %llu, size %llu\n",
		  iocb->private, io_end->inode->i_ino, iocb, offset,
		  size);

	iocb->private = NULL;

	/* if not aio dio with unwritten extents, just free io and return */
	if (!(io_end->flag & EXT4_IO_END_UNWRITTEN)) {
		ext4_free_io_end(io_end);
out:
		if (is_async)
			aio_complete(iocb, ret, 0);
		inode_dio_done(inode);
		return;
	}

	/* Record what the deferred conversion work needs to know. */
	io_end->offset = offset;
	io_end->size = size;
	if (is_async) {
		io_end->iocb = iocb;
		io_end->result = ret;
	}
	wq = EXT4_SB(io_end->inode->i_sb)->dio_unwritten_wq;

	/* Add the io_end to per-inode completed aio dio list*/
	ei = EXT4_I(io_end->inode);
	spin_lock_irqsave(&ei->i_completed_io_lock, flags);
	list_add_tail(&io_end->list, &ei->i_completed_io_list);
	spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);

	/* queue the work to convert unwritten extents to written */
	queue_work(wq, &io_end->work);

	/* XXX: probably should move into the real I/O completion handler */
	inode_dio_done(inode);
}
c7064ef13 ext4: mechanical ... |
2708 |
|
744692dc0 ext4: use ext4_ge... |
2709 2710 2711 2712 2713 2714 2715 2716 2717 2718 2719 2720 2721 2722 2723 2724 2725 |
static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate) { ext4_io_end_t *io_end = bh->b_private; struct workqueue_struct *wq; struct inode *inode; unsigned long flags; if (!test_clear_buffer_uninit(bh) || !io_end) goto out; if (!(io_end->inode->i_sb->s_flags & MS_ACTIVE)) { printk("sb umounted, discard end_io request for inode %lu ", io_end->inode->i_ino); ext4_free_io_end(io_end); goto out; } |
32c80b32c ext4: Resolve the... |
2726 2727 2728 2729 |
/* * It may be over-defensive here to check EXT4_IO_END_UNWRITTEN now, * but being more careful is always safe for the future change. */ |
744692dc0 ext4: use ext4_ge... |
2730 |
inode = io_end->inode; |
0edeb71dc ext4: Create help... |
2731 |
ext4_set_io_unwritten_flag(inode, io_end); |
744692dc0 ext4: use ext4_ge... |
2732 2733 2734 2735 2736 2737 2738 2739 2740 2741 2742 2743 2744 2745 2746 2747 2748 2749 2750 2751 2752 2753 2754 2755 2756 2757 |
/* Add the io_end to per-inode completed io list*/ spin_lock_irqsave(&EXT4_I(inode)->i_completed_io_lock, flags); list_add_tail(&io_end->list, &EXT4_I(inode)->i_completed_io_list); spin_unlock_irqrestore(&EXT4_I(inode)->i_completed_io_lock, flags); wq = EXT4_SB(inode->i_sb)->dio_unwritten_wq; /* queue the work to convert unwritten extents to written */ queue_work(wq, &io_end->work); out: bh->b_private = NULL; bh->b_end_io = NULL; clear_buffer_uninit(bh); end_buffer_async_write(bh, uptodate); } static int ext4_set_bh_endio(struct buffer_head *bh, struct inode *inode) { ext4_io_end_t *io_end; struct page *page = bh->b_page; loff_t offset = (sector_t)page->index << PAGE_CACHE_SHIFT; size_t size = bh->b_size; retry: io_end = ext4_init_io_end(inode, GFP_ATOMIC); if (!io_end) { |
6db26ffc9 fs/ext4/inode.c: ... |
2758 2759 |
pr_warn_ratelimited("%s: allocation fail ", __func__); |
744692dc0 ext4: use ext4_ge... |
2760 2761 2762 2763 2764 2765 2766 2767 2768 2769 2770 2771 2772 2773 2774 2775 2776 |
schedule(); goto retry; } io_end->offset = offset; io_end->size = size; /* * We need to hold a reference to the page to make sure it * doesn't get evicted before ext4_end_io_work() has a chance * to convert the extent from written to unwritten. */ io_end->page = page; get_page(io_end->page); bh->b_private = io_end; bh->b_end_io = ext4_end_io_buffer_write; return 0; } |
/*
 * For ext4 extent files, ext4 will do direct-io write to holes,
 * preallocated extents, and those write extend the file, no need to
 * fall back to buffered IO.
 *
 * For holes, we fallocate those blocks, mark them as uninitialized
 * If those blocks were preallocated, we mark sure they are split, but
 * still keep the range to write as uninitialized.
 *
 * The unwritten extents will be converted to written when DIO is completed.
 * For async direct IO, since the IO may still pending when return, we
 * set up an end_io call back function, which will do the conversion
 * when async direct IO completed.
 *
 * If the O_DIRECT write will extend the file then add this inode to the
 * orphan list. So recovery will truncate it back to the original size
 * if the machine crashes during the write.
 *
 */
static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
			      const struct iovec *iov, loff_t offset,
			      unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	ssize_t ret;
	size_t count = iov_length(iov, nr_segs);

	loff_t final_size = offset + count;
	/* Writes that extend i_size fall through to the indirect path below. */
	if (rw == WRITE && final_size <= inode->i_size) {
		/*
		 * We could direct write to holes and fallocate.
		 *
		 * Allocated blocks to fill the hole are marked as uninitialized
		 * to prevent parallel buffered read to expose the stale data
		 * before DIO complete the data IO.
		 *
		 * As to previously fallocated extents, ext4 get_block
		 * will just simply mark the buffer mapped but still
		 * keep the extents uninitialized.
		 *
		 * for non AIO case, we will convert those unwritten extents
		 * to written after return back from blockdev_direct_IO.
		 *
		 * for async DIO, the conversion needs to be deferred when
		 * the IO is completed. The ext4 end_io callback function
		 * will be called to take care of the conversion work.
		 * Here for async case, we allocate an io_end structure to
		 * hook to the iocb.
		 */
		iocb->private = NULL;
		EXT4_I(inode)->cur_aio_dio = NULL;
		if (!is_sync_kiocb(iocb)) {
			iocb->private = ext4_init_io_end(inode, GFP_NOFS);
			if (!iocb->private)
				return -ENOMEM;
			/*
			 * we save the io structure for current async
			 * direct IO, so that later ext4_map_blocks()
			 * could flag the io structure whether there
			 * is a unwritten extents needs to be converted
			 * when IO is completed.
			 */
			EXT4_I(inode)->cur_aio_dio = iocb->private;
		}

		ret = __blockdev_direct_IO(rw, iocb, inode,
					 inode->i_sb->s_bdev, iov,
					 offset, nr_segs,
					 ext4_get_block_write,
					 ext4_end_io_dio,
					 NULL,
					 DIO_LOCKING | DIO_SKIP_HOLES);
		if (iocb->private)
			EXT4_I(inode)->cur_aio_dio = NULL;
		/*
		 * The io_end structure takes a reference to the inode,
		 * that structure needs to be destroyed and the
		 * reference to the inode need to be dropped, when IO is
		 * complete, even with 0 byte write, or failed.
		 *
		 * In the successful AIO DIO case, the io_end structure will be
		 * destroyed and the reference to the inode will be dropped
		 * after the end_io call back function is called.
		 *
		 * In the case there is 0 byte write, or error case, since
		 * VFS direct IO won't invoke the end_io call back function,
		 * we need to free the end_io structure here.
		 */
		if (ret != -EIOCBQUEUED && ret <= 0 && iocb->private) {
			ext4_free_io_end(iocb->private);
			iocb->private = NULL;
		} else if (ret > 0 && ext4_test_inode_state(inode,
						EXT4_STATE_DIO_UNWRITTEN)) {
			int err;
			/*
			 * for non AIO case, since the IO is already
			 * completed, we could do the conversion right here
			 */
			err = ext4_convert_unwritten_extents(inode,
							     offset, ret);
			if (err < 0)
				ret = err;
			ext4_clear_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);
		}
		return ret;
	}

	/* for write to the end of file case, we fall back to old way */
	return ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs);
}

/*
 * Top-level direct-IO dispatch: extent-mapped inodes use the
 * unwritten-extent aware path above, indirect-mapped inodes use the
 * legacy path.  Data-journalled inodes get 0 back, which makes the VFS
 * fall back to buffered IO.
 */
static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb,
			      const struct iovec *iov, loff_t offset,
			      unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	ssize_t ret;

	/*
	 * If we are doing data journalling we don't support O_DIRECT
	 */
	if (ext4_should_journal_data(inode))
		return 0;

	trace_ext4_direct_IO_enter(inode, offset, iov_length(iov, nr_segs), rw);
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
		ret = ext4_ext_direct_IO(rw, iocb, iov, offset, nr_segs);
	else
		ret = ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs);
	trace_ext4_direct_IO_exit(inode, offset,
				iov_length(iov, nr_segs), rw, ret);
	return ret;
}
/*
 * Pages can be marked dirty completely asynchronously from ext4's journalling
 * activity. By filemap_sync_pte(), try_to_unmap_one(), etc. We cannot do
 * much here because ->set_page_dirty is called under VFS locks. The page is
 * not necessarily locked.
 *
 * We cannot just dirty the page and leave attached buffers clean, because the
 * buffers' dirty state is "definitive". We cannot just set the buffers dirty
 * or jbddirty because all the journalling code will explode.
 *
 * So what we do is to mark the page "pending dirty" and next time writepage
 * is called, propagate that into the buffers appropriately.
 */
static int ext4_journalled_set_page_dirty(struct page *page)
{
	/* "pending dirty": writepage will propagate this to the buffers */
	SetPageChecked(page);
	return __set_page_dirty_nobuffers(page);
}
/* Address-space ops for data=ordered mode without delayed allocation. */
static const struct address_space_operations ext4_ordered_aops = {
	.readpage		= ext4_readpage,
	.readpages		= ext4_readpages,
	.writepage		= ext4_writepage,
	.write_begin		= ext4_write_begin,
	.write_end		= ext4_ordered_write_end,
	.bmap			= ext4_bmap,
	.invalidatepage		= ext4_invalidatepage,
	.releasepage		= ext4_releasepage,
	.direct_IO		= ext4_direct_IO,
	.migratepage		= buffer_migrate_page,
	.is_partially_uptodate  = block_is_partially_uptodate,
	.error_remove_page	= generic_error_remove_page,
};
/* Address-space ops for data=writeback mode without delayed allocation. */
static const struct address_space_operations ext4_writeback_aops = {
	.readpage		= ext4_readpage,
	.readpages		= ext4_readpages,
	.writepage		= ext4_writepage,
	.write_begin		= ext4_write_begin,
	.write_end		= ext4_writeback_write_end,
	.bmap			= ext4_bmap,
	.invalidatepage		= ext4_invalidatepage,
	.releasepage		= ext4_releasepage,
	.direct_IO		= ext4_direct_IO,
	.migratepage		= buffer_migrate_page,
	.is_partially_uptodate  = block_is_partially_uptodate,
	.error_remove_page	= generic_error_remove_page,
};
/*
 * Address-space ops for data=journal mode.  Note: no .migratepage (pages
 * may carry journalled buffers) and a custom .set_page_dirty that defers
 * buffer dirtying to writepage time.
 */
static const struct address_space_operations ext4_journalled_aops = {
	.readpage		= ext4_readpage,
	.readpages		= ext4_readpages,
	.writepage		= ext4_writepage,
	.write_begin		= ext4_write_begin,
	.write_end		= ext4_journalled_write_end,
	.set_page_dirty		= ext4_journalled_set_page_dirty,
	.bmap			= ext4_bmap,
	.invalidatepage		= ext4_invalidatepage,
	.releasepage		= ext4_releasepage,
	.direct_IO		= ext4_direct_IO,
	.is_partially_uptodate  = block_is_partially_uptodate,
	.error_remove_page	= generic_error_remove_page,
};
/* Address-space ops for delayed-allocation (delalloc) mode. */
static const struct address_space_operations ext4_da_aops = {
	.readpage		= ext4_readpage,
	.readpages		= ext4_readpages,
	.writepage		= ext4_writepage,
	.writepages		= ext4_da_writepages,
	.write_begin		= ext4_da_write_begin,
	.write_end		= ext4_da_write_end,
	.bmap			= ext4_bmap,
	.invalidatepage		= ext4_da_invalidatepage,
	.releasepage		= ext4_releasepage,
	.direct_IO		= ext4_direct_IO,
	.migratepage		= buffer_migrate_page,
	.is_partially_uptodate  = block_is_partially_uptodate,
	.error_remove_page	= generic_error_remove_page,
};
617ba13b3 [PATCH] ext4: ren... |
2985 |
void ext4_set_aops(struct inode *inode) |
ac27a0ec1 [PATCH] ext4: ini... |
2986 |
{ |
cd1aac329 ext4: Add ordered... |
2987 2988 2989 2990 |
if (ext4_should_order_data(inode) && test_opt(inode->i_sb, DELALLOC)) inode->i_mapping->a_ops = &ext4_da_aops; else if (ext4_should_order_data(inode)) |
617ba13b3 [PATCH] ext4: ren... |
2991 |
inode->i_mapping->a_ops = &ext4_ordered_aops; |
64769240b ext4: Add delayed... |
2992 2993 2994 |
else if (ext4_should_writeback_data(inode) && test_opt(inode->i_sb, DELALLOC)) inode->i_mapping->a_ops = &ext4_da_aops; |
617ba13b3 [PATCH] ext4: ren... |
2995 2996 |
else if (ext4_should_writeback_data(inode)) inode->i_mapping->a_ops = &ext4_writeback_aops; |
ac27a0ec1 [PATCH] ext4: ini... |
2997 |
else |
617ba13b3 [PATCH] ext4: ren... |
2998 |
inode->i_mapping->a_ops = &ext4_journalled_aops; |
ac27a0ec1 [PATCH] ext4: ini... |
2999 |
} |
/*
 * ext4_discard_partial_page_buffers()
 * Wrapper function for ext4_discard_partial_page_buffers_no_lock.
 * This function finds and locks the page containing the offset
 * "from" and passes it to ext4_discard_partial_page_buffers_no_lock.
 * Calling functions that already have the page locked should call
 * ext4_discard_partial_page_buffers_no_lock directly.
 */
int ext4_discard_partial_page_buffers(handle_t *handle,
		struct address_space *mapping, loff_t from,
		loff_t length, int flags)
{
	struct inode *inode = mapping->host;
	struct page *page;
	int err = 0;

	page = find_or_create_page(mapping, from >> PAGE_CACHE_SHIFT,
				   mapping_gfp_mask(mapping) & ~__GFP_FS);
	if (!page)
		return -ENOMEM;

	err = ext4_discard_partial_page_buffers_no_lock(handle, inode, page,
		from, length, flags);

	unlock_page(page);
	page_cache_release(page);
	return err;
}

/*
 * ext4_discard_partial_page_buffers_no_lock()
 * Zeros a page range of length 'length' starting from offset 'from'.
 * Buffer heads that correspond to the block aligned regions of the
 * zeroed range will be unmapped.  Non-block-aligned regions
 * will have the corresponding buffer head mapped if needed so that
 * that region of the page can be updated with the partial zero out.
 *
 * This function assumes that the page has already been locked.  The
 * range to be discarded must be contained with in the given page.
 * If the specified range exceeds the end of the page it will be shortened
 * to the end of the page that corresponds to 'from'.  This function is
 * appropriate for updating a page and its buffer heads to be unmapped and
 * zeroed for blocks that have been either released, or are going to be
 * released.
 *
 * handle: The journal handle
 * inode:  The files inode
 * page:   A locked page that contains the offset "from"
 * from:   The starting byte offset (from the beginning of the file)
 *         to begin discarding
 * len:    The length of bytes to discard
 * flags:  Optional flags that may be used:
 *
 *         EXT4_DISCARD_PARTIAL_PG_ZERO_UNMAPPED
 *         Only zero the regions of the page whose buffer heads
 *         have already been unmapped.  This flag is appropriate
 *         for updating the contents of a page whose blocks may
 *         have already been released, and we only want to zero
 *         out the regions that correspond to those released blocks.
 *
 * Returns zero on success or negative on failure.
 */
static int ext4_discard_partial_page_buffers_no_lock(handle_t *handle,
		struct inode *inode, struct page *page, loff_t from,
		loff_t length, int flags)
{
	ext4_fsblk_t index = from >> PAGE_CACHE_SHIFT;
	unsigned int offset = from & (PAGE_CACHE_SIZE-1);
	unsigned int blocksize, max, pos;
	ext4_lblk_t iblock;
	struct buffer_head *bh;
	int err = 0;

	blocksize = inode->i_sb->s_blocksize;
	max = PAGE_CACHE_SIZE - offset;

	/* The page must really contain "from". */
	if (index != page->index)
		return -EINVAL;

	/*
	 * correct length if it does not fall between
	 * 'from' and the end of the page
	 */
	if (length > max || length < 0)
		length = max;

	iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);

	if (!page_has_buffers(page))
		create_empty_buffers(page, blocksize, 0);

	/* Find the buffer that contains "offset" */
	bh = page_buffers(page);
	pos = blocksize;
	while (offset >= pos) {
		bh = bh->b_this_page;
		iblock++;
		pos += blocksize;
	}

	/* Walk buffer by buffer across [offset, offset+length). */
	pos = offset;
	while (pos < offset + length) {
		unsigned int end_of_block, range_to_discard;

		err = 0;

		/* The length of space left to zero and unmap */
		range_to_discard = offset + length - pos;

		/* The length of space until the end of the block */
		end_of_block = blocksize - (pos & (blocksize-1));

		/*
		 * Do not unmap or zero past end of block
		 * for this buffer head
		 */
		if (range_to_discard > end_of_block)
			range_to_discard = end_of_block;

		/*
		 * Skip this buffer head if we are only zeroing unmapped
		 * regions of the page
		 */
		if (flags & EXT4_DISCARD_PARTIAL_PG_ZERO_UNMAPPED &&
			buffer_mapped(bh))
				goto next;

		/* If the range is block aligned, unmap */
		if (range_to_discard == blocksize) {
			clear_buffer_dirty(bh);
			bh->b_bdev = NULL;
			clear_buffer_mapped(bh);
			clear_buffer_req(bh);
			clear_buffer_new(bh);
			clear_buffer_delay(bh);
			clear_buffer_unwritten(bh);
			clear_buffer_uptodate(bh);
			zero_user(page, pos, range_to_discard);
			BUFFER_TRACE(bh, "Buffer discarded");
			goto next;
		}

		/*
		 * If this block is not completely contained in the range
		 * to be discarded, then it is not going to be released. Because
		 * we need to keep this block, we need to make sure this part
		 * of the page is uptodate before we modify it by writing
		 * partial zeros on it.
		 */
		if (!buffer_mapped(bh)) {
			/*
			 * Buffer head must be mapped before we can read
			 * from the block
			 */
			BUFFER_TRACE(bh, "unmapped");
			ext4_get_block(inode, iblock, bh, 0);
			/* unmapped? It's a hole - nothing to do */
			if (!buffer_mapped(bh)) {
				BUFFER_TRACE(bh, "still unmapped");
				goto next;
			}
		}

		/* Ok, it's mapped. Make sure it's up-to-date */
		if (PageUptodate(page))
			set_buffer_uptodate(bh);

		if (!buffer_uptodate(bh)) {
			err = -EIO;
			ll_rw_block(READ, 1, &bh);
			wait_on_buffer(bh);
			/* Uhhuh. Read error. Complain and punt.*/
			if (!buffer_uptodate(bh))
				goto next;
		}

		/* data=journal: the buffer must join the transaction first */
		if (ext4_should_journal_data(inode)) {
			BUFFER_TRACE(bh, "get write access");
			err = ext4_journal_get_write_access(handle, bh);
			if (err)
				goto next;
		}

		zero_user(page, pos, range_to_discard);

		err = 0;
		if (ext4_should_journal_data(inode)) {
			err = ext4_handle_dirty_metadata(handle, inode, bh);
		} else
			mark_buffer_dirty(bh);

		BUFFER_TRACE(bh, "Partial buffer zeroed");
next:
		bh = bh->b_this_page;
		iblock++;
		pos += range_to_discard;
	}

	return err;
}
91ef4caf8 ext4: handle corr... |
3200 3201 |
int ext4_can_truncate(struct inode *inode) { |
91ef4caf8 ext4: handle corr... |
3202 3203 3204 3205 3206 3207 3208 3209 |
if (S_ISREG(inode->i_mode)) return 1; if (S_ISDIR(inode->i_mode)) return 1; if (S_ISLNK(inode->i_mode)) return !ext4_inode_is_fast_symlink(inode); return 0; } |
/*
 * ext4_punch_hole: punches a hole in a file by releasing the blocks
 * associated with the given offset and length
 *
 * @inode:  File inode
 * @offset: The offset where the hole will begin
 * @len:    The length of the hole
 *
 * Returns: 0 on success or negative on failure
 */
int ext4_punch_hole(struct file *file, loff_t offset, loff_t length)
{
	struct inode *inode = file->f_path.dentry->d_inode;

	/*
	 * NOTE(review): -ENOTSUPP is an NFS-internal errno; -EOPNOTSUPP is
	 * the conventional value for userspace-visible paths.  Preserved
	 * here to avoid changing the observed interface.
	 */
	if (!S_ISREG(inode->i_mode))
		return -ENOTSUPP;

	if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		/* TODO: Add support for non extent hole punching */
		return -ENOTSUPP;
	}

	if (EXT4_SB(inode->i_sb)->s_cluster_ratio > 1) {
		/* TODO: Add support for bigalloc file systems */
		return -ENOTSUPP;
	}

	return ext4_ext_punch_hole(file, offset, length);
}
/*
 * ext4_truncate()
 *
 * We block out ext4_get_block() block instantiations across the entire
 * transaction, and VFS/VM ensures that ext4_truncate() cannot run
 * simultaneously on behalf of the same inode.
 *
 * As we work through the truncate and commit bits of it to the journal there
 * is one core, guiding principle: the file's tree must always be consistent on
 * disk.  We must be able to restart the truncate after a crash.
 *
 * The file's tree may be transiently inconsistent in memory (although it
 * probably isn't), but whenever we close off and commit a journal transaction,
 * the contents of (the filesystem + the journal) must be consistent and
 * restartable.  It's pretty simple, really: bottom up, right to left (although
 * left-to-right works OK too).
 *
 * Note that at recovery time, journal replay occurs *before* the restart of
 * truncate against the orphan inode list.
 *
 * The committed inode has the new, desired i_size (which is the same as
 * i_disksize in this case).  After a crash, ext4_orphan_cleanup() will see
 * that this inode's truncate did not complete and it will again call
 * ext4_truncate() to have another go.  So there will be instantiated blocks
 * to the right of the truncation point in a crashed ext4 filesystem.  But
 * that's fine - as long as they are linked from the inode, the post-crash
 * ext4_truncate() run will find them and release them.
 */
void ext4_truncate(struct inode *inode)
{
	trace_ext4_truncate_enter(inode);

	if (!ext4_can_truncate(inode))
		return;

	ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS);

	/* Truncate-to-zero may let us release preallocated-on-close blocks */
	if (inode->i_size == 0 && !test_opt(inode->i_sb, NO_AUTO_DA_ALLOC))
		ext4_set_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);

	/* Dispatch on the inode's block-mapping scheme. */
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
		ext4_ext_truncate(inode);
	else
		ext4_ind_truncate(inode);

	trace_ext4_truncate_exit(inode);
}
ac27a0ec1 [PATCH] ext4: ini... |
3283 |
/* |
617ba13b3 [PATCH] ext4: ren... |
3284 |
* ext4_get_inode_loc returns with an extra refcount against the inode's |
ac27a0ec1 [PATCH] ext4: ini... |
3285 3286 3287 3288 |
* underlying buffer_head on success. If 'in_mem' is true, we have all * data in memory that is needed to recreate the on-disk version of this * inode. */ |
617ba13b3 [PATCH] ext4: ren... |
3289 3290 |
static int __ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc, int in_mem) |
ac27a0ec1 [PATCH] ext4: ini... |
3291 |
{ |
240799cdf ext4: Use readahe... |
3292 3293 3294 3295 3296 |
struct ext4_group_desc *gdp; struct buffer_head *bh; struct super_block *sb = inode->i_sb; ext4_fsblk_t block; int inodes_per_block, inode_offset; |
3a06d778d ext4: sparse fixes |
3297 |
iloc->bh = NULL; |
240799cdf ext4: Use readahe... |
3298 3299 |
if (!ext4_valid_inum(sb, inode->i_ino)) return -EIO; |
ac27a0ec1 [PATCH] ext4: ini... |
3300 |
|
240799cdf ext4: Use readahe... |
3301 3302 3303 |
iloc->block_group = (inode->i_ino - 1) / EXT4_INODES_PER_GROUP(sb); gdp = ext4_get_group_desc(sb, iloc->block_group, NULL); if (!gdp) |
ac27a0ec1 [PATCH] ext4: ini... |
3304 |
return -EIO; |
240799cdf ext4: Use readahe... |
3305 3306 3307 |
/* * Figure out the offset within the block group inode table */ |
00d098822 ext4: use s_inode... |
3308 |
inodes_per_block = EXT4_SB(sb)->s_inodes_per_block; |
240799cdf ext4: Use readahe... |
3309 3310 3311 3312 3313 3314 |
inode_offset = ((inode->i_ino - 1) % EXT4_INODES_PER_GROUP(sb)); block = ext4_inode_table(sb, gdp) + (inode_offset / inodes_per_block); iloc->offset = (inode_offset % inodes_per_block) * EXT4_INODE_SIZE(sb); bh = sb_getblk(sb, block); |
ac27a0ec1 [PATCH] ext4: ini... |
3315 |
if (!bh) { |
c398eda0e ext4: Pass line n... |
3316 3317 |
EXT4_ERROR_INODE_BLOCK(inode, block, "unable to read itable block"); |
ac27a0ec1 [PATCH] ext4: ini... |
3318 3319 3320 3321 |
return -EIO; } if (!buffer_uptodate(bh)) { lock_buffer(bh); |
9c83a923c ext4: don't read ... |
3322 3323 3324 3325 3326 3327 3328 3329 3330 |
/* * If the buffer has the write error flag, we have failed * to write out another inode in the same block. In this * case, we don't have to read the block because we may * read the old inode data successfully. */ if (buffer_write_io_error(bh) && !buffer_uptodate(bh)) set_buffer_uptodate(bh); |
ac27a0ec1 [PATCH] ext4: ini... |
3331 3332 3333 3334 3335 3336 3337 3338 3339 3340 3341 3342 3343 |
if (buffer_uptodate(bh)) { /* someone brought it uptodate while we waited */ unlock_buffer(bh); goto has_buffer; } /* * If we have all information of the inode in memory and this * is the only valid inode in the block, we need not read the * block. */ if (in_mem) { struct buffer_head *bitmap_bh; |
240799cdf ext4: Use readahe... |
3344 |
int i, start; |
ac27a0ec1 [PATCH] ext4: ini... |
3345 |
|
240799cdf ext4: Use readahe... |
3346 |
start = inode_offset & ~(inodes_per_block - 1); |
ac27a0ec1 [PATCH] ext4: ini... |
3347 |
|
240799cdf ext4: Use readahe... |
3348 3349 |
/* Is the inode bitmap in cache? */ bitmap_bh = sb_getblk(sb, ext4_inode_bitmap(sb, gdp)); |
ac27a0ec1 [PATCH] ext4: ini... |
3350 3351 3352 3353 3354 3355 3356 3357 3358 3359 3360 3361 |
if (!bitmap_bh) goto make_io; /* * If the inode bitmap isn't in cache then the * optimisation may end up performing two reads instead * of one, so skip it. */ if (!buffer_uptodate(bitmap_bh)) { brelse(bitmap_bh); goto make_io; } |
240799cdf ext4: Use readahe... |
3362 |
for (i = start; i < start + inodes_per_block; i++) { |
ac27a0ec1 [PATCH] ext4: ini... |
3363 3364 |
if (i == inode_offset) continue; |
617ba13b3 [PATCH] ext4: ren... |
3365 |
if (ext4_test_bit(i, bitmap_bh->b_data)) |
ac27a0ec1 [PATCH] ext4: ini... |
3366 3367 3368 |
break; } brelse(bitmap_bh); |
240799cdf ext4: Use readahe... |
3369 |
if (i == start + inodes_per_block) { |
ac27a0ec1 [PATCH] ext4: ini... |
3370 3371 3372 3373 3374 3375 3376 3377 3378 3379 |
/* all other inodes are free, so skip I/O */ memset(bh->b_data, 0, bh->b_size); set_buffer_uptodate(bh); unlock_buffer(bh); goto has_buffer; } } make_io: /* |
240799cdf ext4: Use readahe... |
3380 3381 3382 3383 3384 3385 3386 3387 |
* If we need to do any I/O, try to pre-readahead extra * blocks from the inode table. */ if (EXT4_SB(sb)->s_inode_readahead_blks) { ext4_fsblk_t b, end, table; unsigned num; table = ext4_inode_table(sb, gdp); |
b713a5ec5 ext4: remove /pro... |
3388 |
/* s_inode_readahead_blks is always a power of 2 */ |
240799cdf ext4: Use readahe... |
3389 3390 3391 3392 3393 3394 3395 |
b = block & ~(EXT4_SB(sb)->s_inode_readahead_blks-1); if (table > b) b = table; end = b + EXT4_SB(sb)->s_inode_readahead_blks; num = EXT4_INODES_PER_GROUP(sb); if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM)) |
560671a0d ext4: Use high 16... |
3396 |
num -= ext4_itable_unused_count(sb, gdp); |
240799cdf ext4: Use readahe... |
3397 3398 3399 3400 3401 3402 3403 3404 |
table += num / inodes_per_block; if (end > table) end = table; while (b <= end) sb_breadahead(sb, b++); } /* |
ac27a0ec1 [PATCH] ext4: ini... |
3405 3406 3407 3408 |
* There are other valid inodes in the buffer, this inode * has in-inode xattrs, or we don't have this inode in memory. * Read the block from disk. */ |
0562e0bad ext4: add more tr... |
3409 |
trace_ext4_load_inode(inode); |
ac27a0ec1 [PATCH] ext4: ini... |
3410 3411 |
get_bh(bh); bh->b_end_io = end_buffer_read_sync; |
65299a3b7 block: separate p... |
3412 |
submit_bh(READ | REQ_META | REQ_PRIO, bh); |
ac27a0ec1 [PATCH] ext4: ini... |
3413 3414 |
wait_on_buffer(bh); if (!buffer_uptodate(bh)) { |
c398eda0e ext4: Pass line n... |
3415 3416 |
EXT4_ERROR_INODE_BLOCK(inode, block, "unable to read itable block"); |
ac27a0ec1 [PATCH] ext4: ini... |
3417 3418 3419 3420 3421 3422 3423 3424 |
brelse(bh); return -EIO; } } has_buffer: iloc->bh = bh; return 0; } |
617ba13b3 [PATCH] ext4: ren... |
3425 |
int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc) |
ac27a0ec1 [PATCH] ext4: ini... |
3426 3427 |
{ /* We have all inode data except xattrs in memory here. */ |
617ba13b3 [PATCH] ext4: ren... |
3428 |
return __ext4_get_inode_loc(inode, iloc, |
19f5fb7ad ext4: Use bitops ... |
3429 |
!ext4_test_inode_state(inode, EXT4_STATE_XATTR)); |
ac27a0ec1 [PATCH] ext4: ini... |
3430 |
} |
617ba13b3 [PATCH] ext4: ren... |
3431 |
void ext4_set_inode_flags(struct inode *inode) |
ac27a0ec1 [PATCH] ext4: ini... |
3432 |
{ |
617ba13b3 [PATCH] ext4: ren... |
3433 |
unsigned int flags = EXT4_I(inode)->i_flags; |
ac27a0ec1 [PATCH] ext4: ini... |
3434 3435 |
inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC); |
617ba13b3 [PATCH] ext4: ren... |
3436 |
if (flags & EXT4_SYNC_FL) |
ac27a0ec1 [PATCH] ext4: ini... |
3437 |
inode->i_flags |= S_SYNC; |
617ba13b3 [PATCH] ext4: ren... |
3438 |
if (flags & EXT4_APPEND_FL) |
ac27a0ec1 [PATCH] ext4: ini... |
3439 |
inode->i_flags |= S_APPEND; |
617ba13b3 [PATCH] ext4: ren... |
3440 |
if (flags & EXT4_IMMUTABLE_FL) |
ac27a0ec1 [PATCH] ext4: ini... |
3441 |
inode->i_flags |= S_IMMUTABLE; |
617ba13b3 [PATCH] ext4: ren... |
3442 |
if (flags & EXT4_NOATIME_FL) |
ac27a0ec1 [PATCH] ext4: ini... |
3443 |
inode->i_flags |= S_NOATIME; |
617ba13b3 [PATCH] ext4: ren... |
3444 |
if (flags & EXT4_DIRSYNC_FL) |
ac27a0ec1 [PATCH] ext4: ini... |
3445 3446 |
inode->i_flags |= S_DIRSYNC; } |
/* Propagate flags from i_flags to EXT4_I(inode)->i_flags */
void ext4_get_inode_flags(struct ext4_inode_info *ei)
{
	unsigned int vfs_fl;
	unsigned long old_fl, new_fl;

	/*
	 * Lock-free update: snapshot both flag words, rebuild the five
	 * VFS-mirrored bits in new_fl from the S_* bits, and retry the
	 * whole computation if cmpxchg() shows another CPU changed
	 * ei->i_flags since old_fl was sampled.  Bits outside the five
	 * mirrored ones are carried over untouched.
	 */
	do {
		vfs_fl = ei->vfs_inode.i_flags;
		old_fl = ei->i_flags;
		new_fl = old_fl & ~(EXT4_SYNC_FL|EXT4_APPEND_FL|
				EXT4_IMMUTABLE_FL|EXT4_NOATIME_FL|
				EXT4_DIRSYNC_FL);
		if (vfs_fl & S_SYNC)
			new_fl |= EXT4_SYNC_FL;
		if (vfs_fl & S_APPEND)
			new_fl |= EXT4_APPEND_FL;
		if (vfs_fl & S_IMMUTABLE)
			new_fl |= EXT4_IMMUTABLE_FL;
		if (vfs_fl & S_NOATIME)
			new_fl |= EXT4_NOATIME_FL;
		if (vfs_fl & S_DIRSYNC)
			new_fl |= EXT4_DIRSYNC_FL;
	} while (cmpxchg(&ei->i_flags, old_fl, new_fl) != old_fl);
}
de9a55b84 ext4: Fix up whit... |
3471 |
|
0fc1b4514 ext4: Add support... |
3472 |
static blkcnt_t ext4_inode_blocks(struct ext4_inode *raw_inode, |
de9a55b84 ext4: Fix up whit... |
3473 |
struct ext4_inode_info *ei) |
0fc1b4514 ext4: Add support... |
3474 3475 |
{ blkcnt_t i_blocks ; |
8180a5627 ext4: Support lar... |
3476 3477 |
struct inode *inode = &(ei->vfs_inode); struct super_block *sb = inode->i_sb; |
0fc1b4514 ext4: Add support... |
3478 3479 3480 3481 3482 3483 |
if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_HUGE_FILE)) { /* we are using combined 48 bit field */ i_blocks = ((u64)le16_to_cpu(raw_inode->i_blocks_high)) << 32 | le32_to_cpu(raw_inode->i_blocks_lo); |
07a038245 ext4: Convert mor... |
3484 |
if (ext4_test_inode_flag(inode, EXT4_INODE_HUGE_FILE)) { |
8180a5627 ext4: Support lar... |
3485 3486 3487 3488 3489 |
/* i_blocks represent file system block size */ return i_blocks << (inode->i_blkbits - 9); } else { return i_blocks; } |
0fc1b4514 ext4: Add support... |
3490 3491 3492 3493 |
} else { return le32_to_cpu(raw_inode->i_blocks_lo); } } |
ff9ddf7e8 ext4: copy i_flag... |
3494 |
|
/*
 * ext4_iget() - obtain the in-core inode for @ino on @sb.
 *
 * Returns the cached inode if one exists; otherwise reads the raw
 * on-disk inode, copies/validates its fields into the new in-core
 * inode, wires up the i_op/i_fop tables by file type, and unlocks it.
 * On any failure the new inode is torn down and an ERR_PTR is returned.
 */
struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
{
	struct ext4_iloc iloc;
	struct ext4_inode *raw_inode;
	struct ext4_inode_info *ei;
	struct inode *inode;
	journal_t *journal = EXT4_SB(sb)->s_journal;
	long ret;
	int block;

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;	/* already cached and initialized */

	ei = EXT4_I(inode);
	iloc.bh = NULL;

	/* in_mem=0: the raw inode block must actually be read. */
	ret = __ext4_get_inode_loc(inode, &iloc, 0);
	if (ret < 0)
		goto bad_inode;
	raw_inode = ext4_raw_inode(&iloc);
	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
	inode->i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
	inode->i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
	if (!(test_opt(inode->i_sb, NO_UID32))) {
		/* 32-bit ids: fold in the high 16 bits as well. */
		inode->i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
		inode->i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
	}
	set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));

	ext4_clear_state_flags(ei);	/* Only relevant on 32-bit archs */
	ei->i_dir_start_lookup = 0;
	ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
	/* We now have enough fields to check if the inode was active or not.
	 * This is needed because nfsd might try to access dead inodes
	 * the test is that same one that e2fsck uses
	 * NeilBrown 1999oct15
	 */
	if (inode->i_nlink == 0) {
		if (inode->i_mode == 0 ||
		    !(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS)) {
			/* this inode is deleted */
			ret = -ESTALE;
			goto bad_inode;
		}
		/* The only unlinked inodes we let through here have
		 * valid i_mode and are being read by the orphan
		 * recovery code: that's fine, we're about to complete
		 * the process of deleting those. */
	}
	ei->i_flags = le32_to_cpu(raw_inode->i_flags);
	inode->i_blocks = ext4_inode_blocks(raw_inode, ei);
	ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl_lo);
	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_64BIT))
		/* 64-bit filesystems carry the acl block's high bits too. */
		ei->i_file_acl |=
			((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32;
	inode->i_size = ext4_isize(raw_inode);
	ei->i_disksize = inode->i_size;
#ifdef CONFIG_QUOTA
	ei->i_reserved_quota = 0;
#endif
	inode->i_generation = le32_to_cpu(raw_inode->i_generation);
	ei->i_block_group = iloc.block_group;
	ei->i_last_alloc_group = ~0;
	/*
	 * NOTE! The in-memory inode i_data array is in little-endian order
	 * even on big-endian machines: we do NOT byteswap the block numbers!
	 */
	for (block = 0; block < EXT4_N_BLOCKS; block++)
		ei->i_data[block] = raw_inode->i_block[block];
	INIT_LIST_HEAD(&ei->i_orphan);

	/*
	 * Set transaction id's of transactions that have to be committed
	 * to finish f[data]sync. We set them to currently running transaction
	 * as we cannot be sure that the inode or some of its metadata isn't
	 * part of the transaction - the inode could have been reclaimed and
	 * now it is reread from disk.
	 */
	if (journal) {
		transaction_t *transaction;
		tid_t tid;

		read_lock(&journal->j_state_lock);
		if (journal->j_running_transaction)
			transaction = journal->j_running_transaction;
		else
			transaction = journal->j_committing_transaction;
		if (transaction)
			tid = transaction->t_tid;
		else
			tid = journal->j_commit_sequence;
		read_unlock(&journal->j_state_lock);
		ei->i_sync_tid = tid;
		ei->i_datasync_tid = tid;
	}

	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
		ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
		/* Reject an extra_isize that would overrun the inode. */
		if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
		    EXT4_INODE_SIZE(inode->i_sb)) {
			ret = -EIO;
			goto bad_inode;
		}
		if (ei->i_extra_isize == 0) {
			/* The extra space is currently unused. Use it. */
			ei->i_extra_isize = sizeof(struct ext4_inode) -
					    EXT4_GOOD_OLD_INODE_SIZE;
		} else {
			/* Check for in-inode xattrs right after extra_isize. */
			__le32 *magic = (void *)raw_inode +
					EXT4_GOOD_OLD_INODE_SIZE +
					ei->i_extra_isize;
			if (*magic == cpu_to_le32(EXT4_XATTR_MAGIC))
				ext4_set_inode_state(inode, EXT4_STATE_XATTR);
		}
	} else
		ei->i_extra_isize = 0;

	EXT4_INODE_GET_XTIME(i_ctime, inode, raw_inode);
	EXT4_INODE_GET_XTIME(i_mtime, inode, raw_inode);
	EXT4_INODE_GET_XTIME(i_atime, inode, raw_inode);
	EXT4_EINODE_GET_XTIME(i_crtime, ei, raw_inode);

	inode->i_version = le32_to_cpu(raw_inode->i_disk_version);
	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
		if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
			inode->i_version |=
			(__u64)(le32_to_cpu(raw_inode->i_version_hi)) << 32;
	}

	/* Sanity-check the xattr block and the block/extent maps. */
	ret = 0;
	if (ei->i_file_acl &&
	    !ext4_data_block_valid(EXT4_SB(sb), ei->i_file_acl, 1)) {
		EXT4_ERROR_INODE(inode, "bad extended attribute block %llu",
				 ei->i_file_acl);
		ret = -EIO;
		goto bad_inode;
	} else if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
		    (S_ISLNK(inode->i_mode) &&
		     !ext4_inode_is_fast_symlink(inode)))
			/* Validate extent which is part of inode */
			ret = ext4_ext_check_inode(inode);
	} else if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
		   (S_ISLNK(inode->i_mode) &&
		    !ext4_inode_is_fast_symlink(inode))) {
		/* Validate block references which are part of inode */
		ret = ext4_ind_check_inode(inode);
	}
	if (ret)
		goto bad_inode;

	/* Install the operations tables appropriate to the file type. */
	if (S_ISREG(inode->i_mode)) {
		inode->i_op = &ext4_file_inode_operations;
		inode->i_fop = &ext4_file_operations;
		ext4_set_aops(inode);
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &ext4_dir_inode_operations;
		inode->i_fop = &ext4_dir_operations;
	} else if (S_ISLNK(inode->i_mode)) {
		if (ext4_inode_is_fast_symlink(inode)) {
			inode->i_op = &ext4_fast_symlink_inode_operations;
			/* Target lives in i_data; ensure NUL termination. */
			nd_terminate_link(ei->i_data, inode->i_size,
				sizeof(ei->i_data) - 1);
		} else {
			inode->i_op = &ext4_symlink_inode_operations;
			ext4_set_aops(inode);
		}
	} else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
	      S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
		inode->i_op = &ext4_special_inode_operations;
		/* i_block[0] nonzero means the old (16-bit) dev_t encoding. */
		if (raw_inode->i_block[0])
			init_special_inode(inode, inode->i_mode,
			   old_decode_dev(le32_to_cpu(raw_inode->i_block[0])));
		else
			init_special_inode(inode, inode->i_mode,
			   new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
	} else {
		ret = -EIO;
		EXT4_ERROR_INODE(inode, "bogus i_mode (%o)", inode->i_mode);
		goto bad_inode;
	}
	brelse(iloc.bh);
	ext4_set_inode_flags(inode);
	unlock_new_inode(inode);
	return inode;

bad_inode:
	brelse(iloc.bh);
	iget_failed(inode);
	return ERR_PTR(ret);
}
0fc1b4514 ext4: Add support... |
3682 3683 3684 3685 3686 3687 3688 |
static int ext4_inode_blocks_set(handle_t *handle, struct ext4_inode *raw_inode, struct ext4_inode_info *ei) { struct inode *inode = &(ei->vfs_inode); u64 i_blocks = inode->i_blocks; struct super_block *sb = inode->i_sb; |
0fc1b4514 ext4: Add support... |
3689 3690 3691 3692 3693 3694 |
if (i_blocks <= ~0U) { /* * i_blocks can be represnted in a 32 bit variable * as multiple of 512 bytes */ |
8180a5627 ext4: Support lar... |
3695 |
raw_inode->i_blocks_lo = cpu_to_le32(i_blocks); |
0fc1b4514 ext4: Add support... |
3696 |
raw_inode->i_blocks_high = 0; |
84a8dce27 ext4: Fix remaini... |
3697 |
ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE); |
f287a1a56 ext4: Remove auto... |
3698 3699 3700 3701 3702 3703 |
return 0; } if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_HUGE_FILE)) return -EFBIG; if (i_blocks <= 0xffffffffffffULL) { |
0fc1b4514 ext4: Add support... |
3704 3705 3706 3707 |
/* * i_blocks can be represented in a 48 bit variable * as multiple of 512 bytes */ |
8180a5627 ext4: Support lar... |
3708 |
raw_inode->i_blocks_lo = cpu_to_le32(i_blocks); |
0fc1b4514 ext4: Add support... |
3709 |
raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32); |
84a8dce27 ext4: Fix remaini... |
3710 |
ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE); |
0fc1b4514 ext4: Add support... |
3711 |
} else { |
84a8dce27 ext4: Fix remaini... |
3712 |
ext4_set_inode_flag(inode, EXT4_INODE_HUGE_FILE); |
8180a5627 ext4: Support lar... |
3713 3714 3715 3716 |
/* i_block is stored in file system block size */ i_blocks = i_blocks >> (inode->i_blkbits - 9); raw_inode->i_blocks_lo = cpu_to_le32(i_blocks); raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32); |
0fc1b4514 ext4: Add support... |
3717 |
} |
f287a1a56 ext4: Remove auto... |
3718 |
return 0; |
0fc1b4514 ext4: Add support... |
3719 |
} |
/*
 * Post the struct inode info into an on-disk inode location in the
 * buffer-cache. This gobbles the caller's reference to the
 * buffer_head in the inode location struct.
 *
 * The caller must have write access to iloc->bh.
 *
 * Returns 0 on success or a negative errno; the buffer_head reference
 * is released on all paths (out_brelse).
 */
static int ext4_do_update_inode(handle_t *handle,
				struct inode *inode,
				struct ext4_iloc *iloc)
{
	struct ext4_inode *raw_inode = ext4_raw_inode(iloc);
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct buffer_head *bh = iloc->bh;
	int err = 0, rc, block;

	/* For fields not tracked in the in-memory inode,
	 * initialise them to zero for new inodes. */
	if (ext4_test_inode_state(inode, EXT4_STATE_NEW))
		memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size);

	/* Pull the current VFS flag bits back into ei->i_flags first. */
	ext4_get_inode_flags(ei);
	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
	if (!(test_opt(inode->i_sb, NO_UID32))) {
		raw_inode->i_uid_low = cpu_to_le16(low_16_bits(inode->i_uid));
		raw_inode->i_gid_low = cpu_to_le16(low_16_bits(inode->i_gid));
/*
 * Fix up interoperability with old kernels. Otherwise, old inodes get
 * re-used with the upper 16 bits of the uid/gid intact
 */
		if (!ei->i_dtime) {
			raw_inode->i_uid_high =
				cpu_to_le16(high_16_bits(inode->i_uid));
			raw_inode->i_gid_high =
				cpu_to_le16(high_16_bits(inode->i_gid));
		} else {
			raw_inode->i_uid_high = 0;
			raw_inode->i_gid_high = 0;
		}
	} else {
		raw_inode->i_uid_low =
			cpu_to_le16(fs_high2lowuid(inode->i_uid));
		raw_inode->i_gid_low =
			cpu_to_le16(fs_high2lowgid(inode->i_gid));
		raw_inode->i_uid_high = 0;
		raw_inode->i_gid_high = 0;
	}
	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);

	EXT4_INODE_SET_XTIME(i_ctime, inode, raw_inode);
	EXT4_INODE_SET_XTIME(i_mtime, inode, raw_inode);
	EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode);
	EXT4_EINODE_SET_XTIME(i_crtime, ei, raw_inode);

	/* May fail with -EFBIG; err stays 0 and ext4_std_error is a no-op. */
	if (ext4_inode_blocks_set(handle, raw_inode, ei))
		goto out_brelse;
	raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
	raw_inode->i_flags = cpu_to_le32(ei->i_flags & 0xFFFFFFFF);
	if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
	    cpu_to_le32(EXT4_OS_HURD))
		raw_inode->i_file_acl_high =
			cpu_to_le16(ei->i_file_acl >> 32);
	raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl);
	ext4_isize_set(raw_inode, ei->i_disksize);
	if (ei->i_disksize > 0x7fffffffULL) {
		struct super_block *sb = inode->i_sb;
		if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
				EXT4_FEATURE_RO_COMPAT_LARGE_FILE) ||
				EXT4_SB(sb)->s_es->s_rev_level ==
				cpu_to_le32(EXT4_GOOD_OLD_REV)) {
			/* If this is the first large file
			 * created, add a flag to the superblock.
			 */
			err = ext4_journal_get_write_access(handle,
					EXT4_SB(sb)->s_sbh);
			if (err)
				goto out_brelse;
			ext4_update_dynamic_rev(sb);
			EXT4_SET_RO_COMPAT_FEATURE(sb,
					EXT4_FEATURE_RO_COMPAT_LARGE_FILE);
			sb->s_dirt = 1;
			ext4_handle_sync(handle);
			err = ext4_handle_dirty_metadata(handle, NULL,
					EXT4_SB(sb)->s_sbh);
		}
	}
	raw_inode->i_generation = cpu_to_le32(inode->i_generation);
	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		/* Device numbers use the old encoding in i_block[0] when
		 * possible, the new encoding in i_block[1] otherwise. */
		if (old_valid_dev(inode->i_rdev)) {
			raw_inode->i_block[0] =
				cpu_to_le32(old_encode_dev(inode->i_rdev));
			raw_inode->i_block[1] = 0;
		} else {
			raw_inode->i_block[0] = 0;
			raw_inode->i_block[1] =
				cpu_to_le32(new_encode_dev(inode->i_rdev));
			raw_inode->i_block[2] = 0;
		}
	} else
		for (block = 0; block < EXT4_N_BLOCKS; block++)
			raw_inode->i_block[block] = ei->i_data[block];

	raw_inode->i_disk_version = cpu_to_le32(inode->i_version);
	if (ei->i_extra_isize) {
		if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
			raw_inode->i_version_hi =
			cpu_to_le32(inode->i_version >> 32);
		raw_inode->i_extra_isize = cpu_to_le16(ei->i_extra_isize);
	}

	BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
	rc = ext4_handle_dirty_metadata(handle, NULL, bh);
	if (!err)
		err = rc;
	ext4_clear_inode_state(inode, EXT4_STATE_NEW);

	ext4_update_inode_fsync_trans(handle, inode, 0);
out_brelse:
	brelse(bh);
	ext4_std_error(inode->i_sb, err);
	return err;
}
/*
 * ext4_write_inode()
 *
 * We are called from a few places:
 *
 * - Within generic_file_write() for O_SYNC files.
 *   Here, there will be no transaction running. We wait for any running
 *   transaction to commit.
 *
 * - Within sys_sync(), kupdate and such.
 *   We wait on commit, if told to.
 *
 * - Within prune_icache() (PF_MEMALLOC == true)
 *   Here we simply return.  We can't afford to block kswapd on the
 *   journal commit.
 *
 * In all cases it is actually safe for us to return without doing anything,
 * because the inode has been copied into a raw inode buffer in
 * ext4_mark_inode_dirty().  This is a correctness thing for O_SYNC and for
 * knfsd.
 *
 * Note that we are absolutely dependent upon all inode dirtiers doing the
 * right thing: they *must* call mark_inode_dirty() after dirtying info in
 * which we are interested.
 *
 * It would be a bug for them to not do this.  The code:
 *
 *	mark_inode_dirty(inode)
 *	stuff();
 *	inode->i_size = expr;
 *
 * is in error because a kswapd-driven write_inode() could occur while
 * `stuff()' is running, and the new i_size will be lost.  Plus the inode
 * will no longer be on the superblock's dirty inode list.
 */
int ext4_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	int err;

	/* Memory-reclaim context: never block on the journal. */
	if (current->flags & PF_MEMALLOC)
		return 0;

	if (EXT4_SB(inode->i_sb)->s_journal) {
		/* Re-entry from inside a transaction would deadlock. */
		if (ext4_journal_current_handle()) {
			jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n");
			dump_stack();
			return -EIO;
		}

		if (wbc->sync_mode != WB_SYNC_ALL)
			return 0;

		err = ext4_force_commit(inode->i_sb);
	} else {
		/* No journal: sync the raw inode buffer directly. */
		struct ext4_iloc iloc;

		err = __ext4_get_inode_loc(inode, &iloc, 0);
		if (err)
			return err;
		if (wbc->sync_mode == WB_SYNC_ALL)
			sync_dirty_buffer(iloc.bh);
		if (buffer_req(iloc.bh) && !buffer_uptodate(iloc.bh)) {
			EXT4_ERROR_INODE_BLOCK(inode, iloc.bh->b_blocknr,
					 "IO error syncing inode");
			err = -EIO;
		}
		brelse(iloc.bh);
	}
	return err;
}
617ba13b3 [PATCH] ext4: ren... |
3912 |
* ext4_setattr() |
ac27a0ec1 [PATCH] ext4: ini... |
3913 3914 3915 3916 3917 3918 3919 3920 3921 3922 3923 3924 3925 |
* * Called from notify_change. * * We want to trap VFS attempts to truncate the file as soon as * possible. In particular, we want to make sure that when the VFS * shrinks i_size, we put the inode on the orphan list and modify * i_disksize immediately, so that during the subsequent flushing of * dirty pages and freeing of disk blocks, we can guarantee that any * commit will leave the blocks being flushed in an unused state on * disk. (On recovery, the inode will get truncated and the blocks will * be freed, so we have a strong guarantee that no future commit will * leave these blocks visible to the user.) * |
678aaf481 ext4: Use new fra... |
3926 3927 3928 3929 3930 3931 3932 3933 |
* Another thing we have to assure is that if we are in ordered mode * and inode is still attached to the committing transaction, we must * we start writeout of all the dirty pages which are being truncated. * This way we are sure that all the data written in the previous * transaction are already on disk (truncate waits for pages under * writeback). * * Called with inode->i_mutex down. |
ac27a0ec1 [PATCH] ext4: ini... |
3934 |
*/ |
617ba13b3 [PATCH] ext4: ren... |
3935 |
int ext4_setattr(struct dentry *dentry, struct iattr *attr) |
ac27a0ec1 [PATCH] ext4: ini... |
3936 3937 3938 |
{ struct inode *inode = dentry->d_inode; int error, rc = 0; |
3d287de3b ext4: optimize or... |
3939 |
int orphan = 0; |
ac27a0ec1 [PATCH] ext4: ini... |
3940 3941 3942 3943 3944 |
const unsigned int ia_valid = attr->ia_valid; error = inode_change_ok(inode, attr); if (error) return error; |
12755627b quota: unify quot... |
3945 |
if (is_quota_modification(inode, attr)) |
871a29315 dquot: cleanup dq... |
3946 |
dquot_initialize(inode); |
ac27a0ec1 [PATCH] ext4: ini... |
3947 3948 3949 3950 3951 3952 |
if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) || (ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) { handle_t *handle; /* (user+group)*(old+new) structure, inode write (sb, * inode block, ? - but truncate inode update has it) */ |
5aca07eb7 ext4: quota macro... |
3953 |
handle = ext4_journal_start(inode, (EXT4_MAXQUOTAS_INIT_BLOCKS(inode->i_sb)+ |
194074aca ext4: fix incorre... |
3954 |
EXT4_MAXQUOTAS_DEL_BLOCKS(inode->i_sb))+3); |
ac27a0ec1 [PATCH] ext4: ini... |
3955 3956 3957 3958 |
if (IS_ERR(handle)) { error = PTR_ERR(handle); goto err_out; } |
b43fa8284 dquot: cleanup dq... |
3959 |
error = dquot_transfer(inode, attr); |
ac27a0ec1 [PATCH] ext4: ini... |
3960 |
if (error) { |
617ba13b3 [PATCH] ext4: ren... |
3961 |
ext4_journal_stop(handle); |
ac27a0ec1 [PATCH] ext4: ini... |
3962 3963 3964 3965 3966 3967 3968 3969 |
return error; } /* Update corresponding info in inode so that everything is in * one transaction */ if (attr->ia_valid & ATTR_UID) inode->i_uid = attr->ia_uid; if (attr->ia_valid & ATTR_GID) inode->i_gid = attr->ia_gid; |
617ba13b3 [PATCH] ext4: ren... |
3970 3971 |
error = ext4_mark_inode_dirty(handle, inode); ext4_journal_stop(handle); |
ac27a0ec1 [PATCH] ext4: ini... |
3972 |
} |
e2b465745 ext4: store maxby... |
3973 |
if (attr->ia_valid & ATTR_SIZE) { |
562c72aa5 fs: move inode_di... |
3974 |
inode_dio_wait(inode); |
12e9b8920 ext4: Use bitops ... |
3975 |
if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) { |
e2b465745 ext4: store maxby... |
3976 |
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); |
0c095c7f1 ext4: Don't error... |
3977 3978 |
if (attr->ia_size > sbi->s_bitmap_maxbytes) return -EFBIG; |
e2b465745 ext4: store maxby... |
3979 3980 |
} } |
ac27a0ec1 [PATCH] ext4: ini... |
3981 |
if (S_ISREG(inode->i_mode) && |
c8d46e41b ext4: Add flag to... |
3982 |
attr->ia_valid & ATTR_SIZE && |
072bd7ea7 ext4: use truncat... |
3983 |
(attr->ia_size < inode->i_size)) { |
ac27a0ec1 [PATCH] ext4: ini... |
3984 |
handle_t *handle; |
617ba13b3 [PATCH] ext4: ren... |
3985 |
handle = ext4_journal_start(inode, 3); |
ac27a0ec1 [PATCH] ext4: ini... |
3986 3987 3988 3989 |
if (IS_ERR(handle)) { error = PTR_ERR(handle); goto err_out; } |
3d287de3b ext4: optimize or... |
3990 3991 3992 3993 |
if (ext4_handle_valid(handle)) { error = ext4_orphan_add(handle, inode); orphan = 1; } |
617ba13b3 [PATCH] ext4: ren... |
3994 3995 |
EXT4_I(inode)->i_disksize = attr->ia_size; rc = ext4_mark_inode_dirty(handle, inode); |
ac27a0ec1 [PATCH] ext4: ini... |
3996 3997 |
if (!error) error = rc; |
617ba13b3 [PATCH] ext4: ren... |
3998 |
ext4_journal_stop(handle); |
678aaf481 ext4: Use new fra... |
3999 4000 4001 4002 4003 4004 4005 4006 4007 4008 4009 4010 |
if (ext4_should_order_data(inode)) { error = ext4_begin_ordered_truncate(inode, attr->ia_size); if (error) { /* Do as much error cleanup as possible */ handle = ext4_journal_start(inode, 3); if (IS_ERR(handle)) { ext4_orphan_del(NULL, inode); goto err_out; } ext4_orphan_del(handle, inode); |
3d287de3b ext4: optimize or... |
4011 |
orphan = 0; |
678aaf481 ext4: Use new fra... |
4012 4013 4014 4015 |
ext4_journal_stop(handle); goto err_out; } } |
ac27a0ec1 [PATCH] ext4: ini... |
4016 |
} |
072bd7ea7 ext4: use truncat... |
4017 4018 4019 4020 4021 4022 4023 |
if (attr->ia_valid & ATTR_SIZE) { if (attr->ia_size != i_size_read(inode)) { truncate_setsize(inode, attr->ia_size); ext4_truncate(inode); } else if (ext4_test_inode_flag(inode, EXT4_INODE_EOFBLOCKS)) ext4_truncate(inode); } |
ac27a0ec1 [PATCH] ext4: ini... |
4024 |
|
1025774ce remove inode_setattr |
4025 4026 4027 4028 4029 4030 4031 4032 4033 |
if (!rc) { setattr_copy(inode, attr); mark_inode_dirty(inode); } /* * If the call to ext4_truncate failed to get a transaction handle at * all, we need to clean up the in-core orphan list manually. */ |
3d287de3b ext4: optimize or... |
4034 |
if (orphan && inode->i_nlink) |
617ba13b3 [PATCH] ext4: ren... |
4035 |
ext4_orphan_del(NULL, inode); |
ac27a0ec1 [PATCH] ext4: ini... |
4036 4037 |
if (!rc && (ia_valid & ATTR_MODE)) |
617ba13b3 [PATCH] ext4: ren... |
4038 |
rc = ext4_acl_chmod(inode); |
ac27a0ec1 [PATCH] ext4: ini... |
4039 4040 |
err_out: |
617ba13b3 [PATCH] ext4: ren... |
4041 |
ext4_std_error(inode->i_sb, error); |
ac27a0ec1 [PATCH] ext4: ini... |
4042 4043 4044 4045 |
if (!error) error = rc; return error; } |
3e3398a08 ext4: delayed all... |
4046 4047 4048 4049 4050 4051 4052 4053 4054 4055 4056 4057 4058 4059 4060 4061 4062 4063 4064 |
int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) { struct inode *inode; unsigned long delalloc_blocks; inode = dentry->d_inode; generic_fillattr(inode, stat); /* * We can't update i_blocks if the block allocation is delayed * otherwise in the case of system crash before the real block * allocation is done, we will have i_blocks inconsistent with * on-disk file blocks. * We always keep i_blocks updated together with real * allocation. But to not confuse with user, stat * will return the blocks that include the delayed allocation * blocks for this file. */ |
3e3398a08 ext4: delayed all... |
4065 |
delalloc_blocks = EXT4_I(inode)->i_reserved_data_blocks; |
3e3398a08 ext4: delayed all... |
4066 4067 4068 4069 |
stat->blocks += (delalloc_blocks << inode->i_sb->s_blocksize_bits)>>9; return 0; } |
ac27a0ec1 [PATCH] ext4: ini... |
4070 |
|
a02908f19 ext4: journal cre... |
4071 4072 |
static int ext4_index_trans_blocks(struct inode *inode, int nrblocks, int chunk) { |
12e9b8920 ext4: Use bitops ... |
4073 |
if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) |
8bb2b2471 ext4: rename ext4... |
4074 |
return ext4_ind_trans_blocks(inode, nrblocks, chunk); |
ac51d8370 ext4: calculate j... |
4075 |
return ext4_ext_index_trans_blocks(inode, nrblocks, chunk); |
a02908f19 ext4: journal cre... |
4076 |
} |
ac51d8370 ext4: calculate j... |
4077 |
|
ac27a0ec1 [PATCH] ext4: ini... |
4078 |
/* |
a02908f19 ext4: journal cre... |
4079 4080 4081 |
* Account for index blocks, block groups bitmaps and block group * descriptor blocks if modify datablocks and index blocks * worse case, the indexs blocks spread over different block groups |
ac27a0ec1 [PATCH] ext4: ini... |
4082 |
* |
a02908f19 ext4: journal cre... |
4083 |
* If datablocks are discontiguous, they are possible to spread over |
af901ca18 tree-wide: fix as... |
4084 |
* different block groups too. If they are contiuguous, with flexbg, |
a02908f19 ext4: journal cre... |
4085 |
* they could still across block group boundary. |
ac27a0ec1 [PATCH] ext4: ini... |
4086 |
* |
a02908f19 ext4: journal cre... |
4087 4088 |
* Also account for superblock, inode, quota and xattr blocks */ |
1f109d5a1 ext4: make variou... |
4089 |
static int ext4_meta_trans_blocks(struct inode *inode, int nrblocks, int chunk) |
a02908f19 ext4: journal cre... |
4090 |
{ |
8df9675f8 ext4: Avoid races... |
4091 4092 |
ext4_group_t groups, ngroups = ext4_get_groups_count(inode->i_sb); int gdpblocks; |
a02908f19 ext4: journal cre... |
4093 4094 4095 4096 4097 4098 4099 4100 4101 4102 4103 4104 4105 4106 4107 4108 4109 4110 4111 4112 4113 4114 4115 4116 4117 4118 |
int idxblocks; int ret = 0; /* * How many index blocks need to touch to modify nrblocks? * The "Chunk" flag indicating whether the nrblocks is * physically contiguous on disk * * For Direct IO and fallocate, they calls get_block to allocate * one single extent at a time, so they could set the "Chunk" flag */ idxblocks = ext4_index_trans_blocks(inode, nrblocks, chunk); ret = idxblocks; /* * Now let's see how many group bitmaps and group descriptors need * to account */ groups = idxblocks; if (chunk) groups += 1; else groups += nrblocks; gdpblocks = groups; |
8df9675f8 ext4: Avoid races... |
4119 4120 |
if (groups > ngroups) groups = ngroups; |
a02908f19 ext4: journal cre... |
4121 4122 4123 4124 4125 4126 4127 4128 4129 4130 4131 4132 4133 |
if (groups > EXT4_SB(inode->i_sb)->s_gdb_count) gdpblocks = EXT4_SB(inode->i_sb)->s_gdb_count; /* bitmaps and block group descriptor blocks */ ret += groups + gdpblocks; /* Blocks for super block, inode, quota and xattr blocks */ ret += EXT4_META_TRANS_BLOCKS(inode->i_sb); return ret; } /* |
/*
 * Calculate the total number of credits to reserve so that the
 * modification of a single page fits into a single transaction,
 * which may include multiple chunks of block allocations.
 *
 * This could be called via ext4_write_begin().
 *
 * We need to consider the worst case: one new block per extent.
 */
int ext4_writepage_trans_blocks(struct inode *inode)
{
	int blocks_per_page = ext4_journal_blocks_per_page(inode);
	int credits = ext4_meta_trans_blocks(inode, blocks_per_page, 0);

	/* In data=journal mode the data blocks themselves are journalled */
	if (ext4_should_journal_data(inode))
		credits += blocks_per_page;

	return credits;
}
/*
 * Calculate the journal credits for a chunk of data modification.
 *
 * This is called from DIO, fallocate or anything else calling
 * ext4_map_blocks() to map/allocate a chunk of contiguous disk blocks.
 *
 * Journal buffers for data blocks are not included here, as DIO
 * and fallocate do not need to journal data buffers.
 */
int ext4_chunk_trans_blocks(struct inode *inode, int nrblocks)
{
	/* chunk == 1: the nrblocks are physically contiguous on disk */
	return ext4_meta_trans_blocks(inode, nrblocks, 1);
}
/*
 * The caller must have previously called ext4_reserve_inode_write().
 * Given this, we know that the caller already has write access to iloc->bh.
 *
 * Copies the in-core inode into the on-disk buffer and marks that
 * buffer dirty within the running transaction.  Returns 0 or a
 * negative error from ext4_do_update_inode().
 */
int ext4_mark_iloc_dirty(handle_t *handle,
			 struct inode *inode, struct ext4_iloc *iloc)
{
	int err = 0;

	/* NFSv4 change attribute support: bump i_version on every update */
	if (test_opt(inode->i_sb, I_VERSION))
		inode_inc_iversion(inode);

	/* the do_update_inode consumes one bh->b_count */
	get_bh(iloc->bh);

	/* ext4_do_update_inode() does jbd2_journal_dirty_metadata */
	err = ext4_do_update_inode(handle, inode, iloc);
	/* drop the reference ext4_reserve_inode_write() left for us */
	put_bh(iloc->bh);
	return err;
}
/*
 * On success, We end up with an outstanding reference count against
 * iloc->bh. This _must_ be cleaned up later.
 *
 * Locates the inode's on-disk buffer and obtains journal write access
 * to it.  On failure, iloc->bh is released and set to NULL so callers
 * must not touch it.
 */
int
ext4_reserve_inode_write(handle_t *handle, struct inode *inode,
			 struct ext4_iloc *iloc)
{
	int err;

	err = ext4_get_inode_loc(inode, iloc);
	if (!err) {
		BUFFER_TRACE(iloc->bh, "get_write_access");
		err = ext4_journal_get_write_access(handle, iloc->bh);
		if (err) {
			/* give back the bh reference taken by get_inode_loc */
			brelse(iloc->bh);
			iloc->bh = NULL;
		}
	}
	ext4_std_error(inode->i_sb, err);
	return err;
}
/*
 * Expand an inode by new_extra_isize bytes.
 * Returns 0 on success or negative error number on failure.
 *
 * The caller must already hold write access to iloc.bh (via
 * ext4_reserve_inode_write()) since the raw on-disk inode is modified
 * in place.
 */
static int ext4_expand_extra_isize(struct inode *inode,
				   unsigned int new_extra_isize,
				   struct ext4_iloc iloc,
				   handle_t *handle)
{
	struct ext4_inode *raw_inode;
	struct ext4_xattr_ibody_header *header;

	/* Already large enough - nothing to do */
	if (EXT4_I(inode)->i_extra_isize >= new_extra_isize)
		return 0;

	raw_inode = ext4_raw_inode(&iloc);

	header = IHDR(inode, raw_inode);

	/* No extended attributes present */
	if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR) ||
	    header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) {
		/* zero the area being claimed from the xattr space */
		memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE, 0,
			new_extra_isize);
		EXT4_I(inode)->i_extra_isize = new_extra_isize;
		return 0;
	}

	/* try to expand with EAs present */
	return ext4_expand_extra_isize_ea(inode, new_extra_isize,
					  raw_inode, handle);
}
ac27a0ec1 [PATCH] ext4: ini... |
4244 4245 4246 4247 4248 4249 4250 4251 4252 4253 4254 4255 4256 4257 4258 4259 4260 4261 4262 4263 |
* What we do here is to mark the in-core inode as clean with respect to inode * dirtiness (it may still be data-dirty). * This means that the in-core inode may be reaped by prune_icache * without having to perform any I/O. This is a very good thing, * because *any* task may call prune_icache - even ones which * have a transaction open against a different journal. * * Is this cheating? Not really. Sure, we haven't written the * inode out, but prune_icache isn't a user-visible syncing function. * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync) * we start and wait on commits. * * Is this efficient/effective? Well, we're being nice to the system * by cleaning up our inodes proactively so they can be reaped * without I/O. But we are potentially leaving up to five seconds' * worth of inodes floating about which prune_icache wants us to * write out. One way to fix that would be to get prune_icache() * to do a write_super() to free up some memory. It has the desired * effect. */ |
617ba13b3 [PATCH] ext4: ren... |
4264 |
int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode) |
ac27a0ec1 [PATCH] ext4: ini... |
4265 |
{ |
617ba13b3 [PATCH] ext4: ren... |
4266 |
struct ext4_iloc iloc; |
6dd4ee7ca ext4: Expand extr... |
4267 4268 4269 |
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); static unsigned int mnt_count; int err, ret; |
ac27a0ec1 [PATCH] ext4: ini... |
4270 4271 |
might_sleep(); |
7ff9c073d ext4: Add new ext... |
4272 |
trace_ext4_mark_inode_dirty(inode, _RET_IP_); |
617ba13b3 [PATCH] ext4: ren... |
4273 |
err = ext4_reserve_inode_write(handle, inode, &iloc); |
0390131ba ext4: Allow ext4 ... |
4274 4275 |
if (ext4_handle_valid(handle) && EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize && |
19f5fb7ad ext4: Use bitops ... |
4276 |
!ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND)) { |
6dd4ee7ca ext4: Expand extr... |
4277 4278 4279 4280 4281 4282 4283 4284 4285 4286 4287 4288 4289 |
/* * We need extra buffer credits since we may write into EA block * with this same handle. If journal_extend fails, then it will * only result in a minor loss of functionality for that inode. * If this is felt to be critical, then e2fsck should be run to * force a large enough s_min_extra_isize. */ if ((jbd2_journal_extend(handle, EXT4_DATA_TRANS_BLOCKS(inode->i_sb))) == 0) { ret = ext4_expand_extra_isize(inode, sbi->s_want_extra_isize, iloc, handle); if (ret) { |
19f5fb7ad ext4: Use bitops ... |
4290 4291 |
ext4_set_inode_state(inode, EXT4_STATE_NO_EXPAND); |
c1bddad94 ext4: Fix sparse ... |
4292 4293 |
if (mnt_count != le16_to_cpu(sbi->s_es->s_mnt_count)) { |
12062dddd ext4: move __func... |
4294 |
ext4_warning(inode->i_sb, |
6dd4ee7ca ext4: Expand extr... |
4295 4296 4297 |
"Unable to expand inode %lu. Delete" " some EAs or run e2fsck.", inode->i_ino); |
c1bddad94 ext4: Fix sparse ... |
4298 4299 |
mnt_count = le16_to_cpu(sbi->s_es->s_mnt_count); |
6dd4ee7ca ext4: Expand extr... |
4300 4301 4302 4303 |
} } } } |
ac27a0ec1 [PATCH] ext4: ini... |
4304 |
if (!err) |
617ba13b3 [PATCH] ext4: ren... |
4305 |
err = ext4_mark_iloc_dirty(handle, inode, &iloc); |
ac27a0ec1 [PATCH] ext4: ini... |
4306 4307 4308 4309 |
return err; } /* |
/*
 * ext4_dirty_inode() is called from __mark_inode_dirty()
 *
 * We're really interested in the case where a file is being extended.
 * i_size has been changed by generic_commit_write() and we thus need
 * to include the updated inode in the current transaction.
 *
 * Also, dquot_alloc_block() will always dirty the inode when blocks
 * are allocated to the file.
 *
 * If the inode is marked synchronous, we don't honour that here - doing
 * so would cause a commit on atime updates, which we don't bother doing.
 * We handle synchronous inodes at the highest possible level.
 */
void ext4_dirty_inode(struct inode *inode, int flags)
{
	handle_t *handle;

	/* 2 credits: inode block plus possible superblock/group update */
	handle = ext4_journal_start(inode, 2);
	if (IS_ERR(handle))
		/* void VFS callback: nothing useful to report on failure */
		goto out;

	/* error deliberately ignored - best effort from this context */
	ext4_mark_inode_dirty(handle, inode);

	ext4_journal_stop(handle);
out:
	return;
}
#if 0
/*
 * Bind an inode's backing buffer_head into this transaction, to prevent
 * it from being flushed to disk early.  Unlike
 * ext4_reserve_inode_write, this leaves behind no bh reference and
 * returns no iloc structure, so the caller needs to repeat the iloc
 * lookup to mark the inode dirty later.
 *
 * NOTE: dead code, compiled out via #if 0; kept for reference only.
 */
static int ext4_pin_inode(handle_t *handle, struct inode *inode)
{
	struct ext4_iloc iloc;

	int err = 0;
	if (handle) {
		err = ext4_get_inode_loc(inode, &iloc);
		if (!err) {
			BUFFER_TRACE(iloc.bh, "get_write_access");
			err = jbd2_journal_get_write_access(handle, iloc.bh);
			if (!err)
				err = ext4_handle_dirty_metadata(handle,
								 NULL,
								 iloc.bh);
			/* drop the reference taken by ext4_get_inode_loc() */
			brelse(iloc.bh);
		}
	}
	ext4_std_error(inode->i_sb, err);
	return err;
}
#endif
/*
 * Switch an inode between data=journal and non-journalled data modes
 * at runtime (triggered by chattr +j / -j).  Returns 0 or a negative
 * errno.
 */
int ext4_change_inode_journal_flag(struct inode *inode, int val)
{
	journal_t *journal;
	handle_t *handle;
	int err;

	/*
	 * We have to be very careful here: changing a data block's
	 * journaling status dynamically is dangerous.  If we write a
	 * data block to the journal, change the status and then delete
	 * that block, we risk forgetting to revoke the old log record
	 * from the journal and so a subsequent replay can corrupt data.
	 * So, first we make sure that the journal is empty and that
	 * nobody is changing anything.
	 */

	journal = EXT4_JOURNAL(inode);
	if (!journal)
		return 0;
	if (is_journal_aborted(journal))
		return -EROFS;
	/* We have to allocate physical blocks for delalloc blocks
	 * before flushing journal.  otherwise delalloc blocks can not
	 * be allocated any more.  even more truncate on delalloc blocks
	 * could trigger BUG by flushing delalloc blocks in journal.
	 * There is no delalloc block in non-journal data mode.
	 */
	if (val && test_opt(inode->i_sb, DELALLOC)) {
		err = ext4_alloc_da_blocks(inode);
		if (err < 0)
			return err;
	}

	/* block all new transactions while we flip the flag */
	jbd2_journal_lock_updates(journal);

	/*
	 * OK, there are no updates running now, and all cached data is
	 * synced to disk.  We are now in a completely consistent state
	 * which doesn't have anything in the journal, and we know that
	 * no filesystem updates are running, so it is safe to modify
	 * the inode's in-core data-journaling state flag now.
	 */

	if (val)
		ext4_set_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
	else {
		/*
		 * Flush journalled data out before clearing the flag so no
		 * stale journalled copies of data blocks remain.
		 * NOTE(review): jbd2_journal_flush() return value is not
		 * checked here - an error would go unnoticed.
		 */
		jbd2_journal_flush(journal);
		ext4_clear_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
	}
	/* address_space ops differ between journalled and ordered data */
	ext4_set_aops(inode);

	jbd2_journal_unlock_updates(journal);

	/* Finally we can mark the inode as dirty. */

	handle = ext4_journal_start(inode, 1);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	err = ext4_mark_inode_dirty(handle, inode);
	/* force a synchronous commit so the mode change hits disk */
	ext4_handle_sync(handle);
	ext4_journal_stop(handle);
	ext4_std_error(inode->i_sb, err);

	return err;
}
2e9ee8503 ext4: Use page_mk... |
4428 4429 4430 4431 4432 |
static int ext4_bh_unmapped(handle_t *handle, struct buffer_head *bh) { return !buffer_mapped(bh); } |
/*
 * VM callback invoked when a previously read-only page of a shared
 * mapping is about to be written.  Allocates backing blocks if needed
 * and returns a VM_FAULT_* code.  On VM_FAULT_LOCKED the page is
 * returned locked to the caller.
 */
int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	loff_t size;
	unsigned long len;
	int ret;
	struct file *file = vma->vm_file;
	struct inode *inode = file->f_path.dentry->d_inode;
	struct address_space *mapping = inode->i_mapping;
	handle_t *handle;
	get_block_t *get_block;
	int retries = 0;

	/*
	 * This check is racy but catches the common case. We rely on
	 * __block_page_mkwrite() to do a reliable check.
	 */
	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
	/* Delalloc case is easy... */
	if (test_opt(inode->i_sb, DELALLOC) &&
	    !ext4_should_journal_data(inode) &&
	    !ext4_nonda_switch(inode->i_sb)) {
		/* just reserve blocks; retry on transient ENOSPC */
		do {
			ret = __block_page_mkwrite(vma, vmf,
						   ext4_da_get_block_prep);
		} while (ret == -ENOSPC &&
		       ext4_should_retry_alloc(inode->i_sb, &retries));
		goto out_ret;
	}

	lock_page(page);
	size = i_size_read(inode);
	/* Page got truncated from under us? */
	if (page->mapping != mapping || page_offset(page) > size) {
		unlock_page(page);
		ret = VM_FAULT_NOPAGE;
		goto out;
	}

	/* partial last page: only the in-file portion matters */
	if (page->index == size >> PAGE_CACHE_SHIFT)
		len = size & ~PAGE_CACHE_MASK;
	else
		len = PAGE_CACHE_SIZE;
	/*
	 * Return if we have all the buffers mapped. This avoids the need to do
	 * journal_start/journal_stop which can block and take a long time
	 */
	if (page_has_buffers(page)) {
		if (!walk_page_buffers(NULL, page_buffers(page), 0, len, NULL,
					ext4_bh_unmapped)) {
			/* Wait so that we don't change page under IO */
			wait_on_page_writeback(page);
			ret = VM_FAULT_LOCKED;
			goto out;
		}
	}
	unlock_page(page);
	/* OK, we need to fill the hole... */
	if (ext4_should_dioread_nolock(inode))
		get_block = ext4_get_block_write;
	else
		get_block = ext4_get_block;
retry_alloc:
	handle = ext4_journal_start(inode, ext4_writepage_trans_blocks(inode));
	if (IS_ERR(handle)) {
		ret = VM_FAULT_SIGBUS;
		goto out;
	}
	ret = __block_page_mkwrite(vma, vmf, get_block);
	if (!ret && ext4_should_journal_data(inode)) {
		/* data=journal: the data buffers must join the transaction */
		if (walk_page_buffers(handle, page_buffers(page), 0,
			  PAGE_CACHE_SIZE, NULL, do_journal_get_write_access)) {
			unlock_page(page);
			ret = VM_FAULT_SIGBUS;
			ext4_journal_stop(handle);
			goto out;
		}
		ext4_set_inode_state(inode, EXT4_STATE_JDATA);
	}
	ext4_journal_stop(handle);
	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
		goto retry_alloc;
out_ret:
	/* translate -errno/0 from __block_page_mkwrite into VM_FAULT_* */
	ret = block_page_mkwrite_return(ret);
out:
	return ret;
}