Blame view
fs/ext4/migrate.c
14.8 KB
c14c6fd5c
|
1 2 3 4 5 6 7 8 9 10 11 12 13 |
/* * Copyright IBM Corporation, 2007 * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2.1 of the GNU Lesser General Public License * as published by the Free Software Foundation. * * This program is distributed in the hope that it would be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * */ |
5a0e3ad6a
|
14 |
#include <linux/slab.h> |
3dcf54515
|
15 |
#include "ext4_jbd2.h" |
/*
 * The contiguous blocks details which can be
 * represented by a single extent
 */
struct migrate_struct {
	/*
	 * Logical file blocks [first_block, last_block] accumulated so far;
	 * curr_block is the next logical block the walk will visit.
	 */
	ext4_lblk_t first_block, last_block, curr_block;
	/*
	 * Matching physical block range.  first_pblock == 0 means no range
	 * is currently open (see finish_range() / update_extent_range()).
	 */
	ext4_fsblk_t first_pblock, last_pblock;
};

/*
 * Flush the contiguous range accumulated in @lb into @inode's extent
 * tree as a single extent.  Returns 0 immediately when no range is open
 * (lb->first_pblock == 0).  On return the range is closed by resetting
 * lb->first_pblock, whatever the outcome.
 */
static int finish_range(handle_t *handle, struct inode *inode,
				struct migrate_struct *lb)

{
	int retval = 0, needed;
	struct ext4_extent newext;
	struct ext4_ext_path *path;
	if (lb->first_pblock == 0)
		return 0;

	/* Add the extent to temp inode*/
	newext.ee_block = cpu_to_le32(lb->first_block);
	newext.ee_len = cpu_to_le16(lb->last_block - lb->first_block + 1);
	ext4_ext_store_pblock(&newext, lb->first_pblock);
	path = ext4_ext_find_extent(inode, lb->first_block, NULL);

	if (IS_ERR(path)) {
		retval = PTR_ERR(path);
		/* NULL so the err_out cleanup below knows not to free it */
		path = NULL;
		goto err_out;
	}

	/*
	 * Calculate the credit needed to inserting this extent
	 * Since we are doing this in loop we may accumulate extra
	 * credit. But below we try to not accumulate too much
	 * of them by restarting the journal.
	 */
	needed = ext4_ext_calc_credits_for_single_extent(inode,
		    lb->last_block - lb->first_block + 1, path);

	/*
	 * Make sure the credit we accumulated is not really high
	 */
	if (needed && ext4_handle_has_enough_credits(handle,
						EXT4_RESERVE_TRANS_BLOCKS)) {
		retval = ext4_journal_restart(handle, needed);
		if (retval)
			goto err_out;
	} else if (needed) {
		retval = ext4_journal_extend(handle, needed);
		if (retval) {
			/*
			 * If not able to extend the journal restart the
			 * journal instead.
			 */
			retval = ext4_journal_restart(handle, needed);
			if (retval)
				goto err_out;
		}
	}
	retval = ext4_ext_insert_extent(handle, inode, path, &newext, 0);
err_out:
	if (path) {
		ext4_ext_drop_refs(path);
		kfree(path);
	}
	/* Mark the range closed so the next call starts fresh */
	lb->first_pblock = 0;
	return retval;
}

/*
 * Account one mapped block @pblock at logical position lb->curr_block.
 * If it physically and logically continues the open range, just extend
 * the range; otherwise flush the open range via finish_range() and open
 * a new one starting at @pblock.
 */
static int update_extent_range(handle_t *handle, struct inode *inode,
			       ext4_fsblk_t pblock, struct migrate_struct *lb)
{
	int retval;
	/*
	 * See if we can add on to the existing range (if it exists)
	 */
	if (lb->first_pblock &&
		(lb->last_pblock+1 == pblock) &&
		(lb->last_block+1 == lb->curr_block)) {
		lb->last_pblock = pblock;
		lb->last_block = lb->curr_block;
		lb->curr_block++;
		return 0;
	}
	/*
	 * Start a new range.
	 */
	retval = finish_range(handle, inode, lb);
	lb->first_pblock = lb->last_pblock = pblock;
	lb->first_block = lb->last_block = lb->curr_block;
	lb->curr_block++;
	return retval;
}

/*
 * Walk one indirect block at @pblock and feed every mapped entry to
 * update_extent_range().  Holes (zero entries) only advance the logical
 * block counter.  Returns -EIO if the indirect block can't be read.
 */
static int update_ind_extent_range(handle_t *handle, struct inode *inode,
				   ext4_fsblk_t pblock,
				   struct migrate_struct *lb)
{
	struct buffer_head *bh;
	__le32 *i_data;
	int i, retval = 0;
	/* number of 32-bit block pointers per block */
	unsigned long max_entries = inode->i_sb->s_blocksize >> 2;

	bh = sb_bread(inode->i_sb, pblock);
	if (!bh)
		return -EIO;

	i_data = (__le32 *)bh->b_data;
	for (i = 0; i < max_entries; i++) {
		if (i_data[i]) {
			retval = update_extent_range(handle, inode,
						le32_to_cpu(i_data[i]), lb);
			if (retval)
				break;
		} else {
			lb->curr_block++;
		}
	}
	put_bh(bh);
	return retval;
}

/*
 * Walk one double-indirect block: each mapped entry is an indirect
 * block handled by update_ind_extent_range(); a hole skips a whole
 * indirect block's worth (max_entries) of logical blocks.
 */
static int update_dind_extent_range(handle_t *handle, struct inode *inode,
				    ext4_fsblk_t pblock,
				    struct migrate_struct *lb)
{
	struct buffer_head *bh;
	__le32 *i_data;
	int i, retval = 0;
	unsigned long max_entries = inode->i_sb->s_blocksize >> 2;

	bh = sb_bread(inode->i_sb, pblock);
	if (!bh)
		return -EIO;

	i_data = (__le32 *)bh->b_data;
	for (i = 0; i < max_entries; i++) {
		if (i_data[i]) {
			retval = update_ind_extent_range(handle, inode,
						le32_to_cpu(i_data[i]), lb);
			if (retval)
				break;
		} else {
			/* Only update the file block number */
			lb->curr_block += max_entries;
		}
	}
	put_bh(bh);
	return retval;
}

/*
 * Walk one triple-indirect block: each mapped entry is a double-indirect
 * block; a hole skips max_entries * max_entries logical blocks.
 */
static int update_tind_extent_range(handle_t *handle, struct inode *inode,
				    ext4_fsblk_t pblock,
				    struct migrate_struct *lb)
{
	struct buffer_head *bh;
	__le32 *i_data;
	int i, retval = 0;
	unsigned long max_entries = inode->i_sb->s_blocksize >> 2;

	bh = sb_bread(inode->i_sb, pblock);
	if (!bh)
		return -EIO;

	i_data = (__le32 *)bh->b_data;
	for (i = 0; i < max_entries; i++) {
		if (i_data[i]) {
			retval = update_dind_extent_range(handle, inode,
						le32_to_cpu(i_data[i]), lb);
			if (retval)
				break;
		} else {
			/* Only update the file block number */
			lb->curr_block += max_entries * max_entries;
		}
	}
	put_bh(bh);
	return retval;
}
8009f9fb3
|
196 197 198 |
static int extend_credit_for_blkdel(handle_t *handle, struct inode *inode) { int retval = 0, needed; |
0390131ba
|
199 |
if (ext4_handle_has_enough_credits(handle, EXT4_RESERVE_TRANS_BLOCKS+1)) |
8009f9fb3
|
200 201 202 203 204 205 206 |
return 0; /* * We are freeing a blocks. During this we touch * superblock, group descriptor and block bitmap. * So allocate a credit of 3. We may update * quota (user and group). */ |
5aca07eb7
|
207 |
needed = 3 + EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb); |
8009f9fb3
|
208 209 210 211 212 213 |
if (ext4_journal_extend(handle, needed) != 0) retval = ext4_journal_restart(handle, needed); return retval; } |
/*
 * Free a double-indirect meta-data block tree rooted at @i_data: every
 * mapped indirect block it references is freed, then the double-indirect
 * block itself.  Only meta-data blocks are freed here; the data blocks
 * now belong to the extent tree of the migrated inode.
 */
static int free_dind_blocks(handle_t *handle,
				struct inode *inode, __le32 i_data)
{
	int i;
	__le32 *tmp_idata;
	struct buffer_head *bh;
	/* number of 32-bit block pointers per block */
	unsigned long max_entries = inode->i_sb->s_blocksize >> 2;

	bh = sb_bread(inode->i_sb, le32_to_cpu(i_data));
	if (!bh)
		return -EIO;

	tmp_idata = (__le32 *)bh->b_data;
	for (i = 0; i < max_entries; i++) {
		if (tmp_idata[i]) {
			/* make sure we have credits before each free */
			extend_credit_for_blkdel(handle, inode);
			ext4_free_blocks(handle, inode, NULL,
					 le32_to_cpu(tmp_idata[i]), 1,
					 EXT4_FREE_BLOCKS_METADATA |
					 EXT4_FREE_BLOCKS_FORGET);
		}
	}
	put_bh(bh);
	/* finally free the double-indirect block itself */
	extend_credit_for_blkdel(handle, inode);
	ext4_free_blocks(handle, inode, NULL, le32_to_cpu(i_data), 1,
			 EXT4_FREE_BLOCKS_METADATA |
			 EXT4_FREE_BLOCKS_FORGET);
	return 0;
}

/*
 * Free a triple-indirect meta-data block tree rooted at @i_data: free
 * each referenced double-indirect subtree, then the triple-indirect
 * block itself.  Stops and returns on the first error.
 */
static int free_tind_blocks(handle_t *handle,
				struct inode *inode, __le32 i_data)
{
	int i, retval = 0;
	__le32 *tmp_idata;
	struct buffer_head *bh;
	unsigned long max_entries = inode->i_sb->s_blocksize >> 2;

	bh = sb_bread(inode->i_sb, le32_to_cpu(i_data));
	if (!bh)
		return -EIO;

	tmp_idata = (__le32 *)bh->b_data;
	for (i = 0; i < max_entries; i++) {
		if (tmp_idata[i]) {
			retval = free_dind_blocks(handle,
					inode, tmp_idata[i]);
			if (retval) {
				put_bh(bh);
				return retval;
			}
		}
	}
	put_bh(bh);
	/* finally free the triple-indirect block itself */
	extend_credit_for_blkdel(handle, inode);
	ext4_free_blocks(handle, inode, NULL, le32_to_cpu(i_data), 1,
			 EXT4_FREE_BLOCKS_METADATA |
			 EXT4_FREE_BLOCKS_FORGET);
	return 0;
}
/*
 * Free the inode's old indirect meta-data blocks.  @i_data holds the
 * saved EXT4_IND/DIND/TIND_BLOCK entries captured before i_data was
 * overwritten with the extent map (see ext4_ext_swap_inode_data()).
 */
static int free_ind_block(handle_t *handle, struct inode *inode, __le32 *i_data)
{
	int retval;

	/* ei->i_data[EXT4_IND_BLOCK] */
	if (i_data[0]) {
		extend_credit_for_blkdel(handle, inode);
		ext4_free_blocks(handle, inode, NULL,
				le32_to_cpu(i_data[0]), 1,
				 EXT4_FREE_BLOCKS_METADATA |
				 EXT4_FREE_BLOCKS_FORGET);
	}

	/* ei->i_data[EXT4_DIND_BLOCK] */
	if (i_data[1]) {
		retval = free_dind_blocks(handle, inode, i_data[1]);
		if (retval)
			return retval;
	}

	/* ei->i_data[EXT4_TIND_BLOCK] */
	if (i_data[2]) {
		retval = free_tind_blocks(handle, inode, i_data[2]);
		if (retval)
			return retval;
	}
	return 0;
}

/*
 * Install tmp_inode's extent map into @inode: copy i_data across, set
 * the EXTENTS flag, fold tmp_inode's meta-data block usage into
 * i_blocks, then free the old indirect blocks.  Fails with -EAGAIN if a
 * racing block allocation cleared EXT4_STATE_EXT_MIGRATE since the
 * migration started.  Done under i_data_sem (write) to exclude block
 * allocation.
 */
static int ext4_ext_swap_inode_data(handle_t *handle, struct inode *inode,
						struct inode *tmp_inode)
{
	int retval;
	__le32	i_data[3];
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_inode_info *tmp_ei = EXT4_I(tmp_inode);

	/*
	 * One credit accounted for writing the
	 * i_data field of the original inode
	 */
	retval = ext4_journal_extend(handle, 1);
	if (retval) {
		retval = ext4_journal_restart(handle, 1);
		if (retval)
			goto err_out;
	}

	/* save the indirect roots before i_data is overwritten below */
	i_data[0] = ei->i_data[EXT4_IND_BLOCK];
	i_data[1] = ei->i_data[EXT4_DIND_BLOCK];
	i_data[2] = ei->i_data[EXT4_TIND_BLOCK];

	down_write(&EXT4_I(inode)->i_data_sem);
	/*
	 * if EXT4_STATE_EXT_MIGRATE is cleared a block allocation
	 * happened after we started the migrate. We need to
	 * fail the migrate
	 */
	if (!ext4_test_inode_state(inode, EXT4_STATE_EXT_MIGRATE)) {
		retval = -EAGAIN;
		up_write(&EXT4_I(inode)->i_data_sem);
		goto err_out;
	} else
		ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
	/*
	 * We have the extent map build with the tmp inode.
	 * Now copy the i_data across
	 */
	ext4_set_inode_flag(inode, EXT4_INODE_EXTENTS);
	memcpy(ei->i_data, tmp_ei->i_data, sizeof(ei->i_data));

	/*
	 * Update i_blocks with the new blocks that got
	 * allocated while adding extents for extent index
	 * blocks.
	 *
	 * While converting to extents we need not
	 * update the original inode i_blocks for extent blocks
	 * via quota APIs. The quota update happened via tmp_inode already.
	 */
	spin_lock(&inode->i_lock);
	inode->i_blocks += tmp_inode->i_blocks;
	spin_unlock(&inode->i_lock);
	up_write(&EXT4_I(inode)->i_data_sem);

	/*
	 * We mark the inode dirty after, because we decrement the
	 * i_blocks when freeing the indirect meta-data blocks
	 */
	retval = free_ind_block(handle, inode, i_data);
	ext4_mark_inode_dirty(handle, inode);

err_out:
	return retval;
}

/*
 * Recursively free the extent-index block referenced by @ix and, for
 * non-leaf levels, every index block below it.  Children are freed
 * before the block itself.  Returns -EIO if the block can't be read.
 */
static int free_ext_idx(handle_t *handle, struct inode *inode,
					struct ext4_extent_idx *ix)
{
	int i, retval = 0;
	ext4_fsblk_t block;
	struct buffer_head *bh;
	struct ext4_extent_header *eh;

	block = ext4_idx_pblock(ix);
	bh = sb_bread(inode->i_sb, block);
	if (!bh)
		return -EIO;

	eh = (struct ext4_extent_header *)bh->b_data;
	if (eh->eh_depth != 0) {
		ix = EXT_FIRST_INDEX(eh);
		for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ix++) {
			retval = free_ext_idx(handle, inode, ix);
			if (retval)
				break;
		}
	}
	put_bh(bh);
	extend_credit_for_blkdel(handle, inode);
	ext4_free_blocks(handle, inode, NULL, block, 1,
			 EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
	return retval;
}

/*
 * Free the extent meta data blocks only
 */
static int free_ext_block(handle_t *handle, struct inode *inode)
{
	int i, retval = 0;
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_extent_header *eh = (struct ext4_extent_header *)ei->i_data;
	struct ext4_extent_idx	*ix;
	if (eh->eh_depth == 0)
		/*
		 * No extra blocks allocated for extent meta data
		 */
		return 0;
	ix = EXT_FIRST_INDEX(eh);
	for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ix++) {
		retval = free_ext_idx(handle, inode, ix);
		if (retval)
			return retval;
	}
	return retval;
}
2a43a8780
|
418 |
int ext4_ext_migrate(struct inode *inode) |
c14c6fd5c
|
419 420 421 422 |
{ handle_t *handle; int retval = 0, i; __le32 *i_data; |
c14c6fd5c
|
423 424 |
struct ext4_inode_info *ei; struct inode *tmp_inode = NULL; |
fba90ffee
|
425 |
struct migrate_struct lb; |
c14c6fd5c
|
426 |
unsigned long max_entries; |
11013911d
|
427 |
__u32 goal; |
5cb81dabc
|
428 |
uid_t owner[2]; |
c14c6fd5c
|
429 |
|
83982b6f4
|
430 431 432 433 434 435 |
/* * If the filesystem does not support extents, or the inode * already is extent-based, error out. */ if (!EXT4_HAS_INCOMPAT_FEATURE(inode->i_sb, EXT4_FEATURE_INCOMPAT_EXTENTS) || |
12e9b8920
|
436 |
(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) |
c14c6fd5c
|
437 |
return -EINVAL; |
b8356c465
|
438 439 440 441 442 |
if (S_ISLNK(inode->i_mode) && inode->i_blocks == 0) /* * don't migrate fast symlink */ return retval; |
c14c6fd5c
|
443 444 445 |
handle = ext4_journal_start(inode, EXT4_DATA_TRANS_BLOCKS(inode->i_sb) + EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3 + |
5aca07eb7
|
446 |
EXT4_MAXQUOTAS_INIT_BLOCKS(inode->i_sb) |
c14c6fd5c
|
447 448 449 |
+ 1); if (IS_ERR(handle)) { retval = PTR_ERR(handle); |
090542641
|
450 |
return retval; |
c14c6fd5c
|
451 |
} |
11013911d
|
452 453 |
goal = (((inode->i_ino - 1) / EXT4_INODES_PER_GROUP(inode->i_sb)) * EXT4_INODES_PER_GROUP(inode->i_sb)) + 1; |
5cb81dabc
|
454 455 |
owner[0] = inode->i_uid; owner[1] = inode->i_gid; |
f157a4aa9
|
456 |
tmp_inode = ext4_new_inode(handle, inode->i_sb->s_root->d_inode, |
5cb81dabc
|
457 |
S_IFREG, NULL, goal, owner); |
c14c6fd5c
|
458 |
if (IS_ERR(tmp_inode)) { |
5cb81dabc
|
459 |
retval = PTR_ERR(inode); |
c14c6fd5c
|
460 |
ext4_journal_stop(handle); |
090542641
|
461 |
return retval; |
c14c6fd5c
|
462 463 464 |
} i_size_write(tmp_inode, i_size_read(inode)); /* |
f39490bcd
|
465 466 |
* Set the i_nlink to zero so it will be deleted later * when we drop inode reference. |
c14c6fd5c
|
467 |
*/ |
6d6b77f16
|
468 |
clear_nlink(tmp_inode); |
c14c6fd5c
|
469 470 471 472 |
ext4_ext_tree_init(handle, tmp_inode); ext4_orphan_add(handle, tmp_inode); ext4_journal_stop(handle); |
c14c6fd5c
|
473 474 475 476 |
/* * start with one credit accounted for * superblock modification. * |
25985edce
|
477 |
* For the tmp_inode we already have committed the |
c14c6fd5c
|
478 479 480 |
* trascation that created the inode. Later as and * when we add extents we extent the journal */ |
8009f9fb3
|
481 |
/* |
1b9c12f44
|
482 483 484 485 486 487 |
* Even though we take i_mutex we can still cause block * allocation via mmap write to holes. If we have allocated * new blocks we fail migrate. New block allocation will * clear EXT4_STATE_EXT_MIGRATE flag. The flag is updated * with i_data_sem held to prevent racing with block * allocation. |
267e4db9a
|
488 489 |
*/ down_read((&EXT4_I(inode)->i_data_sem)); |
19f5fb7ad
|
490 |
ext4_set_inode_state(inode, EXT4_STATE_EXT_MIGRATE); |
267e4db9a
|
491 |
up_read((&EXT4_I(inode)->i_data_sem)); |
c14c6fd5c
|
492 |
handle = ext4_journal_start(inode, 1); |
f39490bcd
|
493 494 495 496 497 498 499 500 501 502 |
if (IS_ERR(handle)) { /* * It is impossible to update on-disk structures without * a handle, so just rollback in-core changes and live other * work to orphan_list_cleanup() */ ext4_orphan_del(NULL, tmp_inode); retval = PTR_ERR(handle); goto out; } |
8009f9fb3
|
503 504 505 506 507 508 509 |
ei = EXT4_I(inode); i_data = ei->i_data; memset(&lb, 0, sizeof(lb)); /* 32 bit block address 4 bytes */ max_entries = inode->i_sb->s_blocksize >> 2; |
fba90ffee
|
510 |
for (i = 0; i < EXT4_NDIR_BLOCKS; i++) { |
c14c6fd5c
|
511 512 |
if (i_data[i]) { retval = update_extent_range(handle, tmp_inode, |
fba90ffee
|
513 |
le32_to_cpu(i_data[i]), &lb); |
c14c6fd5c
|
514 515 |
if (retval) goto err_out; |
fba90ffee
|
516 517 |
} else lb.curr_block++; |
c14c6fd5c
|
518 519 520 |
} if (i_data[EXT4_IND_BLOCK]) { retval = update_ind_extent_range(handle, tmp_inode, |
fba90ffee
|
521 |
le32_to_cpu(i_data[EXT4_IND_BLOCK]), &lb); |
c14c6fd5c
|
522 523 524 |
if (retval) goto err_out; } else |
fba90ffee
|
525 |
lb.curr_block += max_entries; |
c14c6fd5c
|
526 527 |
if (i_data[EXT4_DIND_BLOCK]) { retval = update_dind_extent_range(handle, tmp_inode, |
fba90ffee
|
528 |
le32_to_cpu(i_data[EXT4_DIND_BLOCK]), &lb); |
c14c6fd5c
|
529 530 531 |
if (retval) goto err_out; } else |
fba90ffee
|
532 |
lb.curr_block += max_entries * max_entries; |
c14c6fd5c
|
533 534 |
if (i_data[EXT4_TIND_BLOCK]) { retval = update_tind_extent_range(handle, tmp_inode, |
fba90ffee
|
535 |
le32_to_cpu(i_data[EXT4_TIND_BLOCK]), &lb); |
c14c6fd5c
|
536 537 538 539 540 541 542 543 |
if (retval) goto err_out; } /* * Build the last extent */ retval = finish_range(handle, tmp_inode, &lb); err_out: |
c14c6fd5c
|
544 545 546 547 548 549 |
if (retval) /* * Failure case delete the extent information with the * tmp_inode */ free_ext_block(handle, tmp_inode); |
267e4db9a
|
550 551 552 553 554 555 556 557 558 |
else { retval = ext4_ext_swap_inode_data(handle, inode, tmp_inode); if (retval) /* * if we fail to swap inode data free the extent * details of the tmp inode */ free_ext_block(handle, tmp_inode); } |
8009f9fb3
|
559 560 561 562 |
/* We mark the tmp_inode dirty via ext4_ext_tree_init. */ if (ext4_journal_extend(handle, 1) != 0) ext4_journal_restart(handle, 1); |
c14c6fd5c
|
563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 |
/* * Mark the tmp_inode as of size zero */ i_size_write(tmp_inode, 0); /* * set the i_blocks count to zero * so that the ext4_delete_inode does the * right job * * We don't need to take the i_lock because * the inode is not visible to user space. */ tmp_inode->i_blocks = 0; /* Reset the extent details */ ext4_ext_tree_init(handle, tmp_inode); |
c14c6fd5c
|
581 |
ext4_journal_stop(handle); |
f39490bcd
|
582 |
out: |
a8526e84a
|
583 |
unlock_new_inode(tmp_inode); |
090542641
|
584 |
iput(tmp_inode); |
c14c6fd5c
|
585 586 587 |
return retval; } |