Blame view
fs/btrfs/ordered-data.c
25.8 KB
dc17ff8f1
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
dc17ff8f1
#include <linux/slab.h>
d6bfde876
#include <linux/blkdev.h>
f421950f8
#include <linux/writeback.h>
#include <linux/pagevec.h>
dc17ff8f1
#include "ctree.h"
#include "transaction.h"
#include "btrfs_inode.h"
e6dcd2dc9
#include "extent_io.h"
dc17ff8f1

e6dcd2dc9
static u64 entry_end(struct btrfs_ordered_extent *entry)
dc17ff8f1
{
e6dcd2dc9
	if (entry->file_offset + entry->len < entry->file_offset)
		return (u64)-1;
	return entry->file_offset + entry->len;
dc17ff8f1
}
d352ac681
/* returns NULL if the insertion worked, or it returns the node it did find
 * in the tree
 */
e6dcd2dc9
static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset,
				   struct rb_node *node)
dc17ff8f1
{
d397712bc
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
e6dcd2dc9
	struct btrfs_ordered_extent *entry;
dc17ff8f1

d397712bc
	while (*p) {
dc17ff8f1
		parent = *p;
e6dcd2dc9
		entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node);
dc17ff8f1

e6dcd2dc9
		if (file_offset < entry->file_offset)
dc17ff8f1
			p = &(*p)->rb_left;
e6dcd2dc9
		else if (file_offset >= entry_end(entry))
dc17ff8f1
			p = &(*p)->rb_right;
		else
			return parent;
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}
d352ac681
/*
 * look for a given offset in the tree, and if it can't be found return the
 * first lesser offset
 */
e6dcd2dc9
static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset,
				     struct rb_node **prev_ret)
dc17ff8f1
{
d397712bc
	struct rb_node *n = root->rb_node;
dc17ff8f1
	struct rb_node *prev = NULL;
e6dcd2dc9
	struct rb_node *test;
	struct btrfs_ordered_extent *entry;
	struct btrfs_ordered_extent *prev_entry = NULL;
dc17ff8f1

d397712bc
	while (n) {
e6dcd2dc9
		entry = rb_entry(n, struct btrfs_ordered_extent, rb_node);
dc17ff8f1
		prev = n;
		prev_entry = entry;
dc17ff8f1

e6dcd2dc9
		if (file_offset < entry->file_offset)
dc17ff8f1
			n = n->rb_left;
e6dcd2dc9
		else if (file_offset >= entry_end(entry))
dc17ff8f1
			n = n->rb_right;
		else
			return n;
	}
	if (!prev_ret)
		return NULL;
d397712bc
	while (prev && file_offset >= entry_end(prev_entry)) {
e6dcd2dc9
		test = rb_next(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		if (file_offset < entry_end(prev_entry))
			break;

		prev = test;
	}
	if (prev)
		prev_entry = rb_entry(prev, struct btrfs_ordered_extent,
				      rb_node);
d397712bc
	while (prev && file_offset < entry_end(prev_entry)) {
e6dcd2dc9
		test = rb_prev(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		prev = test;
dc17ff8f1
	}
	*prev_ret = prev;
	return NULL;
}
d352ac681
/*
 * helper to check if a given offset is inside a given entry
 */
e6dcd2dc9
static int offset_in_entry(struct btrfs_ordered_extent *entry, u64 file_offset)
{
	if (file_offset < entry->file_offset ||
	    entry->file_offset + entry->len <= file_offset)
		return 0;
	return 1;
}
4b46fce23
static int range_overlaps(struct btrfs_ordered_extent *entry, u64 file_offset,
			  u64 len)
{
	if (file_offset + len <= entry->file_offset ||
	    entry->file_offset + entry->len <= file_offset)
		return 0;
	return 1;
}
d352ac681
/*
 * look for the first ordered struct that has this offset, otherwise
 * the first one less than this offset
 */
e6dcd2dc9
static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree,
					  u64 file_offset)
dc17ff8f1
{
e6dcd2dc9
	struct rb_root *root = &tree->tree;
c87fb6fdc
	struct rb_node *prev = NULL;
dc17ff8f1
	struct rb_node *ret;
e6dcd2dc9
	struct btrfs_ordered_extent *entry;

	if (tree->last) {
		entry = rb_entry(tree->last, struct btrfs_ordered_extent,
				 rb_node);
		if (offset_in_entry(entry, file_offset))
			return tree->last;
	}
	ret = __tree_search(root, file_offset, &prev);
dc17ff8f1
	if (!ret)
e6dcd2dc9
		ret = prev;
	if (ret)
		tree->last = ret;
dc17ff8f1
	return ret;
}
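/*
 * Note on tree_search(), not part of the original file: the tree->last
 * cache lets repeated lookups at the same offset skip the rbtree walk,
 * which presumably helps when many bios complete against one ordered
 * extent.  Every caller in this file holds tree->lock around the call,
 * which is what makes reading and updating tree->last safe.
 */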
eb84ae039
/* allocate and add a new ordered_extent into the per-inode tree.
 * file_offset is the logical offset in the file
 *
 * start is the disk block number of an extent already reserved in the
 * extent allocation tree
 *
 * len is the length of the extent
 *
eb84ae039
 * The tree is given a single reference on the ordered extent that was
 * inserted.
 */
4b46fce23
static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
				      u64 start, u64 len, u64 disk_len,
261507a02
				      int type, int dio, int compress_type)
dc17ff8f1
{
dc17ff8f1
	struct btrfs_ordered_inode_tree *tree;
e6dcd2dc9
	struct rb_node *node;
	struct btrfs_ordered_extent *entry;
dc17ff8f1

e6dcd2dc9
	tree = &BTRFS_I(inode)->ordered_tree;
	entry = kzalloc(sizeof(*entry), GFP_NOFS);
dc17ff8f1
	if (!entry)
		return -ENOMEM;
e6dcd2dc9
	entry->file_offset = file_offset;
	entry->start = start;
	entry->len = len;
c8b978188
	entry->disk_len = disk_len;
8b62b72b2
	entry->bytes_left = len;
3eaa28852
	entry->inode = inode;
261507a02
	entry->compress_type = compress_type;
d899e0521
	if (type != BTRFS_ORDERED_IO_DONE && type != BTRFS_ORDERED_COMPLETE)
80ff38566
		set_bit(type, &entry->flags);
3eaa28852

4b46fce23
	if (dio)
		set_bit(BTRFS_ORDERED_DIRECT, &entry->flags);
e6dcd2dc9
	/* one ref for the tree */
	atomic_set(&entry->refs, 1);
	init_waitqueue_head(&entry->wait);
	INIT_LIST_HEAD(&entry->list);
3eaa28852
	INIT_LIST_HEAD(&entry->root_extent_list);
dc17ff8f1

1abe9b8a1
	trace_btrfs_ordered_extent_add(inode, entry);
49958fd7d
	spin_lock(&tree->lock);
e6dcd2dc9
	node = tree_insert(&tree->tree, file_offset,
			   &entry->rb_node);
d397712bc
	BUG_ON(node);
49958fd7d
	spin_unlock(&tree->lock);
d397712bc

3eaa28852
	spin_lock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock);
	list_add_tail(&entry->root_extent_list,
		      &BTRFS_I(inode)->root->fs_info->ordered_extents);
	spin_unlock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock);
e6dcd2dc9
	BUG_ON(node);
dc17ff8f1
	return 0;
}
4b46fce23
int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
			     u64 start, u64 len, u64 disk_len, int type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
261507a02
					  disk_len, type, 0,
					  BTRFS_COMPRESS_NONE);
4b46fce23
}

int btrfs_add_ordered_extent_dio(struct inode *inode, u64 file_offset,
				 u64 start, u64 len, u64 disk_len, int type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
261507a02
					  disk_len, type, 1,
					  BTRFS_COMPRESS_NONE);
}

int btrfs_add_ordered_extent_compress(struct inode *inode, u64 file_offset,
				      u64 start, u64 len, u64 disk_len,
				      int type, int compress_type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
					  disk_len, type, 0,
					  compress_type);
4b46fce23
}
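/*
 * Illustrative sketch, not part of the original file: a buffered COW write
 * path is expected to reserve disk space and then register the ordered
 * extent before submitting bios.  The names file_pos, ins_start, ram_size
 * and disk_num_bytes are placeholders for whatever the caller tracks:
 *
 *	ret = btrfs_add_ordered_extent(inode, file_pos, ins_start,
 *				       ram_size, disk_num_bytes, 0);
 *	if (ret)
 *		return ret;
 *
 * A type of 0 sets no extra flag bits (see the type check in
 * __btrfs_add_ordered_extent above), which is the plain-write case; the
 * _dio and _compress wrappers forward to the same helper with the dio or
 * compress_type argument filled in.
 */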
eb84ae039
/*
 * Add a struct btrfs_ordered_sum into the list of checksums to be inserted
3edf7d33f
 * when an ordered extent is finished.  If the list covers more than one
 * ordered extent, it is split across multiples.
eb84ae039
 */
3edf7d33f
int btrfs_add_ordered_sum(struct inode *inode,
			  struct btrfs_ordered_extent *entry,
			  struct btrfs_ordered_sum *sum)
dc17ff8f1
{
e6dcd2dc9
	struct btrfs_ordered_inode_tree *tree;
dc17ff8f1

e6dcd2dc9
	tree = &BTRFS_I(inode)->ordered_tree;
49958fd7d
	spin_lock(&tree->lock);
e6dcd2dc9
	list_add_tail(&sum->list, &entry->list);
49958fd7d
	spin_unlock(&tree->lock);
e6dcd2dc9
	return 0;
dc17ff8f1
}
eb84ae039
/*
 * this is used to account for finished IO across a given range
163cf09c2
 * of the file.  The IO may span ordered extents.  If
 * a given ordered_extent is completely done, 1 is returned, otherwise
 * 0.
 *
 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 * to make sure this function only returns 1 once for a given ordered extent.
 *
 * file_offset is updated to one byte past the range that is recorded as
 * complete.  This allows you to walk forward in the file.
 */
int btrfs_dec_test_first_ordered_pending(struct inode *inode,
				   struct btrfs_ordered_extent **cached,
				   u64 *file_offset, u64 io_size)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	int ret;
	u64 dec_end;
	u64 dec_start;
	u64 to_dec;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock(&tree->lock);
	node = tree_search(tree, *file_offset);
	if (!node) {
		ret = 1;
		goto out;
	}

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!offset_in_entry(entry, *file_offset)) {
		ret = 1;
		goto out;
	}

	dec_start = max(*file_offset, entry->file_offset);
	dec_end = min(*file_offset + io_size, entry->file_offset +
		      entry->len);
	*file_offset = dec_end;
	if (dec_start > dec_end) {
		printk(KERN_CRIT "bad ordering dec_start %llu end %llu\n",
		       (unsigned long long)dec_start,
		       (unsigned long long)dec_end);
	}
	to_dec = dec_end - dec_start;
	if (to_dec > entry->bytes_left) {
		printk(KERN_CRIT "bad ordered accounting left %llu size %llu\n",
		       (unsigned long long)entry->bytes_left,
		       (unsigned long long)to_dec);
	}
	entry->bytes_left -= to_dec;
	if (entry->bytes_left == 0)
		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
	else
		ret = 1;
out:
	if (!ret && cached && entry) {
		*cached = entry;
		atomic_inc(&entry->refs);
	}
	spin_unlock(&tree->lock);
	return ret == 0;
}

/*
 * this is used to account for finished IO across a given range
eb84ae039
 * of the file.  The IO should not span ordered extents.  If
 * a given ordered_extent is completely done, 1 is returned, otherwise
 * 0.
 *
 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 * to make sure this function only returns 1 once for a given ordered extent.
 */
e6dcd2dc9
int btrfs_dec_test_ordered_pending(struct inode *inode,
5a1a3df1f
				   struct btrfs_ordered_extent **cached,
e6dcd2dc9
				   u64 file_offset, u64 io_size)
dc17ff8f1
{
e6dcd2dc9
	struct btrfs_ordered_inode_tree *tree;
dc17ff8f1
	struct rb_node *node;
5a1a3df1f
	struct btrfs_ordered_extent *entry = NULL;
e6dcd2dc9
	int ret;

	tree = &BTRFS_I(inode)->ordered_tree;
49958fd7d
	spin_lock(&tree->lock);
e6dcd2dc9
	node = tree_search(tree, file_offset);
dc17ff8f1
	if (!node) {
e6dcd2dc9
		ret = 1;
		goto out;
dc17ff8f1
	}
e6dcd2dc9
	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!offset_in_entry(entry, file_offset)) {
		ret = 1;
		goto out;
dc17ff8f1
	}
e6dcd2dc9

8b62b72b2
	if (io_size > entry->bytes_left) {
		printk(KERN_CRIT "bad ordered accounting left %llu size %llu\n",
		       (unsigned long long)entry->bytes_left,
		       (unsigned long long)io_size);
	}
	entry->bytes_left -= io_size;
	if (entry->bytes_left == 0)
e6dcd2dc9
		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
8b62b72b2
	else
		ret = 1;
e6dcd2dc9
out:
5a1a3df1f
	if (!ret && cached && entry) {
		*cached = entry;
		atomic_inc(&entry->refs);
	}
49958fd7d
	spin_unlock(&tree->lock);
e6dcd2dc9
	return ret == 0;
}
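/*
 * Illustrative sketch, not part of the original file: the expected caller
 * of btrfs_dec_test_ordered_pending() is an IO-completion hook.  Assuming
 * 'start' and 'end' describe the byte range whose writeback just finished,
 * the pattern would look roughly like:
 *
 *	struct btrfs_ordered_extent *ordered = NULL;
 *
 *	if (btrfs_dec_test_ordered_pending(inode, &ordered, start,
 *					   end - start + 1)) {
 *		finish_the_ordered_extent(ordered);
 *		btrfs_put_ordered_extent(ordered);
 *	}
 *
 * finish_the_ordered_extent() is a placeholder; the point is that a return
 * value of 1 hands the caller a referenced, fully written ordered extent
 * that it must eventually drop with btrfs_put_ordered_extent().
 */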
dc17ff8f1

eb84ae039
/*
 * used to drop a reference on an ordered extent.  This will free
 * the extent if the last reference is dropped
 */
e6dcd2dc9
int btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
{
ba1da2f44
	struct list_head *cur;
	struct btrfs_ordered_sum *sum;
1abe9b8a1
	trace_btrfs_ordered_extent_put(entry->inode, entry);
ba1da2f44
	if (atomic_dec_and_test(&entry->refs)) {
d397712bc
		while (!list_empty(&entry->list)) {
ba1da2f44
			cur = entry->list.next;
			sum = list_entry(cur, struct btrfs_ordered_sum, list);
			list_del(&sum->list);
			kfree(sum);
		}
e6dcd2dc9
		kfree(entry);
ba1da2f44
	}
e6dcd2dc9
	return 0;
dc17ff8f1
}
cee36a03e

eb84ae039
/*
 * remove an ordered extent from the tree.  No references are dropped
49958fd7d
 * and you must wake_up entry->wait.  You must hold the tree lock
c21677545
 * while you call this function.
eb84ae039
 */
c21677545
static int __btrfs_remove_ordered_extent(struct inode *inode,
e6dcd2dc9
				struct btrfs_ordered_extent *entry)
cee36a03e
{
e6dcd2dc9
	struct btrfs_ordered_inode_tree *tree;
287a0ab91
	struct btrfs_root *root = BTRFS_I(inode)->root;
cee36a03e
	struct rb_node *node;
cee36a03e

e6dcd2dc9
	tree = &BTRFS_I(inode)->ordered_tree;
e6dcd2dc9
	node = &entry->rb_node;
cee36a03e
	rb_erase(node, &tree->tree);
e6dcd2dc9
	tree->last = NULL;
	set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);
3eaa28852

287a0ab91
	spin_lock(&root->fs_info->ordered_extent_lock);
3eaa28852
	list_del_init(&entry->root_extent_list);
5a3f23d51

1abe9b8a1
	trace_btrfs_ordered_extent_remove(inode, entry);
5a3f23d51
	/*
	 * we have no more ordered extents for this inode and
	 * no dirty pages.  We can safely remove it from the
	 * list of ordered extents
	 */
	if (RB_EMPTY_ROOT(&tree->tree) &&
	    !mapping_tagged(inode->i_mapping, PAGECACHE_TAG_DIRTY)) {
		list_del_init(&BTRFS_I(inode)->ordered_operations);
	}
287a0ab91
	spin_unlock(&root->fs_info->ordered_extent_lock);
3eaa28852

c21677545
	return 0;
}

/*
 * remove an ordered extent from the tree.  No references are dropped
 * but any waiters are woken.
 */
int btrfs_remove_ordered_extent(struct inode *inode,
				struct btrfs_ordered_extent *entry)
{
	struct btrfs_ordered_inode_tree *tree;
	int ret;

	tree = &BTRFS_I(inode)->ordered_tree;
49958fd7d
	spin_lock(&tree->lock);
c21677545
	ret = __btrfs_remove_ordered_extent(inode, entry);
49958fd7d
	spin_unlock(&tree->lock);
e6dcd2dc9
	wake_up(&entry->wait);
c21677545

	return ret;
cee36a03e
}
d352ac681
/*
 * wait for all the ordered extents in a root.  This is done when balancing
 * space between drives.
 */
24bbcf044
int btrfs_wait_ordered_extents(struct btrfs_root *root,
			       int nocow_only, int delay_iput)
3eaa28852
{
	struct list_head splice;
	struct list_head *cur;
	struct btrfs_ordered_extent *ordered;
	struct inode *inode;

	INIT_LIST_HEAD(&splice);

	spin_lock(&root->fs_info->ordered_extent_lock);
	list_splice_init(&root->fs_info->ordered_extents, &splice);
5b21f2ed3
	while (!list_empty(&splice)) {
3eaa28852
		cur = splice.next;
		ordered = list_entry(cur, struct btrfs_ordered_extent,
				     root_extent_list);
7ea394f11
		if (nocow_only &&
d899e0521
		    !test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags) &&
		    !test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags)) {
5b21f2ed3
			list_move(&ordered->root_extent_list,
				  &root->fs_info->ordered_extents);
7ea394f11
			cond_resched_lock(&root->fs_info->ordered_extent_lock);
			continue;
		}
3eaa28852
		list_del_init(&ordered->root_extent_list);
		atomic_inc(&ordered->refs);
3eaa28852

		/*
5b21f2ed3
		 * the inode may be getting freed (in sys_unlink path).
3eaa28852
		 */
5b21f2ed3
		inode = igrab(ordered->inode);
3eaa28852
		spin_unlock(&root->fs_info->ordered_extent_lock);
5b21f2ed3
		if (inode) {
			btrfs_start_ordered_extent(inode, ordered, 1);
			btrfs_put_ordered_extent(ordered);
24bbcf044
			if (delay_iput)
				btrfs_add_delayed_iput(inode);
			else
				iput(inode);
5b21f2ed3
		} else {
			btrfs_put_ordered_extent(ordered);
		}
3eaa28852

		spin_lock(&root->fs_info->ordered_extent_lock);
	}
	spin_unlock(&root->fs_info->ordered_extent_lock);
	return 0;
}
eb84ae039
/*
5a3f23d51
 * this is used during transaction commit to write all the inodes
 * added to the ordered operation list.  These files must be fully on
 * disk before the transaction commits.
 *
 * we have two modes here, one is to just start the IO via filemap_flush
 * and the other is to wait for all the io.  When we wait, we have an
 * extra check to make sure the ordered operation list really is empty
 * before we return
 */
int btrfs_run_ordered_operations(struct btrfs_root *root, int wait)
{
	struct btrfs_inode *btrfs_inode;
	struct inode *inode;
	struct list_head splice;

	INIT_LIST_HEAD(&splice);

	mutex_lock(&root->fs_info->ordered_operations_mutex);
	spin_lock(&root->fs_info->ordered_extent_lock);
again:
	list_splice_init(&root->fs_info->ordered_operations, &splice);

	while (!list_empty(&splice)) {
		btrfs_inode = list_entry(splice.next, struct btrfs_inode,
					 ordered_operations);

		inode = &btrfs_inode->vfs_inode;

		list_del_init(&btrfs_inode->ordered_operations);

		/*
		 * the inode may be getting freed (in sys_unlink path).
		 */
		inode = igrab(inode);

		if (!wait && inode) {
			list_add_tail(&BTRFS_I(inode)->ordered_operations,
				      &root->fs_info->ordered_operations);
		}
		spin_unlock(&root->fs_info->ordered_extent_lock);

		if (inode) {
			if (wait)
				btrfs_wait_ordered_range(inode, 0, (u64)-1);
			else
				filemap_flush(inode->i_mapping);
24bbcf044
			btrfs_add_delayed_iput(inode);
5a3f23d51
		}

		cond_resched();
		spin_lock(&root->fs_info->ordered_extent_lock);
	}
	if (wait && !list_empty(&root->fs_info->ordered_operations))
		goto again;

	spin_unlock(&root->fs_info->ordered_extent_lock);
	mutex_unlock(&root->fs_info->ordered_operations_mutex);

	return 0;
}

/*
eb84ae039
 * Used to start IO or wait for a given ordered extent to finish.
 *
 * If wait is one, this effectively waits on page writeback for all the pages
 * in the extent, and it waits on the io completion code to insert
 * metadata into the btree corresponding to the extent
 */
void btrfs_start_ordered_extent(struct inode *inode,
				struct btrfs_ordered_extent *entry,
				int wait)
e6dcd2dc9
{
	u64 start = entry->file_offset;
	u64 end = start + entry->len - 1;
e1b81e676

1abe9b8a1
	trace_btrfs_ordered_extent_start(inode, entry);
eb84ae039
	/*
	 * pages in the range can be dirty, clean or writeback.  We
	 * start IO on any dirty ones so the wait doesn't stall waiting
	 * for pdflush to find them
	 */
4b46fce23
	if (!test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
		filemap_fdatawrite_range(inode->i_mapping, start, end);
c8b978188
	if (wait) {
e6dcd2dc9
		wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE,
						 &entry->flags));
c8b978188
	}
e6dcd2dc9
}
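/*
 * Note on btrfs_start_ordered_extent(), not part of the original file:
 * the BTRFS_ORDERED_DIRECT check skips filemap_fdatawrite_range() because
 * a direct IO write has already submitted its own bios and has no dirty
 * page cache pages to flush; waiting on BTRFS_ORDERED_COMPLETE still works
 * in that case because __btrfs_remove_ordered_extent() sets the bit when
 * the extent is finished, whichever path wrote the data.
 */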
cee36a03e

eb84ae039
/*
 * Used to wait on ordered extents across a large range of bytes.
 */
cb843a6f5
int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
e6dcd2dc9
{
	u64 end;
e5a2217ef
	u64 orig_end;
e6dcd2dc9
	struct btrfs_ordered_extent *ordered;
8b62b72b2
	int found;
e5a2217ef

	if (start + len < start) {
f421950f8
		orig_end = INT_LIMIT(loff_t);
e5a2217ef
	} else {
		orig_end = start + len - 1;
f421950f8
		if (orig_end > INT_LIMIT(loff_t))
			orig_end = INT_LIMIT(loff_t);
e5a2217ef
	}
4a0967527
again:
e5a2217ef
	/* start IO across the range first to instantiate any delalloc
	 * extents
	 */
8aa38c31b
	filemap_fdatawrite_range(inode->i_mapping, start, orig_end);
f421950f8

771ed689d
	/* The compression code will leave pages locked but return from
	 * writepage without setting the page writeback.  Starting again
	 * with WB_SYNC_ALL will end up waiting for the IO to actually start.
	 */
8aa38c31b
	filemap_fdatawrite_range(inode->i_mapping, start, orig_end);
771ed689d

8aa38c31b
	filemap_fdatawait_range(inode->i_mapping, start, orig_end);
e5a2217ef

f421950f8
	end = orig_end;
8b62b72b2
	found = 0;
d397712bc
	while (1) {
e6dcd2dc9
		ordered = btrfs_lookup_first_ordered_extent(inode, end);
d397712bc
		if (!ordered)
e6dcd2dc9
			break;
e5a2217ef
		if (ordered->file_offset > orig_end) {
e6dcd2dc9
			btrfs_put_ordered_extent(ordered);
			break;
		}
		if (ordered->file_offset + ordered->len < start) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
8b62b72b2
		found++;
e5a2217ef
		btrfs_start_ordered_extent(inode, ordered, 1);
e6dcd2dc9
		end = ordered->file_offset;
		btrfs_put_ordered_extent(ordered);
e5a2217ef
		if (end == 0 || end == start)
e6dcd2dc9
			break;
		end--;
	}
8b62b72b2
	if (found || test_range_bit(&BTRFS_I(inode)->io_tree, start, orig_end,
			   EXTENT_DELALLOC, 0, NULL)) {
771ed689d
		schedule_timeout(1);
4a0967527
		goto again;
	}
cb843a6f5
	return 0;
cee36a03e
}
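/*
 * Usage note, not part of the original file: the whole-file form of this
 * call, btrfs_wait_ordered_range(inode, 0, (u64)-1), is what the ordered
 * operations code elsewhere in this file uses to push an inode fully to
 * disk before a transaction commit; callers that only care about part of
 * a file can pass the exact start/len they need instead.
 */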
eb84ae039
/*
 * find an ordered extent corresponding to file_offset.  return NULL if
 * nothing is found, otherwise take a reference on the extent and return it
 */
e6dcd2dc9
struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct inode *inode,
							 u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
49958fd7d
	spin_lock(&tree->lock);
e6dcd2dc9
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!offset_in_entry(entry, file_offset))
		entry = NULL;
	if (entry)
		atomic_inc(&entry->refs);
out:
49958fd7d
	spin_unlock(&tree->lock);
e6dcd2dc9
	return entry;
}
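/*
 * Illustrative sketch, not part of the original file: both lookup helpers
 * return the ordered extent with an extra reference held, so a caller
 * pairs every successful lookup with btrfs_put_ordered_extent().  With
 * 'pos' standing in for whatever offset the caller is probing:
 *
 *	struct btrfs_ordered_extent *ordered;
 *
 *	ordered = btrfs_lookup_ordered_extent(inode, pos);
 *	if (ordered) {
 *		...inspect ordered->file_offset and ordered->len...
 *		btrfs_put_ordered_extent(ordered);
 *	}
 */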
4b46fce23
/* Since the DIO code tries to lock a wide area we need to look for any ordered
 * extents that exist in the range, rather than just the start of the range.
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_range(struct inode *inode,
							u64 file_offset,
							u64 len)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node) {
		node = tree_search(tree, file_offset + len);
		if (!node)
			goto out;
	}

	while (1) {
		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
		if (range_overlaps(entry, file_offset, len))
			break;

		if (entry->file_offset >= file_offset + len) {
			entry = NULL;
			break;
		}
		entry = NULL;
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	if (entry)
		atomic_inc(&entry->refs);
	spin_unlock(&tree->lock);
	return entry;
}
eb84ae039
/*
 * lookup and return any extent before 'file_offset'.  NULL is returned
 * if none is found
 */
e6dcd2dc9
struct btrfs_ordered_extent *
d397712bc
btrfs_lookup_first_ordered_extent(struct inode *inode, u64 file_offset)
e6dcd2dc9
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
49958fd7d
	spin_lock(&tree->lock);
e6dcd2dc9
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	atomic_inc(&entry->refs);
out:
49958fd7d
	spin_unlock(&tree->lock);
e6dcd2dc9
	return entry;
81d7ed29f
}
dbe674a99

eb84ae039
/*
 * After an extent is done, call this to conditionally update the on disk
 * i_size.  i_size is updated to cover any fully written part of the file.
 */
c21677545
int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
dbe674a99
				struct btrfs_ordered_extent *ordered)
{
	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	u64 disk_i_size;
	u64 new_i_size;
	u64 i_size_test;
c21677545
	u64 i_size = i_size_read(inode);
dbe674a99
	struct rb_node *node;
c21677545
	struct rb_node *prev = NULL;
dbe674a99
	struct btrfs_ordered_extent *test;
c21677545
	int ret = 1;

	if (ordered)
		offset = entry_end(ordered);
a038fab0c
	else
		offset = ALIGN(offset, BTRFS_I(inode)->root->sectorsize);
dbe674a99

49958fd7d
	spin_lock(&tree->lock);
dbe674a99
	disk_i_size = BTRFS_I(inode)->disk_i_size;
c21677545
	/* truncate file */
	if (disk_i_size > i_size) {
		BTRFS_I(inode)->disk_i_size = i_size;
		ret = 0;
		goto out;
	}
dbe674a99
	/*
	 * if the disk i_size is already at the inode->i_size, or
	 * this ordered extent is inside the disk i_size, we're done
	 */
c21677545
	if (disk_i_size == i_size || offset <= disk_i_size) {
dbe674a99
		goto out;
	}

	/*
	 * we can't update the disk_i_size if there are delalloc bytes
	 * between disk_i_size and this ordered extent
	 */
c21677545
	if (test_range_bit(io_tree, disk_i_size, offset - 1,
9655d2982
			   EXTENT_DELALLOC, 0, NULL)) {
dbe674a99
		goto out;
	}
	/*
	 * walk backward from this ordered extent to disk_i_size.
	 * if we find an ordered extent then we can't update disk i_size
	 * yet
	 */
c21677545
	if (ordered) {
		node = rb_prev(&ordered->rb_node);
	} else {
		prev = tree_search(tree, offset);
		/*
		 * we insert file extents without involving ordered struct,
		 * so there should be no ordered struct covering this offset
		 */
		if (prev) {
			test = rb_entry(prev, struct btrfs_ordered_extent,
					rb_node);
			BUG_ON(offset_in_entry(test, offset));
		}
		node = prev;
	}
	while (node) {
dbe674a99
		test = rb_entry(node, struct btrfs_ordered_extent, rb_node);
		if (test->file_offset + test->len <= disk_i_size)
			break;
c21677545
		if (test->file_offset >= i_size)
dbe674a99
			break;
		if (test->file_offset >= disk_i_size)
			goto out;
c21677545
		node = rb_prev(node);
dbe674a99
	}
c21677545
	new_i_size = min_t(u64, offset, i_size);
dbe674a99

	/*
	 * at this point, we know we can safely update i_size to at least
	 * the offset from this ordered extent.  But, we need to
	 * walk forward and see if ios from higher up in the file have
	 * finished.
	 */
c21677545
	if (ordered) {
		node = rb_next(&ordered->rb_node);
	} else {
		if (prev)
			node = rb_next(prev);
		else
			node = rb_first(&tree->tree);
	}
dbe674a99
	i_size_test = 0;
	if (node) {
		/*
		 * do we have an area where IO might have finished
		 * between our ordered extent and the next one.
		 */
		test = rb_entry(node, struct btrfs_ordered_extent, rb_node);
c21677545
		if (test->file_offset > offset)
b48652c10
			i_size_test = test->file_offset;
dbe674a99
	} else {
c21677545
		i_size_test = i_size;
dbe674a99
	}

	/*
	 * i_size_test is the end of a region after this ordered
	 * extent where there are no ordered extents.  As long as there
	 * are no delalloc bytes in this area, it is safe to update
	 * disk_i_size to the end of the region.
	 */
c21677545
	if (i_size_test > offset &&
	    !test_range_bit(io_tree, offset, i_size_test - 1,
			    EXTENT_DELALLOC, 0, NULL)) {
		new_i_size = min_t(u64, i_size_test, i_size);
dbe674a99
	}
	BTRFS_I(inode)->disk_i_size = new_i_size;
c21677545
	ret = 0;
dbe674a99
out:
c21677545
	/*
	 * we need to remove the ordered extent with the tree lock held
	 * so that other people calling this function don't find our fully
	 * processed ordered entry and skip updating the i_size
	 */
	if (ordered)
		__btrfs_remove_ordered_extent(inode, ordered);
49958fd7d
	spin_unlock(&tree->lock);
c21677545
	if (ordered)
		wake_up(&ordered->wait);
	return ret;
dbe674a99
}
ba1da2f44

eb84ae039
/*
 * search the ordered extents for one corresponding to 'offset' and
 * try to find a checksum.  This is used because we allow pages to
 * be reclaimed before their checksum is actually put into the btree
 */
d20f7043f
int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
			   u32 *sum)
ba1da2f44
{
	struct btrfs_ordered_sum *ordered_sum;
	struct btrfs_sector_sum *sector_sums;
	struct btrfs_ordered_extent *ordered;
	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
3edf7d33f
	unsigned long num_sectors;
	unsigned long i;
	u32 sectorsize = BTRFS_I(inode)->root->sectorsize;
ba1da2f44
	int ret = 1;
ba1da2f44

	ordered = btrfs_lookup_ordered_extent(inode, offset);
	if (!ordered)
		return 1;
49958fd7d
	spin_lock(&tree->lock);
c6e308713
	list_for_each_entry_reverse(ordered_sum, &ordered->list, list) {
d20f7043f
		if (disk_bytenr >= ordered_sum->bytenr) {
3edf7d33f
			num_sectors = ordered_sum->len / sectorsize;
ed98b56a6
			sector_sums = ordered_sum->sums;
3edf7d33f
			for (i = 0; i < num_sectors; i++) {
d20f7043f
				if (sector_sums[i].bytenr == disk_bytenr) {
3edf7d33f
					*sum = sector_sums[i].sum;
					ret = 0;
					goto out;
				}
			}
ba1da2f44
		}
	}
out:
49958fd7d
	spin_unlock(&tree->lock);
89642229a
	btrfs_put_ordered_extent(ordered);
ba1da2f44
	return ret;
}
f421950f8

5a3f23d51
/*
 * add a given inode to the list of inodes that must be fully on
 * disk before a transaction commit finishes.
 *
 * This basically gives us the ext3 style data=ordered mode, and it is mostly
 * used to make sure renamed files are fully on disk.
 *
 * It is a noop if the inode is already fully on disk.
 *
 * If trans is not null, we'll do a friendly check for a transaction that
 * is already flushing things and force the IO down ourselves.
 */
int btrfs_add_ordered_operation(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct inode *inode)
{
	u64 last_mod;

	last_mod = max(BTRFS_I(inode)->generation, BTRFS_I(inode)->last_trans);

	/*
	 * if this file hasn't been changed since the last transaction
	 * commit, we can safely return without doing anything
	 */
	if (last_mod < root->fs_info->last_trans_committed)
		return 0;

	/*
	 * the transaction is already committing.  Just start the IO and
	 * don't bother with all of this list nonsense
	 */
	if (trans && root->fs_info->running_transaction->blocked) {
		btrfs_wait_ordered_range(inode, 0, (u64)-1);
		return 0;
	}

	spin_lock(&root->fs_info->ordered_extent_lock);
	if (list_empty(&BTRFS_I(inode)->ordered_operations)) {
		list_add_tail(&BTRFS_I(inode)->ordered_operations,
			      &root->fs_info->ordered_operations);
	}
	spin_unlock(&root->fs_info->ordered_extent_lock);

	return 0;
}
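/*
 * Usage note, not part of the original file: the "renamed files" case in
 * the comment above suggests the expected caller is a rename path doing
 * something along the lines of
 *
 *	btrfs_add_ordered_operation(trans, root, old_inode);
 *
 * so that btrfs_run_ordered_operations() flushes the moved file's data
 * before the commit that makes the new name visible.  'old_inode' is a
 * placeholder for the inode being renamed.
 */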