Blame view
fs/btrfs/delayed-ref.c
25.5 KB
56bec294d Btrfs: do extent ... |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 |
/* * Copyright (C) 2009 Oracle. All rights reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License v2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this program; if not, write to the * Free Software Foundation, Inc., 59 Temple Place - Suite 330, * Boston, MA 021110-1307, USA. */ #include <linux/sched.h> |
5a0e3ad6a include cleanup: ... |
20 |
#include <linux/slab.h> |
56bec294d Btrfs: do extent ... |
21 |
#include <linux/sort.h> |
56bec294d Btrfs: do extent ... |
22 23 24 |
#include "ctree.h" #include "delayed-ref.h" #include "transaction.h" |
3368d001b btrfs: qgroup: Re... |
25 |
#include "qgroup.h" |
56bec294d Btrfs: do extent ... |
26 |
|
78a6184a3 Btrfs: use slabs ... |
27 28 29 30 |
struct kmem_cache *btrfs_delayed_ref_head_cachep; struct kmem_cache *btrfs_delayed_tree_ref_cachep; struct kmem_cache *btrfs_delayed_data_ref_cachep; struct kmem_cache *btrfs_delayed_extent_op_cachep; |
56bec294d Btrfs: do extent ... |
31 32 33 34 35 36 37 |
/* * delayed back reference update tracking. For subvolume trees * we queue up extent allocations and backref maintenance for * delayed processing. This avoids deep call chains where we * add extents in the middle of btrfs_search_slot, and it allows * us to buffer up frequently modified backrefs in an rb tree instead * of hammering updates on the extent allocation tree. |
56bec294d Btrfs: do extent ... |
38 39 40 |
*/ /* |
5d4f98a28 Btrfs: Mixed back... |
41 42 43 |
* compare two delayed tree backrefs with same bytenr and type */ static int comp_tree_refs(struct btrfs_delayed_tree_ref *ref2, |
41b0fc428 Btrfs: compare re... |
44 |
struct btrfs_delayed_tree_ref *ref1, int type) |
5d4f98a28 Btrfs: Mixed back... |
45 |
{ |
41b0fc428 Btrfs: compare re... |
46 47 48 49 50 51 52 53 54 55 56 |
if (type == BTRFS_TREE_BLOCK_REF_KEY) { if (ref1->root < ref2->root) return -1; if (ref1->root > ref2->root) return 1; } else { if (ref1->parent < ref2->parent) return -1; if (ref1->parent > ref2->parent) return 1; } |
5d4f98a28 Btrfs: Mixed back... |
57 58 59 60 61 |
return 0; } /* * compare two delayed data backrefs with same bytenr and type |
56bec294d Btrfs: do extent ... |
62 |
*/ |
5d4f98a28 Btrfs: Mixed back... |
63 64 |
static int comp_data_refs(struct btrfs_delayed_data_ref *ref2, struct btrfs_delayed_data_ref *ref1) |
56bec294d Btrfs: do extent ... |
65 |
{ |
5d4f98a28 Btrfs: Mixed back... |
66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 |
if (ref1->node.type == BTRFS_EXTENT_DATA_REF_KEY) { if (ref1->root < ref2->root) return -1; if (ref1->root > ref2->root) return 1; if (ref1->objectid < ref2->objectid) return -1; if (ref1->objectid > ref2->objectid) return 1; if (ref1->offset < ref2->offset) return -1; if (ref1->offset > ref2->offset) return 1; } else { if (ref1->parent < ref2->parent) return -1; if (ref1->parent > ref2->parent) return 1; } return 0; } |
c46effa60 Btrfs: introduce ... |
87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 |
/* insert a new ref to head ref rbtree */ static struct btrfs_delayed_ref_head *htree_insert(struct rb_root *root, struct rb_node *node) { struct rb_node **p = &root->rb_node; struct rb_node *parent_node = NULL; struct btrfs_delayed_ref_head *entry; struct btrfs_delayed_ref_head *ins; u64 bytenr; ins = rb_entry(node, struct btrfs_delayed_ref_head, href_node); bytenr = ins->node.bytenr; while (*p) { parent_node = *p; entry = rb_entry(parent_node, struct btrfs_delayed_ref_head, href_node); if (bytenr < entry->node.bytenr) p = &(*p)->rb_left; else if (bytenr > entry->node.bytenr) p = &(*p)->rb_right; else return entry; } rb_link_node(node, parent_node, p); rb_insert_color(node, root); return NULL; } |
56bec294d Btrfs: do extent ... |
116 |
/*
 * Find a head entry based on bytenr. This returns the delayed ref head if it
 * was able to find one, or NULL if nothing was in that spot. If return_bigger
 * is given, the next bigger entry is returned if no exact match is found
 * (wrapping around to the first entry in the tree when the search ran off
 * the right-hand end).
 */
static struct btrfs_delayed_ref_head *
find_ref_head(struct rb_root *root, u64 bytenr,
	      int return_bigger)
{
	struct rb_node *n;
	struct btrfs_delayed_ref_head *entry;

	n = root->rb_node;
	entry = NULL;
	while (n) {
		entry = rb_entry(n, struct btrfs_delayed_ref_head, href_node);

		if (bytenr < entry->node.bytenr)
			n = n->rb_left;
		else if (bytenr > entry->node.bytenr)
			n = n->rb_right;
		else
			return entry;	/* exact match */
	}
	/* No exact match; entry is the last node visited during the walk. */
	if (entry && return_bigger) {
		if (bytenr > entry->node.bytenr) {
			/*
			 * The last visited entry is smaller than bytenr, so
			 * the next bigger one is its in-order successor; wrap
			 * to the smallest entry if there is no successor.
			 */
			n = rb_next(&entry->href_node);
			if (!n)
				n = rb_first(root);
			entry = rb_entry(n, struct btrfs_delayed_ref_head,
					 href_node);
			return entry;
		}
		/* entry is already bigger than bytenr */
		return entry;
	}
	return NULL;
}
c3e69d58e Btrfs: process th... |
154 155 |
/*
 * Try to take head->mutex while the caller holds delayed_refs->lock.
 *
 * If the mutex is contended, the spinlock is dropped (with a reference held
 * on the head so it cannot be freed) while we block on the mutex, then
 * retaken. Because the head may have been processed and removed from the
 * tree in that window, the in_tree flag is rechecked afterwards.
 *
 * Returns 0 with the mutex held, or -EAGAIN if the head went away and the
 * caller must restart its lookup (delayed_refs->lock is held either way).
 */
int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
			   struct btrfs_delayed_ref_head *head)
{
	struct btrfs_delayed_ref_root *delayed_refs;

	delayed_refs = &trans->transaction->delayed_refs;
	assert_spin_locked(&delayed_refs->lock);
	if (mutex_trylock(&head->mutex))
		return 0;

	/* Pin the head across the unlocked window below. */
	atomic_inc(&head->node.refs);
	spin_unlock(&delayed_refs->lock);

	mutex_lock(&head->mutex);
	spin_lock(&delayed_refs->lock);
	if (!head->node.in_tree) {
		/* Someone processed this head while we slept on the mutex. */
		mutex_unlock(&head->mutex);
		btrfs_put_delayed_ref(&head->node);
		return -EAGAIN;
	}
	btrfs_put_delayed_ref(&head->node);
	return 0;
}
35a3621be Btrfs: get rid of... |
177 |
/*
 * Unlink a delayed ref node and drop the reference it holds.
 *
 * Head refs are removed from the per-transaction rbtree; regular refs are
 * removed from their head's ref_list (head->lock must be held for that
 * case). Also keeps num_entries and the transaction's delayed_ref_updates
 * counters in sync.
 */
static inline void drop_delayed_ref(struct btrfs_trans_handle *trans,
				    struct btrfs_delayed_ref_root *delayed_refs,
				    struct btrfs_delayed_ref_head *head,
				    struct btrfs_delayed_ref_node *ref)
{
	if (btrfs_delayed_ref_is_head(ref)) {
		head = btrfs_delayed_node_to_head(ref);
		rb_erase(&head->href_node, &delayed_refs->href_root);
	} else {
		assert_spin_locked(&head->lock);
		list_del(&ref->list);
	}
	ref->in_tree = 0;
	btrfs_put_delayed_ref(ref);
	atomic_dec(&delayed_refs->num_entries);
	if (trans->delayed_ref_updates)
		trans->delayed_ref_updates--;
}
2c3cf7d5f Btrfs: fix regres... |
195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 |
/*
 * Try to merge @ref with the other refs on @head's ref_list.
 *
 * A candidate is mergeable when it has the same type and compares equal via
 * comp_tree_refs()/comp_data_refs(), and is not pinned by a tree_mod_seq
 * older than @seq. Equal-action refs have their ref_mod summed; opposite
 * actions cancel (the ref with the larger ref_mod survives — note the
 * swap() when the candidate wins). Refs whose ref_mod reaches zero are
 * dropped entirely.
 *
 * Returns true if @ref itself was dropped or replaced, meaning the caller
 * must restart its scan from the head of the list.
 */
static bool merge_ref(struct btrfs_trans_handle *trans,
		      struct btrfs_delayed_ref_root *delayed_refs,
		      struct btrfs_delayed_ref_head *head,
		      struct btrfs_delayed_ref_node *ref,
		      u64 seq)
{
	struct btrfs_delayed_ref_node *next;
	bool done = false;

	next = list_first_entry(&head->ref_list, struct btrfs_delayed_ref_node,
				list);
	while (!done && &next->list != &head->ref_list) {
		int mod;
		struct btrfs_delayed_ref_node *next2;

		/* Grab the successor now: drop_delayed_ref() may free next. */
		next2 = list_next_entry(next, list);

		if (next == ref)
			goto next;

		/* Refs at or past seq are still visible to tree mod log users. */
		if (seq && next->seq >= seq)
			goto next;

		if (next->type != ref->type)
			goto next;

		if ((ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
		     ref->type == BTRFS_SHARED_BLOCK_REF_KEY) &&
		    comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref),
				   btrfs_delayed_node_to_tree_ref(next),
				   ref->type))
			goto next;
		if ((ref->type == BTRFS_EXTENT_DATA_REF_KEY ||
		     ref->type == BTRFS_SHARED_DATA_REF_KEY) &&
		    comp_data_refs(btrfs_delayed_node_to_data_ref(ref),
				   btrfs_delayed_node_to_data_ref(next)))
			goto next;

		if (ref->action == next->action) {
			mod = next->ref_mod;
		} else {
			if (ref->ref_mod < next->ref_mod) {
				/* The candidate outweighs us; keep it instead. */
				swap(ref, next);
				done = true;
			}
			mod = -next->ref_mod;
		}

		drop_delayed_ref(trans, delayed_refs, head, next);
		ref->ref_mod += mod;
		if (ref->ref_mod == 0) {
			drop_delayed_ref(trans, delayed_refs, head, ref);
			done = true;
		} else {
			/*
			 * Can't have multiples of the same ref on a tree block.
			 */
			WARN_ON(ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
				ref->type == BTRFS_SHARED_BLOCK_REF_KEY);
		}
next:
		next = next2;
	}

	return done;
}

/*
 * Merge the delayed refs queued on @head as far as the tree mod log allows.
 *
 * Data heads are skipped (comment below); otherwise the lowest active
 * tree_mod_seq is sampled and every ref older than it is offered to
 * merge_ref(). The caller must hold head->lock.
 */
void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
			      struct btrfs_fs_info *fs_info,
			      struct btrfs_delayed_ref_root *delayed_refs,
			      struct btrfs_delayed_ref_head *head)
{
	struct btrfs_delayed_ref_node *ref;
	u64 seq = 0;

	assert_spin_locked(&head->lock);

	if (list_empty(&head->ref_list))
		return;

	/* We don't have too many refs to merge for data. */
	if (head->is_data)
		return;

	/* Sample the oldest tree mod seq still in use, if any. */
	spin_lock(&fs_info->tree_mod_seq_lock);
	if (!list_empty(&fs_info->tree_mod_seq_list)) {
		struct seq_list *elem;

		elem = list_first_entry(&fs_info->tree_mod_seq_list,
					struct seq_list, list);
		seq = elem->seq;
	}
	spin_unlock(&fs_info->tree_mod_seq_lock);

	ref = list_first_entry(&head->ref_list, struct btrfs_delayed_ref_node,
			       list);
	while (&ref->list != &head->ref_list) {
		if (seq && ref->seq >= seq)
			goto next;

		if (merge_ref(trans, delayed_refs, head, ref, seq)) {
			/* ref was consumed; restart from the list head. */
			if (list_empty(&head->ref_list))
				break;
			ref = list_first_entry(&head->ref_list,
					       struct btrfs_delayed_ref_node,
					       list);
			continue;
		}
next:
		ref = list_next_entry(ref, list);
	}
}
097b8a7c9 Btrfs: join tree ... |
306 307 |
/*
 * Check whether a delayed ref at @seq must be held back because a tree mod
 * log user still needs to see state from before it.
 *
 * Returns 1 (and logs a debug message) when @seq is at or above the lowest
 * active tree_mod_seq, 0 when the ref may be processed.
 */
int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info,
			    struct btrfs_delayed_ref_root *delayed_refs,
			    u64 seq)
{
	struct seq_list *elem;
	int ret = 0;

	spin_lock(&fs_info->tree_mod_seq_lock);
	if (!list_empty(&fs_info->tree_mod_seq_list)) {
		elem = list_first_entry(&fs_info->tree_mod_seq_list,
					struct seq_list, list);
		if (seq >= elem->seq) {
			btrfs_debug(fs_info,
				"holding back delayed_ref %#x.%x, lowest is %#x.%x (%p)",
				(u32)(seq >> 32), (u32)seq,
				(u32)(elem->seq >> 32), (u32)elem->seq,
				delayed_refs);
			ret = 1;
		}
	}

	spin_unlock(&fs_info->tree_mod_seq_lock);
	return ret;
}
d7df2c796 Btrfs: attach del... |
330 331 |
/*
 * Pick the next delayed ref head to process.
 *
 * The search starts at run_delayed_start (so successive calls walk the tree
 * in bytenr order) and wraps around to the beginning at most once — the
 * @loop flag guards against scanning forever. Heads already claimed by
 * another worker (head->processing) are skipped. The chosen head is marked
 * processing and run_delayed_start is advanced past its extent.
 *
 * Returns the claimed head, or NULL if every head is taken or the tree is
 * empty. Caller must hold delayed_refs->lock.
 */
struct btrfs_delayed_ref_head *
btrfs_select_ref_head(struct btrfs_trans_handle *trans)
{
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_ref_head *head;
	u64 start;
	bool loop = false;

	delayed_refs = &trans->transaction->delayed_refs;

again:
	start = delayed_refs->run_delayed_start;
	head = find_ref_head(&delayed_refs->href_root, start, 1);
	if (!head && !loop) {
		/* Nothing at or after start; retry once from the beginning. */
		delayed_refs->run_delayed_start = 0;
		start = 0;
		loop = true;
		head = find_ref_head(&delayed_refs->href_root, start, 1);
		if (!head)
			return NULL;
	} else if (!head && loop) {
		return NULL;
	}

	/* Skip heads another worker is already processing. */
	while (head->processing) {
		struct rb_node *node;

		node = rb_next(&head->href_node);
		if (!node) {
			if (loop)
				return NULL;
			delayed_refs->run_delayed_start = 0;
			start = 0;
			loop = true;
			goto again;
		}
		head = rb_entry(node, struct btrfs_delayed_ref_head,
				href_node);
	}

	head->processing = 1;
	WARN_ON(delayed_refs->num_heads_ready == 0);
	delayed_refs->num_heads_ready--;
	delayed_refs->run_delayed_start = head->node.bytenr +
		head->node.num_bytes;
	return head;
}
56bec294d Btrfs: do extent ... |
377 |
/*
 * Helper to insert the ref_node to the tail of the head's ref list, or merge
 * it with the current tail when both describe the same backref at the same
 * seq.
 *
 * Return 0 for insert.
 * Return >0 for merge (the caller then frees the unused ref node).
 */
static int add_delayed_ref_tail_merge(struct btrfs_trans_handle *trans,
				      struct btrfs_delayed_ref_root *root,
				      struct btrfs_delayed_ref_head *href,
				      struct btrfs_delayed_ref_node *ref)
{
	struct btrfs_delayed_ref_node *exist;
	int mod;
	int ret = 0;

	spin_lock(&href->lock);
	/* Check whether we can merge the tail node with ref */
	if (list_empty(&href->ref_list))
		goto add_tail;
	exist = list_entry(href->ref_list.prev, struct btrfs_delayed_ref_node,
			   list);
	/* No need to compare bytenr nor is_head */
	if (exist->type != ref->type || exist->seq != ref->seq)
		goto add_tail;

	if ((exist->type == BTRFS_TREE_BLOCK_REF_KEY ||
	     exist->type == BTRFS_SHARED_BLOCK_REF_KEY) &&
	    comp_tree_refs(btrfs_delayed_node_to_tree_ref(exist),
			   btrfs_delayed_node_to_tree_ref(ref),
			   ref->type))
		goto add_tail;
	if ((exist->type == BTRFS_EXTENT_DATA_REF_KEY ||
	     exist->type == BTRFS_SHARED_DATA_REF_KEY) &&
	    comp_data_refs(btrfs_delayed_node_to_data_ref(exist),
			   btrfs_delayed_node_to_data_ref(ref)))
		goto add_tail;

	/* Now we are sure we can merge */
	ret = 1;
	if (exist->action == ref->action) {
		mod = ref->ref_mod;
	} else {
		/* Need to change action */
		if (exist->ref_mod < ref->ref_mod) {
			/* The new ref outweighs the tail; adopt its action. */
			exist->action = ref->action;
			mod = -exist->ref_mod;
			exist->ref_mod = ref->ref_mod;
		} else
			mod = -ref->ref_mod;
	}
	exist->ref_mod += mod;

	/* remove existing tail if its ref_mod is zero */
	if (exist->ref_mod == 0)
		drop_delayed_ref(trans, root, href, exist);
	spin_unlock(&href->lock);
	return ret;

add_tail:
	list_add_tail(&ref->list, &href->ref_list);
	atomic_inc(&root->num_entries);
	trans->delayed_ref_updates++;
	spin_unlock(&href->lock);
	return ret;
}
56bec294d Btrfs: do extent ... |
445 446 447 448 |
/*
 * helper function to update the accounting in the head ref.
 * existing and update must have the same bytenr; the freshly-allocated
 * update head is folded into the existing one (the caller frees update
 * afterwards).
 */
static noinline void
update_existing_head_ref(struct btrfs_delayed_ref_root *delayed_refs,
			 struct btrfs_delayed_ref_node *existing,
			 struct btrfs_delayed_ref_node *update)
{
	struct btrfs_delayed_ref_head *existing_ref;
	struct btrfs_delayed_ref_head *ref;
	int old_ref_mod;

	existing_ref = btrfs_delayed_node_to_head(existing);
	ref = btrfs_delayed_node_to_head(update);
	BUG_ON(existing_ref->is_data != ref->is_data);

	spin_lock(&existing_ref->lock);
	if (ref->must_insert_reserved) {
		/* if the extent was freed and then
		 * reallocated before the delayed ref
		 * entries were processed, we can end up
		 * with an existing head ref without
		 * the must_insert_reserved flag set.
		 * Set it again here
		 */
		existing_ref->must_insert_reserved = ref->must_insert_reserved;

		/*
		 * update the num_bytes so we make sure the accounting
		 * is done correctly
		 */
		existing->num_bytes = update->num_bytes;

	}

	if (ref->extent_op) {
		if (!existing_ref->extent_op) {
			/* Existing head has no extent_op; take ownership. */
			existing_ref->extent_op = ref->extent_op;
		} else {
			/* Merge the new op into the existing one, then free it. */
			if (ref->extent_op->update_key) {
				memcpy(&existing_ref->extent_op->key,
				       &ref->extent_op->key,
				       sizeof(ref->extent_op->key));
				existing_ref->extent_op->update_key = true;
			}
			if (ref->extent_op->update_flags) {
				existing_ref->extent_op->flags_to_set |=
					ref->extent_op->flags_to_set;
				existing_ref->extent_op->update_flags = true;
			}
			btrfs_free_delayed_extent_op(ref->extent_op);
		}
	}
	/*
	 * update the reference mod on the head to reflect this new operation,
	 * only need the lock for this case cause we could be processing it
	 * currently, for refs we just added we know we're a-ok.
	 */
	old_ref_mod = existing_ref->total_ref_mod;
	existing->ref_mod += update->ref_mod;
	existing_ref->total_ref_mod += update->ref_mod;

	/*
	 * If we are going from a positive ref mod to a negative or vice
	 * versa we need to make sure to adjust pending_csums accordingly.
	 */
	if (existing_ref->is_data) {
		if (existing_ref->total_ref_mod >= 0 && old_ref_mod < 0)
			delayed_refs->pending_csums -= existing->num_bytes;
		if (existing_ref->total_ref_mod < 0 && old_ref_mod >= 0)
			delayed_refs->pending_csums += existing->num_bytes;
	}
	spin_unlock(&existing_ref->lock);
}
5d4f98a28 Btrfs: Mixed back... |
520 |
/*
 * helper function to actually insert a head node into the rbtree.
 * this does all the dirty work in terms of maintaining the correct
 * overall modification count.
 *
 * Returns the head ref that ends up in the tree: either the freshly
 * initialized @ref (cast to its head), or a pre-existing head with the same
 * bytenr that @ref was merged into (in which case @ref's head is freed).
 * Caller must hold delayed_refs->lock.
 */
static noinline struct btrfs_delayed_ref_head *
add_delayed_ref_head(struct btrfs_fs_info *fs_info,
		     struct btrfs_trans_handle *trans,
		     struct btrfs_delayed_ref_node *ref,
		     struct btrfs_qgroup_extent_record *qrecord,
		     u64 bytenr, u64 num_bytes, u64 ref_root, u64 reserved,
		     int action, int is_data)
{
	struct btrfs_delayed_ref_head *existing;
	struct btrfs_delayed_ref_head *head_ref = NULL;
	struct btrfs_delayed_ref_root *delayed_refs;
	int count_mod = 1;
	int must_insert_reserved = 0;

	/* If reserved is provided, it must be a data extent. */
	BUG_ON(!is_data && reserved);

	/*
	 * the head node stores the sum of all the mods, so dropping a ref
	 * should drop the sum in the head node by one.
	 */
	if (action == BTRFS_UPDATE_DELAYED_HEAD)
		count_mod = 0;
	else if (action == BTRFS_DROP_DELAYED_REF)
		count_mod = -1;

	/*
	 * BTRFS_ADD_DELAYED_EXTENT means that we need to update
	 * the reserved accounting when the extent is finally added, or
	 * if a later modification deletes the delayed ref without ever
	 * inserting the extent into the extent allocation tree.
	 * ref->must_insert_reserved is the flag used to record
	 * that accounting mods are required.
	 *
	 * Once we record must_insert_reserved, switch the action to
	 * BTRFS_ADD_DELAYED_REF because other special casing is not required.
	 */
	if (action == BTRFS_ADD_DELAYED_EXTENT)
		must_insert_reserved = 1;
	else
		must_insert_reserved = 0;

	delayed_refs = &trans->transaction->delayed_refs;

	/* first set the basic ref node struct up */
	atomic_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = count_mod;
	ref->type = 0;
	ref->action = 0;
	ref->is_head = 1;
	ref->in_tree = 1;
	ref->seq = 0;

	head_ref = btrfs_delayed_node_to_head(ref);
	head_ref->must_insert_reserved = must_insert_reserved;
	head_ref->is_data = is_data;
	INIT_LIST_HEAD(&head_ref->ref_list);
	head_ref->processing = 0;
	head_ref->total_ref_mod = count_mod;
	head_ref->qgroup_reserved = 0;
	head_ref->qgroup_ref_root = 0;

	/* Record qgroup extent info if provided */
	if (qrecord) {
		if (ref_root && reserved) {
			head_ref->qgroup_ref_root = ref_root;
			head_ref->qgroup_reserved = reserved;
		}

		qrecord->bytenr = bytenr;
		qrecord->num_bytes = num_bytes;
		qrecord->old_roots = NULL;

		/* A nonzero return means a record for this extent exists;
		 * ours is unused and must be freed. */
		if(btrfs_qgroup_insert_dirty_extent_nolock(fs_info,
					delayed_refs, qrecord))
			kfree(qrecord);
	}

	spin_lock_init(&head_ref->lock);
	mutex_init(&head_ref->mutex);

	trace_add_delayed_ref_head(fs_info, ref, head_ref, action);

	existing = htree_insert(&delayed_refs->href_root,
				&head_ref->href_node);
	if (existing) {
		WARN_ON(ref_root && reserved && existing->qgroup_ref_root
			&& existing->qgroup_reserved);
		update_existing_head_ref(delayed_refs, &existing->node, ref);
		/*
		 * we've updated the existing ref, free the newly
		 * allocated ref
		 */
		kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
		head_ref = existing;
	} else {
		/* A fresh negative data head means csums will need deleting. */
		if (is_data && count_mod < 0)
			delayed_refs->pending_csums += num_bytes;
		delayed_refs->num_heads++;
		delayed_refs->num_heads_ready++;
		atomic_inc(&delayed_refs->num_entries);
		trans->delayed_ref_updates++;
	}
	return head_ref;
}
d7df2c796 Btrfs: attach del... |
629 630 631 632 633 634 |
/*
 * helper to insert a delayed tree ref into the rbtree.
 *
 * Initializes @ref as a tree backref (shared vs keyed chosen by @parent)
 * and appends it to @head_ref's ref list, merging with the tail when
 * possible; @ref is freed if the merge consumed it.
 */
static noinline void
add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
		     struct btrfs_trans_handle *trans,
		     struct btrfs_delayed_ref_head *head_ref,
		     struct btrfs_delayed_ref_node *ref, u64 bytenr,
		     u64 num_bytes, u64 parent, u64 ref_root, int level,
		     int action)
{
	struct btrfs_delayed_tree_ref *full_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	u64 seq = 0;
	int ret;

	/* The head recorded must_insert_reserved; plain add from here on. */
	if (action == BTRFS_ADD_DELAYED_EXTENT)
		action = BTRFS_ADD_DELAYED_REF;

	if (is_fstree(ref_root))
		seq = atomic64_read(&fs_info->tree_mod_seq);
	delayed_refs = &trans->transaction->delayed_refs;

	/* first set the basic ref node struct up */
	atomic_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = 1;
	ref->action = action;
	ref->is_head = 0;
	ref->in_tree = 1;
	ref->seq = seq;

	full_ref = btrfs_delayed_node_to_tree_ref(ref);
	full_ref->parent = parent;
	full_ref->root = ref_root;
	if (parent)
		ref->type = BTRFS_SHARED_BLOCK_REF_KEY;
	else
		ref->type = BTRFS_TREE_BLOCK_REF_KEY;
	full_ref->level = level;

	trace_add_delayed_tree_ref(fs_info, ref, full_ref, action);

	ret = add_delayed_ref_tail_merge(trans, delayed_refs, head_ref, ref);

	/*
	 * XXX: memory should be freed at the same level allocated.
	 * But bad practice is anywhere... Follow it now. Need cleanup.
	 */
	if (ret > 0)
		kmem_cache_free(btrfs_delayed_tree_ref_cachep, full_ref);
}
d7df2c796 Btrfs: attach del... |
681 682 683 684 685 686 |
/*
 * helper to insert a delayed data ref into the rbtree.
 *
 * Mirror of add_delayed_tree_ref() for data extents: initializes @ref
 * (shared vs keyed chosen by @parent, carrying owner/offset) and appends
 * it to @head_ref's ref list, merging with the tail when possible; @ref is
 * freed if the merge consumed it.
 */
static noinline void
add_delayed_data_ref(struct btrfs_fs_info *fs_info,
		     struct btrfs_trans_handle *trans,
		     struct btrfs_delayed_ref_head *head_ref,
		     struct btrfs_delayed_ref_node *ref, u64 bytenr,
		     u64 num_bytes, u64 parent, u64 ref_root, u64 owner,
		     u64 offset, int action)
{
	struct btrfs_delayed_data_ref *full_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	u64 seq = 0;
	int ret;

	/* The head recorded must_insert_reserved; plain add from here on. */
	if (action == BTRFS_ADD_DELAYED_EXTENT)
		action = BTRFS_ADD_DELAYED_REF;

	delayed_refs = &trans->transaction->delayed_refs;

	if (is_fstree(ref_root))
		seq = atomic64_read(&fs_info->tree_mod_seq);

	/* first set the basic ref node struct up */
	atomic_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = 1;
	ref->action = action;
	ref->is_head = 0;
	ref->in_tree = 1;
	ref->seq = seq;

	full_ref = btrfs_delayed_node_to_data_ref(ref);
	full_ref->parent = parent;
	full_ref->root = ref_root;
	if (parent)
		ref->type = BTRFS_SHARED_DATA_REF_KEY;
	else
		ref->type = BTRFS_EXTENT_DATA_REF_KEY;

	full_ref->objectid = owner;
	full_ref->offset = offset;

	trace_add_delayed_data_ref(fs_info, ref, full_ref, action);

	ret = add_delayed_ref_tail_merge(trans, delayed_refs, head_ref, ref);

	/* Merged into the tail: our node is unused, free it. */
	if (ret > 0)
		kmem_cache_free(btrfs_delayed_data_ref_cachep, full_ref);
}
5d4f98a28 Btrfs: Mixed back... |
729 |
* add a delayed tree ref. This does all of the accounting required |
56bec294d Btrfs: do extent ... |
730 731 732 |
* to make sure the delayed ref is eventually processed before this * transaction commits. */ |
66d7e7f09 Btrfs: mark delay... |
733 734 |
int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info, struct btrfs_trans_handle *trans, |
5d4f98a28 Btrfs: Mixed back... |
735 736 |
u64 bytenr, u64 num_bytes, u64 parent, u64 ref_root, int level, int action, |
b06c4bf5c Btrfs: fix regres... |
737 |
struct btrfs_delayed_extent_op *extent_op) |
56bec294d Btrfs: do extent ... |
738 |
{ |
5d4f98a28 Btrfs: Mixed back... |
739 |
struct btrfs_delayed_tree_ref *ref; |
56bec294d Btrfs: do extent ... |
740 741 |
struct btrfs_delayed_ref_head *head_ref; struct btrfs_delayed_ref_root *delayed_refs; |
3368d001b btrfs: qgroup: Re... |
742 |
struct btrfs_qgroup_extent_record *record = NULL; |
56bec294d Btrfs: do extent ... |
743 |
|
5d4f98a28 Btrfs: Mixed back... |
744 |
BUG_ON(extent_op && extent_op->is_data); |
78a6184a3 Btrfs: use slabs ... |
745 |
ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS); |
56bec294d Btrfs: do extent ... |
746 747 |
if (!ref) return -ENOMEM; |
78a6184a3 Btrfs: use slabs ... |
748 |
head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS); |
5a5003df9 btrfs: delayed-re... |
749 750 |
if (!head_ref) goto free_ref; |
5d4f98a28 Btrfs: Mixed back... |
751 |
|
afcdd129e Btrfs: add a flag... |
752 753 |
if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) && is_fstree(ref_root)) { |
3368d001b btrfs: qgroup: Re... |
754 |
record = kmalloc(sizeof(*record), GFP_NOFS); |
5a5003df9 btrfs: delayed-re... |
755 756 |
if (!record) goto free_head_ref; |
3368d001b btrfs: qgroup: Re... |
757 |
} |
5d4f98a28 Btrfs: Mixed back... |
758 759 760 761 |
head_ref->extent_op = extent_op; delayed_refs = &trans->transaction->delayed_refs; spin_lock(&delayed_refs->lock); |
56bec294d Btrfs: do extent ... |
762 |
/* |
5d4f98a28 Btrfs: Mixed back... |
763 764 |
* insert both the head node and the new ref without dropping * the spin lock |
56bec294d Btrfs: do extent ... |
765 |
*/ |
3368d001b btrfs: qgroup: Re... |
766 |
head_ref = add_delayed_ref_head(fs_info, trans, &head_ref->node, record, |
5846a3c26 btrfs: qgroup: Fi... |
767 |
bytenr, num_bytes, 0, 0, action, 0); |
5d4f98a28 Btrfs: Mixed back... |
768 |
|
d7df2c796 Btrfs: attach del... |
769 |
add_delayed_tree_ref(fs_info, trans, head_ref, &ref->node, bytenr, |
b06c4bf5c Btrfs: fix regres... |
770 |
num_bytes, parent, ref_root, level, action); |
5d4f98a28 Btrfs: Mixed back... |
771 |
spin_unlock(&delayed_refs->lock); |
95a06077f Btrfs: use delaye... |
772 |
|
5d4f98a28 Btrfs: Mixed back... |
773 |
return 0; |
5a5003df9 btrfs: delayed-re... |
774 775 776 777 778 779 780 |
free_head_ref: kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref); free_ref: kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref); return -ENOMEM; |
5d4f98a28 Btrfs: Mixed back... |
781 782 783 784 785 |
} /* * add a delayed data ref. it's similar to btrfs_add_delayed_tree_ref. */ |
66d7e7f09 Btrfs: mark delay... |
786 787 |
int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info, struct btrfs_trans_handle *trans, |
5d4f98a28 Btrfs: Mixed back... |
788 789 |
u64 bytenr, u64 num_bytes, u64 parent, u64 ref_root, |
5846a3c26 btrfs: qgroup: Fi... |
790 |
u64 owner, u64 offset, u64 reserved, int action, |
b06c4bf5c Btrfs: fix regres... |
791 |
struct btrfs_delayed_extent_op *extent_op) |
5d4f98a28 Btrfs: Mixed back... |
792 793 794 795 |
{ struct btrfs_delayed_data_ref *ref; struct btrfs_delayed_ref_head *head_ref; struct btrfs_delayed_ref_root *delayed_refs; |
3368d001b btrfs: qgroup: Re... |
796 |
struct btrfs_qgroup_extent_record *record = NULL; |
5d4f98a28 Btrfs: Mixed back... |
797 798 |
BUG_ON(extent_op && !extent_op->is_data); |
78a6184a3 Btrfs: use slabs ... |
799 |
ref = kmem_cache_alloc(btrfs_delayed_data_ref_cachep, GFP_NOFS); |
5d4f98a28 Btrfs: Mixed back... |
800 801 |
if (!ref) return -ENOMEM; |
56bec294d Btrfs: do extent ... |
802 |
|
78a6184a3 Btrfs: use slabs ... |
803 |
head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS); |
56bec294d Btrfs: do extent ... |
804 |
if (!head_ref) { |
78a6184a3 Btrfs: use slabs ... |
805 |
kmem_cache_free(btrfs_delayed_data_ref_cachep, ref); |
56bec294d Btrfs: do extent ... |
806 807 |
return -ENOMEM; } |
5d4f98a28 Btrfs: Mixed back... |
808 |
|
afcdd129e Btrfs: add a flag... |
809 810 |
if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) && is_fstree(ref_root)) { |
3368d001b btrfs: qgroup: Re... |
811 812 813 814 815 816 817 818 |
record = kmalloc(sizeof(*record), GFP_NOFS); if (!record) { kmem_cache_free(btrfs_delayed_data_ref_cachep, ref); kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref); return -ENOMEM; } } |
5d4f98a28 Btrfs: Mixed back... |
819 |
head_ref->extent_op = extent_op; |
56bec294d Btrfs: do extent ... |
820 821 822 823 824 825 826 |
delayed_refs = &trans->transaction->delayed_refs; spin_lock(&delayed_refs->lock); /* * insert both the head node and the new ref without dropping * the spin lock */ |
3368d001b btrfs: qgroup: Re... |
827 |
head_ref = add_delayed_ref_head(fs_info, trans, &head_ref->node, record, |
5846a3c26 btrfs: qgroup: Fi... |
828 829 |
bytenr, num_bytes, ref_root, reserved, action, 1); |
56bec294d Btrfs: do extent ... |
830 |
|
d7df2c796 Btrfs: attach del... |
831 |
add_delayed_data_ref(fs_info, trans, head_ref, &ref->node, bytenr, |
66d7e7f09 Btrfs: mark delay... |
832 |
num_bytes, parent, ref_root, owner, offset, |
b06c4bf5c Btrfs: fix regres... |
833 |
action); |
5d4f98a28 Btrfs: Mixed back... |
834 |
spin_unlock(&delayed_refs->lock); |
95a06077f Btrfs: use delaye... |
835 |
|
5d4f98a28 Btrfs: Mixed back... |
836 837 |
return 0; } |
66d7e7f09 Btrfs: mark delay... |
838 839 |
int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info, struct btrfs_trans_handle *trans, |
5d4f98a28 Btrfs: Mixed back... |
840 841 842 843 844 |
u64 bytenr, u64 num_bytes, struct btrfs_delayed_extent_op *extent_op) { struct btrfs_delayed_ref_head *head_ref; struct btrfs_delayed_ref_root *delayed_refs; |
5d4f98a28 Btrfs: Mixed back... |
845 |
|
78a6184a3 Btrfs: use slabs ... |
846 |
head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS); |
5d4f98a28 Btrfs: Mixed back... |
847 848 849 850 851 852 853 |
if (!head_ref) return -ENOMEM; head_ref->extent_op = extent_op; delayed_refs = &trans->transaction->delayed_refs; spin_lock(&delayed_refs->lock); |
3368d001b btrfs: qgroup: Re... |
854 |
add_delayed_ref_head(fs_info, trans, &head_ref->node, NULL, bytenr, |
5846a3c26 btrfs: qgroup: Fi... |
855 |
num_bytes, 0, 0, BTRFS_UPDATE_DELAYED_HEAD, |
3368d001b btrfs: qgroup: Re... |
856 |
extent_op->is_data); |
5d4f98a28 Btrfs: Mixed back... |
857 |
|
56bec294d Btrfs: do extent ... |
858 859 860 861 862 |
spin_unlock(&delayed_refs->lock); return 0; } /* |
1887be66d Btrfs: try to cle... |
863 864 865 866 867 868 869 |
* this does a simple search for the head node for a given extent. * It must be called with the delayed ref spinlock held, and it returns * the head node if any where found, or NULL if not. */ struct btrfs_delayed_ref_head * btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr) { |
1887be66d Btrfs: try to cle... |
870 871 872 |
struct btrfs_delayed_ref_root *delayed_refs; delayed_refs = &trans->transaction->delayed_refs; |
85fdfdf61 Btrfs: cleanup de... |
873 |
return find_ref_head(&delayed_refs->href_root, bytenr, 0); |
1887be66d Btrfs: try to cle... |
874 |
} |
78a6184a3 Btrfs: use slabs ... |
875 876 877 |
void btrfs_delayed_ref_exit(void) { |
5598e9005 btrfs: drop null ... |
878 879 880 881 |
kmem_cache_destroy(btrfs_delayed_ref_head_cachep); kmem_cache_destroy(btrfs_delayed_tree_ref_cachep); kmem_cache_destroy(btrfs_delayed_data_ref_cachep); kmem_cache_destroy(btrfs_delayed_extent_op_cachep); |
78a6184a3 Btrfs: use slabs ... |
882 883 884 885 886 887 888 |
} int btrfs_delayed_ref_init(void) { btrfs_delayed_ref_head_cachep = kmem_cache_create( "btrfs_delayed_ref_head", sizeof(struct btrfs_delayed_ref_head), 0, |
fba4b6977 btrfs: Fix slab a... |
889 |
SLAB_MEM_SPREAD, NULL); |
78a6184a3 Btrfs: use slabs ... |
890 891 892 893 894 895 |
if (!btrfs_delayed_ref_head_cachep) goto fail; btrfs_delayed_tree_ref_cachep = kmem_cache_create( "btrfs_delayed_tree_ref", sizeof(struct btrfs_delayed_tree_ref), 0, |
fba4b6977 btrfs: Fix slab a... |
896 |
SLAB_MEM_SPREAD, NULL); |
78a6184a3 Btrfs: use slabs ... |
897 898 899 900 901 902 |
if (!btrfs_delayed_tree_ref_cachep) goto fail; btrfs_delayed_data_ref_cachep = kmem_cache_create( "btrfs_delayed_data_ref", sizeof(struct btrfs_delayed_data_ref), 0, |
fba4b6977 btrfs: Fix slab a... |
903 |
SLAB_MEM_SPREAD, NULL); |
78a6184a3 Btrfs: use slabs ... |
904 905 906 907 908 909 |
if (!btrfs_delayed_data_ref_cachep) goto fail; btrfs_delayed_extent_op_cachep = kmem_cache_create( "btrfs_delayed_extent_op", sizeof(struct btrfs_delayed_extent_op), 0, |
fba4b6977 btrfs: Fix slab a... |
910 |
SLAB_MEM_SPREAD, NULL); |
78a6184a3 Btrfs: use slabs ... |
911 912 913 914 915 916 917 918 |
if (!btrfs_delayed_extent_op_cachep) goto fail; return 0; fail: btrfs_delayed_ref_exit(); return -ENOMEM; } |