Blame view
fs/btrfs/delayed-ref.h
8.1 KB
56bec294d
|
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 |
/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#ifndef __DELAYED_REF__
#define __DELAYED_REF__
44a075bde
|
20 |
/* these are the possible values of struct btrfs_delayed_ref_node->action */
#define BTRFS_ADD_DELAYED_REF    1 /* add one backref to the tree */
#define BTRFS_DROP_DELAYED_REF   2 /* delete one backref from the tree */
#define BTRFS_ADD_DELAYED_EXTENT 3 /* record a full extent allocation */
#define BTRFS_UPDATE_DELAYED_HEAD 4 /* not changing ref count on head ref */
56bec294d
|
25 26 27 28 29 30 |
/*
 * Common part of every delayed reference: both per-ref entries
 * (btrfs_delayed_tree_ref / btrfs_delayed_data_ref) and head refs
 * (btrfs_delayed_ref_head) embed this node and are told apart via the
 * type/is_head fields (see btrfs_delayed_ref_is_head() below).
 */
struct btrfs_delayed_ref_node {
	struct rb_node rb_node;

	/* the starting bytenr of the extent */
	u64 bytenr;

	/* the size of the extent */
	u64 num_bytes;

	/* seq number to keep track of insertion order */
	u64 seq;

	/* ref count on this data structure */
	atomic_t refs;

	/*
	 * how many refs is this entry adding or deleting.  For
	 * head refs, this may be a negative number because it is keeping
	 * track of the total mods done to the reference count.
	 * For individual refs, this will always be a positive number
	 *
	 * It may be more than one, since it is possible for a single
	 * parent to have more than one ref on an extent
	 */
	int ref_mod;

	/* one of the BTRFS_*_DELAYED_* action values defined above */
	unsigned int action:8;
	/*
	 * ref key type; 0 marks a head ref (btrfs_put_delayed_ref() frees
	 * type 0 nodes from the head cache)
	 */
	unsigned int type:8;
	/* is this node still in the rbtree? */
	/*
	 * NOTE(review): the comment above appears to describe in_tree, not
	 * is_head; is_head flags head refs (see btrfs_delayed_ref_is_head()
	 * and btrfs_delayed_node_to_head()) -- confirm before relying on it.
	 */
	unsigned int is_head:1;
	unsigned int in_tree:1;
};
5d4f98a28
|
54 55 56 |
/*
 * A deferred modification to an extent item, attached to a delayed ref
 * head (see the extent_op pointer in btrfs_delayed_ref_head and
 * btrfs_add_delayed_extent_op() below).
 */
struct btrfs_delayed_extent_op {
	struct btrfs_disk_key key;
	u64 flags_to_set;
	int level;
	/*
	 * NOTE(review): update_key/update_flags look like they select which
	 * of the fields above get applied when the op runs -- confirm in
	 * the code that consumes extent_op.
	 */
	unsigned int update_key:1;
	unsigned int update_flags:1;
	unsigned int is_data:1;
};
56bec294d
|
62 63 64 65 66 67 68 69 70 71 72 73 74 75 |
/*
 * the head refs are used to hold a lock on a given extent, which allows us
 * to make sure that only one process is running the delayed refs
 * at a time for a single extent.  They also store the sum of all the
 * reference count modifications we've queued up.
 */
struct btrfs_delayed_ref_head {
	struct btrfs_delayed_ref_node node;

	/*
	 * the mutex is held while running the refs, and it is also
	 * held when checking the sum of reference modifications.
	 */
	struct mutex mutex;

	/* presumably protects ref_root below -- TODO confirm in delayed-ref.c */
	spinlock_t lock;
	struct rb_root ref_root;

	/* links this head into btrfs_delayed_ref_root::href_root */
	struct rb_node href_node;

	/* optional deferred extent-item update, run with this head */
	struct btrfs_delayed_extent_op *extent_op;

	/*
	 * when a new extent is allocated, it is just reserved in memory
	 * The actual extent isn't inserted into the extent allocation tree
	 * until the delayed ref is processed.  must_insert_reserved is
	 * used to flag a delayed ref so the accounting can be updated
	 * when a full insert is done.
	 *
	 * It is possible the extent will be freed before it is ever
	 * inserted into the extent allocation tree.  In this case
	 * we need to update the in ram accounting to properly reflect
	 * the free has happened.
	 */
	unsigned int must_insert_reserved:1;
	unsigned int is_data:1;
	unsigned int processing:1;
};
5d4f98a28
|
97 |
/* a delayed reference to a tree block */
struct btrfs_delayed_tree_ref {
	struct btrfs_delayed_ref_node node;
	u64 root;
	u64 parent;
	int level;
};
56bec294d
|
103 |
|
5d4f98a28
|
104 105 |
/* a delayed reference to a data extent */
struct btrfs_delayed_data_ref {
	struct btrfs_delayed_ref_node node;
	u64 root;
	u64 parent;
	u64 objectid;
	u64 offset;
};

/* per-transaction collection of all queued delayed refs */
struct btrfs_delayed_ref_root {
	/* head ref rbtree */
	struct rb_root href_root;

	/* this spin lock protects the rbtree and the entries inside */
	spinlock_t lock;

	/* how many delayed ref updates we've queued, used by the
	 * throttling code
	 */
	atomic_t num_entries;

	/* total number of head nodes in tree */
	unsigned long num_heads;

	/* total number of head nodes ready for processing */
	unsigned long num_heads_ready;

	/*
	 * set when the tree is flushing before a transaction commit,
	 * used by the throttling code to decide if new updates need
	 * to be run right away
	 */
	int flushing;

	/* bytenr to resume processing from -- see btrfs_select_ref_head() */
	u64 run_delayed_start;
};
78a6184a3
|
137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 |
/* slab caches for the delayed ref structures, defined out of line */
extern struct kmem_cache *btrfs_delayed_ref_head_cachep;
extern struct kmem_cache *btrfs_delayed_tree_ref_cachep;
extern struct kmem_cache *btrfs_delayed_data_ref_cachep;
extern struct kmem_cache *btrfs_delayed_extent_op_cachep;

/* presumably create/destroy the caches above -- see delayed-ref.c */
int btrfs_delayed_ref_init(void);
void btrfs_delayed_ref_exit(void);

/*
 * Allocate a btrfs_delayed_extent_op from its slab cache.
 * GFP_NOFS: allocation must not recurse into the filesystem.
 * Returns NULL on allocation failure.
 */
static inline struct btrfs_delayed_extent_op *
btrfs_alloc_delayed_extent_op(void)
{
	return kmem_cache_alloc(btrfs_delayed_extent_op_cachep, GFP_NOFS);
}

/* free an op allocated above; safe to call with NULL */
static inline void
btrfs_free_delayed_extent_op(struct btrfs_delayed_extent_op *op)
{
	if (op)
		kmem_cache_free(btrfs_delayed_extent_op_cachep, op);
}
56bec294d
|
157 158 159 160 161 |
static inline void btrfs_put_delayed_ref(struct btrfs_delayed_ref_node *ref) { WARN_ON(atomic_read(&ref->refs) == 0); if (atomic_dec_and_test(&ref->refs)) { WARN_ON(ref->in_tree); |
78a6184a3
|
162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 |
switch (ref->type) { case BTRFS_TREE_BLOCK_REF_KEY: case BTRFS_SHARED_BLOCK_REF_KEY: kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref); break; case BTRFS_EXTENT_DATA_REF_KEY: case BTRFS_SHARED_DATA_REF_KEY: kmem_cache_free(btrfs_delayed_data_ref_cachep, ref); break; case 0: kmem_cache_free(btrfs_delayed_ref_head_cachep, ref); break; default: BUG(); } |
56bec294d
|
177 178 |
} } |
66d7e7f09
|
179 180 |
/* queue a delayed reference update for a tree block */
int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
			       struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes, u64 parent,
			       u64 ref_root, int level, int action,
			       struct btrfs_delayed_extent_op *extent_op,
			       int for_cow);
/* queue a delayed reference update for a data extent */
int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
			       struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes,
			       u64 parent, u64 ref_root,
			       u64 owner, u64 offset, int action,
			       struct btrfs_delayed_extent_op *extent_op,
			       int for_cow);
/* queue a deferred extent-item update (see btrfs_delayed_extent_op) */
int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
				struct btrfs_trans_handle *trans,
				u64 bytenr, u64 num_bytes,
				struct btrfs_delayed_extent_op *extent_op);
void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
			      struct btrfs_fs_info *fs_info,
			      struct btrfs_delayed_ref_root *delayed_refs,
			      struct btrfs_delayed_ref_head *head);

/* look up the head ref for @bytenr, or NULL -- TODO confirm NULL return */
struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr);
int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
			   struct btrfs_delayed_ref_head *head);
/* release the per-head mutex taken while running this head's refs */
static inline void btrfs_delayed_ref_unlock(struct btrfs_delayed_ref_head *head)
{
	mutex_unlock(&head->mutex);
}

/* pick the next head ref to process (uses run_delayed_start) */
struct btrfs_delayed_ref_head *
btrfs_select_ref_head(struct btrfs_trans_handle *trans);
00f04b887
|
212 |
|
097b8a7c9
|
213 214 |
int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info,
			    struct btrfs_delayed_ref_root *delayed_refs,
			    u64 seq);

/*
 * Backref walking must hold back any delayed ref carrying a ref_seq > 0.
 * Only items in one of the fs-trees are affected; for_cow items are never
 * held back and therefore are not given a ref_seq number.  Returns 1 when
 * a seq number is needed, 0 otherwise.
 */
static inline int need_ref_seq(int for_cow, u64 rootid)
{
	if (for_cow)
		return 0;

	/* only refs living in one of the fs-trees need a seq number */
	return rootid == BTRFS_FS_TREE_OBJECTID ||
	       (s64)rootid >= (s64)BTRFS_FIRST_FREE_OBJECTID;
}

/*
56bec294d
|
237 238 239 240 241 |
* a node might live in a head or a regular ref, this lets you * test for the proper type to use. */ static int btrfs_delayed_ref_is_head(struct btrfs_delayed_ref_node *node) { |
5d4f98a28
|
242 |
return node->is_head; |
56bec294d
|
243 244 245 246 247 |
} /* * helper functions to cast a node into its container */ |
5d4f98a28
|
248 249 |
static inline struct btrfs_delayed_tree_ref * btrfs_delayed_node_to_tree_ref(struct btrfs_delayed_ref_node *node) |
56bec294d
|
250 251 |
{ WARN_ON(btrfs_delayed_ref_is_head(node)); |
5d4f98a28
|
252 253 |
return container_of(node, struct btrfs_delayed_tree_ref, node); } |
56bec294d
|
254 |
|
5d4f98a28
|
255 256 257 258 259 |
static inline struct btrfs_delayed_data_ref * btrfs_delayed_node_to_data_ref(struct btrfs_delayed_ref_node *node) { WARN_ON(btrfs_delayed_ref_is_head(node)); return container_of(node, struct btrfs_delayed_data_ref, node); |
56bec294d
|
260 261 262 263 264 265 266 |
} static inline struct btrfs_delayed_ref_head * btrfs_delayed_node_to_head(struct btrfs_delayed_ref_node *node) { WARN_ON(!btrfs_delayed_ref_is_head(node)); return container_of(node, struct btrfs_delayed_ref_head, node); |
56bec294d
|
267 268 |
} #endif |