Commit d038a63ace6cf2ce3aeafa741b73d542ffb65163
Exists in
ti-lsk-linux-4.1.y
and in
10 other branches
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs
Pull btrfs deadlock fix from Chris Mason: "This has a fix for a long standing deadlock that we've been trying to nail down for a while. It ended up being a bad interaction with the fair reader/writer locks and the order btrfs reacquires locks in the btree" * 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs: btrfs: fix lockups from btrfs_clear_path_blocking
Showing 3 changed files Inline Diff
fs/btrfs/ctree.c
1 | /* | 1 | /* |
2 | * Copyright (C) 2007,2008 Oracle. All rights reserved. | 2 | * Copyright (C) 2007,2008 Oracle. All rights reserved. |
3 | * | 3 | * |
4 | * This program is free software; you can redistribute it and/or | 4 | * This program is free software; you can redistribute it and/or |
5 | * modify it under the terms of the GNU General Public | 5 | * modify it under the terms of the GNU General Public |
6 | * License v2 as published by the Free Software Foundation. | 6 | * License v2 as published by the Free Software Foundation. |
7 | * | 7 | * |
8 | * This program is distributed in the hope that it will be useful, | 8 | * This program is distributed in the hope that it will be useful, |
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | 10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
11 | * General Public License for more details. | 11 | * General Public License for more details. |
12 | * | 12 | * |
13 | * You should have received a copy of the GNU General Public | 13 | * You should have received a copy of the GNU General Public |
14 | * License along with this program; if not, write to the | 14 | * License along with this program; if not, write to the |
15 | * Free Software Foundation, Inc., 59 Temple Place - Suite 330, | 15 | * Free Software Foundation, Inc., 59 Temple Place - Suite 330, |
16 | * Boston, MA 021110-1307, USA. | 16 | * Boston, MA 021110-1307, USA. |
17 | */ | 17 | */ |
18 | 18 | ||
19 | #include <linux/sched.h> | 19 | #include <linux/sched.h> |
20 | #include <linux/slab.h> | 20 | #include <linux/slab.h> |
21 | #include <linux/rbtree.h> | 21 | #include <linux/rbtree.h> |
22 | #include "ctree.h" | 22 | #include "ctree.h" |
23 | #include "disk-io.h" | 23 | #include "disk-io.h" |
24 | #include "transaction.h" | 24 | #include "transaction.h" |
25 | #include "print-tree.h" | 25 | #include "print-tree.h" |
26 | #include "locking.h" | 26 | #include "locking.h" |
27 | 27 | ||
28 | static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root | 28 | static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root |
29 | *root, struct btrfs_path *path, int level); | 29 | *root, struct btrfs_path *path, int level); |
30 | static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root | 30 | static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root |
31 | *root, struct btrfs_key *ins_key, | 31 | *root, struct btrfs_key *ins_key, |
32 | struct btrfs_path *path, int data_size, int extend); | 32 | struct btrfs_path *path, int data_size, int extend); |
33 | static int push_node_left(struct btrfs_trans_handle *trans, | 33 | static int push_node_left(struct btrfs_trans_handle *trans, |
34 | struct btrfs_root *root, struct extent_buffer *dst, | 34 | struct btrfs_root *root, struct extent_buffer *dst, |
35 | struct extent_buffer *src, int empty); | 35 | struct extent_buffer *src, int empty); |
36 | static int balance_node_right(struct btrfs_trans_handle *trans, | 36 | static int balance_node_right(struct btrfs_trans_handle *trans, |
37 | struct btrfs_root *root, | 37 | struct btrfs_root *root, |
38 | struct extent_buffer *dst_buf, | 38 | struct extent_buffer *dst_buf, |
39 | struct extent_buffer *src_buf); | 39 | struct extent_buffer *src_buf); |
40 | static void del_ptr(struct btrfs_root *root, struct btrfs_path *path, | 40 | static void del_ptr(struct btrfs_root *root, struct btrfs_path *path, |
41 | int level, int slot); | 41 | int level, int slot); |
42 | static int tree_mod_log_free_eb(struct btrfs_fs_info *fs_info, | 42 | static int tree_mod_log_free_eb(struct btrfs_fs_info *fs_info, |
43 | struct extent_buffer *eb); | 43 | struct extent_buffer *eb); |
44 | 44 | ||
45 | struct btrfs_path *btrfs_alloc_path(void) | 45 | struct btrfs_path *btrfs_alloc_path(void) |
46 | { | 46 | { |
47 | struct btrfs_path *path; | 47 | struct btrfs_path *path; |
48 | path = kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS); | 48 | path = kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS); |
49 | return path; | 49 | return path; |
50 | } | 50 | } |
51 | 51 | ||
/*
 * set all locked nodes in the path to blocking locks.  This should
 * be done before scheduling
 */
noinline void btrfs_set_path_blocking(struct btrfs_path *p)
{
	int i;
	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
		/* skip levels where we hold no node or no lock */
		if (!p->nodes[i] || !p->locks[i])
			continue;
		btrfs_set_lock_blocking_rw(p->nodes[i], p->locks[i]);
		/* record the new (blocking) lock state in the path */
		if (p->locks[i] == BTRFS_READ_LOCK)
			p->locks[i] = BTRFS_READ_LOCK_BLOCKING;
		else if (p->locks[i] == BTRFS_WRITE_LOCK)
			p->locks[i] = BTRFS_WRITE_LOCK_BLOCKING;
	}
}
69 | 69 | ||
/*
 * reset all the locked nodes in the patch to spinning locks.
 *
 * held is used to keep lockdep happy, when lockdep is enabled
 * we set held to a blocking lock before we go around and
 * retake all the spinlocks in the path.  You can safely use NULL
 * for held
 */
noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
					struct extent_buffer *held, int held_rw)
{
	int i;

	/*
	 * NOTE: this used to be wrapped in #ifdef CONFIG_DEBUG_LOCK_ALLOC.
	 * Per the commit this file comes from ("btrfs: fix lockups from
	 * btrfs_clear_path_blocking"), forcing the whole path to blocking
	 * before re-taking the spinlocks must happen unconditionally: with
	 * fair reader/writer locks, re-acquiring spinning locks while still
	 * holding others could deadlock against waiting writers.
	 */
	if (held) {
		btrfs_set_lock_blocking_rw(held, held_rw);
		/* track held's new blocking state so we can undo it below */
		if (held_rw == BTRFS_WRITE_LOCK)
			held_rw = BTRFS_WRITE_LOCK_BLOCKING;
		else if (held_rw == BTRFS_READ_LOCK)
			held_rw = BTRFS_READ_LOCK_BLOCKING;
	}
	btrfs_set_path_blocking(p);

	/* retake spinning locks from the top of the tree down */
	for (i = BTRFS_MAX_LEVEL - 1; i >= 0; i--) {
		if (p->nodes[i] && p->locks[i]) {
			btrfs_clear_lock_blocking_rw(p->nodes[i], p->locks[i]);
			if (p->locks[i] == BTRFS_WRITE_LOCK_BLOCKING)
				p->locks[i] = BTRFS_WRITE_LOCK;
			else if (p->locks[i] == BTRFS_READ_LOCK_BLOCKING)
				p->locks[i] = BTRFS_READ_LOCK;
		}
	}

	/* return held to a spinning lock as well */
	if (held)
		btrfs_clear_lock_blocking_rw(held, held_rw);
}
115 | 105 | ||
/* this also releases the path */
void btrfs_free_path(struct btrfs_path *p)
{
	/* NULL is a no-op, free()-style convenience for callers */
	if (!p)
		return;
	btrfs_release_path(p);
	kmem_cache_free(btrfs_path_cachep, p);
}
124 | 114 | ||
/*
 * path release drops references on the extent buffers in the path
 * and it drops any locks held by this path
 *
 * It is safe to call this on paths that no locks or extent buffers held.
 */
noinline void btrfs_release_path(struct btrfs_path *p)
{
	int i;

	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
		p->slots[i] = 0;
		if (!p->nodes[i])
			continue;
		if (p->locks[i]) {
			/* unlock before dropping our reference on the node */
			btrfs_tree_unlock_rw(p->nodes[i], p->locks[i]);
			p->locks[i] = 0;
		}
		free_extent_buffer(p->nodes[i]);
		p->nodes[i] = NULL;
	}
}
147 | 137 | ||
/*
 * safely gets a reference on the root node of a tree.  A lock
 * is not taken, so a concurrent writer may put a different node
 * at the root of the tree.  See btrfs_lock_root_node for the
 * looping required.
 *
 * The extent buffer returned by this has a reference taken, so
 * it won't disappear.  It may stop being the root of the tree
 * at any time because there are no locks held.
 */
struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		rcu_read_lock();
		eb = rcu_dereference(root->node);

		/*
		 * RCU really hurts here, we could free up the root node because
		 * it was cow'ed but we may not get the new root node yet so do
		 * the inc_not_zero dance and if it doesn't work then
		 * synchronize_rcu and try again.
		 */
		if (atomic_inc_not_zero(&eb->refs)) {
			/* got a stable reference; safe to leave RCU */
			rcu_read_unlock();
			break;
		}
		rcu_read_unlock();
		synchronize_rcu();
	}
	return eb;
}
181 | 171 | ||
/* loop around taking references on and locking the root node of the
 * tree until you end up with a lock on the root.  A locked buffer
 * is returned, with a reference held.
 */
struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		eb = btrfs_root_node(root);
		btrfs_tree_lock(eb);
		/* still the root after we got the lock? then we're done */
		if (eb == root->node)
			break;
		/* root changed under us (cow); drop lock+ref and retry */
		btrfs_tree_unlock(eb);
		free_extent_buffer(eb);
	}
	return eb;
}
200 | 190 | ||
/* loop around taking references on and locking the root node of the
 * tree until you end up with a lock on the root.  A locked buffer
 * is returned, with a reference held.
 *
 * Same as btrfs_lock_root_node but takes a read lock instead.
 */
static struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		eb = btrfs_root_node(root);
		btrfs_tree_read_lock(eb);
		/* root unchanged while we acquired the read lock: done */
		if (eb == root->node)
			break;
		btrfs_tree_read_unlock(eb);
		free_extent_buffer(eb);
	}
	return eb;
}
219 | 209 | ||
/* cowonly root (everything not a reference counted cow subvolume), just get
 * put onto a simple dirty list.  transaction.c walks this to make sure they
 * get properly updated on disk.
 */
static void add_root_to_dirty_list(struct btrfs_root *root)
{
	spin_lock(&root->fs_info->trans_lock);
	/* only add tracked roots, and only once (list_empty check) */
	if (test_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state) &&
	    list_empty(&root->dirty_list)) {
		list_add(&root->dirty_list,
			 &root->fs_info->dirty_cowonly_roots);
	}
	spin_unlock(&root->fs_info->trans_lock);
}
234 | 224 | ||
/*
 * used by snapshot creation to make a copy of a root for a tree with
 * a given objectid.  The buffer with the new root node is returned in
 * cow_ret, and this func returns zero on success or a negative error code.
 */
int btrfs_copy_root(struct btrfs_trans_handle *trans,
		      struct btrfs_root *root,
		      struct extent_buffer *buf,
		      struct extent_buffer **cow_ret, u64 new_root_objectid)
{
	struct extent_buffer *cow;
	int ret = 0;
	int level;
	struct btrfs_disk_key disk_key;

	/* ref-counted roots must only be copied inside their running
	 * transaction */
	WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
		trans->transid != root->fs_info->running_transaction->transid);
	WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
		trans->transid != root->last_trans);

	/* first key of the buffer seeds the new block's key */
	level = btrfs_header_level(buf);
	if (level == 0)
		btrfs_item_key(buf, &disk_key, 0);
	else
		btrfs_node_key(buf, &disk_key, 0);

	cow = btrfs_alloc_tree_block(trans, root, 0, new_root_objectid,
			&disk_key, level, buf->start, 0);
	if (IS_ERR(cow))
		return PTR_ERR(cow);

	/* byte-copy the old root, then fix up the header for the copy */
	copy_extent_buffer(cow, buf, 0, 0, cow->len);
	btrfs_set_header_bytenr(cow, cow->start);
	btrfs_set_header_generation(cow, trans->transid);
	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
				     BTRFS_HEADER_FLAG_RELOC);
	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
	else
		btrfs_set_header_owner(cow, new_root_objectid);

	write_extent_buffer(cow, root->fs_info->fsid, btrfs_header_fsid(),
			    BTRFS_FSID_SIZE);

	WARN_ON(btrfs_header_generation(buf) > trans->transid);
	/* bump refs on everything the new root points to; reloc trees use
	 * full backrefs (last arg == 1) */
	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
		ret = btrfs_inc_ref(trans, root, cow, 1);
	else
		ret = btrfs_inc_ref(trans, root, cow, 0);

	if (ret)
		return ret;

	btrfs_mark_buffer_dirty(cow);
	*cow_ret = cow;
	return 0;
}
293 | 283 | ||
/* operation types recorded in the tree modification log */
enum mod_log_op {
	MOD_LOG_KEY_REPLACE,
	MOD_LOG_KEY_ADD,
	MOD_LOG_KEY_REMOVE,
	MOD_LOG_KEY_REMOVE_WHILE_FREEING,
	MOD_LOG_KEY_REMOVE_WHILE_MOVING,
	MOD_LOG_MOVE_KEYS,
	MOD_LOG_ROOT_REPLACE,
};
303 | 293 | ||
/* payload for MOD_LOG_MOVE_KEYS: where keys went and how many */
struct tree_mod_move {
	int dst_slot;
	int nr_items;
};

/* payload for MOD_LOG_ROOT_REPLACE: the old root's location and level */
struct tree_mod_root {
	u64 logical;
	u8 level;
};

/* one entry in the tree modification log rb-tree */
struct tree_mod_elem {
	struct rb_node node;
	u64 index;		/* shifted logical */
	u64 seq;
	enum mod_log_op op;

	/* this is used for MOD_LOG_KEY_* and MOD_LOG_MOVE_KEYS operations */
	int slot;

	/* this is used for MOD_LOG_KEY* and MOD_LOG_ROOT_REPLACE */
	u64 generation;

	/* those are used for op == MOD_LOG_KEY_{REPLACE,REMOVE} */
	struct btrfs_disk_key key;
	u64 blockptr;

	/* this is used for op == MOD_LOG_MOVE_KEYS */
	struct tree_mod_move move;

	/* this is used for op == MOD_LOG_ROOT_REPLACE */
	struct tree_mod_root old_root;
};
336 | 326 | ||
/* take the tree mod log lock for reading */
static inline void tree_mod_log_read_lock(struct btrfs_fs_info *fs_info)
{
	read_lock(&fs_info->tree_mod_log_lock);
}
341 | 331 | ||
/* release the tree mod log read lock */
static inline void tree_mod_log_read_unlock(struct btrfs_fs_info *fs_info)
{
	read_unlock(&fs_info->tree_mod_log_lock);
}
346 | 336 | ||
/* take the tree mod log lock for writing */
static inline void tree_mod_log_write_lock(struct btrfs_fs_info *fs_info)
{
	write_lock(&fs_info->tree_mod_log_lock);
}
351 | 341 | ||
/* release the tree mod log write lock */
static inline void tree_mod_log_write_unlock(struct btrfs_fs_info *fs_info)
{
	write_unlock(&fs_info->tree_mod_log_lock);
}
356 | 346 | ||
/*
 * Pull a new tree mod seq number for our operation.
 */
static inline u64 btrfs_inc_tree_mod_seq(struct btrfs_fs_info *fs_info)
{
	return atomic64_inc_return(&fs_info->tree_mod_seq);
}
364 | 354 | ||
/*
 * This adds a new blocker to the tree mod log's blocker list if the @elem
 * passed does not already have a sequence number set.  So when a caller expects
 * to record tree modifications, it should ensure to set elem->seq to zero
 * before calling btrfs_get_tree_mod_seq.
 * Returns a fresh, unused tree log modification sequence number, even if no new
 * blocker was added.
 */
u64 btrfs_get_tree_mod_seq(struct btrfs_fs_info *fs_info,
			   struct seq_list *elem)
{
	/* lock order: tree_mod_log_write_lock outside tree_mod_seq_lock */
	tree_mod_log_write_lock(fs_info);
	spin_lock(&fs_info->tree_mod_seq_lock);
	if (!elem->seq) {
		elem->seq = btrfs_inc_tree_mod_seq(fs_info);
		list_add_tail(&elem->list, &fs_info->tree_mod_seq_list);
	}
	spin_unlock(&fs_info->tree_mod_seq_lock);
	tree_mod_log_write_unlock(fs_info);

	return elem->seq;
}
387 | 377 | ||
/*
 * Drop a tree mod log blocker.  After removing @elem from the blocker
 * list, any log entries older than the lowest remaining blocker's
 * sequence number are no longer needed and are freed.
 */
void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
			    struct seq_list *elem)
{
	struct rb_root *tm_root;
	struct rb_node *node;
	struct rb_node *next;
	struct seq_list *cur_elem;
	struct tree_mod_elem *tm;
	u64 min_seq = (u64)-1;
	u64 seq_putting = elem->seq;

	/* elem was never registered as a blocker */
	if (!seq_putting)
		return;

	spin_lock(&fs_info->tree_mod_seq_lock);
	list_del(&elem->list);
	elem->seq = 0;

	/* find the minimum sequence among the remaining blockers */
	list_for_each_entry(cur_elem, &fs_info->tree_mod_seq_list, list) {
		if (cur_elem->seq < min_seq) {
			if (seq_putting > cur_elem->seq) {
				/*
				 * blocker with lower sequence number exists, we
				 * cannot remove anything from the log
				 */
				spin_unlock(&fs_info->tree_mod_seq_lock);
				return;
			}
			min_seq = cur_elem->seq;
		}
	}
	spin_unlock(&fs_info->tree_mod_seq_lock);

	/*
	 * anything that's lower than the lowest existing (read: blocked)
	 * sequence number can be removed from the tree.
	 */
	tree_mod_log_write_lock(fs_info);
	tm_root = &fs_info->tree_mod_log;
	for (node = rb_first(tm_root); node; node = next) {
		/* grab next before erasing the current node */
		next = rb_next(node);
		tm = container_of(node, struct tree_mod_elem, node);
		if (tm->seq > min_seq)
			continue;
		rb_erase(node, tm_root);
		kfree(tm);
	}
	tree_mod_log_write_unlock(fs_info);
}
437 | 427 | ||
/*
 * key order of the log:
 *       index -> sequence
 *
 * the index is the shifted logical of the *new* root node for root replace
 * operations, or the shifted logical of the affected block for all other
 * operations.
 *
 * Note: must be called with write lock (tree_mod_log_write_lock).
 */
static noinline int
__tree_mod_log_insert(struct btrfs_fs_info *fs_info, struct tree_mod_elem *tm)
{
	struct rb_root *tm_root;
	struct rb_node **new;
	struct rb_node *parent = NULL;
	struct tree_mod_elem *cur;

	BUG_ON(!tm);

	/* stamp the entry with a fresh sequence number before insertion */
	tm->seq = btrfs_inc_tree_mod_seq(fs_info);

	/* standard rb-tree insert keyed on (index, seq) */
	tm_root = &fs_info->tree_mod_log;
	new = &tm_root->rb_node;
	while (*new) {
		cur = container_of(*new, struct tree_mod_elem, node);
		parent = *new;
		if (cur->index < tm->index)
			new = &((*new)->rb_left);
		else if (cur->index > tm->index)
			new = &((*new)->rb_right);
		else if (cur->seq < tm->seq)
			new = &((*new)->rb_left);
		else if (cur->seq > tm->seq)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	rb_link_node(&tm->node, parent, new);
	rb_insert_color(&tm->node, tm_root);
	return 0;
}
481 | 471 | ||
/*
 * Determines if logging can be omitted. Returns 1 if it can. Otherwise, it
 * returns zero with the tree_mod_log_lock acquired. The caller must hold
 * this until all tree mod log insertions are recorded in the rb tree and then
 * call tree_mod_log_write_unlock() to release.
 */
static inline int tree_mod_dont_log(struct btrfs_fs_info *fs_info,
				    struct extent_buffer *eb) {
	/*
	 * NOTE(review): presumably pairs with the barrier on the side that
	 * adds to tree_mod_seq_list — confirm against
	 * btrfs_get_tree_mod_seq().
	 */
	smp_mb();
	/* lock-free fast path: nobody holds a tree mod seq, no log needed */
	if (list_empty(&(fs_info)->tree_mod_seq_list))
		return 1;
	/* leaves (level 0) are never logged */
	if (eb && btrfs_header_level(eb) == 0)
		return 1;

	/* slow path: recheck under the write lock to close the race */
	tree_mod_log_write_lock(fs_info);
	if (list_empty(&(fs_info)->tree_mod_seq_list)) {
		tree_mod_log_write_unlock(fs_info);
		return 1;
	}

	/* returning 0: write lock stays held for the caller */
	return 0;
}
504 | 494 | ||
505 | /* Similar to tree_mod_dont_log, but doesn't acquire any locks. */ | 495 | /* Similar to tree_mod_dont_log, but doesn't acquire any locks. */ |
506 | static inline int tree_mod_need_log(const struct btrfs_fs_info *fs_info, | 496 | static inline int tree_mod_need_log(const struct btrfs_fs_info *fs_info, |
507 | struct extent_buffer *eb) | 497 | struct extent_buffer *eb) |
508 | { | 498 | { |
509 | smp_mb(); | 499 | smp_mb(); |
510 | if (list_empty(&(fs_info)->tree_mod_seq_list)) | 500 | if (list_empty(&(fs_info)->tree_mod_seq_list)) |
511 | return 0; | 501 | return 0; |
512 | if (eb && btrfs_header_level(eb) == 0) | 502 | if (eb && btrfs_header_level(eb) == 0) |
513 | return 0; | 503 | return 0; |
514 | 504 | ||
515 | return 1; | 505 | return 1; |
516 | } | 506 | } |
517 | 507 | ||
518 | static struct tree_mod_elem * | 508 | static struct tree_mod_elem * |
519 | alloc_tree_mod_elem(struct extent_buffer *eb, int slot, | 509 | alloc_tree_mod_elem(struct extent_buffer *eb, int slot, |
520 | enum mod_log_op op, gfp_t flags) | 510 | enum mod_log_op op, gfp_t flags) |
521 | { | 511 | { |
522 | struct tree_mod_elem *tm; | 512 | struct tree_mod_elem *tm; |
523 | 513 | ||
524 | tm = kzalloc(sizeof(*tm), flags); | 514 | tm = kzalloc(sizeof(*tm), flags); |
525 | if (!tm) | 515 | if (!tm) |
526 | return NULL; | 516 | return NULL; |
527 | 517 | ||
528 | tm->index = eb->start >> PAGE_CACHE_SHIFT; | 518 | tm->index = eb->start >> PAGE_CACHE_SHIFT; |
529 | if (op != MOD_LOG_KEY_ADD) { | 519 | if (op != MOD_LOG_KEY_ADD) { |
530 | btrfs_node_key(eb, &tm->key, slot); | 520 | btrfs_node_key(eb, &tm->key, slot); |
531 | tm->blockptr = btrfs_node_blockptr(eb, slot); | 521 | tm->blockptr = btrfs_node_blockptr(eb, slot); |
532 | } | 522 | } |
533 | tm->op = op; | 523 | tm->op = op; |
534 | tm->slot = slot; | 524 | tm->slot = slot; |
535 | tm->generation = btrfs_node_ptr_generation(eb, slot); | 525 | tm->generation = btrfs_node_ptr_generation(eb, slot); |
536 | RB_CLEAR_NODE(&tm->node); | 526 | RB_CLEAR_NODE(&tm->node); |
537 | 527 | ||
538 | return tm; | 528 | return tm; |
539 | } | 529 | } |
540 | 530 | ||
/*
 * Log a single key operation (@op) on @slot of @eb.
 *
 * Returns 0 on success, including the case where logging is not needed,
 * and -ENOMEM if the log element cannot be allocated.
 */
static noinline int
tree_mod_log_insert_key(struct btrfs_fs_info *fs_info,
			struct extent_buffer *eb, int slot,
			enum mod_log_op op, gfp_t flags)
{
	struct tree_mod_elem *tm;
	int ret;

	/* cheap, lock-free check first */
	if (!tree_mod_need_log(fs_info, eb))
		return 0;

	/* allocate before taking the write lock */
	tm = alloc_tree_mod_elem(eb, slot, op, flags);
	if (!tm)
		return -ENOMEM;

	/* recheck under the lock; on 0 the write lock is held */
	if (tree_mod_dont_log(fs_info, eb)) {
		kfree(tm);
		return 0;
	}

	ret = __tree_mod_log_insert(fs_info, tm);
	tree_mod_log_write_unlock(fs_info);
	/* on insertion failure tm was not linked, so free it here */
	if (ret)
		kfree(tm);

	return ret;
}
568 | 558 | ||
/*
 * Log a move of @nr_items keys from @src_slot to @dst_slot within @eb.
 *
 * For a move towards the front of the buffer the overwritten slots are
 * additionally logged as removals, so that the pre-move state can be
 * rebuilt.  Returns 0 on success or a negative errno; on failure nothing
 * is left in the log (partial insertions are rolled back).
 */
static noinline int
tree_mod_log_insert_move(struct btrfs_fs_info *fs_info,
			 struct extent_buffer *eb, int dst_slot, int src_slot,
			 int nr_items, gfp_t flags)
{
	struct tree_mod_elem *tm = NULL;
	struct tree_mod_elem **tm_list = NULL;
	int ret = 0;
	int i;
	int locked = 0;	/* 1 once tree_mod_dont_log() left us holding the write lock */

	if (!tree_mod_need_log(fs_info, eb))
		return 0;

	tm_list = kzalloc(nr_items * sizeof(struct tree_mod_elem *), flags);
	if (!tm_list)
		return -ENOMEM;

	tm = kzalloc(sizeof(*tm), flags);
	if (!tm) {
		ret = -ENOMEM;
		goto free_tms;
	}

	tm->index = eb->start >> PAGE_CACHE_SHIFT;
	tm->slot = src_slot;
	tm->move.dst_slot = dst_slot;
	tm->move.nr_items = nr_items;
	tm->op = MOD_LOG_MOVE_KEYS;

	/* pre-allocate removal elements before taking the write lock */
	for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
		tm_list[i] = alloc_tree_mod_elem(eb, i + dst_slot,
		    MOD_LOG_KEY_REMOVE_WHILE_MOVING, flags);
		if (!tm_list[i]) {
			ret = -ENOMEM;
			goto free_tms;
		}
	}

	if (tree_mod_dont_log(fs_info, eb))
		goto free_tms;
	locked = 1;

	/*
	 * When we override something during the move, we log these removals.
	 * This can only happen when we move towards the beginning of the
	 * buffer, i.e. dst_slot < src_slot.
	 */
	for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
		ret = __tree_mod_log_insert(fs_info, tm_list[i]);
		if (ret)
			goto free_tms;
	}

	ret = __tree_mod_log_insert(fs_info, tm);
	if (ret)
		goto free_tms;
	tree_mod_log_write_unlock(fs_info);
	kfree(tm_list);

	return 0;
free_tms:
	/* roll back: unlink any elements already inserted, then free all */
	for (i = 0; i < nr_items; i++) {
		if (tm_list[i] && !RB_EMPTY_NODE(&tm_list[i]->node))
			rb_erase(&tm_list[i]->node, &fs_info->tree_mod_log);
		kfree(tm_list[i]);
	}
	if (locked)
		tree_mod_log_write_unlock(fs_info);
	kfree(tm_list);
	kfree(tm);

	return ret;
}
643 | 633 | ||
644 | static inline int | 634 | static inline int |
645 | __tree_mod_log_free_eb(struct btrfs_fs_info *fs_info, | 635 | __tree_mod_log_free_eb(struct btrfs_fs_info *fs_info, |
646 | struct tree_mod_elem **tm_list, | 636 | struct tree_mod_elem **tm_list, |
647 | int nritems) | 637 | int nritems) |
648 | { | 638 | { |
649 | int i, j; | 639 | int i, j; |
650 | int ret; | 640 | int ret; |
651 | 641 | ||
652 | for (i = nritems - 1; i >= 0; i--) { | 642 | for (i = nritems - 1; i >= 0; i--) { |
653 | ret = __tree_mod_log_insert(fs_info, tm_list[i]); | 643 | ret = __tree_mod_log_insert(fs_info, tm_list[i]); |
654 | if (ret) { | 644 | if (ret) { |
655 | for (j = nritems - 1; j > i; j--) | 645 | for (j = nritems - 1; j > i; j--) |
656 | rb_erase(&tm_list[j]->node, | 646 | rb_erase(&tm_list[j]->node, |
657 | &fs_info->tree_mod_log); | 647 | &fs_info->tree_mod_log); |
658 | return ret; | 648 | return ret; |
659 | } | 649 | } |
660 | } | 650 | } |
661 | 651 | ||
662 | return 0; | 652 | return 0; |
663 | } | 653 | } |
664 | 654 | ||
/*
 * Log the replacement of the tree root @old_root by @new_root.  When
 * @log_removal is set and the old root is not a leaf, every slot of the
 * old root is additionally logged as removed-while-freeing.
 *
 * Returns 0 on success or a negative errno.
 */
static noinline int
tree_mod_log_insert_root(struct btrfs_fs_info *fs_info,
			 struct extent_buffer *old_root,
			 struct extent_buffer *new_root, gfp_t flags,
			 int log_removal)
{
	struct tree_mod_elem *tm = NULL;
	struct tree_mod_elem **tm_list = NULL;
	int nritems = 0;
	int ret = 0;
	int i;

	if (!tree_mod_need_log(fs_info, NULL))
		return 0;

	/* pre-allocate per-slot removal elements before taking the lock */
	if (log_removal && btrfs_header_level(old_root) > 0) {
		nritems = btrfs_header_nritems(old_root);
		tm_list = kzalloc(nritems * sizeof(struct tree_mod_elem *),
				  flags);
		if (!tm_list) {
			ret = -ENOMEM;
			goto free_tms;
		}
		for (i = 0; i < nritems; i++) {
			tm_list[i] = alloc_tree_mod_elem(old_root, i,
			    MOD_LOG_KEY_REMOVE_WHILE_FREEING, flags);
			if (!tm_list[i]) {
				ret = -ENOMEM;
				goto free_tms;
			}
		}
	}

	tm = kzalloc(sizeof(*tm), flags);
	if (!tm) {
		ret = -ENOMEM;
		goto free_tms;
	}

	/* root-replace elements are keyed by the *new* root's logical */
	tm->index = new_root->start >> PAGE_CACHE_SHIFT;
	tm->old_root.logical = old_root->start;
	tm->old_root.level = btrfs_header_level(old_root);
	tm->generation = btrfs_header_generation(old_root);
	tm->op = MOD_LOG_ROOT_REPLACE;

	/* on 0 the write lock is held until the unlock below */
	if (tree_mod_dont_log(fs_info, NULL))
		goto free_tms;

	if (tm_list)
		ret = __tree_mod_log_free_eb(fs_info, tm_list, nritems);
	if (!ret)
		ret = __tree_mod_log_insert(fs_info, tm);

	tree_mod_log_write_unlock(fs_info);
	/*
	 * NOTE(review): if __tree_mod_log_insert(tm) failed after
	 * __tree_mod_log_free_eb() succeeded, the tm_list elements would be
	 * freed below while still linked in the rb-tree.  That insert can
	 * only fail with -EEXIST, which looks impossible for a freshly
	 * assigned seq — confirm.
	 */
	if (ret)
		goto free_tms;
	kfree(tm_list);

	return ret;

free_tms:
	if (tm_list) {
		for (i = 0; i < nritems; i++)
			kfree(tm_list[i]);
		kfree(tm_list);
	}
	kfree(tm);

	return ret;
}
735 | 725 | ||
/*
 * Search the tree mod log for the block starting at @start, considering
 * only elements with seq >= @min_seq.  With @smallest set, return the
 * element with the smallest such seq (oldest); otherwise return the one
 * with the largest seq (most recent).  Returns NULL if nothing matches.
 */
static struct tree_mod_elem *
__tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq,
		      int smallest)
{
	struct rb_root *tm_root;
	struct rb_node *node;
	struct tree_mod_elem *cur = NULL;
	struct tree_mod_elem *found = NULL;
	u64 index = start >> PAGE_CACHE_SHIFT;

	tree_mod_log_read_lock(fs_info);
	tm_root = &fs_info->tree_mod_log;
	node = tm_root->rb_node;
	while (node) {
		cur = container_of(node, struct tree_mod_elem, node);
		/* descending key order: see __tree_mod_log_insert() */
		if (cur->index < index) {
			node = node->rb_left;
		} else if (cur->index > index) {
			node = node->rb_right;
		} else if (cur->seq < min_seq) {
			node = node->rb_left;
		} else if (!smallest) {
			/* we want the node with the highest seq */
			if (found)
				BUG_ON(found->seq > cur->seq);
			found = cur;
			node = node->rb_left;
		} else if (cur->seq > min_seq) {
			/* we want the node with the smallest seq */
			if (found)
				BUG_ON(found->seq < cur->seq);
			found = cur;
			node = node->rb_right;
		} else {
			/* exact match on min_seq: cannot do better */
			found = cur;
			break;
		}
	}
	tree_mod_log_read_unlock(fs_info);

	return found;
}
778 | 768 | ||
779 | /* | 769 | /* |
780 | * this returns the element from the log with the smallest time sequence | 770 | * this returns the element from the log with the smallest time sequence |
781 | * value that's in the log (the oldest log item). any element with a time | 771 | * value that's in the log (the oldest log item). any element with a time |
782 | * sequence lower than min_seq will be ignored. | 772 | * sequence lower than min_seq will be ignored. |
783 | */ | 773 | */ |
784 | static struct tree_mod_elem * | 774 | static struct tree_mod_elem * |
785 | tree_mod_log_search_oldest(struct btrfs_fs_info *fs_info, u64 start, | 775 | tree_mod_log_search_oldest(struct btrfs_fs_info *fs_info, u64 start, |
786 | u64 min_seq) | 776 | u64 min_seq) |
787 | { | 777 | { |
788 | return __tree_mod_log_search(fs_info, start, min_seq, 1); | 778 | return __tree_mod_log_search(fs_info, start, min_seq, 1); |
789 | } | 779 | } |
790 | 780 | ||
791 | /* | 781 | /* |
792 | * this returns the element from the log with the largest time sequence | 782 | * this returns the element from the log with the largest time sequence |
793 | * value that's in the log (the most recent log item). any element with | 783 | * value that's in the log (the most recent log item). any element with |
794 | * a time sequence lower than min_seq will be ignored. | 784 | * a time sequence lower than min_seq will be ignored. |
795 | */ | 785 | */ |
796 | static struct tree_mod_elem * | 786 | static struct tree_mod_elem * |
797 | tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq) | 787 | tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq) |
798 | { | 788 | { |
799 | return __tree_mod_log_search(fs_info, start, min_seq, 0); | 789 | return __tree_mod_log_search(fs_info, start, min_seq, 0); |
800 | } | 790 | } |
801 | 791 | ||
/*
 * Log a copy of @nr_items pointers from @src (at @src_offset) into @dst
 * (at @dst_offset).  Each copied slot is logged as a removal from the
 * source and an addition to the destination, atomically under the tree
 * mod log write lock.
 *
 * Returns 0 on success or a negative errno; on failure any partially
 * inserted elements are rolled back.
 */
static noinline int
tree_mod_log_eb_copy(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
		     struct extent_buffer *src, unsigned long dst_offset,
		     unsigned long src_offset, int nr_items)
{
	int ret = 0;
	struct tree_mod_elem **tm_list = NULL;
	struct tree_mod_elem **tm_list_add, **tm_list_rem;
	int i;
	int locked = 0;	/* 1 once we hold the tree mod log write lock */

	if (!tree_mod_need_log(fs_info, NULL))
		return 0;

	/* leaf-to-leaf copies are not logged */
	if (btrfs_header_level(dst) == 0 && btrfs_header_level(src) == 0)
		return 0;

	/* one array, first half for adds, second half for removals */
	tm_list = kzalloc(nr_items * 2 * sizeof(struct tree_mod_elem *),
			  GFP_NOFS);
	if (!tm_list)
		return -ENOMEM;

	tm_list_add = tm_list;
	tm_list_rem = tm_list + nr_items;
	for (i = 0; i < nr_items; i++) {
		tm_list_rem[i] = alloc_tree_mod_elem(src, i + src_offset,
		    MOD_LOG_KEY_REMOVE, GFP_NOFS);
		if (!tm_list_rem[i]) {
			ret = -ENOMEM;
			goto free_tms;
		}

		tm_list_add[i] = alloc_tree_mod_elem(dst, i + dst_offset,
		    MOD_LOG_KEY_ADD, GFP_NOFS);
		if (!tm_list_add[i]) {
			ret = -ENOMEM;
			goto free_tms;
		}
	}

	if (tree_mod_dont_log(fs_info, NULL))
		goto free_tms;
	locked = 1;

	for (i = 0; i < nr_items; i++) {
		ret = __tree_mod_log_insert(fs_info, tm_list_rem[i]);
		if (ret)
			goto free_tms;
		ret = __tree_mod_log_insert(fs_info, tm_list_add[i]);
		if (ret)
			goto free_tms;
	}

	tree_mod_log_write_unlock(fs_info);
	kfree(tm_list);

	return 0;

free_tms:
	/* roll back: unlink anything already inserted, then free all */
	for (i = 0; i < nr_items * 2; i++) {
		if (tm_list[i] && !RB_EMPTY_NODE(&tm_list[i]->node))
			rb_erase(&tm_list[i]->node, &fs_info->tree_mod_log);
		kfree(tm_list[i]);
	}
	if (locked)
		tree_mod_log_write_unlock(fs_info);
	kfree(tm_list);

	return ret;
}
872 | 862 | ||
873 | static inline void | 863 | static inline void |
874 | tree_mod_log_eb_move(struct btrfs_fs_info *fs_info, struct extent_buffer *dst, | 864 | tree_mod_log_eb_move(struct btrfs_fs_info *fs_info, struct extent_buffer *dst, |
875 | int dst_offset, int src_offset, int nr_items) | 865 | int dst_offset, int src_offset, int nr_items) |
876 | { | 866 | { |
877 | int ret; | 867 | int ret; |
878 | ret = tree_mod_log_insert_move(fs_info, dst, dst_offset, src_offset, | 868 | ret = tree_mod_log_insert_move(fs_info, dst, dst_offset, src_offset, |
879 | nr_items, GFP_NOFS); | 869 | nr_items, GFP_NOFS); |
880 | BUG_ON(ret < 0); | 870 | BUG_ON(ret < 0); |
881 | } | 871 | } |
882 | 872 | ||
883 | static noinline void | 873 | static noinline void |
884 | tree_mod_log_set_node_key(struct btrfs_fs_info *fs_info, | 874 | tree_mod_log_set_node_key(struct btrfs_fs_info *fs_info, |
885 | struct extent_buffer *eb, int slot, int atomic) | 875 | struct extent_buffer *eb, int slot, int atomic) |
886 | { | 876 | { |
887 | int ret; | 877 | int ret; |
888 | 878 | ||
889 | ret = tree_mod_log_insert_key(fs_info, eb, slot, | 879 | ret = tree_mod_log_insert_key(fs_info, eb, slot, |
890 | MOD_LOG_KEY_REPLACE, | 880 | MOD_LOG_KEY_REPLACE, |
891 | atomic ? GFP_ATOMIC : GFP_NOFS); | 881 | atomic ? GFP_ATOMIC : GFP_NOFS); |
892 | BUG_ON(ret < 0); | 882 | BUG_ON(ret < 0); |
893 | } | 883 | } |
894 | 884 | ||
/*
 * Log the freeing of node @eb: every slot is recorded as
 * MOD_LOG_KEY_REMOVE_WHILE_FREEING so the node's content can be rebuilt.
 * Leaves are never logged.
 *
 * Returns 0 on success or a negative errno.
 */
static noinline int
tree_mod_log_free_eb(struct btrfs_fs_info *fs_info, struct extent_buffer *eb)
{
	struct tree_mod_elem **tm_list = NULL;
	int nritems = 0;
	int i;
	int ret = 0;

	if (btrfs_header_level(eb) == 0)
		return 0;

	if (!tree_mod_need_log(fs_info, NULL))
		return 0;

	/* pre-allocate one removal element per slot before locking */
	nritems = btrfs_header_nritems(eb);
	tm_list = kzalloc(nritems * sizeof(struct tree_mod_elem *),
			  GFP_NOFS);
	if (!tm_list)
		return -ENOMEM;

	for (i = 0; i < nritems; i++) {
		tm_list[i] = alloc_tree_mod_elem(eb, i,
		    MOD_LOG_KEY_REMOVE_WHILE_FREEING, GFP_NOFS);
		if (!tm_list[i]) {
			ret = -ENOMEM;
			goto free_tms;
		}
	}

	/* on 0 the write lock is held until the unlock below */
	if (tree_mod_dont_log(fs_info, eb))
		goto free_tms;

	/* inserts all or nothing: unlinks already-inserted ones on error */
	ret = __tree_mod_log_free_eb(fs_info, tm_list, nritems);
	tree_mod_log_write_unlock(fs_info);
	if (ret)
		goto free_tms;
	kfree(tm_list);

	return 0;

free_tms:
	for (i = 0; i < nritems; i++)
		kfree(tm_list[i]);
	kfree(tm_list);

	return ret;
}
942 | 932 | ||
943 | static noinline void | 933 | static noinline void |
944 | tree_mod_log_set_root_pointer(struct btrfs_root *root, | 934 | tree_mod_log_set_root_pointer(struct btrfs_root *root, |
945 | struct extent_buffer *new_root_node, | 935 | struct extent_buffer *new_root_node, |
946 | int log_removal) | 936 | int log_removal) |
947 | { | 937 | { |
948 | int ret; | 938 | int ret; |
949 | ret = tree_mod_log_insert_root(root->fs_info, root->node, | 939 | ret = tree_mod_log_insert_root(root->fs_info, root->node, |
950 | new_root_node, GFP_NOFS, log_removal); | 940 | new_root_node, GFP_NOFS, log_removal); |
951 | BUG_ON(ret < 0); | 941 | BUG_ON(ret < 0); |
952 | } | 942 | } |
953 | 943 | ||
954 | /* | 944 | /* |
955 | * check if the tree block can be shared by multiple trees | 945 | * check if the tree block can be shared by multiple trees |
956 | */ | 946 | */ |
957 | int btrfs_block_can_be_shared(struct btrfs_root *root, | 947 | int btrfs_block_can_be_shared(struct btrfs_root *root, |
958 | struct extent_buffer *buf) | 948 | struct extent_buffer *buf) |
959 | { | 949 | { |
960 | /* | 950 | /* |
961 | * Tree blocks not in refernece counted trees and tree roots | 951 | * Tree blocks not in refernece counted trees and tree roots |
962 | * are never shared. If a block was allocated after the last | 952 | * are never shared. If a block was allocated after the last |
963 | * snapshot and the block was not allocated by tree relocation, | 953 | * snapshot and the block was not allocated by tree relocation, |
964 | * we know the block is not shared. | 954 | * we know the block is not shared. |
965 | */ | 955 | */ |
966 | if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) && | 956 | if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) && |
967 | buf != root->node && buf != root->commit_root && | 957 | buf != root->node && buf != root->commit_root && |
968 | (btrfs_header_generation(buf) <= | 958 | (btrfs_header_generation(buf) <= |
969 | btrfs_root_last_snapshot(&root->root_item) || | 959 | btrfs_root_last_snapshot(&root->root_item) || |
970 | btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC))) | 960 | btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC))) |
971 | return 1; | 961 | return 1; |
972 | #ifdef BTRFS_COMPAT_EXTENT_TREE_V0 | 962 | #ifdef BTRFS_COMPAT_EXTENT_TREE_V0 |
973 | if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) && | 963 | if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) && |
974 | btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV) | 964 | btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV) |
975 | return 1; | 965 | return 1; |
976 | #endif | 966 | #endif |
977 | return 0; | 967 | return 0; |
978 | } | 968 | } |
979 | 969 | ||
/*
 * Update extent backrefs when block @buf is about to be replaced by its
 * COW copy @cow.
 *
 * @trans:    running transaction
 * @root:     tree that @buf belongs to (or the reloc tree)
 * @buf:      the original tree block being COWed
 * @cow:      the freshly allocated copy of @buf
 * @last_ref: out parameter; set to 1 when @buf held its last reference
 *            and was cleaned, so the caller can free the extent
 *
 * Returns 0 on success or a negative errno (lookup failure, -EROFS on a
 * corrupt refcount, or a flag-update failure).
 */
static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct extent_buffer *buf,
				       struct extent_buffer *cow,
				       int *last_ref)
{
	u64 refs;
	u64 owner;
	u64 flags;
	u64 new_flags = 0;
	int ret;

	/*
	 * Backrefs update rules:
	 *
	 * Always use full backrefs for extent pointers in tree block
	 * allocated by tree relocation.
	 *
	 * If a shared tree block is no longer referenced by its owner
	 * tree (btrfs_header_owner(buf) == root->root_key.objectid),
	 * use full backrefs for extent pointers in tree block.
	 *
	 * If a tree block is been relocating
	 * (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID),
	 * use full backrefs for extent pointers in tree block.
	 * The reason for this is some operations (such as drop tree)
	 * are only allowed for blocks use full backrefs.
	 */

	if (btrfs_block_can_be_shared(root, buf)) {
		/* Shared block: read the real refcount and flags from disk. */
		ret = btrfs_lookup_extent_info(trans, root, buf->start,
					       btrfs_header_level(buf), 1,
					       &refs, &flags);
		if (ret)
			return ret;
		if (refs == 0) {
			/*
			 * A shareable block must have at least one ref;
			 * zero means on-disk corruption, so flag the fs.
			 */
			ret = -EROFS;
			btrfs_std_error(root->fs_info, ret);
			return ret;
		}
	} else {
		/* Private block: exactly one ref, synthesize the flags. */
		refs = 1;
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
			flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
		else
			flags = 0;
	}

	owner = btrfs_header_owner(buf);
	/* Blocks owned by the reloc tree must already use full backrefs. */
	BUG_ON(owner == BTRFS_TREE_RELOC_OBJECTID &&
	       !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));

	if (refs > 1) {
		/*
		 * @buf stays referenced by other trees after the COW, so
		 * both old and new blocks need correct backrefs.
		 */
		if ((owner == root->root_key.objectid ||
		     root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) &&
		    !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
			/* Convert @buf's children to full backrefs. */
			ret = btrfs_inc_ref(trans, root, buf, 1);
			BUG_ON(ret); /* -ENOMEM */

			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID) {
				/* Move the normal refs from @buf to @cow. */
				ret = btrfs_dec_ref(trans, root, buf, 0);
				BUG_ON(ret); /* -ENOMEM */
				ret = btrfs_inc_ref(trans, root, cow, 1);
				BUG_ON(ret); /* -ENOMEM */
			}
			new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
		} else {

			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID)
				ret = btrfs_inc_ref(trans, root, cow, 1);
			else
				ret = btrfs_inc_ref(trans, root, cow, 0);
			BUG_ON(ret); /* -ENOMEM */
		}
		if (new_flags != 0) {
			int level = btrfs_header_level(buf);

			/* Persist the FULL_BACKREF flag on @buf's extent. */
			ret = btrfs_set_disk_extent_flags(trans, root,
							  buf->start,
							  buf->len,
							  new_flags, level, 0);
			if (ret)
				return ret;
		}
	} else {
		/*
		 * Last reference: move the refs over to @cow, then clean
		 * @buf and tell the caller it can free the old extent.
		 */
		if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID)
				ret = btrfs_inc_ref(trans, root, cow, 1);
			else
				ret = btrfs_inc_ref(trans, root, cow, 0);
			BUG_ON(ret); /* -ENOMEM */
			ret = btrfs_dec_ref(trans, root, buf, 1);
			BUG_ON(ret); /* -ENOMEM */
		}
		clean_tree_block(trans, root, buf);
		*last_ref = 1;
	}
	return 0;
}
1084 | /* | 1074 | /* |
1085 | * does the dirty work in cow of a single block. The parent block (if | 1075 | * does the dirty work in cow of a single block. The parent block (if |
1086 | * supplied) is updated to point to the new cow copy. The new buffer is marked | 1076 | * supplied) is updated to point to the new cow copy. The new buffer is marked |
1087 | * dirty and returned locked. If you modify the block it needs to be marked | 1077 | * dirty and returned locked. If you modify the block it needs to be marked |
1088 | * dirty again. | 1078 | * dirty again. |
1089 | * | 1079 | * |
1090 | * search_start -- an allocation hint for the new block | 1080 | * search_start -- an allocation hint for the new block |
1091 | * | 1081 | * |
1092 | * empty_size -- a hint that you plan on doing more cow. This is the size in | 1082 | * empty_size -- a hint that you plan on doing more cow. This is the size in |
1093 | * bytes the allocator should try to find free next to the block it returns. | 1083 | * bytes the allocator should try to find free next to the block it returns. |
1094 | * This is just a hint and may be ignored by the allocator. | 1084 | * This is just a hint and may be ignored by the allocator. |
1095 | */ | 1085 | */ |
/*
 * does the dirty work in cow of a single block.  The parent block (if
 * supplied) is updated to point to the new cow copy.  The new buffer is marked
 * dirty and returned locked.  If you modify the block it needs to be marked
 * dirty again.
 *
 * search_start -- an allocation hint for the new block
 *
 * empty_size -- a hint that you plan on doing more cow.  This is the size in
 * bytes the allocator should try to find free next to the block it returns.
 * This is just a hint and may be ignored by the allocator.
 */
static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct extent_buffer *buf,
			     struct extent_buffer *parent, int parent_slot,
			     struct extent_buffer **cow_ret,
			     u64 search_start, u64 empty_size)
{
	struct btrfs_disk_key disk_key;
	struct extent_buffer *cow;
	int level, ret;
	int last_ref = 0;
	int unlock_orig = 0;
	u64 parent_start;

	/*
	 * If the caller handed us @buf through *cow_ret it expects us to
	 * drop the lock on the original once the copy replaces it.
	 */
	if (*cow_ret == buf)
		unlock_orig = 1;

	btrfs_assert_tree_locked(buf);

	/* COW of a reference-counted root only makes sense inside the
	 * currently running transaction. */
	WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
		trans->transid != root->fs_info->running_transaction->transid);
	WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
		trans->transid != root->last_trans);

	level = btrfs_header_level(buf);

	/* First key of the block, used as an allocation hint below. */
	if (level == 0)
		btrfs_item_key(buf, &disk_key, 0);
	else
		btrfs_node_key(buf, &disk_key, 0);

	/* Only the reloc tree passes a meaningful parent hint. */
	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
		if (parent)
			parent_start = parent->start;
		else
			parent_start = 0;
	} else
		parent_start = 0;

	cow = btrfs_alloc_tree_block(trans, root, parent_start,
			root->root_key.objectid, &disk_key, level,
			search_start, empty_size);
	if (IS_ERR(cow))
		return PTR_ERR(cow);

	/* cow is set to blocking by btrfs_init_new_buffer */

	/* Clone @buf's contents, then rewrite the header for the copy. */
	copy_extent_buffer(cow, buf, 0, 0, cow->len);
	btrfs_set_header_bytenr(cow, cow->start);
	btrfs_set_header_generation(cow, trans->transid);
	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
				     BTRFS_HEADER_FLAG_RELOC);
	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
	else
		btrfs_set_header_owner(cow, root->root_key.objectid);

	write_extent_buffer(cow, root->fs_info->fsid, btrfs_header_fsid(),
			    BTRFS_FSID_SIZE);

	/* Fix up extent backrefs; last_ref tells us if @buf can be freed. */
	ret = update_ref_for_cow(trans, root, buf, cow, &last_ref);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		return ret;
	}

	if (test_bit(BTRFS_ROOT_REF_COWS, &root->state)) {
		ret = btrfs_reloc_cow_block(trans, root, buf, cow);
		if (ret)
			return ret;
	}

	if (buf == root->node) {
		/* COWing the root: publish @cow as the new root node. */
		WARN_ON(parent && parent != buf);
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
			parent_start = buf->start;
		else
			parent_start = 0;

		extent_buffer_get(cow);
		/* Log the root swap before lockless readers can see it. */
		tree_mod_log_set_root_pointer(root, cow, 1);
		rcu_assign_pointer(root->node, cow);

		btrfs_free_tree_block(trans, root, buf, parent_start,
				      last_ref);
		free_extent_buffer(buf);
		add_root_to_dirty_list(root);
	} else {
		/* Interior block: repoint the parent slot at @cow. */
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
			parent_start = parent->start;
		else
			parent_start = 0;

		WARN_ON(trans->transid != btrfs_header_generation(parent));
		tree_mod_log_insert_key(root->fs_info, parent, parent_slot,
					MOD_LOG_KEY_REPLACE, GFP_NOFS);
		btrfs_set_node_blockptr(parent, parent_slot,
					cow->start);
		btrfs_set_node_ptr_generation(parent, parent_slot,
					      trans->transid);
		btrfs_mark_buffer_dirty(parent);
		if (last_ref) {
			ret = tree_mod_log_free_eb(root->fs_info, buf);
			if (ret) {
				btrfs_abort_transaction(trans, root, ret);
				return ret;
			}
		}
		btrfs_free_tree_block(trans, root, buf, parent_start,
				      last_ref);
	}
	if (unlock_orig)
		btrfs_tree_unlock(buf);
	free_extent_buffer_stale(buf);
	btrfs_mark_buffer_dirty(cow);
	/* Returned buffer is locked and dirty, per the header comment. */
	*cow_ret = cow;
	return 0;
}
1217 | /* | 1207 | /* |
1218 | * returns the logical address of the oldest predecessor of the given root. | 1208 | * returns the logical address of the oldest predecessor of the given root. |
1219 | * entries older than time_seq are ignored. | 1209 | * entries older than time_seq are ignored. |
1220 | */ | 1210 | */ |
/*
 * returns the logical address of the oldest predecessor of the given root.
 * entries older than time_seq are ignored.
 *
 * Walks the chain of MOD_LOG_ROOT_REPLACE entries backwards from
 * @eb_root, following each entry's old_root.logical, until it reaches a
 * non-replace operation (the oldest version of the root) or runs out of
 * logged operations.  Returns NULL when @time_seq is 0 or nothing at all
 * is logged for the current root.
 */
static struct tree_mod_elem *
__tree_mod_log_oldest_root(struct btrfs_fs_info *fs_info,
			   struct extent_buffer *eb_root, u64 time_seq)
{
	struct tree_mod_elem *tm;
	struct tree_mod_elem *found = NULL;
	u64 root_logical = eb_root->start;
	int looped = 0;

	if (!time_seq)
		return NULL;

	/*
	 * the very last operation that's logged for a root is the replacement
	 * operation (if it is replaced at all). this has the index of the *new*
	 * root, making it the very first operation that's logged for this root.
	 */
	while (1) {
		tm = tree_mod_log_search_oldest(fs_info, root_logical,
						time_seq);
		/* Nothing logged for the current root at all: bail out. */
		if (!looped && !tm)
			return NULL;
		/*
		 * if there are no tree operation for the oldest root, we simply
		 * return it. this should only happen if that (old) root is at
		 * level 0.
		 */
		if (!tm)
			break;

		/*
		 * if there's an operation that's not a root replacement, we
		 * found the oldest version of our root. normally, we'll find a
		 * MOD_LOG_KEY_REMOVE_WHILE_FREEING operation here.
		 */
		if (tm->op != MOD_LOG_ROOT_REPLACE)
			break;

		/* Remember this replacement and follow the older root. */
		found = tm;
		root_logical = tm->old_root.logical;
		looped = 1;
	}

	/* if there's no old root to return, return what we found instead */
	if (!found)
		found = tm;

	return found;
}
1270 | 1260 | ||
1271 | /* | 1261 | /* |
1272 | * tm is a pointer to the first operation to rewind within eb. then, all | 1262 | * tm is a pointer to the first operation to rewind within eb. then, all |
1273 | * previous operations will be rewinded (until we reach something older than | 1263 | * previous operations will be rewinded (until we reach something older than |
1274 | * time_seq). | 1264 | * time_seq). |
1275 | */ | 1265 | */ |
/*
 * tm is a pointer to the first operation to rewind within eb. then, all
 * previous operations will be rewinded (until we reach something older than
 * time_seq).
 *
 * Each logged operation is undone by applying its inverse to @eb; the
 * item count @n is adjusted along the way and written back into the
 * header at the end.  The tree mod log read lock is held while walking
 * the rb-tree of operations.
 */
static void
__tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
		      u64 time_seq, struct tree_mod_elem *first_tm)
{
	u32 n;
	struct rb_node *next;
	struct tree_mod_elem *tm = first_tm;
	unsigned long o_dst;
	unsigned long o_src;
	unsigned long p_size = sizeof(struct btrfs_key_ptr);

	n = btrfs_header_nritems(eb);
	tree_mod_log_read_lock(fs_info);
	while (tm && tm->seq >= time_seq) {
		/*
		 * all the operations are recorded with the operator used for
		 * the modification. as we're going backwards, we do the
		 * opposite of each operation here.
		 */
		switch (tm->op) {
		case MOD_LOG_KEY_REMOVE_WHILE_FREEING:
			BUG_ON(tm->slot < n);
			/* Fallthrough */
		case MOD_LOG_KEY_REMOVE_WHILE_MOVING:
		case MOD_LOG_KEY_REMOVE:
			/* Undo a removal: restore the key/ptr and grow n. */
			btrfs_set_node_key(eb, &tm->key, tm->slot);
			btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
			btrfs_set_node_ptr_generation(eb, tm->slot,
						      tm->generation);
			n++;
			break;
		case MOD_LOG_KEY_REPLACE:
			/* Undo a replace: put the old key/ptr back in place. */
			BUG_ON(tm->slot >= n);
			btrfs_set_node_key(eb, &tm->key, tm->slot);
			btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
			btrfs_set_node_ptr_generation(eb, tm->slot,
						      tm->generation);
			break;
		case MOD_LOG_KEY_ADD:
			/* if a move operation is needed it's in the log */
			n--;
			break;
		case MOD_LOG_MOVE_KEYS:
			/* Undo a move: shift the key ptrs back to tm->slot. */
			o_dst = btrfs_node_key_ptr_offset(tm->slot);
			o_src = btrfs_node_key_ptr_offset(tm->move.dst_slot);
			memmove_extent_buffer(eb, o_dst, o_src,
					      tm->move.nr_items * p_size);
			break;
		case MOD_LOG_ROOT_REPLACE:
			/*
			 * this operation is special. for roots, this must be
			 * handled explicitly before rewinding.
			 * for non-roots, this operation may exist if the node
			 * was a root: root A -> child B; then A gets empty and
			 * B is promoted to the new root. in the mod log, we'll
			 * have a root-replace operation for B, a tree block
			 * that is no root. we simply ignore that operation.
			 */
			break;
		}
		next = rb_next(&tm->node);
		if (!next)
			break;
		tm = container_of(next, struct tree_mod_elem, node);
		/* Stop once we leave the operations for this block. */
		if (tm->index != first_tm->index)
			break;
	}
	tree_mod_log_read_unlock(fs_info);
	btrfs_set_header_nritems(eb, n);
}
1346 | 1336 | ||
1347 | /* | 1337 | /* |
1348 | * Called with eb read locked. If the buffer cannot be rewinded, the same buffer | 1338 | * Called with eb read locked. If the buffer cannot be rewinded, the same buffer |
1349 | * is returned. If rewind operations happen, a fresh buffer is returned. The | 1339 | * is returned. If rewind operations happen, a fresh buffer is returned. The |
1350 | * returned buffer is always read-locked. If the returned buffer is not the | 1340 | * returned buffer is always read-locked. If the returned buffer is not the |
1351 | * input buffer, the lock on the input buffer is released and the input buffer | 1341 | * input buffer, the lock on the input buffer is released and the input buffer |
1352 | * is freed (its refcount is decremented). | 1342 | * is freed (its refcount is decremented). |
1353 | */ | 1343 | */ |
/*
 * Called with eb read locked. If the buffer cannot be rewinded, the same buffer
 * is returned. If rewind operations happen, a fresh buffer is returned. The
 * returned buffer is always read-locked. If the returned buffer is not the
 * input buffer, the lock on the input buffer is released and the input buffer
 * is freed (its refcount is decremented).
 *
 * Returns NULL if a rewind buffer could not be allocated.
 */
static struct extent_buffer *
tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
		    struct extent_buffer *eb, u64 time_seq)
{
	struct extent_buffer *eb_rewin;
	struct tree_mod_elem *tm;

	/* No point-in-time requested: nothing to rewind. */
	if (!time_seq)
		return eb;

	/* Leaves are never rewound here. */
	if (btrfs_header_level(eb) == 0)
		return eb;

	/* No logged operations newer than time_seq for this block. */
	tm = tree_mod_log_search(fs_info, eb->start, time_seq);
	if (!tm)
		return eb;

	/*
	 * Switch the path (and eb's read lock) to blocking mode before the
	 * allocations below, which may sleep.
	 */
	btrfs_set_path_blocking(path);
	btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);

	if (tm->op == MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
		/* The block was freed: rebuild it on a dummy buffer. */
		BUG_ON(tm->slot != 0);
		eb_rewin = alloc_dummy_extent_buffer(eb->start,
						fs_info->tree_root->nodesize);
		if (!eb_rewin) {
			btrfs_tree_read_unlock_blocking(eb);
			free_extent_buffer(eb);
			return NULL;
		}
		btrfs_set_header_bytenr(eb_rewin, eb->start);
		btrfs_set_header_backref_rev(eb_rewin,
					     btrfs_header_backref_rev(eb));
		btrfs_set_header_owner(eb_rewin, btrfs_header_owner(eb));
		btrfs_set_header_level(eb_rewin, btrfs_header_level(eb));
	} else {
		/* Normal case: rewind on a private clone of eb. */
		eb_rewin = btrfs_clone_extent_buffer(eb);
		if (!eb_rewin) {
			btrfs_tree_read_unlock_blocking(eb);
			free_extent_buffer(eb);
			return NULL;
		}
	}

	/* Drop the original buffer and its (blocking) read lock. */
	btrfs_clear_path_blocking(path, NULL, BTRFS_READ_LOCK);
	btrfs_tree_read_unlock_blocking(eb);
	free_extent_buffer(eb);

	/* Hand the caller a read-locked, rewound copy. */
	extent_buffer_get(eb_rewin);
	btrfs_tree_read_lock(eb_rewin);
	__tree_mod_log_rewind(fs_info, eb_rewin, time_seq, tm);
	WARN_ON(btrfs_header_nritems(eb_rewin) >
		BTRFS_NODEPTRS_PER_BLOCK(fs_info->tree_root));

	return eb_rewin;
}
1409 | 1399 | ||
1410 | /* | 1400 | /* |
1411 | * get_old_root() rewinds the state of @root's root node to the given @time_seq | 1401 | * get_old_root() rewinds the state of @root's root node to the given @time_seq |
1412 | * value. If there are no changes, the current root->root_node is returned. If | 1402 | * value. If there are no changes, the current root->root_node is returned. If |
1413 | * anything changed in between, there's a fresh buffer allocated on which the | 1403 | * anything changed in between, there's a fresh buffer allocated on which the |
1414 | * rewind operations are done. In any case, the returned buffer is read locked. | 1404 | * rewind operations are done. In any case, the returned buffer is read locked. |
1415 | * Returns NULL on error (with no locks held). | 1405 | * Returns NULL on error (with no locks held). |
1416 | */ | 1406 | */ |
1417 | static inline struct extent_buffer * | 1407 | static inline struct extent_buffer * |
1418 | get_old_root(struct btrfs_root *root, u64 time_seq) | 1408 | get_old_root(struct btrfs_root *root, u64 time_seq) |
1419 | { | 1409 | { |
1420 | struct tree_mod_elem *tm; | 1410 | struct tree_mod_elem *tm; |
1421 | struct extent_buffer *eb = NULL; | 1411 | struct extent_buffer *eb = NULL; |
1422 | struct extent_buffer *eb_root; | 1412 | struct extent_buffer *eb_root; |
1423 | struct extent_buffer *old; | 1413 | struct extent_buffer *old; |
1424 | struct tree_mod_root *old_root = NULL; | 1414 | struct tree_mod_root *old_root = NULL; |
1425 | u64 old_generation = 0; | 1415 | u64 old_generation = 0; |
1426 | u64 logical; | 1416 | u64 logical; |
1427 | 1417 | ||
1428 | eb_root = btrfs_read_lock_root_node(root); | 1418 | eb_root = btrfs_read_lock_root_node(root); |
1429 | tm = __tree_mod_log_oldest_root(root->fs_info, eb_root, time_seq); | 1419 | tm = __tree_mod_log_oldest_root(root->fs_info, eb_root, time_seq); |
1430 | if (!tm) | 1420 | if (!tm) |
1431 | return eb_root; | 1421 | return eb_root; |
1432 | 1422 | ||
1433 | if (tm->op == MOD_LOG_ROOT_REPLACE) { | 1423 | if (tm->op == MOD_LOG_ROOT_REPLACE) { |
1434 | old_root = &tm->old_root; | 1424 | old_root = &tm->old_root; |
1435 | old_generation = tm->generation; | 1425 | old_generation = tm->generation; |
1436 | logical = old_root->logical; | 1426 | logical = old_root->logical; |
1437 | } else { | 1427 | } else { |
1438 | logical = eb_root->start; | 1428 | logical = eb_root->start; |
1439 | } | 1429 | } |
1440 | 1430 | ||
1441 | tm = tree_mod_log_search(root->fs_info, logical, time_seq); | 1431 | tm = tree_mod_log_search(root->fs_info, logical, time_seq); |
1442 | if (old_root && tm && tm->op != MOD_LOG_KEY_REMOVE_WHILE_FREEING) { | 1432 | if (old_root && tm && tm->op != MOD_LOG_KEY_REMOVE_WHILE_FREEING) { |
1443 | btrfs_tree_read_unlock(eb_root); | 1433 | btrfs_tree_read_unlock(eb_root); |
1444 | free_extent_buffer(eb_root); | 1434 | free_extent_buffer(eb_root); |
1445 | old = read_tree_block(root, logical, 0); | 1435 | old = read_tree_block(root, logical, 0); |
1446 | if (WARN_ON(!old || !extent_buffer_uptodate(old))) { | 1436 | if (WARN_ON(!old || !extent_buffer_uptodate(old))) { |
1447 | free_extent_buffer(old); | 1437 | free_extent_buffer(old); |
1448 | btrfs_warn(root->fs_info, | 1438 | btrfs_warn(root->fs_info, |
1449 | "failed to read tree block %llu from get_old_root", logical); | 1439 | "failed to read tree block %llu from get_old_root", logical); |
1450 | } else { | 1440 | } else { |
1451 | eb = btrfs_clone_extent_buffer(old); | 1441 | eb = btrfs_clone_extent_buffer(old); |
1452 | free_extent_buffer(old); | 1442 | free_extent_buffer(old); |
1453 | } | 1443 | } |
1454 | } else if (old_root) { | 1444 | } else if (old_root) { |
1455 | btrfs_tree_read_unlock(eb_root); | 1445 | btrfs_tree_read_unlock(eb_root); |
1456 | free_extent_buffer(eb_root); | 1446 | free_extent_buffer(eb_root); |
1457 | eb = alloc_dummy_extent_buffer(logical, root->nodesize); | 1447 | eb = alloc_dummy_extent_buffer(logical, root->nodesize); |
1458 | } else { | 1448 | } else { |
1459 | btrfs_set_lock_blocking_rw(eb_root, BTRFS_READ_LOCK); | 1449 | btrfs_set_lock_blocking_rw(eb_root, BTRFS_READ_LOCK); |
1460 | eb = btrfs_clone_extent_buffer(eb_root); | 1450 | eb = btrfs_clone_extent_buffer(eb_root); |
1461 | btrfs_tree_read_unlock_blocking(eb_root); | 1451 | btrfs_tree_read_unlock_blocking(eb_root); |
1462 | free_extent_buffer(eb_root); | 1452 | free_extent_buffer(eb_root); |
1463 | } | 1453 | } |
1464 | 1454 | ||
1465 | if (!eb) | 1455 | if (!eb) |
1466 | return NULL; | 1456 | return NULL; |
1467 | extent_buffer_get(eb); | 1457 | extent_buffer_get(eb); |
1468 | btrfs_tree_read_lock(eb); | 1458 | btrfs_tree_read_lock(eb); |
1469 | if (old_root) { | 1459 | if (old_root) { |
1470 | btrfs_set_header_bytenr(eb, eb->start); | 1460 | btrfs_set_header_bytenr(eb, eb->start); |
1471 | btrfs_set_header_backref_rev(eb, BTRFS_MIXED_BACKREF_REV); | 1461 | btrfs_set_header_backref_rev(eb, BTRFS_MIXED_BACKREF_REV); |
1472 | btrfs_set_header_owner(eb, btrfs_header_owner(eb_root)); | 1462 | btrfs_set_header_owner(eb, btrfs_header_owner(eb_root)); |
1473 | btrfs_set_header_level(eb, old_root->level); | 1463 | btrfs_set_header_level(eb, old_root->level); |
1474 | btrfs_set_header_generation(eb, old_generation); | 1464 | btrfs_set_header_generation(eb, old_generation); |
1475 | } | 1465 | } |
1476 | if (tm) | 1466 | if (tm) |
1477 | __tree_mod_log_rewind(root->fs_info, eb, time_seq, tm); | 1467 | __tree_mod_log_rewind(root->fs_info, eb, time_seq, tm); |
1478 | else | 1468 | else |
1479 | WARN_ON(btrfs_header_level(eb) != 0); | 1469 | WARN_ON(btrfs_header_level(eb) != 0); |
1480 | WARN_ON(btrfs_header_nritems(eb) > BTRFS_NODEPTRS_PER_BLOCK(root)); | 1470 | WARN_ON(btrfs_header_nritems(eb) > BTRFS_NODEPTRS_PER_BLOCK(root)); |
1481 | 1471 | ||
1482 | return eb; | 1472 | return eb; |
1483 | } | 1473 | } |
1484 | 1474 | ||
1485 | int btrfs_old_root_level(struct btrfs_root *root, u64 time_seq) | 1475 | int btrfs_old_root_level(struct btrfs_root *root, u64 time_seq) |
1486 | { | 1476 | { |
1487 | struct tree_mod_elem *tm; | 1477 | struct tree_mod_elem *tm; |
1488 | int level; | 1478 | int level; |
1489 | struct extent_buffer *eb_root = btrfs_root_node(root); | 1479 | struct extent_buffer *eb_root = btrfs_root_node(root); |
1490 | 1480 | ||
1491 | tm = __tree_mod_log_oldest_root(root->fs_info, eb_root, time_seq); | 1481 | tm = __tree_mod_log_oldest_root(root->fs_info, eb_root, time_seq); |
1492 | if (tm && tm->op == MOD_LOG_ROOT_REPLACE) { | 1482 | if (tm && tm->op == MOD_LOG_ROOT_REPLACE) { |
1493 | level = tm->old_root.level; | 1483 | level = tm->old_root.level; |
1494 | } else { | 1484 | } else { |
1495 | level = btrfs_header_level(eb_root); | 1485 | level = btrfs_header_level(eb_root); |
1496 | } | 1486 | } |
1497 | free_extent_buffer(eb_root); | 1487 | free_extent_buffer(eb_root); |
1498 | 1488 | ||
1499 | return level; | 1489 | return level; |
1500 | } | 1490 | } |
1501 | 1491 | ||
1502 | static inline int should_cow_block(struct btrfs_trans_handle *trans, | 1492 | static inline int should_cow_block(struct btrfs_trans_handle *trans, |
1503 | struct btrfs_root *root, | 1493 | struct btrfs_root *root, |
1504 | struct extent_buffer *buf) | 1494 | struct extent_buffer *buf) |
1505 | { | 1495 | { |
1506 | if (btrfs_test_is_dummy_root(root)) | 1496 | if (btrfs_test_is_dummy_root(root)) |
1507 | return 0; | 1497 | return 0; |
1508 | 1498 | ||
1509 | /* ensure we can see the force_cow */ | 1499 | /* ensure we can see the force_cow */ |
1510 | smp_rmb(); | 1500 | smp_rmb(); |
1511 | 1501 | ||
1512 | /* | 1502 | /* |
1513 | * We do not need to cow a block if | 1503 | * We do not need to cow a block if |
1514 | * 1) this block is not created or changed in this transaction; | 1504 | * 1) this block is not created or changed in this transaction; |
1515 | * 2) this block does not belong to TREE_RELOC tree; | 1505 | * 2) this block does not belong to TREE_RELOC tree; |
1516 | * 3) the root is not forced COW. | 1506 | * 3) the root is not forced COW. |
1517 | * | 1507 | * |
1518 | * What is forced COW: | 1508 | * What is forced COW: |
1519 | * when we create snapshot during commiting the transaction, | 1509 | * when we create snapshot during commiting the transaction, |
1520 | * after we've finished coping src root, we must COW the shared | 1510 | * after we've finished coping src root, we must COW the shared |
1521 | * block to ensure the metadata consistency. | 1511 | * block to ensure the metadata consistency. |
1522 | */ | 1512 | */ |
1523 | if (btrfs_header_generation(buf) == trans->transid && | 1513 | if (btrfs_header_generation(buf) == trans->transid && |
1524 | !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) && | 1514 | !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) && |
1525 | !(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID && | 1515 | !(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID && |
1526 | btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)) && | 1516 | btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)) && |
1527 | !test_bit(BTRFS_ROOT_FORCE_COW, &root->state)) | 1517 | !test_bit(BTRFS_ROOT_FORCE_COW, &root->state)) |
1528 | return 0; | 1518 | return 0; |
1529 | return 1; | 1519 | return 1; |
1530 | } | 1520 | } |
1531 | 1521 | ||
/*
 * cows a single block, see __btrfs_cow_block for the real work.
 * This version of it has extra checks so that a block isn't cow'd more than
 * once per transaction, as long as it hasn't been written yet.
 *
 * On success *cow_ret points to the writable copy (or to @buf itself when
 * should_cow_block() says no copy is needed).  Returns 0 or a negative
 * error from __btrfs_cow_block().
 */
noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
		    struct btrfs_root *root, struct extent_buffer *buf,
		    struct extent_buffer *parent, int parent_slot,
		    struct extent_buffer **cow_ret)
{
	u64 search_start;
	int ret;

	/*
	 * COWing outside the currently running transaction, or with a
	 * stale fs generation, indicates a logic bug in the caller —
	 * warn loudly but continue.
	 */
	if (trans->transaction != root->fs_info->running_transaction)
		WARN(1, KERN_CRIT "trans %llu running %llu\n",
		       trans->transid,
		       root->fs_info->running_transaction->transid);

	if (trans->transid != root->fs_info->generation)
		WARN(1, KERN_CRIT "trans %llu running %llu\n",
		       trans->transid, root->fs_info->generation);

	/* Block already COWed in this transaction — hand it back as-is. */
	if (!should_cow_block(trans, root, buf)) {
		*cow_ret = buf;
		return 0;
	}

	/*
	 * Round down to the 1GiB boundary containing @buf; passed down as
	 * the allocation search hint so the copy lands near the original.
	 */
	search_start = buf->start & ~((u64)(1024 * 1024 * 1024) - 1);

	/* Drop to blocking locks before the potentially-sleeping COW. */
	if (parent)
		btrfs_set_lock_blocking(parent);
	btrfs_set_lock_blocking(buf);

	ret = __btrfs_cow_block(trans, root, buf, parent,
				 parent_slot, cow_ret, search_start, 0);

	trace_btrfs_cow_block(root, buf, *cow_ret);

	return ret;
}
1572 | 1562 | ||
1573 | /* | 1563 | /* |
1574 | * helper function for defrag to decide if two blocks pointed to by a | 1564 | * helper function for defrag to decide if two blocks pointed to by a |
1575 | * node are actually close by | 1565 | * node are actually close by |
1576 | */ | 1566 | */ |
1577 | static int close_blocks(u64 blocknr, u64 other, u32 blocksize) | 1567 | static int close_blocks(u64 blocknr, u64 other, u32 blocksize) |
1578 | { | 1568 | { |
1579 | if (blocknr < other && other - (blocknr + blocksize) < 32768) | 1569 | if (blocknr < other && other - (blocknr + blocksize) < 32768) |
1580 | return 1; | 1570 | return 1; |
1581 | if (blocknr > other && blocknr - (other + blocksize) < 32768) | 1571 | if (blocknr > other && blocknr - (other + blocksize) < 32768) |
1582 | return 1; | 1572 | return 1; |
1583 | return 0; | 1573 | return 0; |
1584 | } | 1574 | } |
1585 | 1575 | ||
1586 | /* | 1576 | /* |
1587 | * compare two keys in a memcmp fashion | 1577 | * compare two keys in a memcmp fashion |
1588 | */ | 1578 | */ |
1589 | static int comp_keys(struct btrfs_disk_key *disk, struct btrfs_key *k2) | 1579 | static int comp_keys(struct btrfs_disk_key *disk, struct btrfs_key *k2) |
1590 | { | 1580 | { |
1591 | struct btrfs_key k1; | 1581 | struct btrfs_key k1; |
1592 | 1582 | ||
1593 | btrfs_disk_key_to_cpu(&k1, disk); | 1583 | btrfs_disk_key_to_cpu(&k1, disk); |
1594 | 1584 | ||
1595 | return btrfs_comp_cpu_keys(&k1, k2); | 1585 | return btrfs_comp_cpu_keys(&k1, k2); |
1596 | } | 1586 | } |
1597 | 1587 | ||
1598 | /* | 1588 | /* |
1599 | * same as comp_keys only with two btrfs_key's | 1589 | * same as comp_keys only with two btrfs_key's |
1600 | */ | 1590 | */ |
1601 | int btrfs_comp_cpu_keys(struct btrfs_key *k1, struct btrfs_key *k2) | 1591 | int btrfs_comp_cpu_keys(struct btrfs_key *k1, struct btrfs_key *k2) |
1602 | { | 1592 | { |
1603 | if (k1->objectid > k2->objectid) | 1593 | if (k1->objectid > k2->objectid) |
1604 | return 1; | 1594 | return 1; |
1605 | if (k1->objectid < k2->objectid) | 1595 | if (k1->objectid < k2->objectid) |
1606 | return -1; | 1596 | return -1; |
1607 | if (k1->type > k2->type) | 1597 | if (k1->type > k2->type) |
1608 | return 1; | 1598 | return 1; |
1609 | if (k1->type < k2->type) | 1599 | if (k1->type < k2->type) |
1610 | return -1; | 1600 | return -1; |
1611 | if (k1->offset > k2->offset) | 1601 | if (k1->offset > k2->offset) |
1612 | return 1; | 1602 | return 1; |
1613 | if (k1->offset < k2->offset) | 1603 | if (k1->offset < k2->offset) |
1614 | return -1; | 1604 | return -1; |
1615 | return 0; | 1605 | return 0; |
1616 | } | 1606 | } |
1617 | 1607 | ||
/*
 * this is used by the defrag code to go through all the
 * leaves pointed to by a node and reallocate them so that
 * disk order is close to key order
 *
 * @parent:     the node whose children are relocated
 * @start_slot: first slot in @parent to consider
 * @last_ret:   in/out: allocation hint carried across calls
 * @progress:   keys smaller than this were already processed
 *
 * Returns 0 on success, -EIO / read error on failure to read a child,
 * or the error from __btrfs_cow_block().
 */
int btrfs_realloc_node(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct extent_buffer *parent,
		       int start_slot, u64 *last_ret,
		       struct btrfs_key *progress)
{
	struct extent_buffer *cur;
	u64 blocknr;
	u64 gen;
	u64 search_start = *last_ret;
	u64 last_block = 0;
	u64 other;
	u32 parent_nritems;
	int end_slot;
	int i;
	int err = 0;
	int parent_level;
	int uptodate;
	u32 blocksize;
	int progress_passed = 0;
	struct btrfs_disk_key disk_key;

	/* NOTE(review): parent_level is assigned but never read here. */
	parent_level = btrfs_header_level(parent);

	WARN_ON(trans->transaction != root->fs_info->running_transaction);
	WARN_ON(trans->transid != root->fs_info->generation);

	parent_nritems = btrfs_header_nritems(parent);
	blocksize = root->nodesize;
	end_slot = parent_nritems;

	/* A single child cannot be out of order with anything. */
	if (parent_nritems == 1)
		return 0;

	btrfs_set_lock_blocking(parent);

	for (i = start_slot; i < end_slot; i++) {
		int close = 1;

		/* Skip children below the already-processed progress key. */
		btrfs_node_key(parent, &disk_key, i);
		if (!progress_passed && comp_keys(&disk_key, progress) < 0)
			continue;

		progress_passed = 1;
		blocknr = btrfs_node_blockptr(parent, i);
		gen = btrfs_node_ptr_generation(parent, i);
		if (last_block == 0)
			last_block = blocknr;

		/*
		 * A child that is already close to either of its
		 * neighbors on disk does not need relocation.
		 */
		if (i > 0) {
			other = btrfs_node_blockptr(parent, i - 1);
			close = close_blocks(blocknr, other, blocksize);
		}
		if (!close && i < end_slot - 2) {
			other = btrfs_node_blockptr(parent, i + 1);
			close = close_blocks(blocknr, other, blocksize);
		}
		if (close) {
			last_block = blocknr;
			continue;
		}

		/* Get an uptodate copy of the child, reading if needed. */
		cur = btrfs_find_tree_block(root, blocknr);
		if (cur)
			uptodate = btrfs_buffer_uptodate(cur, gen, 0);
		else
			uptodate = 0;
		if (!cur || !uptodate) {
			if (!cur) {
				cur = read_tree_block(root, blocknr, gen);
				if (!cur || !extent_buffer_uptodate(cur)) {
					free_extent_buffer(cur);
					return -EIO;
				}
			} else if (!uptodate) {
				err = btrfs_read_buffer(cur, gen);
				if (err) {
					free_extent_buffer(cur);
					return err;
				}
			}
		}
		if (search_start == 0)
			search_start = last_block;

		/*
		 * COW relocates the child near search_start; cap the
		 * preferred allocation span at 16 blocks.
		 */
		btrfs_tree_lock(cur);
		btrfs_set_lock_blocking(cur);
		err = __btrfs_cow_block(trans, root, cur, parent, i,
					&cur, search_start,
					min(16 * blocksize,
					    (end_slot - i) * blocksize));
		if (err) {
			btrfs_tree_unlock(cur);
			free_extent_buffer(cur);
			break;
		}
		/* Chain the hint so the next child lands after this one. */
		search_start = cur->start;
		last_block = cur->start;
		*last_ret = search_start;
		btrfs_tree_unlock(cur);
		free_extent_buffer(cur);
	}
	return err;
}
1726 | 1716 | ||
1727 | /* | 1717 | /* |
1728 | * The leaf data grows from end-to-front in the node. | 1718 | * The leaf data grows from end-to-front in the node. |
1729 | * this returns the address of the start of the last item, | 1719 | * this returns the address of the start of the last item, |
1730 | * which is the stop of the leaf data stack | 1720 | * which is the stop of the leaf data stack |
1731 | */ | 1721 | */ |
1732 | static inline unsigned int leaf_data_end(struct btrfs_root *root, | 1722 | static inline unsigned int leaf_data_end(struct btrfs_root *root, |
1733 | struct extent_buffer *leaf) | 1723 | struct extent_buffer *leaf) |
1734 | { | 1724 | { |
1735 | u32 nr = btrfs_header_nritems(leaf); | 1725 | u32 nr = btrfs_header_nritems(leaf); |
1736 | if (nr == 0) | 1726 | if (nr == 0) |
1737 | return BTRFS_LEAF_DATA_SIZE(root); | 1727 | return BTRFS_LEAF_DATA_SIZE(root); |
1738 | return btrfs_item_offset_nr(leaf, nr - 1); | 1728 | return btrfs_item_offset_nr(leaf, nr - 1); |
1739 | } | 1729 | } |
1740 | 1730 | ||
1741 | 1731 | ||
1742 | /* | 1732 | /* |
1743 | * search for key in the extent_buffer. The items start at offset p, | 1733 | * search for key in the extent_buffer. The items start at offset p, |
1744 | * and they are item_size apart. There are 'max' items in p. | 1734 | * and they are item_size apart. There are 'max' items in p. |
1745 | * | 1735 | * |
1746 | * the slot in the array is returned via slot, and it points to | 1736 | * the slot in the array is returned via slot, and it points to |
1747 | * the place where you would insert key if it is not found in | 1737 | * the place where you would insert key if it is not found in |
1748 | * the array. | 1738 | * the array. |
1749 | * | 1739 | * |
1750 | * slot may point to max if the key is bigger than all of the keys | 1740 | * slot may point to max if the key is bigger than all of the keys |
1751 | */ | 1741 | */ |
1752 | static noinline int generic_bin_search(struct extent_buffer *eb, | 1742 | static noinline int generic_bin_search(struct extent_buffer *eb, |
1753 | unsigned long p, | 1743 | unsigned long p, |
1754 | int item_size, struct btrfs_key *key, | 1744 | int item_size, struct btrfs_key *key, |
1755 | int max, int *slot) | 1745 | int max, int *slot) |
1756 | { | 1746 | { |
1757 | int low = 0; | 1747 | int low = 0; |
1758 | int high = max; | 1748 | int high = max; |
1759 | int mid; | 1749 | int mid; |
1760 | int ret; | 1750 | int ret; |
1761 | struct btrfs_disk_key *tmp = NULL; | 1751 | struct btrfs_disk_key *tmp = NULL; |
1762 | struct btrfs_disk_key unaligned; | 1752 | struct btrfs_disk_key unaligned; |
1763 | unsigned long offset; | 1753 | unsigned long offset; |
1764 | char *kaddr = NULL; | 1754 | char *kaddr = NULL; |
1765 | unsigned long map_start = 0; | 1755 | unsigned long map_start = 0; |
1766 | unsigned long map_len = 0; | 1756 | unsigned long map_len = 0; |
1767 | int err; | 1757 | int err; |
1768 | 1758 | ||
1769 | while (low < high) { | 1759 | while (low < high) { |
1770 | mid = (low + high) / 2; | 1760 | mid = (low + high) / 2; |
1771 | offset = p + mid * item_size; | 1761 | offset = p + mid * item_size; |
1772 | 1762 | ||
1773 | if (!kaddr || offset < map_start || | 1763 | if (!kaddr || offset < map_start || |
1774 | (offset + sizeof(struct btrfs_disk_key)) > | 1764 | (offset + sizeof(struct btrfs_disk_key)) > |
1775 | map_start + map_len) { | 1765 | map_start + map_len) { |
1776 | 1766 | ||
1777 | err = map_private_extent_buffer(eb, offset, | 1767 | err = map_private_extent_buffer(eb, offset, |
1778 | sizeof(struct btrfs_disk_key), | 1768 | sizeof(struct btrfs_disk_key), |
1779 | &kaddr, &map_start, &map_len); | 1769 | &kaddr, &map_start, &map_len); |
1780 | 1770 | ||
1781 | if (!err) { | 1771 | if (!err) { |
1782 | tmp = (struct btrfs_disk_key *)(kaddr + offset - | 1772 | tmp = (struct btrfs_disk_key *)(kaddr + offset - |
1783 | map_start); | 1773 | map_start); |
1784 | } else { | 1774 | } else { |
1785 | read_extent_buffer(eb, &unaligned, | 1775 | read_extent_buffer(eb, &unaligned, |
1786 | offset, sizeof(unaligned)); | 1776 | offset, sizeof(unaligned)); |
1787 | tmp = &unaligned; | 1777 | tmp = &unaligned; |
1788 | } | 1778 | } |
1789 | 1779 | ||
1790 | } else { | 1780 | } else { |
1791 | tmp = (struct btrfs_disk_key *)(kaddr + offset - | 1781 | tmp = (struct btrfs_disk_key *)(kaddr + offset - |
1792 | map_start); | 1782 | map_start); |
1793 | } | 1783 | } |
1794 | ret = comp_keys(tmp, key); | 1784 | ret = comp_keys(tmp, key); |
1795 | 1785 | ||
1796 | if (ret < 0) | 1786 | if (ret < 0) |
1797 | low = mid + 1; | 1787 | low = mid + 1; |
1798 | else if (ret > 0) | 1788 | else if (ret > 0) |
1799 | high = mid; | 1789 | high = mid; |
1800 | else { | 1790 | else { |
1801 | *slot = mid; | 1791 | *slot = mid; |
1802 | return 0; | 1792 | return 0; |
1803 | } | 1793 | } |
1804 | } | 1794 | } |
1805 | *slot = low; | 1795 | *slot = low; |
1806 | return 1; | 1796 | return 1; |
1807 | } | 1797 | } |
1808 | 1798 | ||
1809 | /* | 1799 | /* |
1810 | * simple bin_search frontend that does the right thing for | 1800 | * simple bin_search frontend that does the right thing for |
1811 | * leaves vs nodes | 1801 | * leaves vs nodes |
1812 | */ | 1802 | */ |
1813 | static int bin_search(struct extent_buffer *eb, struct btrfs_key *key, | 1803 | static int bin_search(struct extent_buffer *eb, struct btrfs_key *key, |
1814 | int level, int *slot) | 1804 | int level, int *slot) |
1815 | { | 1805 | { |
1816 | if (level == 0) | 1806 | if (level == 0) |
1817 | return generic_bin_search(eb, | 1807 | return generic_bin_search(eb, |
1818 | offsetof(struct btrfs_leaf, items), | 1808 | offsetof(struct btrfs_leaf, items), |
1819 | sizeof(struct btrfs_item), | 1809 | sizeof(struct btrfs_item), |
1820 | key, btrfs_header_nritems(eb), | 1810 | key, btrfs_header_nritems(eb), |
1821 | slot); | 1811 | slot); |
1822 | else | 1812 | else |
1823 | return generic_bin_search(eb, | 1813 | return generic_bin_search(eb, |
1824 | offsetof(struct btrfs_node, ptrs), | 1814 | offsetof(struct btrfs_node, ptrs), |
1825 | sizeof(struct btrfs_key_ptr), | 1815 | sizeof(struct btrfs_key_ptr), |
1826 | key, btrfs_header_nritems(eb), | 1816 | key, btrfs_header_nritems(eb), |
1827 | slot); | 1817 | slot); |
1828 | } | 1818 | } |
1829 | 1819 | ||
/* Exported wrapper around bin_search() for callers outside this file. */
int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key,
		     int level, int *slot)
{
	return bin_search(eb, key, level, slot);
}
1835 | 1825 | ||
1836 | static void root_add_used(struct btrfs_root *root, u32 size) | 1826 | static void root_add_used(struct btrfs_root *root, u32 size) |
1837 | { | 1827 | { |
1838 | spin_lock(&root->accounting_lock); | 1828 | spin_lock(&root->accounting_lock); |
1839 | btrfs_set_root_used(&root->root_item, | 1829 | btrfs_set_root_used(&root->root_item, |
1840 | btrfs_root_used(&root->root_item) + size); | 1830 | btrfs_root_used(&root->root_item) + size); |
1841 | spin_unlock(&root->accounting_lock); | 1831 | spin_unlock(&root->accounting_lock); |
1842 | } | 1832 | } |
1843 | 1833 | ||
1844 | static void root_sub_used(struct btrfs_root *root, u32 size) | 1834 | static void root_sub_used(struct btrfs_root *root, u32 size) |
1845 | { | 1835 | { |
1846 | spin_lock(&root->accounting_lock); | 1836 | spin_lock(&root->accounting_lock); |
1847 | btrfs_set_root_used(&root->root_item, | 1837 | btrfs_set_root_used(&root->root_item, |
1848 | btrfs_root_used(&root->root_item) - size); | 1838 | btrfs_root_used(&root->root_item) - size); |
1849 | spin_unlock(&root->accounting_lock); | 1839 | spin_unlock(&root->accounting_lock); |
1850 | } | 1840 | } |
1851 | 1841 | ||
1852 | /* given a node and slot number, this reads the blocks it points to. The | 1842 | /* given a node and slot number, this reads the blocks it points to. The |
1853 | * extent buffer is returned with a reference taken (but unlocked). | 1843 | * extent buffer is returned with a reference taken (but unlocked). |
1854 | * NULL is returned on error. | 1844 | * NULL is returned on error. |
1855 | */ | 1845 | */ |
1856 | static noinline struct extent_buffer *read_node_slot(struct btrfs_root *root, | 1846 | static noinline struct extent_buffer *read_node_slot(struct btrfs_root *root, |
1857 | struct extent_buffer *parent, int slot) | 1847 | struct extent_buffer *parent, int slot) |
1858 | { | 1848 | { |
1859 | int level = btrfs_header_level(parent); | 1849 | int level = btrfs_header_level(parent); |
1860 | struct extent_buffer *eb; | 1850 | struct extent_buffer *eb; |
1861 | 1851 | ||
1862 | if (slot < 0) | 1852 | if (slot < 0) |
1863 | return NULL; | 1853 | return NULL; |
1864 | if (slot >= btrfs_header_nritems(parent)) | 1854 | if (slot >= btrfs_header_nritems(parent)) |
1865 | return NULL; | 1855 | return NULL; |
1866 | 1856 | ||
1867 | BUG_ON(level == 0); | 1857 | BUG_ON(level == 0); |
1868 | 1858 | ||
1869 | eb = read_tree_block(root, btrfs_node_blockptr(parent, slot), | 1859 | eb = read_tree_block(root, btrfs_node_blockptr(parent, slot), |
1870 | btrfs_node_ptr_generation(parent, slot)); | 1860 | btrfs_node_ptr_generation(parent, slot)); |
1871 | if (eb && !extent_buffer_uptodate(eb)) { | 1861 | if (eb && !extent_buffer_uptodate(eb)) { |
1872 | free_extent_buffer(eb); | 1862 | free_extent_buffer(eb); |
1873 | eb = NULL; | 1863 | eb = NULL; |
1874 | } | 1864 | } |
1875 | 1865 | ||
1876 | return eb; | 1866 | return eb; |
1877 | } | 1867 | } |
1878 | 1868 | ||
1879 | /* | 1869 | /* |
1880 | * node level balancing, used to make sure nodes are in proper order for | 1870 | * node level balancing, used to make sure nodes are in proper order for |
1881 | * item deletion. We balance from the top down, so we have to make sure | 1871 | * item deletion. We balance from the top down, so we have to make sure |
1882 | * that a deletion won't leave an node completely empty later on. | 1872 | * that a deletion won't leave an node completely empty later on. |
1883 | */ | 1873 | */ |
1884 | static noinline int balance_level(struct btrfs_trans_handle *trans, | 1874 | static noinline int balance_level(struct btrfs_trans_handle *trans, |
1885 | struct btrfs_root *root, | 1875 | struct btrfs_root *root, |
1886 | struct btrfs_path *path, int level) | 1876 | struct btrfs_path *path, int level) |
1887 | { | 1877 | { |
1888 | struct extent_buffer *right = NULL; | 1878 | struct extent_buffer *right = NULL; |
1889 | struct extent_buffer *mid; | 1879 | struct extent_buffer *mid; |
1890 | struct extent_buffer *left = NULL; | 1880 | struct extent_buffer *left = NULL; |
1891 | struct extent_buffer *parent = NULL; | 1881 | struct extent_buffer *parent = NULL; |
1892 | int ret = 0; | 1882 | int ret = 0; |
1893 | int wret; | 1883 | int wret; |
1894 | int pslot; | 1884 | int pslot; |
1895 | int orig_slot = path->slots[level]; | 1885 | int orig_slot = path->slots[level]; |
1896 | u64 orig_ptr; | 1886 | u64 orig_ptr; |
1897 | 1887 | ||
1898 | if (level == 0) | 1888 | if (level == 0) |
1899 | return 0; | 1889 | return 0; |
1900 | 1890 | ||
1901 | mid = path->nodes[level]; | 1891 | mid = path->nodes[level]; |
1902 | 1892 | ||
1903 | WARN_ON(path->locks[level] != BTRFS_WRITE_LOCK && | 1893 | WARN_ON(path->locks[level] != BTRFS_WRITE_LOCK && |
1904 | path->locks[level] != BTRFS_WRITE_LOCK_BLOCKING); | 1894 | path->locks[level] != BTRFS_WRITE_LOCK_BLOCKING); |
1905 | WARN_ON(btrfs_header_generation(mid) != trans->transid); | 1895 | WARN_ON(btrfs_header_generation(mid) != trans->transid); |
1906 | 1896 | ||
1907 | orig_ptr = btrfs_node_blockptr(mid, orig_slot); | 1897 | orig_ptr = btrfs_node_blockptr(mid, orig_slot); |
1908 | 1898 | ||
1909 | if (level < BTRFS_MAX_LEVEL - 1) { | 1899 | if (level < BTRFS_MAX_LEVEL - 1) { |
1910 | parent = path->nodes[level + 1]; | 1900 | parent = path->nodes[level + 1]; |
1911 | pslot = path->slots[level + 1]; | 1901 | pslot = path->slots[level + 1]; |
1912 | } | 1902 | } |
1913 | 1903 | ||
1914 | /* | 1904 | /* |
1915 | * deal with the case where there is only one pointer in the root | 1905 | * deal with the case where there is only one pointer in the root |
1916 | * by promoting the node below to a root | 1906 | * by promoting the node below to a root |
1917 | */ | 1907 | */ |
1918 | if (!parent) { | 1908 | if (!parent) { |
1919 | struct extent_buffer *child; | 1909 | struct extent_buffer *child; |
1920 | 1910 | ||
1921 | if (btrfs_header_nritems(mid) != 1) | 1911 | if (btrfs_header_nritems(mid) != 1) |
1922 | return 0; | 1912 | return 0; |
1923 | 1913 | ||
1924 | /* promote the child to a root */ | 1914 | /* promote the child to a root */ |
1925 | child = read_node_slot(root, mid, 0); | 1915 | child = read_node_slot(root, mid, 0); |
1926 | if (!child) { | 1916 | if (!child) { |
1927 | ret = -EROFS; | 1917 | ret = -EROFS; |
1928 | btrfs_std_error(root->fs_info, ret); | 1918 | btrfs_std_error(root->fs_info, ret); |
1929 | goto enospc; | 1919 | goto enospc; |
1930 | } | 1920 | } |
1931 | 1921 | ||
1932 | btrfs_tree_lock(child); | 1922 | btrfs_tree_lock(child); |
1933 | btrfs_set_lock_blocking(child); | 1923 | btrfs_set_lock_blocking(child); |
1934 | ret = btrfs_cow_block(trans, root, child, mid, 0, &child); | 1924 | ret = btrfs_cow_block(trans, root, child, mid, 0, &child); |
1935 | if (ret) { | 1925 | if (ret) { |
1936 | btrfs_tree_unlock(child); | 1926 | btrfs_tree_unlock(child); |
1937 | free_extent_buffer(child); | 1927 | free_extent_buffer(child); |
1938 | goto enospc; | 1928 | goto enospc; |
1939 | } | 1929 | } |
1940 | 1930 | ||
1941 | tree_mod_log_set_root_pointer(root, child, 1); | 1931 | tree_mod_log_set_root_pointer(root, child, 1); |
1942 | rcu_assign_pointer(root->node, child); | 1932 | rcu_assign_pointer(root->node, child); |
1943 | 1933 | ||
1944 | add_root_to_dirty_list(root); | 1934 | add_root_to_dirty_list(root); |
1945 | btrfs_tree_unlock(child); | 1935 | btrfs_tree_unlock(child); |
1946 | 1936 | ||
1947 | path->locks[level] = 0; | 1937 | path->locks[level] = 0; |
1948 | path->nodes[level] = NULL; | 1938 | path->nodes[level] = NULL; |
1949 | clean_tree_block(trans, root, mid); | 1939 | clean_tree_block(trans, root, mid); |
1950 | btrfs_tree_unlock(mid); | 1940 | btrfs_tree_unlock(mid); |
1951 | /* once for the path */ | 1941 | /* once for the path */ |
1952 | free_extent_buffer(mid); | 1942 | free_extent_buffer(mid); |
1953 | 1943 | ||
1954 | root_sub_used(root, mid->len); | 1944 | root_sub_used(root, mid->len); |
1955 | btrfs_free_tree_block(trans, root, mid, 0, 1); | 1945 | btrfs_free_tree_block(trans, root, mid, 0, 1); |
1956 | /* once for the root ptr */ | 1946 | /* once for the root ptr */ |
1957 | free_extent_buffer_stale(mid); | 1947 | free_extent_buffer_stale(mid); |
1958 | return 0; | 1948 | return 0; |
1959 | } | 1949 | } |
1960 | if (btrfs_header_nritems(mid) > | 1950 | if (btrfs_header_nritems(mid) > |
1961 | BTRFS_NODEPTRS_PER_BLOCK(root) / 4) | 1951 | BTRFS_NODEPTRS_PER_BLOCK(root) / 4) |
1962 | return 0; | 1952 | return 0; |
1963 | 1953 | ||
1964 | left = read_node_slot(root, parent, pslot - 1); | 1954 | left = read_node_slot(root, parent, pslot - 1); |
1965 | if (left) { | 1955 | if (left) { |
1966 | btrfs_tree_lock(left); | 1956 | btrfs_tree_lock(left); |
1967 | btrfs_set_lock_blocking(left); | 1957 | btrfs_set_lock_blocking(left); |
1968 | wret = btrfs_cow_block(trans, root, left, | 1958 | wret = btrfs_cow_block(trans, root, left, |
1969 | parent, pslot - 1, &left); | 1959 | parent, pslot - 1, &left); |
1970 | if (wret) { | 1960 | if (wret) { |
1971 | ret = wret; | 1961 | ret = wret; |
1972 | goto enospc; | 1962 | goto enospc; |
1973 | } | 1963 | } |
1974 | } | 1964 | } |
1975 | right = read_node_slot(root, parent, pslot + 1); | 1965 | right = read_node_slot(root, parent, pslot + 1); |
1976 | if (right) { | 1966 | if (right) { |
1977 | btrfs_tree_lock(right); | 1967 | btrfs_tree_lock(right); |
1978 | btrfs_set_lock_blocking(right); | 1968 | btrfs_set_lock_blocking(right); |
1979 | wret = btrfs_cow_block(trans, root, right, | 1969 | wret = btrfs_cow_block(trans, root, right, |
1980 | parent, pslot + 1, &right); | 1970 | parent, pslot + 1, &right); |
1981 | if (wret) { | 1971 | if (wret) { |
1982 | ret = wret; | 1972 | ret = wret; |
1983 | goto enospc; | 1973 | goto enospc; |
1984 | } | 1974 | } |
1985 | } | 1975 | } |
1986 | 1976 | ||
1987 | /* first, try to make some room in the middle buffer */ | 1977 | /* first, try to make some room in the middle buffer */ |
1988 | if (left) { | 1978 | if (left) { |
1989 | orig_slot += btrfs_header_nritems(left); | 1979 | orig_slot += btrfs_header_nritems(left); |
1990 | wret = push_node_left(trans, root, left, mid, 1); | 1980 | wret = push_node_left(trans, root, left, mid, 1); |
1991 | if (wret < 0) | 1981 | if (wret < 0) |
1992 | ret = wret; | 1982 | ret = wret; |
1993 | } | 1983 | } |
1994 | 1984 | ||
1995 | /* | 1985 | /* |
1996 | * then try to empty the right most buffer into the middle | 1986 | * then try to empty the right most buffer into the middle |
1997 | */ | 1987 | */ |
1998 | if (right) { | 1988 | if (right) { |
1999 | wret = push_node_left(trans, root, mid, right, 1); | 1989 | wret = push_node_left(trans, root, mid, right, 1); |
2000 | if (wret < 0 && wret != -ENOSPC) | 1990 | if (wret < 0 && wret != -ENOSPC) |
2001 | ret = wret; | 1991 | ret = wret; |
2002 | if (btrfs_header_nritems(right) == 0) { | 1992 | if (btrfs_header_nritems(right) == 0) { |
2003 | clean_tree_block(trans, root, right); | 1993 | clean_tree_block(trans, root, right); |
2004 | btrfs_tree_unlock(right); | 1994 | btrfs_tree_unlock(right); |
2005 | del_ptr(root, path, level + 1, pslot + 1); | 1995 | del_ptr(root, path, level + 1, pslot + 1); |
2006 | root_sub_used(root, right->len); | 1996 | root_sub_used(root, right->len); |
2007 | btrfs_free_tree_block(trans, root, right, 0, 1); | 1997 | btrfs_free_tree_block(trans, root, right, 0, 1); |
2008 | free_extent_buffer_stale(right); | 1998 | free_extent_buffer_stale(right); |
2009 | right = NULL; | 1999 | right = NULL; |
2010 | } else { | 2000 | } else { |
2011 | struct btrfs_disk_key right_key; | 2001 | struct btrfs_disk_key right_key; |
2012 | btrfs_node_key(right, &right_key, 0); | 2002 | btrfs_node_key(right, &right_key, 0); |
2013 | tree_mod_log_set_node_key(root->fs_info, parent, | 2003 | tree_mod_log_set_node_key(root->fs_info, parent, |
2014 | pslot + 1, 0); | 2004 | pslot + 1, 0); |
2015 | btrfs_set_node_key(parent, &right_key, pslot + 1); | 2005 | btrfs_set_node_key(parent, &right_key, pslot + 1); |
2016 | btrfs_mark_buffer_dirty(parent); | 2006 | btrfs_mark_buffer_dirty(parent); |
2017 | } | 2007 | } |
2018 | } | 2008 | } |
2019 | if (btrfs_header_nritems(mid) == 1) { | 2009 | if (btrfs_header_nritems(mid) == 1) { |
2020 | /* | 2010 | /* |
2021 | * we're not allowed to leave a node with one item in the | 2011 | * we're not allowed to leave a node with one item in the |
2022 | * tree during a delete. A deletion from lower in the tree | 2012 | * tree during a delete. A deletion from lower in the tree |
2023 | * could try to delete the only pointer in this node. | 2013 | * could try to delete the only pointer in this node. |
2024 | * So, pull some keys from the left. | 2014 | * So, pull some keys from the left. |
2025 | * There has to be a left pointer at this point because | 2015 | * There has to be a left pointer at this point because |
2026 | * otherwise we would have pulled some pointers from the | 2016 | * otherwise we would have pulled some pointers from the |
2027 | * right | 2017 | * right |
2028 | */ | 2018 | */ |
2029 | if (!left) { | 2019 | if (!left) { |
2030 | ret = -EROFS; | 2020 | ret = -EROFS; |
2031 | btrfs_std_error(root->fs_info, ret); | 2021 | btrfs_std_error(root->fs_info, ret); |
2032 | goto enospc; | 2022 | goto enospc; |
2033 | } | 2023 | } |
2034 | wret = balance_node_right(trans, root, mid, left); | 2024 | wret = balance_node_right(trans, root, mid, left); |
2035 | if (wret < 0) { | 2025 | if (wret < 0) { |
2036 | ret = wret; | 2026 | ret = wret; |
2037 | goto enospc; | 2027 | goto enospc; |
2038 | } | 2028 | } |
2039 | if (wret == 1) { | 2029 | if (wret == 1) { |
2040 | wret = push_node_left(trans, root, left, mid, 1); | 2030 | wret = push_node_left(trans, root, left, mid, 1); |
2041 | if (wret < 0) | 2031 | if (wret < 0) |
2042 | ret = wret; | 2032 | ret = wret; |
2043 | } | 2033 | } |
2044 | BUG_ON(wret == 1); | 2034 | BUG_ON(wret == 1); |
2045 | } | 2035 | } |
2046 | if (btrfs_header_nritems(mid) == 0) { | 2036 | if (btrfs_header_nritems(mid) == 0) { |
2047 | clean_tree_block(trans, root, mid); | 2037 | clean_tree_block(trans, root, mid); |
2048 | btrfs_tree_unlock(mid); | 2038 | btrfs_tree_unlock(mid); |
2049 | del_ptr(root, path, level + 1, pslot); | 2039 | del_ptr(root, path, level + 1, pslot); |
2050 | root_sub_used(root, mid->len); | 2040 | root_sub_used(root, mid->len); |
2051 | btrfs_free_tree_block(trans, root, mid, 0, 1); | 2041 | btrfs_free_tree_block(trans, root, mid, 0, 1); |
2052 | free_extent_buffer_stale(mid); | 2042 | free_extent_buffer_stale(mid); |
2053 | mid = NULL; | 2043 | mid = NULL; |
2054 | } else { | 2044 | } else { |
2055 | /* update the parent key to reflect our changes */ | 2045 | /* update the parent key to reflect our changes */ |
2056 | struct btrfs_disk_key mid_key; | 2046 | struct btrfs_disk_key mid_key; |
2057 | btrfs_node_key(mid, &mid_key, 0); | 2047 | btrfs_node_key(mid, &mid_key, 0); |
2058 | tree_mod_log_set_node_key(root->fs_info, parent, | 2048 | tree_mod_log_set_node_key(root->fs_info, parent, |
2059 | pslot, 0); | 2049 | pslot, 0); |
2060 | btrfs_set_node_key(parent, &mid_key, pslot); | 2050 | btrfs_set_node_key(parent, &mid_key, pslot); |
2061 | btrfs_mark_buffer_dirty(parent); | 2051 | btrfs_mark_buffer_dirty(parent); |
2062 | } | 2052 | } |
2063 | 2053 | ||
2064 | /* update the path */ | 2054 | /* update the path */ |
2065 | if (left) { | 2055 | if (left) { |
2066 | if (btrfs_header_nritems(left) > orig_slot) { | 2056 | if (btrfs_header_nritems(left) > orig_slot) { |
2067 | extent_buffer_get(left); | 2057 | extent_buffer_get(left); |
2068 | /* left was locked after cow */ | 2058 | /* left was locked after cow */ |
2069 | path->nodes[level] = left; | 2059 | path->nodes[level] = left; |
2070 | path->slots[level + 1] -= 1; | 2060 | path->slots[level + 1] -= 1; |
2071 | path->slots[level] = orig_slot; | 2061 | path->slots[level] = orig_slot; |
2072 | if (mid) { | 2062 | if (mid) { |
2073 | btrfs_tree_unlock(mid); | 2063 | btrfs_tree_unlock(mid); |
2074 | free_extent_buffer(mid); | 2064 | free_extent_buffer(mid); |
2075 | } | 2065 | } |
2076 | } else { | 2066 | } else { |
2077 | orig_slot -= btrfs_header_nritems(left); | 2067 | orig_slot -= btrfs_header_nritems(left); |
2078 | path->slots[level] = orig_slot; | 2068 | path->slots[level] = orig_slot; |
2079 | } | 2069 | } |
2080 | } | 2070 | } |
2081 | /* double check we haven't messed things up */ | 2071 | /* double check we haven't messed things up */ |
2082 | if (orig_ptr != | 2072 | if (orig_ptr != |
2083 | btrfs_node_blockptr(path->nodes[level], path->slots[level])) | 2073 | btrfs_node_blockptr(path->nodes[level], path->slots[level])) |
2084 | BUG(); | 2074 | BUG(); |
2085 | enospc: | 2075 | enospc: |
2086 | if (right) { | 2076 | if (right) { |
2087 | btrfs_tree_unlock(right); | 2077 | btrfs_tree_unlock(right); |
2088 | free_extent_buffer(right); | 2078 | free_extent_buffer(right); |
2089 | } | 2079 | } |
2090 | if (left) { | 2080 | if (left) { |
2091 | if (path->nodes[level] != left) | 2081 | if (path->nodes[level] != left) |
2092 | btrfs_tree_unlock(left); | 2082 | btrfs_tree_unlock(left); |
2093 | free_extent_buffer(left); | 2083 | free_extent_buffer(left); |
2094 | } | 2084 | } |
2095 | return ret; | 2085 | return ret; |
2096 | } | 2086 | } |
2097 | 2087 | ||
/* Node balancing for insertion. Here we only split or push nodes around
 * when they are completely full.  This is also done top down, so we
 * have to be pessimistic.
 *
 * Tries to shove pointers from the full node at path->nodes[level] into
 * its left, then right, sibling.  On success (return 0) the path is
 * fixed up to keep pointing at the original slot, which may now live in
 * the sibling; locks and extent_buffer references for the node that
 * drops out of the path are released here.  Returns 1 when nothing
 * could be pushed, or a negative errno from COW.
 */
static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path, int level)
{
	struct extent_buffer *right = NULL;
	struct extent_buffer *mid;
	struct extent_buffer *left = NULL;
	struct extent_buffer *parent = NULL;
	int ret = 0;
	int wret;
	int pslot;
	int orig_slot = path->slots[level];

	/* leaves are balanced elsewhere; this only handles node levels */
	if (level == 0)
		return 1;

	mid = path->nodes[level];
	/* the caller must have COWed mid in this transaction already */
	WARN_ON(btrfs_header_generation(mid) != trans->transid);

	if (level < BTRFS_MAX_LEVEL - 1) {
		parent = path->nodes[level + 1];
		pslot = path->slots[level + 1];
	}

	/* the root node has no siblings to push into */
	if (!parent)
		return 1;

	left = read_node_slot(root, parent, pslot - 1);

	/* first, try to make some room in the middle buffer */
	if (left) {
		u32 left_nr;

		btrfs_tree_lock(left);
		btrfs_set_lock_blocking(left);

		left_nr = btrfs_header_nritems(left);
		if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
			/* left is (nearly) full, pushing can't help */
			wret = 1;
		} else {
			ret = btrfs_cow_block(trans, root, left, parent,
					      pslot - 1, &left);
			if (ret)
				wret = 1;
			else {
				wret = push_node_left(trans, root,
						      left, mid, 0);
			}
		}
		if (wret < 0)
			ret = wret;
		if (wret == 0) {
			/* some pointers moved left; fix up the parent key */
			struct btrfs_disk_key disk_key;
			orig_slot += left_nr;
			btrfs_node_key(mid, &disk_key, 0);
			tree_mod_log_set_node_key(root->fs_info, parent,
						  pslot, 0);
			btrfs_set_node_key(parent, &disk_key, pslot);
			btrfs_mark_buffer_dirty(parent);
			if (btrfs_header_nritems(left) > orig_slot) {
				/* our slot moved into left; left keeps its
				 * lock and ref as the new path node */
				path->nodes[level] = left;
				path->slots[level + 1] -= 1;
				path->slots[level] = orig_slot;
				btrfs_tree_unlock(mid);
				free_extent_buffer(mid);
			} else {
				/* slot stayed in mid, just renumber it */
				orig_slot -=
					btrfs_header_nritems(left);
				path->slots[level] = orig_slot;
				btrfs_tree_unlock(left);
				free_extent_buffer(left);
			}
			return 0;
		}
		btrfs_tree_unlock(left);
		free_extent_buffer(left);
	}
	right = read_node_slot(root, parent, pslot + 1);

	/*
	 * then try to empty the right most buffer into the middle
	 */
	if (right) {
		u32 right_nr;

		btrfs_tree_lock(right);
		btrfs_set_lock_blocking(right);

		right_nr = btrfs_header_nritems(right);
		if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
			/* right is (nearly) full, pushing can't help */
			wret = 1;
		} else {
			ret = btrfs_cow_block(trans, root, right,
					      parent, pslot + 1,
					      &right);
			if (ret)
				wret = 1;
			else {
				wret = balance_node_right(trans, root,
							  right, mid);
			}
		}
		if (wret < 0)
			ret = wret;
		if (wret == 0) {
			/* pointers moved right; right's first key changed */
			struct btrfs_disk_key disk_key;

			btrfs_node_key(right, &disk_key, 0);
			tree_mod_log_set_node_key(root->fs_info, parent,
						  pslot + 1, 0);
			btrfs_set_node_key(parent, &disk_key, pslot + 1);
			btrfs_mark_buffer_dirty(parent);

			if (btrfs_header_nritems(mid) <= orig_slot) {
				/* our slot moved into right */
				path->nodes[level] = right;
				path->slots[level + 1] += 1;
				path->slots[level] = orig_slot -
					btrfs_header_nritems(mid);
				btrfs_tree_unlock(mid);
				free_extent_buffer(mid);
			} else {
				btrfs_tree_unlock(right);
				free_extent_buffer(right);
			}
			return 0;
		}
		btrfs_tree_unlock(right);
		free_extent_buffer(right);
	}
	return 1;
}
2233 | 2223 | ||
2234 | /* | 2224 | /* |
2235 | * readahead one full node of leaves, finding things that are close | 2225 | * readahead one full node of leaves, finding things that are close |
2236 | * to the block in 'slot', and triggering ra on them. | 2226 | * to the block in 'slot', and triggering ra on them. |
2237 | */ | 2227 | */ |
2238 | static void reada_for_search(struct btrfs_root *root, | 2228 | static void reada_for_search(struct btrfs_root *root, |
2239 | struct btrfs_path *path, | 2229 | struct btrfs_path *path, |
2240 | int level, int slot, u64 objectid) | 2230 | int level, int slot, u64 objectid) |
2241 | { | 2231 | { |
2242 | struct extent_buffer *node; | 2232 | struct extent_buffer *node; |
2243 | struct btrfs_disk_key disk_key; | 2233 | struct btrfs_disk_key disk_key; |
2244 | u32 nritems; | 2234 | u32 nritems; |
2245 | u64 search; | 2235 | u64 search; |
2246 | u64 target; | 2236 | u64 target; |
2247 | u64 nread = 0; | 2237 | u64 nread = 0; |
2248 | u64 gen; | 2238 | u64 gen; |
2249 | int direction = path->reada; | 2239 | int direction = path->reada; |
2250 | struct extent_buffer *eb; | 2240 | struct extent_buffer *eb; |
2251 | u32 nr; | 2241 | u32 nr; |
2252 | u32 blocksize; | 2242 | u32 blocksize; |
2253 | u32 nscan = 0; | 2243 | u32 nscan = 0; |
2254 | 2244 | ||
2255 | if (level != 1) | 2245 | if (level != 1) |
2256 | return; | 2246 | return; |
2257 | 2247 | ||
2258 | if (!path->nodes[level]) | 2248 | if (!path->nodes[level]) |
2259 | return; | 2249 | return; |
2260 | 2250 | ||
2261 | node = path->nodes[level]; | 2251 | node = path->nodes[level]; |
2262 | 2252 | ||
2263 | search = btrfs_node_blockptr(node, slot); | 2253 | search = btrfs_node_blockptr(node, slot); |
2264 | blocksize = root->nodesize; | 2254 | blocksize = root->nodesize; |
2265 | eb = btrfs_find_tree_block(root, search); | 2255 | eb = btrfs_find_tree_block(root, search); |
2266 | if (eb) { | 2256 | if (eb) { |
2267 | free_extent_buffer(eb); | 2257 | free_extent_buffer(eb); |
2268 | return; | 2258 | return; |
2269 | } | 2259 | } |
2270 | 2260 | ||
2271 | target = search; | 2261 | target = search; |
2272 | 2262 | ||
2273 | nritems = btrfs_header_nritems(node); | 2263 | nritems = btrfs_header_nritems(node); |
2274 | nr = slot; | 2264 | nr = slot; |
2275 | 2265 | ||
2276 | while (1) { | 2266 | while (1) { |
2277 | if (direction < 0) { | 2267 | if (direction < 0) { |
2278 | if (nr == 0) | 2268 | if (nr == 0) |
2279 | break; | 2269 | break; |
2280 | nr--; | 2270 | nr--; |
2281 | } else if (direction > 0) { | 2271 | } else if (direction > 0) { |
2282 | nr++; | 2272 | nr++; |
2283 | if (nr >= nritems) | 2273 | if (nr >= nritems) |
2284 | break; | 2274 | break; |
2285 | } | 2275 | } |
2286 | if (path->reada < 0 && objectid) { | 2276 | if (path->reada < 0 && objectid) { |
2287 | btrfs_node_key(node, &disk_key, nr); | 2277 | btrfs_node_key(node, &disk_key, nr); |
2288 | if (btrfs_disk_key_objectid(&disk_key) != objectid) | 2278 | if (btrfs_disk_key_objectid(&disk_key) != objectid) |
2289 | break; | 2279 | break; |
2290 | } | 2280 | } |
2291 | search = btrfs_node_blockptr(node, nr); | 2281 | search = btrfs_node_blockptr(node, nr); |
2292 | if ((search <= target && target - search <= 65536) || | 2282 | if ((search <= target && target - search <= 65536) || |
2293 | (search > target && search - target <= 65536)) { | 2283 | (search > target && search - target <= 65536)) { |
2294 | gen = btrfs_node_ptr_generation(node, nr); | 2284 | gen = btrfs_node_ptr_generation(node, nr); |
2295 | readahead_tree_block(root, search, blocksize); | 2285 | readahead_tree_block(root, search, blocksize); |
2296 | nread += blocksize; | 2286 | nread += blocksize; |
2297 | } | 2287 | } |
2298 | nscan++; | 2288 | nscan++; |
2299 | if ((nread > 65536 || nscan > 32)) | 2289 | if ((nread > 65536 || nscan > 32)) |
2300 | break; | 2290 | break; |
2301 | } | 2291 | } |
2302 | } | 2292 | } |
2303 | 2293 | ||
2304 | static noinline void reada_for_balance(struct btrfs_root *root, | 2294 | static noinline void reada_for_balance(struct btrfs_root *root, |
2305 | struct btrfs_path *path, int level) | 2295 | struct btrfs_path *path, int level) |
2306 | { | 2296 | { |
2307 | int slot; | 2297 | int slot; |
2308 | int nritems; | 2298 | int nritems; |
2309 | struct extent_buffer *parent; | 2299 | struct extent_buffer *parent; |
2310 | struct extent_buffer *eb; | 2300 | struct extent_buffer *eb; |
2311 | u64 gen; | 2301 | u64 gen; |
2312 | u64 block1 = 0; | 2302 | u64 block1 = 0; |
2313 | u64 block2 = 0; | 2303 | u64 block2 = 0; |
2314 | int blocksize; | 2304 | int blocksize; |
2315 | 2305 | ||
2316 | parent = path->nodes[level + 1]; | 2306 | parent = path->nodes[level + 1]; |
2317 | if (!parent) | 2307 | if (!parent) |
2318 | return; | 2308 | return; |
2319 | 2309 | ||
2320 | nritems = btrfs_header_nritems(parent); | 2310 | nritems = btrfs_header_nritems(parent); |
2321 | slot = path->slots[level + 1]; | 2311 | slot = path->slots[level + 1]; |
2322 | blocksize = root->nodesize; | 2312 | blocksize = root->nodesize; |
2323 | 2313 | ||
2324 | if (slot > 0) { | 2314 | if (slot > 0) { |
2325 | block1 = btrfs_node_blockptr(parent, slot - 1); | 2315 | block1 = btrfs_node_blockptr(parent, slot - 1); |
2326 | gen = btrfs_node_ptr_generation(parent, slot - 1); | 2316 | gen = btrfs_node_ptr_generation(parent, slot - 1); |
2327 | eb = btrfs_find_tree_block(root, block1); | 2317 | eb = btrfs_find_tree_block(root, block1); |
2328 | /* | 2318 | /* |
2329 | * if we get -eagain from btrfs_buffer_uptodate, we | 2319 | * if we get -eagain from btrfs_buffer_uptodate, we |
2330 | * don't want to return eagain here. That will loop | 2320 | * don't want to return eagain here. That will loop |
2331 | * forever | 2321 | * forever |
2332 | */ | 2322 | */ |
2333 | if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0) | 2323 | if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0) |
2334 | block1 = 0; | 2324 | block1 = 0; |
2335 | free_extent_buffer(eb); | 2325 | free_extent_buffer(eb); |
2336 | } | 2326 | } |
2337 | if (slot + 1 < nritems) { | 2327 | if (slot + 1 < nritems) { |
2338 | block2 = btrfs_node_blockptr(parent, slot + 1); | 2328 | block2 = btrfs_node_blockptr(parent, slot + 1); |
2339 | gen = btrfs_node_ptr_generation(parent, slot + 1); | 2329 | gen = btrfs_node_ptr_generation(parent, slot + 1); |
2340 | eb = btrfs_find_tree_block(root, block2); | 2330 | eb = btrfs_find_tree_block(root, block2); |
2341 | if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0) | 2331 | if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0) |
2342 | block2 = 0; | 2332 | block2 = 0; |
2343 | free_extent_buffer(eb); | 2333 | free_extent_buffer(eb); |
2344 | } | 2334 | } |
2345 | 2335 | ||
2346 | if (block1) | 2336 | if (block1) |
2347 | readahead_tree_block(root, block1, blocksize); | 2337 | readahead_tree_block(root, block1, blocksize); |
2348 | if (block2) | 2338 | if (block2) |
2349 | readahead_tree_block(root, block2, blocksize); | 2339 | readahead_tree_block(root, block2, blocksize); |
2350 | } | 2340 | } |
2351 | 2341 | ||
2352 | 2342 | ||
/*
 * when we walk down the tree, it is usually safe to unlock the higher layers
 * in the tree.  The exceptions are when our path goes through slot 0, because
 * operations on the tree might require changing key pointers higher up in the
 * tree.
 *
 * callers might also have set path->keep_locks, which tells this code to keep
 * the lock if the path points to the last slot in the block.  This is part of
 * walking through the tree, and selecting the next slot in the higher block.
 *
 * lowest_unlock sets the lowest level in the tree we're allowed to unlock.  so
 * if lowest_unlock is 1, level 0 won't be unlocked
 */
static noinline void unlock_up(struct btrfs_path *path, int level,
			       int lowest_unlock, int min_write_lock_level,
			       int *write_lock_level)
{
	int i;
	/* levels at or below skip_level stay locked */
	int skip_level = level;
	/* set once we have unlocked something; disables further skipping */
	int no_skips = 0;
	struct extent_buffer *t;

	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
		if (!path->nodes[i])
			break;
		if (!path->locks[i])
			break;
		/*
		 * slot 0: a key change at this level may ripple into the
		 * parent, so keep the parent locked too
		 */
		if (!no_skips && path->slots[i] == 0) {
			skip_level = i + 1;
			continue;
		}
		if (!no_skips && path->keep_locks) {
			u32 nritems;
			t = path->nodes[i];
			nritems = btrfs_header_nritems(t);
			/*
			 * last slot: advancing the walk will move into the
			 * parent, so its lock must be kept
			 */
			if (nritems < 1 || path->slots[i] >= nritems - 1) {
				skip_level = i + 1;
				continue;
			}
		}
		if (skip_level < i && i >= lowest_unlock)
			no_skips = 1;

		t = path->nodes[i];
		if (i >= lowest_unlock && i > skip_level && path->locks[i]) {
			btrfs_tree_unlock_rw(t, path->locks[i]);
			path->locks[i] = 0;
			/* lower the recorded write-lock watermark if we just
			 * dropped a level at or below it */
			if (write_lock_level &&
			    i > min_write_lock_level &&
			    i <= *write_lock_level) {
				*write_lock_level = i - 1;
			}
		}
	}
}
2408 | 2398 | ||
2409 | /* | 2399 | /* |
2410 | * This releases any locks held in the path starting at level and | 2400 | * This releases any locks held in the path starting at level and |
2411 | * going all the way up to the root. | 2401 | * going all the way up to the root. |
2412 | * | 2402 | * |
2413 | * btrfs_search_slot will keep the lock held on higher nodes in a few | 2403 | * btrfs_search_slot will keep the lock held on higher nodes in a few |
2414 | * corner cases, such as COW of the block at slot zero in the node. This | 2404 | * corner cases, such as COW of the block at slot zero in the node. This |
2415 | * ignores those rules, and it should only be called when there are no | 2405 | * ignores those rules, and it should only be called when there are no |
2416 | * more updates to be done higher up in the tree. | 2406 | * more updates to be done higher up in the tree. |
2417 | */ | 2407 | */ |
2418 | noinline void btrfs_unlock_up_safe(struct btrfs_path *path, int level) | 2408 | noinline void btrfs_unlock_up_safe(struct btrfs_path *path, int level) |
2419 | { | 2409 | { |
2420 | int i; | 2410 | int i; |
2421 | 2411 | ||
2422 | if (path->keep_locks) | 2412 | if (path->keep_locks) |
2423 | return; | 2413 | return; |
2424 | 2414 | ||
2425 | for (i = level; i < BTRFS_MAX_LEVEL; i++) { | 2415 | for (i = level; i < BTRFS_MAX_LEVEL; i++) { |
2426 | if (!path->nodes[i]) | 2416 | if (!path->nodes[i]) |
2427 | continue; | 2417 | continue; |
2428 | if (!path->locks[i]) | 2418 | if (!path->locks[i]) |
2429 | continue; | 2419 | continue; |
2430 | btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]); | 2420 | btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]); |
2431 | path->locks[i] = 0; | 2421 | path->locks[i] = 0; |
2432 | } | 2422 | } |
2433 | } | 2423 | } |
2434 | 2424 | ||
/*
 * helper function for btrfs_search_slot.  The goal is to find a block
 * in cache without setting the path to blocking.  If we find the block
 * we return zero and the path is unchanged.
 *
 * If we can't find the block, we set the path blocking and do some
 * reada.  -EAGAIN is returned and the search must be repeated.
 *
 * On success *eb_ret is replaced with a referenced, uptodate buffer.
 * On -EIO or -EAGAIN the path has been released and the caller must
 * restart (or give up on -EIO).
 *
 * NOTE(review): time_seq is accepted but not consulted in this version
 * of the function — confirm against callers before relying on it.
 */
static int
read_block_for_search(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct btrfs_path *p,
		       struct extent_buffer **eb_ret, int level, int slot,
		       struct btrfs_key *key, u64 time_seq)
{
	u64 blocknr;
	u64 gen;
	struct extent_buffer *b = *eb_ret;
	struct extent_buffer *tmp;
	int ret;

	/* location and expected generation of the child at @slot */
	blocknr = btrfs_node_blockptr(b, slot);
	gen = btrfs_node_ptr_generation(b, slot);

	tmp = btrfs_find_tree_block(root, blocknr);
	if (tmp) {
		/* first we do an atomic uptodate check */
		if (btrfs_buffer_uptodate(tmp, gen, 1) > 0) {
			*eb_ret = tmp;
			return 0;
		}

		/* the pages were up to date, but we failed
		 * the generation number check.  Do a full
		 * read for the generation number that is correct.
		 * We must do this without dropping locks so
		 * we can trust our generation number
		 */
		btrfs_set_path_blocking(p);

		/* now we're allowed to do a blocking uptodate check */
		ret = btrfs_read_buffer(tmp, gen);
		if (!ret) {
			*eb_ret = tmp;
			return 0;
		}
		/* read failed: drop our buffer ref and the whole path */
		free_extent_buffer(tmp);
		btrfs_release_path(p);
		return -EIO;
	}

	/*
	 * reduce lock contention at high levels
	 * of the btree by dropping locks before
	 * we read.  Don't release the lock on the current
	 * level because we need to walk this node to figure
	 * out which blocks to read.
	 */
	btrfs_unlock_up_safe(p, level + 1);
	btrfs_set_path_blocking(p);

	/* tmp is NULL here; free_extent_buffer(NULL) is a no-op */
	free_extent_buffer(tmp);
	if (p->reada)
		reada_for_search(root, p, level, slot, key->objectid);

	btrfs_release_path(p);

	/* default: ask the caller to redo the search after the read */
	ret = -EAGAIN;
	tmp = read_tree_block(root, blocknr, 0);
	if (tmp) {
		/*
		 * If the read above didn't mark this buffer up to date,
		 * it will never end up being up to date.  Set ret to EIO now
		 * and give up so that our caller doesn't loop forever
		 * on our EAGAINs.
		 */
		if (!btrfs_buffer_uptodate(tmp, 0, 0))
			ret = -EIO;
		free_extent_buffer(tmp);
	}
	return ret;
}
2516 | 2506 | ||
/*
 * helper function for btrfs_search_slot.  This does all of the checks
 * for node-level blocks and does any balancing required based on
 * the ins_len.
 *
 * If no extra work was required, zero is returned.  If we had to
 * drop the path, -EAGAIN is returned and btrfs_search_slot must
 * start over
 *
 * Splitting happens when inserting (or search_for_split) into a nearly
 * full node; merging happens when deleting from a node less than half
 * full.  Both require write locks up to level + 1, so if the caller
 * does not yet hold them we bump *write_lock_level and restart.
 */
static int
setup_nodes_for_search(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct btrfs_path *p,
		       struct extent_buffer *b, int level, int ins_len,
		       int *write_lock_level)
{
	int ret;
	if ((p->search_for_split || ins_len > 0) && btrfs_header_nritems(b) >=
	    BTRFS_NODEPTRS_PER_BLOCK(root) - 3) {
		int sret;

		/* splitting touches the parent: need write locks one up */
		if (*write_lock_level < level + 1) {
			*write_lock_level = level + 1;
			btrfs_release_path(p);
			goto again;
		}

		btrfs_set_path_blocking(p);
		reada_for_balance(root, p, level);
		sret = split_node(trans, root, p, level);
		btrfs_clear_path_blocking(p, NULL, 0);

		BUG_ON(sret > 0);
		if (sret) {
			ret = sret;
			goto done;
		}
		/*
		 * NOTE(review): this assignment only updates the local
		 * parameter; the caller re-reads p->nodes[level] itself.
		 */
		b = p->nodes[level];
	} else if (ins_len < 0 && btrfs_header_nritems(b) <
		   BTRFS_NODEPTRS_PER_BLOCK(root) / 2) {
		int sret;

		/* merging also rewrites the parent: same lock requirement */
		if (*write_lock_level < level + 1) {
			*write_lock_level = level + 1;
			btrfs_release_path(p);
			goto again;
		}

		btrfs_set_path_blocking(p);
		reada_for_balance(root, p, level);
		sret = balance_level(trans, root, p, level);
		btrfs_clear_path_blocking(p, NULL, 0);

		if (sret) {
			ret = sret;
			goto done;
		}
		/* balancing may have collapsed this level entirely */
		b = p->nodes[level];
		if (!b) {
			btrfs_release_path(p);
			goto again;
		}
		BUG_ON(btrfs_header_nritems(b) == 1);
	}
	return 0;

again:
	ret = -EAGAIN;
done:
	return ret;
}
2587 | 2577 | ||
/*
 * Debug-only check used by key_search(): when a previous level already
 * compared equal, the key at slot 0 of this block must match @key
 * exactly.  Compiles to nothing unless CONFIG_BTRFS_ASSERT is set.
 */
static void key_search_validate(struct extent_buffer *b,
				struct btrfs_key *key,
				int level)
{
#ifdef CONFIG_BTRFS_ASSERT
	struct btrfs_disk_key disk_key;

	btrfs_cpu_key_to_disk(&disk_key, key);

	/* leaves and nodes store the first key at different offsets */
	if (level == 0)
		ASSERT(!memcmp_extent_buffer(b, &disk_key,
		    offsetof(struct btrfs_leaf, items[0].key),
		    sizeof(disk_key)));
	else
		ASSERT(!memcmp_extent_buffer(b, &disk_key,
		    offsetof(struct btrfs_node, ptrs[0].key),
		    sizeof(disk_key)));
#endif
}
2607 | 2597 | ||
/*
 * Find the slot for @key in block @b.  *prev_cmp caches the result of
 * the comparison one level up: once a level compared equal, every lower
 * level is an exact match at slot 0, so the binary search is skipped.
 */
static int key_search(struct extent_buffer *b, struct btrfs_key *key,
		      int level, int *prev_cmp, int *slot)
{
	if (*prev_cmp == 0) {
		/* exact match propagates straight down the left spine */
		key_search_validate(b, key, level);
		*slot = 0;
		return 0;
	}

	*prev_cmp = bin_search(b, key, level, slot);
	return *prev_cmp;
}
2621 | 2611 | ||
2622 | int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *found_path, | 2612 | int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *found_path, |
2623 | u64 iobjectid, u64 ioff, u8 key_type, | 2613 | u64 iobjectid, u64 ioff, u8 key_type, |
2624 | struct btrfs_key *found_key) | 2614 | struct btrfs_key *found_key) |
2625 | { | 2615 | { |
2626 | int ret; | 2616 | int ret; |
2627 | struct btrfs_key key; | 2617 | struct btrfs_key key; |
2628 | struct extent_buffer *eb; | 2618 | struct extent_buffer *eb; |
2629 | struct btrfs_path *path; | 2619 | struct btrfs_path *path; |
2630 | 2620 | ||
2631 | key.type = key_type; | 2621 | key.type = key_type; |
2632 | key.objectid = iobjectid; | 2622 | key.objectid = iobjectid; |
2633 | key.offset = ioff; | 2623 | key.offset = ioff; |
2634 | 2624 | ||
2635 | if (found_path == NULL) { | 2625 | if (found_path == NULL) { |
2636 | path = btrfs_alloc_path(); | 2626 | path = btrfs_alloc_path(); |
2637 | if (!path) | 2627 | if (!path) |
2638 | return -ENOMEM; | 2628 | return -ENOMEM; |
2639 | } else | 2629 | } else |
2640 | path = found_path; | 2630 | path = found_path; |
2641 | 2631 | ||
2642 | ret = btrfs_search_slot(NULL, fs_root, &key, path, 0, 0); | 2632 | ret = btrfs_search_slot(NULL, fs_root, &key, path, 0, 0); |
2643 | if ((ret < 0) || (found_key == NULL)) { | 2633 | if ((ret < 0) || (found_key == NULL)) { |
2644 | if (path != found_path) | 2634 | if (path != found_path) |
2645 | btrfs_free_path(path); | 2635 | btrfs_free_path(path); |
2646 | return ret; | 2636 | return ret; |
2647 | } | 2637 | } |
2648 | 2638 | ||
2649 | eb = path->nodes[0]; | 2639 | eb = path->nodes[0]; |
2650 | if (ret && path->slots[0] >= btrfs_header_nritems(eb)) { | 2640 | if (ret && path->slots[0] >= btrfs_header_nritems(eb)) { |
2651 | ret = btrfs_next_leaf(fs_root, path); | 2641 | ret = btrfs_next_leaf(fs_root, path); |
2652 | if (ret) | 2642 | if (ret) |
2653 | return ret; | 2643 | return ret; |
2654 | eb = path->nodes[0]; | 2644 | eb = path->nodes[0]; |
2655 | } | 2645 | } |
2656 | 2646 | ||
2657 | btrfs_item_key_to_cpu(eb, found_key, path->slots[0]); | 2647 | btrfs_item_key_to_cpu(eb, found_key, path->slots[0]); |
2658 | if (found_key->type != key.type || | 2648 | if (found_key->type != key.type || |
2659 | found_key->objectid != key.objectid) | 2649 | found_key->objectid != key.objectid) |
2660 | return 1; | 2650 | return 1; |
2661 | 2651 | ||
2662 | return 0; | 2652 | return 0; |
2663 | } | 2653 | } |
2664 | 2654 | ||
/*
 * look for key in the tree.  path is filled in with nodes along the way
 * if key is found, we return zero and you can find the item in the leaf
 * level of the path (level 0)
 *
 * If the key isn't found, the path points to the slot where it should
 * be inserted, and 1 is returned.  If there are other errors during the
 * search a negative error number is returned.
 *
 * if ins_len > 0, nodes and leaves will be split as we walk down the
 * tree.  if ins_len < 0, nodes will be merged as we walk down the tree (if
 * possible)
 *
 * Locking strategy: we descend taking read locks wherever we can and
 * only hold write locks at or below write_lock_level, restarting from
 * the top ("goto again") whenever we discover we need a write lock on
 * a level we only read-locked.
 */
int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_key *key, struct btrfs_path *p, int
		      ins_len, int cow)
{
	struct extent_buffer *b;
	int slot;
	int ret;
	int err;
	int level;
	int lowest_unlock = 1;
	int root_lock;
	/* everything at write_lock_level or lower must be write locked */
	int write_lock_level = 0;
	u8 lowest_level = 0;
	int min_write_lock_level;
	int prev_cmp;

	lowest_level = p->lowest_level;
	WARN_ON(lowest_level && ins_len > 0);
	WARN_ON(p->nodes[0] != NULL);
	BUG_ON(!cow && ins_len);

	if (ins_len < 0) {
		lowest_unlock = 2;

		/* when we are removing items, we might have to go up to level
		 * two as we update tree pointers  Make sure we keep write
		 * for those levels as well
		 */
		write_lock_level = 2;
	} else if (ins_len > 0) {
		/*
		 * for inserting items, make sure we have a write lock on
		 * level 1 so we can update keys
		 */
		write_lock_level = 1;
	}

	/* read-only search: never trade up to write locks */
	if (!cow)
		write_lock_level = -1;

	if (cow && (p->keep_locks || p->lowest_level))
		write_lock_level = BTRFS_MAX_LEVEL;

	min_write_lock_level = write_lock_level;

again:
	prev_cmp = -1;
	/*
	 * we try very hard to do read locks on the root
	 */
	root_lock = BTRFS_READ_LOCK;
	level = 0;
	if (p->search_commit_root) {
		/*
		 * the commit roots are read only
		 * so we always do read locks
		 */
		if (p->need_commit_sem)
			down_read(&root->fs_info->commit_root_sem);
		b = root->commit_root;
		extent_buffer_get(b);
		level = btrfs_header_level(b);
		if (p->need_commit_sem)
			up_read(&root->fs_info->commit_root_sem);
		if (!p->skip_locking)
			btrfs_tree_read_lock(b);
	} else {
		if (p->skip_locking) {
			b = btrfs_root_node(root);
			level = btrfs_header_level(b);
		} else {
			/* we don't know the level of the root node
			 * until we actually have it read locked
			 */
			b = btrfs_read_lock_root_node(root);
			level = btrfs_header_level(b);
			if (level <= write_lock_level) {
				/* whoops, must trade for write lock */
				btrfs_tree_read_unlock(b);
				free_extent_buffer(b);
				b = btrfs_lock_root_node(root);
				root_lock = BTRFS_WRITE_LOCK;

				/* the level might have changed, check again */
				level = btrfs_header_level(b);
			}
		}
	}
	p->nodes[level] = b;
	if (!p->skip_locking)
		p->locks[level] = root_lock;

	while (b) {
		level = btrfs_header_level(b);

		/*
		 * setup the path here so we can release it under lock
		 * contention with the cow code
		 */
		if (cow) {
			/*
			 * if we don't really need to cow this block
			 * then we don't want to set the path blocking,
			 * so we test it here
			 */
			if (!should_cow_block(trans, root, b))
				goto cow_done;

			/*
			 * must have write locks on this node and the
			 * parent
			 */
			if (level > write_lock_level ||
			    (level + 1 > write_lock_level &&
			    level + 1 < BTRFS_MAX_LEVEL &&
			    p->nodes[level + 1])) {
				write_lock_level = level + 1;
				btrfs_release_path(p);
				goto again;
			}

			btrfs_set_path_blocking(p);
			err = btrfs_cow_block(trans, root, b,
					      p->nodes[level + 1],
					      p->slots[level + 1], &b);
			if (err) {
				ret = err;
				goto done;
			}
		}
cow_done:
		p->nodes[level] = b;
		btrfs_clear_path_blocking(p, NULL, 0);

		/*
		 * we have a lock on b and as long as we aren't changing
		 * the tree, there is no way to for the items in b to change.
		 * It is safe to drop the lock on our parent before we
		 * go through the expensive btree search on b.
		 *
		 * If we're inserting or deleting (ins_len != 0), then we might
		 * be changing slot zero, which may require changing the parent.
		 * So, we can't drop the lock until after we know which slot
		 * we're operating on.
		 */
		if (!ins_len && !p->keep_locks) {
			int u = level + 1;

			if (u < BTRFS_MAX_LEVEL && p->locks[u]) {
				btrfs_tree_unlock_rw(p->nodes[u], p->locks[u]);
				p->locks[u] = 0;
			}
		}

		ret = key_search(b, key, level, &prev_cmp, &slot);

		if (level != 0) {
			int dec = 0;
			/* not an exact match: step back to the covering key */
			if (ret && slot > 0) {
				dec = 1;
				slot -= 1;
			}
			p->slots[level] = slot;
			err = setup_nodes_for_search(trans, root, p, b, level,
						     ins_len, &write_lock_level);
			if (err == -EAGAIN)
				goto again;
			if (err) {
				ret = err;
				goto done;
			}
			b = p->nodes[level];
			slot = p->slots[level];

			/*
			 * slot 0 is special, if we change the key
			 * we have to update the parent pointer
			 * which means we must have a write lock
			 * on the parent
			 */
			if (slot == 0 && ins_len &&
			    write_lock_level < level + 1) {
				write_lock_level = level + 1;
				btrfs_release_path(p);
				goto again;
			}

			unlock_up(p, level, lowest_unlock,
				  min_write_lock_level, &write_lock_level);

			if (level == lowest_level) {
				if (dec)
					p->slots[level]++;
				goto done;
			}

			err = read_block_for_search(trans, root, p,
						    &b, level, slot, key, 0);
			if (err == -EAGAIN)
				goto again;
			if (err) {
				ret = err;
				goto done;
			}

			if (!p->skip_locking) {
				level = btrfs_header_level(b);
				if (level <= write_lock_level) {
					/* try without sleeping first, only
					 * set the path blocking on failure */
					err = btrfs_try_tree_write_lock(b);
					if (!err) {
						btrfs_set_path_blocking(p);
						btrfs_tree_lock(b);
						btrfs_clear_path_blocking(p, b,
								  BTRFS_WRITE_LOCK);
					}
					p->locks[level] = BTRFS_WRITE_LOCK;
				} else {
					/* take the read lock without sleeping
					 * when possible to avoid blocking
					 * conversions on the hot path */
					err = btrfs_tree_read_lock_atomic(b);
					if (!err) {
						btrfs_set_path_blocking(p);
						btrfs_tree_read_lock(b);
						btrfs_clear_path_blocking(p, b,
								  BTRFS_READ_LOCK);
					}
					p->locks[level] = BTRFS_READ_LOCK;
				}
				p->nodes[level] = b;
			}
		} else {
			/* level 0: we are at the leaf holding (or missing) the key */
			p->slots[level] = slot;
			if (ins_len > 0 &&
			    btrfs_leaf_free_space(root, b) < ins_len) {
				if (write_lock_level < 1) {
					write_lock_level = 1;
					btrfs_release_path(p);
					goto again;
				}

				btrfs_set_path_blocking(p);
				err = split_leaf(trans, root, key,
						 p, ins_len, ret == 0);
				btrfs_clear_path_blocking(p, NULL, 0);

				BUG_ON(err > 0);
				if (err) {
					ret = err;
					goto done;
				}
			}
			if (!p->search_for_split)
				unlock_up(p, level, lowest_unlock,
					  min_write_lock_level, &write_lock_level);
			goto done;
		}
	}
	ret = 1;
done:
	/*
	 * we don't really know what they plan on doing with the path
	 * from here on, so for now just mark it as blocking
	 */
	if (!p->leave_spinning)
		btrfs_set_path_blocking(p);
	if (ret < 0)
		btrfs_release_path(p);
	return ret;
}
2946 | 2936 | ||
2947 | /* | 2937 | /* |
2948 | * Like btrfs_search_slot, this looks for a key in the given tree. It uses the | 2938 | * Like btrfs_search_slot, this looks for a key in the given tree. It uses the |
2949 | * current state of the tree together with the operations recorded in the tree | 2939 | * current state of the tree together with the operations recorded in the tree |
2950 | * modification log to search for the key in a previous version of this tree, as | 2940 | * modification log to search for the key in a previous version of this tree, as |
2951 | * denoted by the time_seq parameter. | 2941 | * denoted by the time_seq parameter. |
2952 | * | 2942 | * |
2953 | * Naturally, there is no support for insert, delete or cow operations. | 2943 | * Naturally, there is no support for insert, delete or cow operations. |
2954 | * | 2944 | * |
2955 | * The resulting path and return value will be set up as if we called | 2945 | * The resulting path and return value will be set up as if we called |
2956 | * btrfs_search_slot at that point in time with ins_len and cow both set to 0. | 2946 | * btrfs_search_slot at that point in time with ins_len and cow both set to 0. |
2957 | */ | 2947 | */ |
2958 | int btrfs_search_old_slot(struct btrfs_root *root, struct btrfs_key *key, | 2948 | int btrfs_search_old_slot(struct btrfs_root *root, struct btrfs_key *key, |
2959 | struct btrfs_path *p, u64 time_seq) | 2949 | struct btrfs_path *p, u64 time_seq) |
2960 | { | 2950 | { |
2961 | struct extent_buffer *b; | 2951 | struct extent_buffer *b; |
2962 | int slot; | 2952 | int slot; |
2963 | int ret; | 2953 | int ret; |
2964 | int err; | 2954 | int err; |
2965 | int level; | 2955 | int level; |
2966 | int lowest_unlock = 1; | 2956 | int lowest_unlock = 1; |
2967 | u8 lowest_level = 0; | 2957 | u8 lowest_level = 0; |
2968 | int prev_cmp = -1; | 2958 | int prev_cmp = -1; |
2969 | 2959 | ||
2970 | lowest_level = p->lowest_level; | 2960 | lowest_level = p->lowest_level; |
2971 | WARN_ON(p->nodes[0] != NULL); | 2961 | WARN_ON(p->nodes[0] != NULL); |
2972 | 2962 | ||
2973 | if (p->search_commit_root) { | 2963 | if (p->search_commit_root) { |
2974 | BUG_ON(time_seq); | 2964 | BUG_ON(time_seq); |
2975 | return btrfs_search_slot(NULL, root, key, p, 0, 0); | 2965 | return btrfs_search_slot(NULL, root, key, p, 0, 0); |
2976 | } | 2966 | } |
2977 | 2967 | ||
2978 | again: | 2968 | again: |
2979 | b = get_old_root(root, time_seq); | 2969 | b = get_old_root(root, time_seq); |
2980 | level = btrfs_header_level(b); | 2970 | level = btrfs_header_level(b); |
2981 | p->locks[level] = BTRFS_READ_LOCK; | 2971 | p->locks[level] = BTRFS_READ_LOCK; |
2982 | 2972 | ||
2983 | while (b) { | 2973 | while (b) { |
2984 | level = btrfs_header_level(b); | 2974 | level = btrfs_header_level(b); |
2985 | p->nodes[level] = b; | 2975 | p->nodes[level] = b; |
2986 | btrfs_clear_path_blocking(p, NULL, 0); | 2976 | btrfs_clear_path_blocking(p, NULL, 0); |
2987 | 2977 | ||
2988 | /* | 2978 | /* |
2989 | * we have a lock on b and as long as we aren't changing | 2979 | * we have a lock on b and as long as we aren't changing |
2990 | * the tree, there is no way to for the items in b to change. | 2980 | * the tree, there is no way to for the items in b to change. |
2991 | * It is safe to drop the lock on our parent before we | 2981 | * It is safe to drop the lock on our parent before we |
2992 | * go through the expensive btree search on b. | 2982 | * go through the expensive btree search on b. |
2993 | */ | 2983 | */ |
2994 | btrfs_unlock_up_safe(p, level + 1); | 2984 | btrfs_unlock_up_safe(p, level + 1); |
2995 | 2985 | ||
2996 | /* | 2986 | /* |
2997 | * Since we can unwind eb's we want to do a real search every | 2987 | * Since we can unwind eb's we want to do a real search every |
2998 | * time. | 2988 | * time. |
2999 | */ | 2989 | */ |
3000 | prev_cmp = -1; | 2990 | prev_cmp = -1; |
3001 | ret = key_search(b, key, level, &prev_cmp, &slot); | 2991 | ret = key_search(b, key, level, &prev_cmp, &slot); |
3002 | 2992 | ||
3003 | if (level != 0) { | 2993 | if (level != 0) { |
3004 | int dec = 0; | 2994 | int dec = 0; |
3005 | if (ret && slot > 0) { | 2995 | if (ret && slot > 0) { |
3006 | dec = 1; | 2996 | dec = 1; |
3007 | slot -= 1; | 2997 | slot -= 1; |
3008 | } | 2998 | } |
3009 | p->slots[level] = slot; | 2999 | p->slots[level] = slot; |
3010 | unlock_up(p, level, lowest_unlock, 0, NULL); | 3000 | unlock_up(p, level, lowest_unlock, 0, NULL); |
3011 | 3001 | ||
3012 | if (level == lowest_level) { | 3002 | if (level == lowest_level) { |
3013 | if (dec) | 3003 | if (dec) |
3014 | p->slots[level]++; | 3004 | p->slots[level]++; |
3015 | goto done; | 3005 | goto done; |
3016 | } | 3006 | } |
3017 | 3007 | ||
3018 | err = read_block_for_search(NULL, root, p, &b, level, | 3008 | err = read_block_for_search(NULL, root, p, &b, level, |
3019 | slot, key, time_seq); | 3009 | slot, key, time_seq); |
3020 | if (err == -EAGAIN) | 3010 | if (err == -EAGAIN) |
3021 | goto again; | 3011 | goto again; |
3022 | if (err) { | 3012 | if (err) { |
3023 | ret = err; | 3013 | ret = err; |
3024 | goto done; | 3014 | goto done; |
3025 | } | 3015 | } |
3026 | 3016 | ||
3027 | level = btrfs_header_level(b); | 3017 | level = btrfs_header_level(b); |
3028 | err = btrfs_try_tree_read_lock(b); | 3018 | err = btrfs_tree_read_lock_atomic(b); |
3029 | if (!err) { | 3019 | if (!err) { |
3030 | btrfs_set_path_blocking(p); | 3020 | btrfs_set_path_blocking(p); |
3031 | btrfs_tree_read_lock(b); | 3021 | btrfs_tree_read_lock(b); |
3032 | btrfs_clear_path_blocking(p, b, | 3022 | btrfs_clear_path_blocking(p, b, |
3033 | BTRFS_READ_LOCK); | 3023 | BTRFS_READ_LOCK); |
3034 | } | 3024 | } |
3035 | b = tree_mod_log_rewind(root->fs_info, p, b, time_seq); | 3025 | b = tree_mod_log_rewind(root->fs_info, p, b, time_seq); |
3036 | if (!b) { | 3026 | if (!b) { |
3037 | ret = -ENOMEM; | 3027 | ret = -ENOMEM; |
3038 | goto done; | 3028 | goto done; |
3039 | } | 3029 | } |
3040 | p->locks[level] = BTRFS_READ_LOCK; | 3030 | p->locks[level] = BTRFS_READ_LOCK; |
3041 | p->nodes[level] = b; | 3031 | p->nodes[level] = b; |
3042 | } else { | 3032 | } else { |
3043 | p->slots[level] = slot; | 3033 | p->slots[level] = slot; |
3044 | unlock_up(p, level, lowest_unlock, 0, NULL); | 3034 | unlock_up(p, level, lowest_unlock, 0, NULL); |
3045 | goto done; | 3035 | goto done; |
3046 | } | 3036 | } |
3047 | } | 3037 | } |
3048 | ret = 1; | 3038 | ret = 1; |
3049 | done: | 3039 | done: |
3050 | if (!p->leave_spinning) | 3040 | if (!p->leave_spinning) |
3051 | btrfs_set_path_blocking(p); | 3041 | btrfs_set_path_blocking(p); |
3052 | if (ret < 0) | 3042 | if (ret < 0) |
3053 | btrfs_release_path(p); | 3043 | btrfs_release_path(p); |
3054 | 3044 | ||
3055 | return ret; | 3045 | return ret; |
3056 | } | 3046 | } |
3057 | 3047 | ||
3058 | /* | 3048 | /* |
3059 | * helper to use instead of search slot if no exact match is needed but | 3049 | * helper to use instead of search slot if no exact match is needed but |
3060 | * instead the next or previous item should be returned. | 3050 | * instead the next or previous item should be returned. |
3061 | * When find_higher is true, the next higher item is returned, the next lower | 3051 | * When find_higher is true, the next higher item is returned, the next lower |
3062 | * otherwise. | 3052 | * otherwise. |
3063 | * When return_any and find_higher are both true, and no higher item is found, | 3053 | * When return_any and find_higher are both true, and no higher item is found, |
3064 | * return the next lower instead. | 3054 | * return the next lower instead. |
3065 | * When return_any is true and find_higher is false, and no lower item is found, | 3055 | * When return_any is true and find_higher is false, and no lower item is found, |
3066 | * return the next higher instead. | 3056 | * return the next higher instead. |
3067 | * It returns 0 if any item is found, 1 if none is found (tree empty), and | 3057 | * It returns 0 if any item is found, 1 if none is found (tree empty), and |
3068 | * < 0 on error | 3058 | * < 0 on error |
3069 | */ | 3059 | */ |
3070 | int btrfs_search_slot_for_read(struct btrfs_root *root, | 3060 | int btrfs_search_slot_for_read(struct btrfs_root *root, |
3071 | struct btrfs_key *key, struct btrfs_path *p, | 3061 | struct btrfs_key *key, struct btrfs_path *p, |
3072 | int find_higher, int return_any) | 3062 | int find_higher, int return_any) |
3073 | { | 3063 | { |
3074 | int ret; | 3064 | int ret; |
3075 | struct extent_buffer *leaf; | 3065 | struct extent_buffer *leaf; |
3076 | 3066 | ||
3077 | again: | 3067 | again: |
3078 | ret = btrfs_search_slot(NULL, root, key, p, 0, 0); | 3068 | ret = btrfs_search_slot(NULL, root, key, p, 0, 0); |
3079 | if (ret <= 0) | 3069 | if (ret <= 0) |
3080 | return ret; | 3070 | return ret; |
3081 | /* | 3071 | /* |
3082 | * a return value of 1 means the path is at the position where the | 3072 | * a return value of 1 means the path is at the position where the |
3083 | * item should be inserted. Normally this is the next bigger item, | 3073 | * item should be inserted. Normally this is the next bigger item, |
3084 | * but in case the previous item is the last in a leaf, path points | 3074 | * but in case the previous item is the last in a leaf, path points |
3085 | * to the first free slot in the previous leaf, i.e. at an invalid | 3075 | * to the first free slot in the previous leaf, i.e. at an invalid |
3086 | * item. | 3076 | * item. |
3087 | */ | 3077 | */ |
3088 | leaf = p->nodes[0]; | 3078 | leaf = p->nodes[0]; |
3089 | 3079 | ||
3090 | if (find_higher) { | 3080 | if (find_higher) { |
3091 | if (p->slots[0] >= btrfs_header_nritems(leaf)) { | 3081 | if (p->slots[0] >= btrfs_header_nritems(leaf)) { |
3092 | ret = btrfs_next_leaf(root, p); | 3082 | ret = btrfs_next_leaf(root, p); |
3093 | if (ret <= 0) | 3083 | if (ret <= 0) |
3094 | return ret; | 3084 | return ret; |
3095 | if (!return_any) | 3085 | if (!return_any) |
3096 | return 1; | 3086 | return 1; |
3097 | /* | 3087 | /* |
3098 | * no higher item found, return the next | 3088 | * no higher item found, return the next |
3099 | * lower instead | 3089 | * lower instead |
3100 | */ | 3090 | */ |
3101 | return_any = 0; | 3091 | return_any = 0; |
3102 | find_higher = 0; | 3092 | find_higher = 0; |
3103 | btrfs_release_path(p); | 3093 | btrfs_release_path(p); |
3104 | goto again; | 3094 | goto again; |
3105 | } | 3095 | } |
3106 | } else { | 3096 | } else { |
3107 | if (p->slots[0] == 0) { | 3097 | if (p->slots[0] == 0) { |
3108 | ret = btrfs_prev_leaf(root, p); | 3098 | ret = btrfs_prev_leaf(root, p); |
3109 | if (ret < 0) | 3099 | if (ret < 0) |
3110 | return ret; | 3100 | return ret; |
3111 | if (!ret) { | 3101 | if (!ret) { |
3112 | leaf = p->nodes[0]; | 3102 | leaf = p->nodes[0]; |
3113 | if (p->slots[0] == btrfs_header_nritems(leaf)) | 3103 | if (p->slots[0] == btrfs_header_nritems(leaf)) |
3114 | p->slots[0]--; | 3104 | p->slots[0]--; |
3115 | return 0; | 3105 | return 0; |
3116 | } | 3106 | } |
3117 | if (!return_any) | 3107 | if (!return_any) |
3118 | return 1; | 3108 | return 1; |
3119 | /* | 3109 | /* |
3120 | * no lower item found, return the next | 3110 | * no lower item found, return the next |
3121 | * higher instead | 3111 | * higher instead |
3122 | */ | 3112 | */ |
3123 | return_any = 0; | 3113 | return_any = 0; |
3124 | find_higher = 1; | 3114 | find_higher = 1; |
3125 | btrfs_release_path(p); | 3115 | btrfs_release_path(p); |
3126 | goto again; | 3116 | goto again; |
3127 | } else { | 3117 | } else { |
3128 | --p->slots[0]; | 3118 | --p->slots[0]; |
3129 | } | 3119 | } |
3130 | } | 3120 | } |
3131 | return 0; | 3121 | return 0; |
3132 | } | 3122 | } |
3133 | 3123 | ||
3134 | /* | 3124 | /* |
3135 | * adjust the pointers going up the tree, starting at level | 3125 | * adjust the pointers going up the tree, starting at level |
3136 | * making sure the right key of each node is points to 'key'. | 3126 | * making sure the right key of each node is points to 'key'. |
3137 | * This is used after shifting pointers to the left, so it stops | 3127 | * This is used after shifting pointers to the left, so it stops |
3138 | * fixing up pointers when a given leaf/node is not in slot 0 of the | 3128 | * fixing up pointers when a given leaf/node is not in slot 0 of the |
3139 | * higher levels | 3129 | * higher levels |
3140 | * | 3130 | * |
3141 | */ | 3131 | */ |
3142 | static void fixup_low_keys(struct btrfs_root *root, struct btrfs_path *path, | 3132 | static void fixup_low_keys(struct btrfs_root *root, struct btrfs_path *path, |
3143 | struct btrfs_disk_key *key, int level) | 3133 | struct btrfs_disk_key *key, int level) |
3144 | { | 3134 | { |
3145 | int i; | 3135 | int i; |
3146 | struct extent_buffer *t; | 3136 | struct extent_buffer *t; |
3147 | 3137 | ||
3148 | for (i = level; i < BTRFS_MAX_LEVEL; i++) { | 3138 | for (i = level; i < BTRFS_MAX_LEVEL; i++) { |
3149 | int tslot = path->slots[i]; | 3139 | int tslot = path->slots[i]; |
3150 | if (!path->nodes[i]) | 3140 | if (!path->nodes[i]) |
3151 | break; | 3141 | break; |
3152 | t = path->nodes[i]; | 3142 | t = path->nodes[i]; |
3153 | tree_mod_log_set_node_key(root->fs_info, t, tslot, 1); | 3143 | tree_mod_log_set_node_key(root->fs_info, t, tslot, 1); |
3154 | btrfs_set_node_key(t, key, tslot); | 3144 | btrfs_set_node_key(t, key, tslot); |
3155 | btrfs_mark_buffer_dirty(path->nodes[i]); | 3145 | btrfs_mark_buffer_dirty(path->nodes[i]); |
3156 | if (tslot != 0) | 3146 | if (tslot != 0) |
3157 | break; | 3147 | break; |
3158 | } | 3148 | } |
3159 | } | 3149 | } |
3160 | 3150 | ||
3161 | /* | 3151 | /* |
3162 | * update item key. | 3152 | * update item key. |
3163 | * | 3153 | * |
3164 | * This function isn't completely safe. It's the caller's responsibility | 3154 | * This function isn't completely safe. It's the caller's responsibility |
3165 | * that the new key won't break the order | 3155 | * that the new key won't break the order |
3166 | */ | 3156 | */ |
3167 | void btrfs_set_item_key_safe(struct btrfs_root *root, struct btrfs_path *path, | 3157 | void btrfs_set_item_key_safe(struct btrfs_root *root, struct btrfs_path *path, |
3168 | struct btrfs_key *new_key) | 3158 | struct btrfs_key *new_key) |
3169 | { | 3159 | { |
3170 | struct btrfs_disk_key disk_key; | 3160 | struct btrfs_disk_key disk_key; |
3171 | struct extent_buffer *eb; | 3161 | struct extent_buffer *eb; |
3172 | int slot; | 3162 | int slot; |
3173 | 3163 | ||
3174 | eb = path->nodes[0]; | 3164 | eb = path->nodes[0]; |
3175 | slot = path->slots[0]; | 3165 | slot = path->slots[0]; |
3176 | if (slot > 0) { | 3166 | if (slot > 0) { |
3177 | btrfs_item_key(eb, &disk_key, slot - 1); | 3167 | btrfs_item_key(eb, &disk_key, slot - 1); |
3178 | BUG_ON(comp_keys(&disk_key, new_key) >= 0); | 3168 | BUG_ON(comp_keys(&disk_key, new_key) >= 0); |
3179 | } | 3169 | } |
3180 | if (slot < btrfs_header_nritems(eb) - 1) { | 3170 | if (slot < btrfs_header_nritems(eb) - 1) { |
3181 | btrfs_item_key(eb, &disk_key, slot + 1); | 3171 | btrfs_item_key(eb, &disk_key, slot + 1); |
3182 | BUG_ON(comp_keys(&disk_key, new_key) <= 0); | 3172 | BUG_ON(comp_keys(&disk_key, new_key) <= 0); |
3183 | } | 3173 | } |
3184 | 3174 | ||
3185 | btrfs_cpu_key_to_disk(&disk_key, new_key); | 3175 | btrfs_cpu_key_to_disk(&disk_key, new_key); |
3186 | btrfs_set_item_key(eb, &disk_key, slot); | 3176 | btrfs_set_item_key(eb, &disk_key, slot); |
3187 | btrfs_mark_buffer_dirty(eb); | 3177 | btrfs_mark_buffer_dirty(eb); |
3188 | if (slot == 0) | 3178 | if (slot == 0) |
3189 | fixup_low_keys(root, path, &disk_key, 1); | 3179 | fixup_low_keys(root, path, &disk_key, 1); |
3190 | } | 3180 | } |
3191 | 3181 | ||
/*
 * try to push data from one node into the next node left in the
 * tree.
 *
 * returns 0 if some ptrs were pushed left, < 0 if there was some horrible
 * error, and > 0 if there was no room in the left hand block.
 */
static int push_node_left(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, struct extent_buffer *dst,
			  struct extent_buffer *src, int empty)
{
	int push_items = 0;
	int src_nritems;
	int dst_nritems;
	int ret = 0;

	src_nritems = btrfs_header_nritems(src);
	dst_nritems = btrfs_header_nritems(dst);
	/* push_items starts as the free slots available in dst */
	push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
	/* both nodes must already be COWed in this transaction */
	WARN_ON(btrfs_header_generation(src) != trans->transid);
	WARN_ON(btrfs_header_generation(dst) != trans->transid);

	/* unless we are emptying src, don't bother with nearly-empty nodes */
	if (!empty && src_nritems <= 8)
		return 1;

	if (push_items <= 0)
		return 1;

	if (empty) {
		push_items = min(src_nritems, push_items);
		if (push_items < src_nritems) {
			/* leave at least 8 pointers in the node if
			 * we aren't going to empty it
			 */
			if (src_nritems - push_items < 8) {
				if (push_items <= 8)
					return 1;
				push_items -= 8;
			}
		}
	} else
		push_items = min(src_nritems - 8, push_items);

	/* log the key moves before touching dst so replay sees old state */
	ret = tree_mod_log_eb_copy(root->fs_info, dst, src, dst_nritems, 0,
				   push_items);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		return ret;
	}
	/* append the first push_items pointers of src to the end of dst */
	copy_extent_buffer(dst, src,
			   btrfs_node_key_ptr_offset(dst_nritems),
			   btrfs_node_key_ptr_offset(0),
			   push_items * sizeof(struct btrfs_key_ptr));

	if (push_items < src_nritems) {
		/*
		 * don't call tree_mod_log_eb_move here, key removal was already
		 * fully logged by tree_mod_log_eb_copy above.
		 */
		memmove_extent_buffer(src, btrfs_node_key_ptr_offset(0),
				      btrfs_node_key_ptr_offset(push_items),
				      (src_nritems - push_items) *
				      sizeof(struct btrfs_key_ptr));
	}
	btrfs_set_header_nritems(src, src_nritems - push_items);
	btrfs_set_header_nritems(dst, dst_nritems + push_items);
	btrfs_mark_buffer_dirty(src);
	btrfs_mark_buffer_dirty(dst);

	return ret;
}
3263 | 3253 | ||
/*
 * try to push data from one node into the next node right in the
 * tree.
 *
 * returns 0 if some ptrs were pushed, < 0 if there was some horrible
 * error, and > 0 if there was no room in the right hand block.
 *
 * this will only push up to 1/2 the contents of the left node over
 */
static int balance_node_right(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      struct extent_buffer *dst,
			      struct extent_buffer *src)
{
	int push_items = 0;
	int max_push;
	int src_nritems;
	int dst_nritems;
	int ret = 0;

	/* both nodes must already be COWed in this transaction */
	WARN_ON(btrfs_header_generation(src) != trans->transid);
	WARN_ON(btrfs_header_generation(dst) != trans->transid);

	src_nritems = btrfs_header_nritems(src);
	dst_nritems = btrfs_header_nritems(dst);
	/* push_items starts as the free slots available in dst */
	push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
	if (push_items <= 0)
		return 1;

	/* not worth balancing an almost-empty source node */
	if (src_nritems < 4)
		return 1;

	max_push = src_nritems / 2 + 1;
	/* don't try to empty the node */
	if (max_push >= src_nritems)
		return 1;

	if (max_push < push_items)
		push_items = max_push;

	/* shift dst's existing pointers right to make room at slot 0 */
	tree_mod_log_eb_move(root->fs_info, dst, push_items, 0, dst_nritems);
	memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(push_items),
			      btrfs_node_key_ptr_offset(0),
			      (dst_nritems) *
			      sizeof(struct btrfs_key_ptr));

	/* log the cross-node copy before modifying dst's head */
	ret = tree_mod_log_eb_copy(root->fs_info, dst, src, 0,
				   src_nritems - push_items, push_items);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		return ret;
	}
	/* move the tail of src into the freed head of dst */
	copy_extent_buffer(dst, src,
			   btrfs_node_key_ptr_offset(0),
			   btrfs_node_key_ptr_offset(src_nritems - push_items),
			   push_items * sizeof(struct btrfs_key_ptr));

	btrfs_set_header_nritems(src, src_nritems - push_items);
	btrfs_set_header_nritems(dst, dst_nritems + push_items);

	btrfs_mark_buffer_dirty(src);
	btrfs_mark_buffer_dirty(dst);

	return ret;
}
3329 | 3319 | ||
/*
 * helper function to insert a new root level in the tree.
 * A new node is allocated, and a single item is inserted to
 * point to the existing root
 *
 * returns zero on success or < 0 on failure.
 */
static noinline int insert_new_root(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct btrfs_path *path, int level)
{
	u64 lower_gen;
	struct extent_buffer *lower;
	struct extent_buffer *c;
	struct extent_buffer *old;
	struct btrfs_disk_key lower_key;

	BUG_ON(path->nodes[level]);
	BUG_ON(path->nodes[level-1] != root->node);

	/* the first key of the old root becomes the single key of 'c' */
	lower = path->nodes[level-1];
	if (level == 1)
		btrfs_item_key(lower, &lower_key, 0);
	else
		btrfs_node_key(lower, &lower_key, 0);

	c = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
				   &lower_key, level, root->node->start, 0);
	if (IS_ERR(c))
		return PTR_ERR(c);

	root_add_used(root, root->nodesize);

	/* initialize the new root node's header from scratch */
	memset_extent_buffer(c, 0, 0, sizeof(struct btrfs_header));
	btrfs_set_header_nritems(c, 1);
	btrfs_set_header_level(c, level);
	btrfs_set_header_bytenr(c, c->start);
	btrfs_set_header_generation(c, trans->transid);
	btrfs_set_header_backref_rev(c, BTRFS_MIXED_BACKREF_REV);
	btrfs_set_header_owner(c, root->root_key.objectid);

	write_extent_buffer(c, root->fs_info->fsid, btrfs_header_fsid(),
			    BTRFS_FSID_SIZE);

	write_extent_buffer(c, root->fs_info->chunk_tree_uuid,
			    btrfs_header_chunk_tree_uuid(c), BTRFS_UUID_SIZE);

	/* the single pointer in 'c' points at the old root */
	btrfs_set_node_key(c, &lower_key, 0);
	btrfs_set_node_blockptr(c, 0, lower->start);
	lower_gen = btrfs_header_generation(lower);
	WARN_ON(lower_gen != trans->transid);

	btrfs_set_node_ptr_generation(c, 0, lower_gen);

	btrfs_mark_buffer_dirty(c);

	/* log the root switch before publishing it via RCU */
	old = root->node;
	tree_mod_log_set_root_pointer(root, c, 0);
	rcu_assign_pointer(root->node, c);

	/* the super has an extra ref to root->node */
	free_extent_buffer(old);

	add_root_to_dirty_list(root);
	/* the path holds its own reference to the new root node */
	extent_buffer_get(c);
	path->nodes[level] = c;
	path->locks[level] = BTRFS_WRITE_LOCK;
	path->slots[level] = 0;
	return 0;
}
3400 | 3390 | ||
/*
 * worker function to insert a single pointer in a node.
 * the node should have enough room for the pointer already
 *
 * slot and level indicate where you want the key to go, and
 * blocknr is the block the key points to.
 */
static void insert_ptr(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct btrfs_path *path,
		       struct btrfs_disk_key *key, u64 bytenr,
		       int slot, int level)
{
	struct extent_buffer *lower;
	int nritems;
	int ret;

	BUG_ON(!path->nodes[level]);
	btrfs_assert_tree_locked(path->nodes[level]);
	lower = path->nodes[level];
	nritems = btrfs_header_nritems(lower);
	BUG_ON(slot > nritems);
	BUG_ON(nritems == BTRFS_NODEPTRS_PER_BLOCK(root));
	if (slot != nritems) {
		/* log the shift first so replay sees the pre-move layout */
		if (level)
			tree_mod_log_eb_move(root->fs_info, lower, slot + 1,
					     slot, nritems - slot);
		/* open a gap at 'slot' by shifting later pointers right */
		memmove_extent_buffer(lower,
			      btrfs_node_key_ptr_offset(slot + 1),
			      btrfs_node_key_ptr_offset(slot),
			      (nritems - slot) * sizeof(struct btrfs_key_ptr));
	}
	if (level) {
		ret = tree_mod_log_insert_key(root->fs_info, lower, slot,
					      MOD_LOG_KEY_ADD, GFP_NOFS);
		BUG_ON(ret < 0);
	}
	btrfs_set_node_key(lower, key, slot);
	btrfs_set_node_blockptr(lower, slot, bytenr);
	WARN_ON(trans->transid == 0);
	btrfs_set_node_ptr_generation(lower, slot, trans->transid);
	btrfs_set_header_nritems(lower, nritems + 1);
	btrfs_mark_buffer_dirty(lower);
}
3444 | 3434 | ||
/*
 * split the node at the specified level in path in two.
 * The path is corrected to point to the appropriate node after the split
 *
 * Before splitting this tries to make some room in the node by pushing
 * left and right, if either one works, it returns right away.
 *
 * returns 0 on success and < 0 on failure
 */
static noinline int split_node(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct btrfs_path *path, int level)
{
	struct extent_buffer *c;
	struct extent_buffer *split;
	struct btrfs_disk_key disk_key;
	int mid;
	int ret;
	u32 c_nritems;

	c = path->nodes[level];
	/* the node must already be COWed into this transaction */
	WARN_ON(btrfs_header_generation(c) != trans->transid);
	if (c == root->node) {
		/*
		 * trying to split the root, lets make a new one
		 *
		 * tree mod log: We don't log_removal old root in
		 * insert_new_root, because that root buffer will be kept as a
		 * normal node. We are going to log removal of half of the
		 * elements below with tree_mod_log_eb_copy. We're holding a
		 * tree lock on the buffer, which is why we cannot race with
		 * other tree_mod_log users.
		 */
		ret = insert_new_root(trans, root, path, level + 1);
		if (ret)
			return ret;
	} else {
		/* not the root: try pushing into siblings first */
		ret = push_nodes_for_insert(trans, root, path, level);
		/* pushing may have swapped path->nodes[level]; reload it */
		c = path->nodes[level];
		if (!ret && btrfs_header_nritems(c) <
		    BTRFS_NODEPTRS_PER_BLOCK(root) - 3)
			/* pushing freed enough room, no split required */
			return 0;
		if (ret < 0)
			return ret;
	}

	c_nritems = btrfs_header_nritems(c);
	mid = (c_nritems + 1) / 2;
	/* key at 'mid' becomes the first key of the new right-hand node */
	btrfs_node_key(c, &disk_key, mid);

	split = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
			&disk_key, level, c->start, 0);
	if (IS_ERR(split))
		return PTR_ERR(split);

	root_add_used(root, root->nodesize);

	/* initialize the header of the freshly allocated node */
	memset_extent_buffer(split, 0, 0, sizeof(struct btrfs_header));
	btrfs_set_header_level(split, btrfs_header_level(c));
	btrfs_set_header_bytenr(split, split->start);
	btrfs_set_header_generation(split, trans->transid);
	btrfs_set_header_backref_rev(split, BTRFS_MIXED_BACKREF_REV);
	btrfs_set_header_owner(split, root->root_key.objectid);
	write_extent_buffer(split, root->fs_info->fsid,
			    btrfs_header_fsid(), BTRFS_FSID_SIZE);
	write_extent_buffer(split, root->fs_info->chunk_tree_uuid,
			    btrfs_header_chunk_tree_uuid(split),
			    BTRFS_UUID_SIZE);

	/* record the move of the upper half of c's pointers in the mod log */
	ret = tree_mod_log_eb_copy(root->fs_info, split, c, 0,
				   mid, c_nritems - mid);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		return ret;
	}
	/* move key pointers [mid, c_nritems) from c into split */
	copy_extent_buffer(split, c,
			   btrfs_node_key_ptr_offset(0),
			   btrfs_node_key_ptr_offset(mid),
			   (c_nritems - mid) * sizeof(struct btrfs_key_ptr));
	btrfs_set_header_nritems(split, c_nritems - mid);
	btrfs_set_header_nritems(c, mid);
	ret = 0;

	btrfs_mark_buffer_dirty(c);
	btrfs_mark_buffer_dirty(split);

	/* link the new node into the parent, right after c's slot */
	insert_ptr(trans, root, path, &disk_key, split->start,
		   path->slots[level + 1] + 1, level + 1);

	/*
	 * fix up the path: keep whichever half now contains the slot we
	 * were pointing at, and drop the lock + reference on the other.
	 */
	if (path->slots[level] >= mid) {
		path->slots[level] -= mid;
		btrfs_tree_unlock(c);
		free_extent_buffer(c);
		path->nodes[level] = split;
		path->slots[level + 1] += 1;
	} else {
		btrfs_tree_unlock(split);
		free_extent_buffer(split);
	}
	return ret;
}
3546 | 3536 | ||
3547 | /* | 3537 | /* |
3548 | * how many bytes are required to store the items in a leaf. start | 3538 | * how many bytes are required to store the items in a leaf. start |
3549 | * and nr indicate which items in the leaf to check. This totals up the | 3539 | * and nr indicate which items in the leaf to check. This totals up the |
3550 | * space used both by the item structs and the item data | 3540 | * space used both by the item structs and the item data |
3551 | */ | 3541 | */ |
3552 | static int leaf_space_used(struct extent_buffer *l, int start, int nr) | 3542 | static int leaf_space_used(struct extent_buffer *l, int start, int nr) |
3553 | { | 3543 | { |
3554 | struct btrfs_item *start_item; | 3544 | struct btrfs_item *start_item; |
3555 | struct btrfs_item *end_item; | 3545 | struct btrfs_item *end_item; |
3556 | struct btrfs_map_token token; | 3546 | struct btrfs_map_token token; |
3557 | int data_len; | 3547 | int data_len; |
3558 | int nritems = btrfs_header_nritems(l); | 3548 | int nritems = btrfs_header_nritems(l); |
3559 | int end = min(nritems, start + nr) - 1; | 3549 | int end = min(nritems, start + nr) - 1; |
3560 | 3550 | ||
3561 | if (!nr) | 3551 | if (!nr) |
3562 | return 0; | 3552 | return 0; |
3563 | btrfs_init_map_token(&token); | 3553 | btrfs_init_map_token(&token); |
3564 | start_item = btrfs_item_nr(start); | 3554 | start_item = btrfs_item_nr(start); |
3565 | end_item = btrfs_item_nr(end); | 3555 | end_item = btrfs_item_nr(end); |
3566 | data_len = btrfs_token_item_offset(l, start_item, &token) + | 3556 | data_len = btrfs_token_item_offset(l, start_item, &token) + |
3567 | btrfs_token_item_size(l, start_item, &token); | 3557 | btrfs_token_item_size(l, start_item, &token); |
3568 | data_len = data_len - btrfs_token_item_offset(l, end_item, &token); | 3558 | data_len = data_len - btrfs_token_item_offset(l, end_item, &token); |
3569 | data_len += sizeof(struct btrfs_item) * nr; | 3559 | data_len += sizeof(struct btrfs_item) * nr; |
3570 | WARN_ON(data_len < 0); | 3560 | WARN_ON(data_len < 0); |
3571 | return data_len; | 3561 | return data_len; |
3572 | } | 3562 | } |
3573 | 3563 | ||
3574 | /* | 3564 | /* |
3575 | * The space between the end of the leaf items and | 3565 | * The space between the end of the leaf items and |
3576 | * the start of the leaf data. IOW, how much room | 3566 | * the start of the leaf data. IOW, how much room |
3577 | * the leaf has left for both items and data | 3567 | * the leaf has left for both items and data |
3578 | */ | 3568 | */ |
3579 | noinline int btrfs_leaf_free_space(struct btrfs_root *root, | 3569 | noinline int btrfs_leaf_free_space(struct btrfs_root *root, |
3580 | struct extent_buffer *leaf) | 3570 | struct extent_buffer *leaf) |
3581 | { | 3571 | { |
3582 | int nritems = btrfs_header_nritems(leaf); | 3572 | int nritems = btrfs_header_nritems(leaf); |
3583 | int ret; | 3573 | int ret; |
3584 | ret = BTRFS_LEAF_DATA_SIZE(root) - leaf_space_used(leaf, 0, nritems); | 3574 | ret = BTRFS_LEAF_DATA_SIZE(root) - leaf_space_used(leaf, 0, nritems); |
3585 | if (ret < 0) { | 3575 | if (ret < 0) { |
3586 | btrfs_crit(root->fs_info, | 3576 | btrfs_crit(root->fs_info, |
3587 | "leaf free space ret %d, leaf data size %lu, used %d nritems %d", | 3577 | "leaf free space ret %d, leaf data size %lu, used %d nritems %d", |
3588 | ret, (unsigned long) BTRFS_LEAF_DATA_SIZE(root), | 3578 | ret, (unsigned long) BTRFS_LEAF_DATA_SIZE(root), |
3589 | leaf_space_used(leaf, 0, nritems), nritems); | 3579 | leaf_space_used(leaf, 0, nritems), nritems); |
3590 | } | 3580 | } |
3591 | return ret; | 3581 | return ret; |
3592 | } | 3582 | } |
3593 | 3583 | ||
/*
 * min slot controls the lowest index we're willing to push to the
 * right. We'll push up to and including min_slot, but no lower
 *
 * Caller passes in 'right' locked; this function takes ownership of that
 * lock and reference: on return they have either been handed to the path
 * (path->nodes[0] = right) or released.
 *
 * returns 0 if some items were pushed, 1 if nothing could be pushed.
 */
static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      int data_size, int empty,
				      struct extent_buffer *right,
				      int free_space, u32 left_nritems,
				      u32 min_slot)
{
	struct extent_buffer *left = path->nodes[0];
	struct extent_buffer *upper = path->nodes[1];
	struct btrfs_map_token token;
	struct btrfs_disk_key disk_key;
	int slot;
	u32 i;
	int push_space = 0;
	int push_items = 0;
	struct btrfs_item *item;
	u32 nr;
	u32 right_nritems;
	u32 data_end;
	u32 this_item_size;

	btrfs_init_map_token(&token);

	/* when emptying the leaf we may push every slot, down to 0 */
	if (empty)
		nr = 0;
	else
		nr = max_t(u32, 1, min_slot);

	/* inserting past the end: reserve room for the new item too */
	if (path->slots[0] >= left_nritems)
		push_space += data_size;

	slot = path->slots[1];
	i = left_nritems - 1;
	/*
	 * walk backwards from the last item, counting how many items
	 * (push_items) and how many bytes (push_space) fit into 'right'.
	 */
	while (i >= nr) {
		item = btrfs_item_nr(i);

		if (!empty && push_items > 0) {
			/* don't push the slot the caller is working on away */
			if (path->slots[0] > i)
				break;
			if (path->slots[0] == i) {
				int space = btrfs_leaf_free_space(root, left);
				if (space + push_space * 2 > free_space)
					break;
			}
		}

		/* account for the pending insert at this slot */
		if (path->slots[0] == i)
			push_space += data_size;

		this_item_size = btrfs_item_size(left, item);
		if (this_item_size + sizeof(*item) + push_space > free_space)
			break;

		push_items++;
		push_space += this_item_size + sizeof(*item);
		if (i == 0)
			break;
		i--;
	}

	if (push_items == 0)
		goto out_unlock;

	WARN_ON(!empty && push_items == left_nritems);

	/* push left to right */
	right_nritems = btrfs_header_nritems(right);

	/* total data bytes occupied by the items being moved */
	push_space = btrfs_item_end_nr(left, left_nritems - push_items);
	push_space -= leaf_data_end(root, left);

	/* make room in the right data area */
	data_end = leaf_data_end(root, right);
	memmove_extent_buffer(right,
			      btrfs_leaf_data(right) + data_end - push_space,
			      btrfs_leaf_data(right) + data_end,
			      BTRFS_LEAF_DATA_SIZE(root) - data_end);

	/* copy from the left data area */
	copy_extent_buffer(right, left, btrfs_leaf_data(right) +
		     BTRFS_LEAF_DATA_SIZE(root) - push_space,
		     btrfs_leaf_data(left) + leaf_data_end(root, left),
		     push_space);

	/* slide right's item headers over to open up the first slots */
	memmove_extent_buffer(right, btrfs_item_nr_offset(push_items),
			      btrfs_item_nr_offset(0),
			      right_nritems * sizeof(struct btrfs_item));

	/* copy the items from left to right */
	copy_extent_buffer(right, left, btrfs_item_nr_offset(0),
		   btrfs_item_nr_offset(left_nritems - push_items),
		   push_items * sizeof(struct btrfs_item));

	/* update the item pointers */
	right_nritems += push_items;
	btrfs_set_header_nritems(right, right_nritems);
	push_space = BTRFS_LEAF_DATA_SIZE(root);
	/* recompute every data offset in right from the leaf end downward */
	for (i = 0; i < right_nritems; i++) {
		item = btrfs_item_nr(i);
		push_space -= btrfs_token_item_size(right, item, &token);
		btrfs_set_token_item_offset(right, item, push_space, &token);
	}

	left_nritems -= push_items;
	btrfs_set_header_nritems(left, left_nritems);

	if (left_nritems)
		btrfs_mark_buffer_dirty(left);
	else
		/* left is now empty; release the block instead of dirtying */
		clean_tree_block(trans, root, left);

	btrfs_mark_buffer_dirty(right);

	/* right's first key changed; propagate it into the parent node */
	btrfs_item_key(right, &disk_key, 0);
	btrfs_set_node_key(upper, &disk_key, slot + 1);
	btrfs_mark_buffer_dirty(upper);

	/* then fixup the leaf pointer in the path */
	if (path->slots[0] >= left_nritems) {
		/* the target slot moved into right: hand the path over */
		path->slots[0] -= left_nritems;
		if (btrfs_header_nritems(path->nodes[0]) == 0)
			clean_tree_block(trans, root, path->nodes[0]);
		btrfs_tree_unlock(path->nodes[0]);
		free_extent_buffer(path->nodes[0]);
		path->nodes[0] = right;
		path->slots[1] += 1;
	} else {
		btrfs_tree_unlock(right);
		free_extent_buffer(right);
	}
	return 0;

out_unlock:
	btrfs_tree_unlock(right);
	free_extent_buffer(right);
	return 1;
}
3736 | 3726 | ||
/*
 * push some data in the path leaf to the right, trying to free up at
 * least data_size bytes.  returns zero if the push worked, nonzero otherwise
 *
 * returns 1 if the push failed because the other node didn't have enough
 * room, 0 if everything worked out and < 0 if there were major errors.
 *
 * this will push starting from min_slot to the end of the leaf.  It won't
 * push any slot lower than min_slot
 */
static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
			   *root, struct btrfs_path *path,
			   int min_data_size, int data_size,
			   int empty, u32 min_slot)
{
	struct extent_buffer *left = path->nodes[0];
	struct extent_buffer *right;
	struct extent_buffer *upper;
	int slot;
	int free_space;
	u32 left_nritems;
	int ret;

	/* no parent node means this leaf is the root: no sibling to push to */
	if (!path->nodes[1])
		return 1;

	slot = path->slots[1];
	upper = path->nodes[1];
	/* already at the last slot of the parent: no right sibling */
	if (slot >= btrfs_header_nritems(upper) - 1)
		return 1;

	btrfs_assert_tree_locked(path->nodes[1]);

	right = read_node_slot(root, upper, slot + 1);
	if (right == NULL)
		return 1;

	btrfs_tree_lock(right);
	btrfs_set_lock_blocking(right);

	free_space = btrfs_leaf_free_space(root, right);
	if (free_space < data_size)
		goto out_unlock;

	/* cow and double check */
	ret = btrfs_cow_block(trans, root, right, upper,
			      slot + 1, &right);
	/*
	 * NOTE(review): a negative ret from btrfs_cow_block is turned into
	 * the generic "push failed" return 1 here, so the caller falls back
	 * to splitting instead of seeing the error -- confirm this is the
	 * intended error policy at this kernel version.
	 */
	if (ret)
		goto out_unlock;

	/* COW may have changed free space; re-check before committing */
	free_space = btrfs_leaf_free_space(root, right);
	if (free_space < data_size)
		goto out_unlock;

	left_nritems = btrfs_header_nritems(left);
	if (left_nritems == 0)
		goto out_unlock;

	if (path->slots[0] == left_nritems && !empty) {
		/* Key greater than all keys in the leaf, right neighbor has
		 * enough room for it and we're not emptying our leaf to delete
		 * it, therefore use right neighbor to insert the new item and
		 * no need to touch/dirty our left leaft. */
		btrfs_tree_unlock(left);
		free_extent_buffer(left);
		path->nodes[0] = right;
		path->slots[0] = 0;
		path->slots[1]++;
		return 0;
	}

	/* __push_leaf_right takes over the lock and reference on 'right' */
	return __push_leaf_right(trans, root, path, min_data_size, empty,
				right, free_space, left_nritems, min_slot);
out_unlock:
	btrfs_tree_unlock(right);
	free_extent_buffer(right);
	return 1;
}
3815 | 3805 | ||
3816 | /* | 3806 | /* |
3817 | * push some data in the path leaf to the left, trying to free up at | 3807 | * push some data in the path leaf to the left, trying to free up at |
3818 | * least data_size bytes. returns zero if the push worked, nonzero otherwise | 3808 | * least data_size bytes. returns zero if the push worked, nonzero otherwise |
3819 | * | 3809 | * |
3820 | * max_slot can put a limit on how far into the leaf we'll push items. The | 3810 | * max_slot can put a limit on how far into the leaf we'll push items. The |
3821 | * item at 'max_slot' won't be touched. Use (u32)-1 to make us do all the | 3811 | * item at 'max_slot' won't be touched. Use (u32)-1 to make us do all the |
3822 | * items | 3812 | * items |
3823 | */ | 3813 | */ |
3824 | static noinline int __push_leaf_left(struct btrfs_trans_handle *trans, | 3814 | static noinline int __push_leaf_left(struct btrfs_trans_handle *trans, |
3825 | struct btrfs_root *root, | 3815 | struct btrfs_root *root, |
3826 | struct btrfs_path *path, int data_size, | 3816 | struct btrfs_path *path, int data_size, |
3827 | int empty, struct extent_buffer *left, | 3817 | int empty, struct extent_buffer *left, |
3828 | int free_space, u32 right_nritems, | 3818 | int free_space, u32 right_nritems, |
3829 | u32 max_slot) | 3819 | u32 max_slot) |
3830 | { | 3820 | { |
3831 | struct btrfs_disk_key disk_key; | 3821 | struct btrfs_disk_key disk_key; |
3832 | struct extent_buffer *right = path->nodes[0]; | 3822 | struct extent_buffer *right = path->nodes[0]; |
3833 | int i; | 3823 | int i; |
3834 | int push_space = 0; | 3824 | int push_space = 0; |
3835 | int push_items = 0; | 3825 | int push_items = 0; |
3836 | struct btrfs_item *item; | 3826 | struct btrfs_item *item; |
3837 | u32 old_left_nritems; | 3827 | u32 old_left_nritems; |
3838 | u32 nr; | 3828 | u32 nr; |
3839 | int ret = 0; | 3829 | int ret = 0; |
3840 | u32 this_item_size; | 3830 | u32 this_item_size; |
3841 | u32 old_left_item_size; | 3831 | u32 old_left_item_size; |
3842 | struct btrfs_map_token token; | 3832 | struct btrfs_map_token token; |
3843 | 3833 | ||
3844 | btrfs_init_map_token(&token); | 3834 | btrfs_init_map_token(&token); |
3845 | 3835 | ||
3846 | if (empty) | 3836 | if (empty) |
3847 | nr = min(right_nritems, max_slot); | 3837 | nr = min(right_nritems, max_slot); |
3848 | else | 3838 | else |
3849 | nr = min(right_nritems - 1, max_slot); | 3839 | nr = min(right_nritems - 1, max_slot); |
3850 | 3840 | ||
3851 | for (i = 0; i < nr; i++) { | 3841 | for (i = 0; i < nr; i++) { |
3852 | item = btrfs_item_nr(i); | 3842 | item = btrfs_item_nr(i); |
3853 | 3843 | ||
3854 | if (!empty && push_items > 0) { | 3844 | if (!empty && push_items > 0) { |
3855 | if (path->slots[0] < i) | 3845 | if (path->slots[0] < i) |
3856 | break; | 3846 | break; |
3857 | if (path->slots[0] == i) { | 3847 | if (path->slots[0] == i) { |
3858 | int space = btrfs_leaf_free_space(root, right); | 3848 | int space = btrfs_leaf_free_space(root, right); |
3859 | if (space + push_space * 2 > free_space) | 3849 | if (space + push_space * 2 > free_space) |
3860 | break; | 3850 | break; |
3861 | } | 3851 | } |
3862 | } | 3852 | } |
3863 | 3853 | ||
3864 | if (path->slots[0] == i) | 3854 | if (path->slots[0] == i) |
3865 | push_space += data_size; | 3855 | push_space += data_size; |
3866 | 3856 | ||
3867 | this_item_size = btrfs_item_size(right, item); | 3857 | this_item_size = btrfs_item_size(right, item); |
3868 | if (this_item_size + sizeof(*item) + push_space > free_space) | 3858 | if (this_item_size + sizeof(*item) + push_space > free_space) |
3869 | break; | 3859 | break; |
3870 | 3860 | ||
3871 | push_items++; | 3861 | push_items++; |
3872 | push_space += this_item_size + sizeof(*item); | 3862 | push_space += this_item_size + sizeof(*item); |
3873 | } | 3863 | } |
3874 | 3864 | ||
3875 | if (push_items == 0) { | 3865 | if (push_items == 0) { |
3876 | ret = 1; | 3866 | ret = 1; |
3877 | goto out; | 3867 | goto out; |
3878 | } | 3868 | } |
3879 | WARN_ON(!empty && push_items == btrfs_header_nritems(right)); | 3869 | WARN_ON(!empty && push_items == btrfs_header_nritems(right)); |
3880 | 3870 | ||
3881 | /* push data from right to left */ | 3871 | /* push data from right to left */ |
3882 | copy_extent_buffer(left, right, | 3872 | copy_extent_buffer(left, right, |
3883 | btrfs_item_nr_offset(btrfs_header_nritems(left)), | 3873 | btrfs_item_nr_offset(btrfs_header_nritems(left)), |
3884 | btrfs_item_nr_offset(0), | 3874 | btrfs_item_nr_offset(0), |
3885 | push_items * sizeof(struct btrfs_item)); | 3875 | push_items * sizeof(struct btrfs_item)); |
3886 | 3876 | ||
3887 | push_space = BTRFS_LEAF_DATA_SIZE(root) - | 3877 | push_space = BTRFS_LEAF_DATA_SIZE(root) - |
3888 | btrfs_item_offset_nr(right, push_items - 1); | 3878 | btrfs_item_offset_nr(right, push_items - 1); |
3889 | 3879 | ||
3890 | copy_extent_buffer(left, right, btrfs_leaf_data(left) + | 3880 | copy_extent_buffer(left, right, btrfs_leaf_data(left) + |
3891 | leaf_data_end(root, left) - push_space, | 3881 | leaf_data_end(root, left) - push_space, |
3892 | btrfs_leaf_data(right) + | 3882 | btrfs_leaf_data(right) + |
3893 | btrfs_item_offset_nr(right, push_items - 1), | 3883 | btrfs_item_offset_nr(right, push_items - 1), |
3894 | push_space); | 3884 | push_space); |
3895 | old_left_nritems = btrfs_header_nritems(left); | 3885 | old_left_nritems = btrfs_header_nritems(left); |
3896 | BUG_ON(old_left_nritems <= 0); | 3886 | BUG_ON(old_left_nritems <= 0); |
3897 | 3887 | ||
3898 | old_left_item_size = btrfs_item_offset_nr(left, old_left_nritems - 1); | 3888 | old_left_item_size = btrfs_item_offset_nr(left, old_left_nritems - 1); |
3899 | for (i = old_left_nritems; i < old_left_nritems + push_items; i++) { | 3889 | for (i = old_left_nritems; i < old_left_nritems + push_items; i++) { |
3900 | u32 ioff; | 3890 | u32 ioff; |
3901 | 3891 | ||
3902 | item = btrfs_item_nr(i); | 3892 | item = btrfs_item_nr(i); |
3903 | 3893 | ||
3904 | ioff = btrfs_token_item_offset(left, item, &token); | 3894 | ioff = btrfs_token_item_offset(left, item, &token); |
3905 | btrfs_set_token_item_offset(left, item, | 3895 | btrfs_set_token_item_offset(left, item, |
3906 | ioff - (BTRFS_LEAF_DATA_SIZE(root) - old_left_item_size), | 3896 | ioff - (BTRFS_LEAF_DATA_SIZE(root) - old_left_item_size), |
3907 | &token); | 3897 | &token); |
3908 | } | 3898 | } |
3909 | btrfs_set_header_nritems(left, old_left_nritems + push_items); | 3899 | btrfs_set_header_nritems(left, old_left_nritems + push_items); |
3910 | 3900 | ||
3911 | /* fixup right node */ | 3901 | /* fixup right node */ |
3912 | if (push_items > right_nritems) | 3902 | if (push_items > right_nritems) |
3913 | WARN(1, KERN_CRIT "push items %d nr %u\n", push_items, | 3903 | WARN(1, KERN_CRIT "push items %d nr %u\n", push_items, |
3914 | right_nritems); | 3904 | right_nritems); |
3915 | 3905 | ||
3916 | if (push_items < right_nritems) { | 3906 | if (push_items < right_nritems) { |
3917 | push_space = btrfs_item_offset_nr(right, push_items - 1) - | 3907 | push_space = btrfs_item_offset_nr(right, push_items - 1) - |
3918 | leaf_data_end(root, right); | 3908 | leaf_data_end(root, right); |
3919 | memmove_extent_buffer(right, btrfs_leaf_data(right) + | 3909 | memmove_extent_buffer(right, btrfs_leaf_data(right) + |
3920 | BTRFS_LEAF_DATA_SIZE(root) - push_space, | 3910 | BTRFS_LEAF_DATA_SIZE(root) - push_space, |
3921 | btrfs_leaf_data(right) + | 3911 | btrfs_leaf_data(right) + |
3922 | leaf_data_end(root, right), push_space); | 3912 | leaf_data_end(root, right), push_space); |
3923 | 3913 | ||
3924 | memmove_extent_buffer(right, btrfs_item_nr_offset(0), | 3914 | memmove_extent_buffer(right, btrfs_item_nr_offset(0), |
3925 | btrfs_item_nr_offset(push_items), | 3915 | btrfs_item_nr_offset(push_items), |
3926 | (btrfs_header_nritems(right) - push_items) * | 3916 | (btrfs_header_nritems(right) - push_items) * |
3927 | sizeof(struct btrfs_item)); | 3917 | sizeof(struct btrfs_item)); |
3928 | } | 3918 | } |
3929 | right_nritems -= push_items; | 3919 | right_nritems -= push_items; |
3930 | btrfs_set_header_nritems(right, right_nritems); | 3920 | btrfs_set_header_nritems(right, right_nritems); |
3931 | push_space = BTRFS_LEAF_DATA_SIZE(root); | 3921 | push_space = BTRFS_LEAF_DATA_SIZE(root); |
3932 | for (i = 0; i < right_nritems; i++) { | 3922 | for (i = 0; i < right_nritems; i++) { |
3933 | item = btrfs_item_nr(i); | 3923 | item = btrfs_item_nr(i); |
3934 | 3924 | ||
3935 | push_space = push_space - btrfs_token_item_size(right, | 3925 | push_space = push_space - btrfs_token_item_size(right, |
3936 | item, &token); | 3926 | item, &token); |
3937 | btrfs_set_token_item_offset(right, item, push_space, &token); | 3927 | btrfs_set_token_item_offset(right, item, push_space, &token); |
3938 | } | 3928 | } |
3939 | 3929 | ||
3940 | btrfs_mark_buffer_dirty(left); | 3930 | btrfs_mark_buffer_dirty(left); |
3941 | if (right_nritems) | 3931 | if (right_nritems) |
3942 | btrfs_mark_buffer_dirty(right); | 3932 | btrfs_mark_buffer_dirty(right); |
3943 | else | 3933 | else |
3944 | clean_tree_block(trans, root, right); | 3934 | clean_tree_block(trans, root, right); |
3945 | 3935 | ||
3946 | btrfs_item_key(right, &disk_key, 0); | 3936 | btrfs_item_key(right, &disk_key, 0); |
3947 | fixup_low_keys(root, path, &disk_key, 1); | 3937 | fixup_low_keys(root, path, &disk_key, 1); |
3948 | 3938 | ||
3949 | /* then fixup the leaf pointer in the path */ | 3939 | /* then fixup the leaf pointer in the path */ |
3950 | if (path->slots[0] < push_items) { | 3940 | if (path->slots[0] < push_items) { |
3951 | path->slots[0] += old_left_nritems; | 3941 | path->slots[0] += old_left_nritems; |
3952 | btrfs_tree_unlock(path->nodes[0]); | 3942 | btrfs_tree_unlock(path->nodes[0]); |
3953 | free_extent_buffer(path->nodes[0]); | 3943 | free_extent_buffer(path->nodes[0]); |
3954 | path->nodes[0] = left; | 3944 | path->nodes[0] = left; |
3955 | path->slots[1] -= 1; | 3945 | path->slots[1] -= 1; |
3956 | } else { | 3946 | } else { |
3957 | btrfs_tree_unlock(left); | 3947 | btrfs_tree_unlock(left); |
3958 | free_extent_buffer(left); | 3948 | free_extent_buffer(left); |
3959 | path->slots[0] -= push_items; | 3949 | path->slots[0] -= push_items; |
3960 | } | 3950 | } |
3961 | BUG_ON(path->slots[0] < 0); | 3951 | BUG_ON(path->slots[0] < 0); |
3962 | return ret; | 3952 | return ret; |
3963 | out: | 3953 | out: |
3964 | btrfs_tree_unlock(left); | 3954 | btrfs_tree_unlock(left); |
3965 | free_extent_buffer(left); | 3955 | free_extent_buffer(left); |
3966 | return ret; | 3956 | return ret; |
3967 | } | 3957 | } |
3968 | 3958 | ||
/*
 * push some data in the path leaf to the left, trying to free up at
 * least data_size bytes.  returns zero if the push worked, nonzero otherwise
 *
 * max_slot can put a limit on how far into the leaf we'll push items.  The
 * item at 'max_slot' won't be touched.  Use (u32)-1 to make us push all the
 * items
 */
static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
			  *root, struct btrfs_path *path, int min_data_size,
			  int data_size, int empty, u32 max_slot)
{
	struct extent_buffer *right = path->nodes[0];
	struct extent_buffer *left;
	int slot;
	int free_space;
	u32 right_nritems;
	int ret = 0;

	/*
	 * slot 0 in the parent means there is no leaf to our left;
	 * likewise, with no parent at all there is nowhere to push.
	 */
	slot = path->slots[1];
	if (slot == 0)
		return 1;
	if (!path->nodes[1])
		return 1;

	/* nothing to push from an empty leaf */
	right_nritems = btrfs_header_nritems(right);
	if (right_nritems == 0)
		return 1;

	btrfs_assert_tree_locked(path->nodes[1]);

	left = read_node_slot(root, path->nodes[1], slot - 1);
	if (left == NULL)
		return 1;

	/*
	 * take the lock on the left sibling and immediately make it
	 * blocking: the work below (COW, item copies) can sleep.
	 */
	btrfs_tree_lock(left);
	btrfs_set_lock_blocking(left);

	/* cheap check before paying for the COW below */
	free_space = btrfs_leaf_free_space(root, left);
	if (free_space < data_size) {
		ret = 1;
		goto out;
	}

	/* cow and double check */
	ret = btrfs_cow_block(trans, root, left,
			      path->nodes[1], slot - 1, &left);
	if (ret) {
		/* we hit -ENOSPC, but it isn't fatal here */
		if (ret == -ENOSPC)
			ret = 1;
		goto out;
	}

	/* re-check: COW may have changed the leaf's free space */
	free_space = btrfs_leaf_free_space(root, left);
	if (free_space < data_size) {
		ret = 1;
		goto out;
	}

	/* __push_leaf_left() takes ownership of 'left' (unlock/free) */
	return __push_leaf_left(trans, root, path, min_data_size,
			       empty, left, free_space, right_nritems,
			       max_slot);
out:
	btrfs_tree_unlock(left);
	free_extent_buffer(left);
	return ret;
}
4037 | 4027 | ||
/*
 * Copy the items from slot 'mid' onward out of leaf 'l' into the new
 * (already allocated and locked) leaf 'right', link 'right' into the
 * parent, and fix up the path to point at whichever leaf now holds
 * the original slot.  Helper for split_leaf().
 */
static noinline void copy_for_split(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct btrfs_path *path,
			       struct extent_buffer *l,
			       struct extent_buffer *right,
			       int slot, int mid, int nritems)
{
	int data_copy_size;
	int rt_data_off;
	int i;
	struct btrfs_disk_key disk_key;
	struct btrfs_map_token token;

	btrfs_init_map_token(&token);

	/* 'right' receives everything from slot 'mid' to the end */
	nritems = nritems - mid;
	btrfs_set_header_nritems(right, nritems);
	data_copy_size = btrfs_item_end_nr(l, mid) - leaf_data_end(root, l);

	/* move the item headers ... */
	copy_extent_buffer(right, l, btrfs_item_nr_offset(0),
			   btrfs_item_nr_offset(mid),
			   nritems * sizeof(struct btrfs_item));

	/* ... and the item data, packed against the end of the new leaf */
	copy_extent_buffer(right, l,
		     btrfs_leaf_data(right) + BTRFS_LEAF_DATA_SIZE(root) -
		     data_copy_size, btrfs_leaf_data(l) +
		     leaf_data_end(root, l), data_copy_size);

	/*
	 * item data offsets are relative to the leaf; shift them by the
	 * amount the data moved when repacked into 'right'.
	 */
	rt_data_off = BTRFS_LEAF_DATA_SIZE(root) -
		      btrfs_item_end_nr(l, mid);

	for (i = 0; i < nritems; i++) {
		struct btrfs_item *item = btrfs_item_nr(i);
		u32 ioff;

		ioff = btrfs_token_item_offset(right, item, &token);
		btrfs_set_token_item_offset(right, item,
					    ioff + rt_data_off, &token);
	}

	/* truncate the original leaf and insert 'right' into the parent */
	btrfs_set_header_nritems(l, mid);
	btrfs_item_key(right, &disk_key, 0);
	insert_ptr(trans, root, path, &disk_key, right->start,
		   path->slots[1] + 1, 1);

	btrfs_mark_buffer_dirty(right);
	btrfs_mark_buffer_dirty(l);
	BUG_ON(path->slots[0] != slot);

	/*
	 * if the target slot moved into 'right', repoint the path there
	 * and drop the old leaf; otherwise drop 'right'.
	 */
	if (mid <= slot) {
		btrfs_tree_unlock(path->nodes[0]);
		free_extent_buffer(path->nodes[0]);
		path->nodes[0] = right;
		path->slots[0] -= mid;
		path->slots[1] += 1;
	} else {
		btrfs_tree_unlock(right);
		free_extent_buffer(right);
	}

	BUG_ON(path->slots[0] < 0);
}
4104 | 4094 | ||
/*
 * double splits happen when we need to insert a big item in the middle
 * of a leaf.   A double split can leave us with 3 mostly empty leaves:
 * leaf: [ slots 0 - N] [ our target ] [ N + 1 - total in leaf ]
 *          A                 B                 C
 *
 * We avoid this by trying to push the items on either side of our target
 * into the adjacent leaves.  If all goes well we can avoid the double split
 * completely.
 *
 * Returns 0 when the target slot ended up at a leaf edge or enough space
 * was freed (or any progress was made at all), 1 when nothing moved, and
 * a negative errno on failure.
 */
static noinline int push_for_double_split(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path,
					  int data_size)
{
	int ret;
	int progress = 0;
	int slot;
	u32 nritems;
	int space_needed = data_size;

	/* credit the space already free in the leaf */
	slot = path->slots[0];
	if (slot < btrfs_header_nritems(path->nodes[0]))
		space_needed -= btrfs_leaf_free_space(root, path->nodes[0]);

	/*
	 * try to push all the items after our slot into the
	 * right leaf
	 */
	ret = push_leaf_right(trans, root, path, 1, space_needed, 0, slot);
	if (ret < 0)
		return ret;

	if (ret == 0)
		progress++;

	nritems = btrfs_header_nritems(path->nodes[0]);
	/*
	 * our goal is to get our slot at the start or end of a leaf.  If
	 * we've done so we're done
	 */
	if (path->slots[0] == 0 || path->slots[0] == nritems)
		return 0;

	if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size)
		return 0;

	/* try to push all the items before our slot into the next leaf */
	slot = path->slots[0];
	ret = push_leaf_left(trans, root, path, 1, space_needed, 0, slot);
	if (ret < 0)
		return ret;

	if (ret == 0)
		progress++;

	if (progress)
		return 0;
	return 1;
}
4165 | 4155 | ||
/*
 * split the path's leaf in two, making sure there is at least data_size
 * available for the resulting leaf level of the path.
 *
 * 'split' values below: 1 = normal split at 'mid'; 0 = don't move any
 * existing items, just hand back a fresh empty leaf for the new item;
 * 2 = the new item lands in the middle and needs a second split pass.
 *
 * returns 0 if all went well and < 0 on failure.
 */
static noinline int split_leaf(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct btrfs_key *ins_key,
			       struct btrfs_path *path, int data_size,
			       int extend)
{
	struct btrfs_disk_key disk_key;
	struct extent_buffer *l;
	u32 nritems;
	int mid;
	int slot;
	struct extent_buffer *right;
	int ret = 0;
	int wret;
	int split;
	int num_doubles = 0;
	int tried_avoid_double = 0;

	/* an extended item can never be allowed to outgrow a single leaf */
	l = path->nodes[0];
	slot = path->slots[0];
	if (extend && data_size + btrfs_item_size_nr(l, slot) +
	    sizeof(struct btrfs_item) > BTRFS_LEAF_DATA_SIZE(root))
		return -EOVERFLOW;

	/* first try to make some room by pushing left and right */
	if (data_size && path->nodes[1]) {
		int space_needed = data_size;

		if (slot < btrfs_header_nritems(l))
			space_needed -= btrfs_leaf_free_space(root, l);

		wret = push_leaf_right(trans, root, path, space_needed,
				       space_needed, 0, 0);
		if (wret < 0)
			return wret;
		if (wret) {
			wret = push_leaf_left(trans, root, path, space_needed,
					      space_needed, 0, (u32)-1);
			if (wret < 0)
				return wret;
		}
		l = path->nodes[0];

		/* did the pushes work? */
		if (btrfs_leaf_free_space(root, l) >= data_size)
			return 0;
	}

	/* splitting the root leaf needs a new root node above it first */
	if (!path->nodes[1]) {
		ret = insert_new_root(trans, root, path, 1);
		if (ret)
			return ret;
	}
again:
	split = 1;
	l = path->nodes[0];
	slot = path->slots[0];
	nritems = btrfs_header_nritems(l);
	mid = (nritems + 1) / 2;

	/*
	 * pick the split point.  The default mid-point split is adjusted
	 * when it would not leave room for the new item on the side the
	 * target slot falls on.
	 */
	if (mid <= slot) {
		if (nritems == 1 ||
		    leaf_space_used(l, mid, nritems - mid) + data_size >
			BTRFS_LEAF_DATA_SIZE(root)) {
			if (slot >= nritems) {
				split = 0;
			} else {
				mid = slot;
				if (mid != nritems &&
				    leaf_space_used(l, mid, nritems - mid) +
				    data_size > BTRFS_LEAF_DATA_SIZE(root)) {
					if (data_size && !tried_avoid_double)
						goto push_for_double;
					split = 2;
				}
			}
		}
	} else {
		if (leaf_space_used(l, 0, mid) + data_size >
			BTRFS_LEAF_DATA_SIZE(root)) {
			if (!extend && data_size && slot == 0) {
				split = 0;
			} else if ((extend || !data_size) && slot == 0) {
				mid = 1;
			} else {
				mid = slot;
				if (mid != nritems &&
				    leaf_space_used(l, mid, nritems - mid) +
				    data_size > BTRFS_LEAF_DATA_SIZE(root)) {
					if (data_size && !tried_avoid_double)
						goto push_for_double;
					split = 2;
				}
			}
		}
	}

	/* key for the new leaf: the new item's key, or the item at 'mid' */
	if (split == 0)
		btrfs_cpu_key_to_disk(&disk_key, ins_key);
	else
		btrfs_item_key(l, &disk_key, mid);

	right = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
			&disk_key, 0, l->start, 0);
	if (IS_ERR(right))
		return PTR_ERR(right);

	root_add_used(root, root->nodesize);

	/* initialize the new leaf's header */
	memset_extent_buffer(right, 0, 0, sizeof(struct btrfs_header));
	btrfs_set_header_bytenr(right, right->start);
	btrfs_set_header_generation(right, trans->transid);
	btrfs_set_header_backref_rev(right, BTRFS_MIXED_BACKREF_REV);
	btrfs_set_header_owner(right, root->root_key.objectid);
	btrfs_set_header_level(right, 0);
	write_extent_buffer(right, root->fs_info->fsid,
			    btrfs_header_fsid(), BTRFS_FSID_SIZE);

	write_extent_buffer(right, root->fs_info->chunk_tree_uuid,
			    btrfs_header_chunk_tree_uuid(right),
			    BTRFS_UUID_SIZE);

	if (split == 0) {
		/*
		 * nothing is copied: link the empty leaf before or after
		 * the current one and repoint the path at it.
		 */
		if (mid <= slot) {
			btrfs_set_header_nritems(right, 0);
			insert_ptr(trans, root, path, &disk_key, right->start,
				   path->slots[1] + 1, 1);
			btrfs_tree_unlock(path->nodes[0]);
			free_extent_buffer(path->nodes[0]);
			path->nodes[0] = right;
			path->slots[0] = 0;
			path->slots[1] += 1;
		} else {
			btrfs_set_header_nritems(right, 0);
			insert_ptr(trans, root, path, &disk_key, right->start,
				   path->slots[1], 1);
			btrfs_tree_unlock(path->nodes[0]);
			free_extent_buffer(path->nodes[0]);
			path->nodes[0] = right;
			path->slots[0] = 0;
			if (path->slots[1] == 0)
				fixup_low_keys(root, path, &disk_key, 1);
		}
		btrfs_mark_buffer_dirty(right);
		return ret;
	}

	copy_for_split(trans, root, path, l, right, slot, mid, nritems);

	if (split == 2) {
		/* the new item needs its own split; at most one extra pass */
		BUG_ON(num_doubles != 0);
		num_doubles++;
		goto again;
	}

	return 0;

push_for_double:
	push_for_double_split(trans, root, path, data_size);
	tried_avoid_double = 1;
	if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size)
		return 0;
	goto again;
}
4336 | 4326 | ||
/*
 * Make room in the leaf for an in-place item split: drop the path,
 * re-search with keep_locks/search_for_split set, verify the item is
 * unchanged, then split the leaf with 'extend' semantics.
 *
 * Only extent-data and csum items are supported.  Returns 0 on success,
 * -EAGAIN if the item changed underneath us (caller must retry), or
 * another negative errno on failure.
 */
static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
					 struct btrfs_root *root,
					 struct btrfs_path *path, int ins_len)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *fi;
	u64 extent_len = 0;
	u32 item_size;
	int ret;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

	BUG_ON(key.type != BTRFS_EXTENT_DATA_KEY &&
	       key.type != BTRFS_EXTENT_CSUM_KEY);

	/* already enough room, nothing to do */
	if (btrfs_leaf_free_space(root, leaf) >= ins_len)
		return 0;

	/*
	 * remember the item's size (and extent length for data items) so
	 * we can detect any concurrent change after re-searching below.
	 */
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
	if (key.type == BTRFS_EXTENT_DATA_KEY) {
		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		extent_len = btrfs_file_extent_num_bytes(leaf, fi);
	}
	btrfs_release_path(path);

	path->keep_locks = 1;
	path->search_for_split = 1;
	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	path->search_for_split = 0;
	if (ret < 0)
		goto err;

	ret = -EAGAIN;
	leaf = path->nodes[0];
	/* if our item isn't there or got smaller, return now */
	if (ret > 0 || item_size != btrfs_item_size_nr(leaf, path->slots[0]))
		goto err;

	/* the leaf has changed, it now has room. return now */
	if (btrfs_leaf_free_space(root, path->nodes[0]) >= ins_len)
		goto err;

	if (key.type == BTRFS_EXTENT_DATA_KEY) {
		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		if (extent_len != btrfs_file_extent_num_bytes(leaf, fi))
			goto err;
	}

	btrfs_set_path_blocking(path);
	ret = split_leaf(trans, root, &key, path, ins_len, 1);
	if (ret)
		goto err;

	path->keep_locks = 0;
	btrfs_unlock_up_safe(path, 1);
	return 0;
err:
	path->keep_locks = 0;
	return ret;
}
4401 | 4391 | ||
/*
 * split_item - split the item at path->slots[0] into two items, in place.
 *
 * The original item keeps the first @split_offset bytes of its payload and
 * its original key; a new item holding the remaining payload bytes is
 * inserted into the next slot and given @new_key.  The caller must have
 * already guaranteed (via setup_leaf_for_split) that the leaf has room for
 * one more struct btrfs_item header.
 *
 * Returns 0 on success or -ENOMEM if the temporary payload copy cannot be
 * allocated.
 */
static noinline int split_item(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct btrfs_path *path,
			       struct btrfs_key *new_key,
			       unsigned long split_offset)
{
	struct extent_buffer *leaf;
	struct btrfs_item *item;
	struct btrfs_item *new_item;
	int slot;
	char *buf;
	u32 nritems;
	u32 item_size;
	u32 orig_offset;
	struct btrfs_disk_key disk_key;

	leaf = path->nodes[0];
	/* setup_leaf_for_split must have left room for one more item header */
	BUG_ON(btrfs_leaf_free_space(root, leaf) < sizeof(struct btrfs_item));

	/* kmalloc below may sleep, so switch any spinning locks to blocking */
	btrfs_set_path_blocking(path);

	item = btrfs_item_nr(path->slots[0]);
	orig_offset = btrfs_item_offset(leaf, item);
	item_size = btrfs_item_size(leaf, item);

	buf = kmalloc(item_size, GFP_NOFS);
	if (!buf)
		return -ENOMEM;

	/* stash the whole payload; it is rewritten in two pieces below */
	read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf,
			    path->slots[0]), item_size);

	slot = path->slots[0] + 1;
	nritems = btrfs_header_nritems(leaf);
	if (slot != nritems) {
		/* shift the items */
		memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + 1),
				btrfs_item_nr_offset(slot),
				(nritems - slot) * sizeof(struct btrfs_item));
	}

	btrfs_cpu_key_to_disk(&disk_key, new_key);
	btrfs_set_item_key(leaf, &disk_key, slot);

	new_item = btrfs_item_nr(slot);

	/*
	 * The payload itself does not move: the new item takes over the
	 * original data offset for the trailing part, and the original
	 * item's offset is advanced past the bytes now owned by the new
	 * item.  (Leaf payloads grow downward, so "offset" is the start
	 * of the item's data region.)
	 */
	btrfs_set_item_offset(leaf, new_item, orig_offset);
	btrfs_set_item_size(leaf, new_item, item_size - split_offset);

	btrfs_set_item_offset(leaf, item,
			      orig_offset + item_size - split_offset);
	btrfs_set_item_size(leaf, item, split_offset);

	btrfs_set_header_nritems(leaf, nritems + 1);

	/* write the data for the start of the original item */
	write_extent_buffer(leaf, buf,
			    btrfs_item_ptr_offset(leaf, path->slots[0]),
			    split_offset);

	/* write the data for the new item */
	write_extent_buffer(leaf, buf + split_offset,
			    btrfs_item_ptr_offset(leaf, slot),
			    item_size - split_offset);
	btrfs_mark_buffer_dirty(leaf);

	BUG_ON(btrfs_leaf_free_space(root, leaf) < 0);
	kfree(buf);
	return 0;
}
4472 | 4462 | ||
4473 | /* | 4463 | /* |
4474 | * This function splits a single item into two items, | 4464 | * This function splits a single item into two items, |
4475 | * giving 'new_key' to the new item and splitting the | 4465 | * giving 'new_key' to the new item and splitting the |
4476 | * old one at split_offset (from the start of the item). | 4466 | * old one at split_offset (from the start of the item). |
4477 | * | 4467 | * |
4478 | * The path may be released by this operation. After | 4468 | * The path may be released by this operation. After |
4479 | * the split, the path is pointing to the old item. The | 4469 | * the split, the path is pointing to the old item. The |
4480 | * new item is going to be in the same node as the old one. | 4470 | * new item is going to be in the same node as the old one. |
4481 | * | 4471 | * |
4482 | * Note, the item being split must be smaller enough to live alone on | 4472 | * Note, the item being split must be smaller enough to live alone on |
4483 | * a tree block with room for one extra struct btrfs_item | 4473 | * a tree block with room for one extra struct btrfs_item |
4484 | * | 4474 | * |
4485 | * This allows us to split the item in place, keeping a lock on the | 4475 | * This allows us to split the item in place, keeping a lock on the |
4486 | * leaf the entire time. | 4476 | * leaf the entire time. |
4487 | */ | 4477 | */ |
4488 | int btrfs_split_item(struct btrfs_trans_handle *trans, | 4478 | int btrfs_split_item(struct btrfs_trans_handle *trans, |
4489 | struct btrfs_root *root, | 4479 | struct btrfs_root *root, |
4490 | struct btrfs_path *path, | 4480 | struct btrfs_path *path, |
4491 | struct btrfs_key *new_key, | 4481 | struct btrfs_key *new_key, |
4492 | unsigned long split_offset) | 4482 | unsigned long split_offset) |
4493 | { | 4483 | { |
4494 | int ret; | 4484 | int ret; |
4495 | ret = setup_leaf_for_split(trans, root, path, | 4485 | ret = setup_leaf_for_split(trans, root, path, |
4496 | sizeof(struct btrfs_item)); | 4486 | sizeof(struct btrfs_item)); |
4497 | if (ret) | 4487 | if (ret) |
4498 | return ret; | 4488 | return ret; |
4499 | 4489 | ||
4500 | ret = split_item(trans, root, path, new_key, split_offset); | 4490 | ret = split_item(trans, root, path, new_key, split_offset); |
4501 | return ret; | 4491 | return ret; |
4502 | } | 4492 | } |
4503 | 4493 | ||
4504 | /* | 4494 | /* |
4505 | * This function duplicate a item, giving 'new_key' to the new item. | 4495 | * This function duplicate a item, giving 'new_key' to the new item. |
4506 | * It guarantees both items live in the same tree leaf and the new item | 4496 | * It guarantees both items live in the same tree leaf and the new item |
4507 | * is contiguous with the original item. | 4497 | * is contiguous with the original item. |
4508 | * | 4498 | * |
4509 | * This allows us to split file extent in place, keeping a lock on the | 4499 | * This allows us to split file extent in place, keeping a lock on the |
4510 | * leaf the entire time. | 4500 | * leaf the entire time. |
4511 | */ | 4501 | */ |
4512 | int btrfs_duplicate_item(struct btrfs_trans_handle *trans, | 4502 | int btrfs_duplicate_item(struct btrfs_trans_handle *trans, |
4513 | struct btrfs_root *root, | 4503 | struct btrfs_root *root, |
4514 | struct btrfs_path *path, | 4504 | struct btrfs_path *path, |
4515 | struct btrfs_key *new_key) | 4505 | struct btrfs_key *new_key) |
4516 | { | 4506 | { |
4517 | struct extent_buffer *leaf; | 4507 | struct extent_buffer *leaf; |
4518 | int ret; | 4508 | int ret; |
4519 | u32 item_size; | 4509 | u32 item_size; |
4520 | 4510 | ||
4521 | leaf = path->nodes[0]; | 4511 | leaf = path->nodes[0]; |
4522 | item_size = btrfs_item_size_nr(leaf, path->slots[0]); | 4512 | item_size = btrfs_item_size_nr(leaf, path->slots[0]); |
4523 | ret = setup_leaf_for_split(trans, root, path, | 4513 | ret = setup_leaf_for_split(trans, root, path, |
4524 | item_size + sizeof(struct btrfs_item)); | 4514 | item_size + sizeof(struct btrfs_item)); |
4525 | if (ret) | 4515 | if (ret) |
4526 | return ret; | 4516 | return ret; |
4527 | 4517 | ||
4528 | path->slots[0]++; | 4518 | path->slots[0]++; |
4529 | setup_items_for_insert(root, path, new_key, &item_size, | 4519 | setup_items_for_insert(root, path, new_key, &item_size, |
4530 | item_size, item_size + | 4520 | item_size, item_size + |
4531 | sizeof(struct btrfs_item), 1); | 4521 | sizeof(struct btrfs_item), 1); |
4532 | leaf = path->nodes[0]; | 4522 | leaf = path->nodes[0]; |
4533 | memcpy_extent_buffer(leaf, | 4523 | memcpy_extent_buffer(leaf, |
4534 | btrfs_item_ptr_offset(leaf, path->slots[0]), | 4524 | btrfs_item_ptr_offset(leaf, path->slots[0]), |
4535 | btrfs_item_ptr_offset(leaf, path->slots[0] - 1), | 4525 | btrfs_item_ptr_offset(leaf, path->slots[0] - 1), |
4536 | item_size); | 4526 | item_size); |
4537 | return 0; | 4527 | return 0; |
4538 | } | 4528 | } |
4539 | 4529 | ||
/*
 * make the item pointed to by the path smaller.  new_size indicates
 * how small to make it, and from_end tells us if we just chop bytes
 * off the end of the item or if we shift the item to chop bytes off
 * the front.
 *
 * Leaf payloads are packed at the end of the block and grow downward,
 * so shrinking an item means sliding everything below it up by
 * size_diff and bumping the data offsets of all items from 'slot'
 * onward.  When truncating from the front, the item's key offset is
 * also advanced by size_diff so it still describes the first byte of
 * the remaining data.
 */
void btrfs_truncate_item(struct btrfs_root *root, struct btrfs_path *path,
			 u32 new_size, int from_end)
{
	int slot;
	struct extent_buffer *leaf;
	struct btrfs_item *item;
	u32 nritems;
	unsigned int data_end;
	unsigned int old_data_start;
	unsigned int old_size;
	unsigned int size_diff;
	int i;
	struct btrfs_map_token token;

	/* token caches the mapped page across the repeated header updates */
	btrfs_init_map_token(&token);

	leaf = path->nodes[0];
	slot = path->slots[0];

	old_size = btrfs_item_size_nr(leaf, slot);
	if (old_size == new_size)
		return;

	nritems = btrfs_header_nritems(leaf);
	data_end = leaf_data_end(root, leaf);

	old_data_start = btrfs_item_offset_nr(leaf, slot);

	size_diff = old_size - new_size;

	BUG_ON(slot < 0);
	BUG_ON(slot >= nritems);

	/*
	 * item0..itemN ... dataN.offset..dataN.size .. data0.size
	 */
	/* first correct the data pointers */
	for (i = slot; i < nritems; i++) {
		u32 ioff;
		item = btrfs_item_nr(i);

		ioff = btrfs_token_item_offset(leaf, item, &token);
		btrfs_set_token_item_offset(leaf, item,
					    ioff + size_diff, &token);
	}

	/* shift the data */
	if (from_end) {
		/* keep the head of the payload; slide it toward the block end */
		memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
			      data_end + size_diff, btrfs_leaf_data(leaf) +
			      data_end, old_data_start + new_size - data_end);
	} else {
		struct btrfs_disk_key disk_key;
		u64 offset;

		btrfs_item_key(leaf, &disk_key, slot);

		if (btrfs_disk_key_type(&disk_key) == BTRFS_EXTENT_DATA_KEY) {
			unsigned long ptr;
			struct btrfs_file_extent_item *fi;

			/*
			 * For inline extents the fixed file_extent_item
			 * header must stay at the start of the item even
			 * though we are dropping leading data bytes, so
			 * move the header up before the bulk shift below.
			 */
			fi = btrfs_item_ptr(leaf, slot,
					    struct btrfs_file_extent_item);
			fi = (struct btrfs_file_extent_item *)(
			     (unsigned long)fi - size_diff);

			if (btrfs_file_extent_type(leaf, fi) ==
			    BTRFS_FILE_EXTENT_INLINE) {
				ptr = btrfs_item_ptr_offset(leaf, slot);
				memmove_extent_buffer(leaf, ptr,
				      (unsigned long)fi,
				      BTRFS_FILE_EXTENT_INLINE_DATA_START);
			}
		}

		memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
			      data_end + size_diff, btrfs_leaf_data(leaf) +
			      data_end, old_data_start - data_end);

		/* the remaining data starts size_diff bytes further in */
		offset = btrfs_disk_key_offset(&disk_key);
		btrfs_set_disk_key_offset(&disk_key, offset + size_diff);
		btrfs_set_item_key(leaf, &disk_key, slot);
		if (slot == 0)
			fixup_low_keys(root, path, &disk_key, 1);
	}

	item = btrfs_item_nr(slot);
	btrfs_set_item_size(leaf, item, new_size);
	btrfs_mark_buffer_dirty(leaf);

	/* shrinking can never overcommit the leaf; anything else is a bug */
	if (btrfs_leaf_free_space(root, leaf) < 0) {
		btrfs_print_leaf(root, leaf);
		BUG();
	}
}
4641 | 4631 | ||
/*
 * make the item pointed to by the path bigger, data_size is the added size.
 *
 * Leaf payloads are packed at the end of the block and grow downward, so
 * growing this item means sliding every payload between the end of the
 * used data area and this item's data down by data_size, and adjusting
 * the data offsets of the affected items to match.  The caller must have
 * arranged for the leaf to have at least data_size bytes free; running
 * out of room here is a fatal bug.
 */
void btrfs_extend_item(struct btrfs_root *root, struct btrfs_path *path,
		       u32 data_size)
{
	int slot;
	struct extent_buffer *leaf;
	struct btrfs_item *item;
	u32 nritems;
	unsigned int data_end;
	unsigned int old_data;
	unsigned int old_size;
	int i;
	struct btrfs_map_token token;

	/* token caches the mapped page across the repeated header updates */
	btrfs_init_map_token(&token);

	leaf = path->nodes[0];

	nritems = btrfs_header_nritems(leaf);
	data_end = leaf_data_end(root, leaf);

	if (btrfs_leaf_free_space(root, leaf) < data_size) {
		btrfs_print_leaf(root, leaf);
		BUG();
	}
	slot = path->slots[0];
	old_data = btrfs_item_end_nr(leaf, slot);

	BUG_ON(slot < 0);
	if (slot >= nritems) {
		btrfs_print_leaf(root, leaf);
		btrfs_crit(root->fs_info, "slot %d too large, nritems %d",
		       slot, nritems);
		BUG_ON(1);
	}

	/*
	 * item0..itemN ... dataN.offset..dataN.size .. data0.size
	 */
	/* first correct the data pointers */
	for (i = slot; i < nritems; i++) {
		u32 ioff;
		item = btrfs_item_nr(i);

		ioff = btrfs_token_item_offset(leaf, item, &token);
		btrfs_set_token_item_offset(leaf, item,
					    ioff - data_size, &token);
	}

	/* shift the data */
	memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
		      data_end - data_size, btrfs_leaf_data(leaf) +
		      data_end, old_data - data_end);

	data_end = old_data;
	old_size = btrfs_item_size_nr(leaf, slot);
	item = btrfs_item_nr(slot);
	btrfs_set_item_size(leaf, item, old_size + data_size);
	btrfs_mark_buffer_dirty(leaf);

	/* sanity: the free-space check above must keep this non-negative */
	if (btrfs_leaf_free_space(root, leaf) < 0) {
		btrfs_print_leaf(root, leaf);
		BUG();
	}
}
4709 | 4699 | ||
/*
 * this is a helper for btrfs_insert_empty_items, the main goal here is
 * to save stack depth by doing the bulk of the work in a function
 * that doesn't call btrfs_search_slot
 *
 * Inserts 'nr' item headers with keys cpu_key[0..nr-1] and payload sizes
 * data_size[0..nr-1] at path->slots[0] of the leaf the path points to.
 * total_data is the sum of data_size[]; total_size is total_data plus
 * room for nr struct btrfs_item headers.  The caller must already have
 * made sure the leaf has total_size bytes free (fatal bug otherwise) and
 * fills in the item payloads afterwards.
 */
void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path,
			    struct btrfs_key *cpu_key, u32 *data_size,
			    u32 total_data, u32 total_size, int nr)
{
	struct btrfs_item *item;
	int i;
	u32 nritems;
	unsigned int data_end;
	struct btrfs_disk_key disk_key;
	struct extent_buffer *leaf;
	int slot;
	struct btrfs_map_token token;

	/* inserting at slot 0 changes the leaf's lowest key, so fix parents */
	if (path->slots[0] == 0) {
		btrfs_cpu_key_to_disk(&disk_key, cpu_key);
		fixup_low_keys(root, path, &disk_key, 1);
	}
	btrfs_unlock_up_safe(path, 1);

	btrfs_init_map_token(&token);

	leaf = path->nodes[0];
	slot = path->slots[0];

	nritems = btrfs_header_nritems(leaf);
	data_end = leaf_data_end(root, leaf);

	if (btrfs_leaf_free_space(root, leaf) < total_size) {
		btrfs_print_leaf(root, leaf);
		btrfs_crit(root->fs_info, "not enough freespace need %u have %d",
		       total_size, btrfs_leaf_free_space(root, leaf));
		BUG();
	}

	/* inserting in the middle: make a gap in both headers and payloads */
	if (slot != nritems) {
		unsigned int old_data = btrfs_item_end_nr(leaf, slot);

		if (old_data < data_end) {
			btrfs_print_leaf(root, leaf);
			btrfs_crit(root->fs_info, "slot %d old_data %d data_end %d",
			       slot, old_data, data_end);
			BUG_ON(1);
		}
		/*
		 * item0..itemN ... dataN.offset..dataN.size .. data0.size
		 */
		/* first correct the data pointers */
		for (i = slot; i < nritems; i++) {
			u32 ioff;

			item = btrfs_item_nr( i);
			ioff = btrfs_token_item_offset(leaf, item, &token);
			btrfs_set_token_item_offset(leaf, item,
						    ioff - total_data, &token);
		}
		/* shift the items */
		memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
			      btrfs_item_nr_offset(slot),
			      (nritems - slot) * sizeof(struct btrfs_item));

		/* shift the data */
		memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
			      data_end - total_data, btrfs_leaf_data(leaf) +
			      data_end, old_data - data_end);
		data_end = old_data;
	}

	/* setup the item for the new data */
	for (i = 0; i < nr; i++) {
		btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
		btrfs_set_item_key(leaf, &disk_key, slot + i);
		item = btrfs_item_nr(slot + i);
		btrfs_set_token_item_offset(leaf, item,
					    data_end - data_size[i], &token);
		data_end -= data_size[i];
		btrfs_set_token_item_size(leaf, item, data_size[i], &token);
	}

	btrfs_set_header_nritems(leaf, nritems + nr);
	btrfs_mark_buffer_dirty(leaf);

	if (btrfs_leaf_free_space(root, leaf) < 0) {
		btrfs_print_leaf(root, leaf);
		BUG();
	}
}
4801 | 4791 | ||
4802 | /* | 4792 | /* |
4803 | * Given a key and some data, insert items into the tree. | 4793 | * Given a key and some data, insert items into the tree. |
4804 | * This does all the path init required, making room in the tree if needed. | 4794 | * This does all the path init required, making room in the tree if needed. |
4805 | */ | 4795 | */ |
4806 | int btrfs_insert_empty_items(struct btrfs_trans_handle *trans, | 4796 | int btrfs_insert_empty_items(struct btrfs_trans_handle *trans, |
4807 | struct btrfs_root *root, | 4797 | struct btrfs_root *root, |
4808 | struct btrfs_path *path, | 4798 | struct btrfs_path *path, |
4809 | struct btrfs_key *cpu_key, u32 *data_size, | 4799 | struct btrfs_key *cpu_key, u32 *data_size, |
4810 | int nr) | 4800 | int nr) |
4811 | { | 4801 | { |
4812 | int ret = 0; | 4802 | int ret = 0; |
4813 | int slot; | 4803 | int slot; |
4814 | int i; | 4804 | int i; |
4815 | u32 total_size = 0; | 4805 | u32 total_size = 0; |
4816 | u32 total_data = 0; | 4806 | u32 total_data = 0; |
4817 | 4807 | ||
4818 | for (i = 0; i < nr; i++) | 4808 | for (i = 0; i < nr; i++) |
4819 | total_data += data_size[i]; | 4809 | total_data += data_size[i]; |
4820 | 4810 | ||
4821 | total_size = total_data + (nr * sizeof(struct btrfs_item)); | 4811 | total_size = total_data + (nr * sizeof(struct btrfs_item)); |
4822 | ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1); | 4812 | ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1); |
4823 | if (ret == 0) | 4813 | if (ret == 0) |
4824 | return -EEXIST; | 4814 | return -EEXIST; |
4825 | if (ret < 0) | 4815 | if (ret < 0) |
4826 | return ret; | 4816 | return ret; |
4827 | 4817 | ||
4828 | slot = path->slots[0]; | 4818 | slot = path->slots[0]; |
4829 | BUG_ON(slot < 0); | 4819 | BUG_ON(slot < 0); |
4830 | 4820 | ||
4831 | setup_items_for_insert(root, path, cpu_key, data_size, | 4821 | setup_items_for_insert(root, path, cpu_key, data_size, |
4832 | total_data, total_size, nr); | 4822 | total_data, total_size, nr); |
4833 | return 0; | 4823 | return 0; |
4834 | } | 4824 | } |
4835 | 4825 | ||
4836 | /* | 4826 | /* |
4837 | * Given a key and some data, insert an item into the tree. | 4827 | * Given a key and some data, insert an item into the tree. |
4838 | * This does all the path init required, making room in the tree if needed. | 4828 | * This does all the path init required, making room in the tree if needed. |
4839 | */ | 4829 | */ |
4840 | int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root | 4830 | int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root |
4841 | *root, struct btrfs_key *cpu_key, void *data, u32 | 4831 | *root, struct btrfs_key *cpu_key, void *data, u32 |
4842 | data_size) | 4832 | data_size) |
4843 | { | 4833 | { |
4844 | int ret = 0; | 4834 | int ret = 0; |
4845 | struct btrfs_path *path; | 4835 | struct btrfs_path *path; |
4846 | struct extent_buffer *leaf; | 4836 | struct extent_buffer *leaf; |
4847 | unsigned long ptr; | 4837 | unsigned long ptr; |
4848 | 4838 | ||
4849 | path = btrfs_alloc_path(); | 4839 | path = btrfs_alloc_path(); |
4850 | if (!path) | 4840 | if (!path) |
4851 | return -ENOMEM; | 4841 | return -ENOMEM; |
4852 | ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size); | 4842 | ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size); |
4853 | if (!ret) { | 4843 | if (!ret) { |
4854 | leaf = path->nodes[0]; | 4844 | leaf = path->nodes[0]; |
4855 | ptr = btrfs_item_ptr_offset(leaf, path->slots[0]); | 4845 | ptr = btrfs_item_ptr_offset(leaf, path->slots[0]); |
4856 | write_extent_buffer(leaf, data, ptr, data_size); | 4846 | write_extent_buffer(leaf, data, ptr, data_size); |
4857 | btrfs_mark_buffer_dirty(leaf); | 4847 | btrfs_mark_buffer_dirty(leaf); |
4858 | } | 4848 | } |
4859 | btrfs_free_path(path); | 4849 | btrfs_free_path(path); |
4860 | return ret; | 4850 | return ret; |
4861 | } | 4851 | } |
4862 | 4852 | ||
/*
 * delete the pointer from a given node.
 *
 * the tree should have been previously balanced so the deletion does not
 * empty a node.
 *
 * @level is the level of @parent in the path (path->nodes[level]), @slot
 * is the key pointer to remove from it.  The caller must hold the
 * appropriate lock on @parent.
 */
static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
		    int level, int slot)
{
	struct extent_buffer *parent = path->nodes[level];
	u32 nritems;
	int ret;

	nritems = btrfs_header_nritems(parent);
	if (slot != nritems - 1) {
		/*
		 * Not the last slot: shift the later key pointers down by
		 * one.  For internal nodes the move is recorded in the tree
		 * mod log first so log readers can replay it.
		 */
		if (level)
			tree_mod_log_eb_move(root->fs_info, parent, slot,
					     slot + 1, nritems - slot - 1);
		memmove_extent_buffer(parent,
			      btrfs_node_key_ptr_offset(slot),
			      btrfs_node_key_ptr_offset(slot + 1),
			      sizeof(struct btrfs_key_ptr) *
			      (nritems - slot - 1));
	} else if (level) {
		/*
		 * Last slot of an internal node: nothing to move, but the
		 * key removal still has to be logged.
		 */
		ret = tree_mod_log_insert_key(root->fs_info, parent, slot,
					      MOD_LOG_KEY_REMOVE, GFP_NOFS);
		BUG_ON(ret < 0);
	}

	nritems--;
	btrfs_set_header_nritems(parent, nritems);
	if (nritems == 0 && parent == root->node) {
		BUG_ON(btrfs_header_level(root->node) != 1);
		/* just turn the root into a leaf and break */
		btrfs_set_header_level(root->node, 0);
	} else if (slot == 0) {
		/*
		 * We removed the smallest key of this node; propagate the
		 * new first key up to the parents.
		 */
		struct btrfs_disk_key disk_key;

		btrfs_node_key(parent, &disk_key, 0);
		fixup_low_keys(root, path, &disk_key, level + 1);
	}
	btrfs_mark_buffer_dirty(parent);
}
4906 | 4896 | ||
/*
 * a helper function to delete the leaf pointed to by path->slots[1] and
 * path->nodes[1].
 *
 * This deletes the pointer in path->nodes[1] and frees the leaf
 * block extent.  zero is returned if it all worked out, < 0 otherwise.
 *
 * The path must have already been setup for deleting the leaf, including
 * all the proper balancing.  path->nodes[1] must be locked.
 */
static noinline void btrfs_del_leaf(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct extent_buffer *leaf)
{
	/* the leaf must have been COWed in this transaction already */
	WARN_ON(btrfs_header_generation(leaf) != trans->transid);
	del_ptr(root, path, 1, path->slots[1]);

	/*
	 * btrfs_free_extent is expensive, we want to make sure we
	 * aren't holding any locks when we call it
	 */
	btrfs_unlock_up_safe(path, 0);

	root_sub_used(root, leaf->len);

	/*
	 * Take an extra reference so the buffer stays valid across the
	 * free; it is dropped (and the buffer marked stale) right after.
	 */
	extent_buffer_get(leaf);
	btrfs_free_tree_block(trans, root, leaf, 0, 1);
	free_extent_buffer_stale(leaf);
}
/*
 * delete the item at the leaf level in path.  If that empties
 * the leaf, remove it from the tree
 *
 * Deletes @nr consecutive items starting at @slot from the leaf at
 * path->nodes[0].  Returns 0 on success or a negative error from the
 * balancing helpers (errors other than -ENOSPC from push_leaf_*).
 */
int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		    struct btrfs_path *path, int slot, int nr)
{
	struct extent_buffer *leaf;
	struct btrfs_item *item;
	int last_off;
	int dsize = 0;	/* total data bytes occupied by the deleted items */
	int ret = 0;
	int wret;
	int i;
	u32 nritems;
	struct btrfs_map_token token;

	btrfs_init_map_token(&token);

	leaf = path->nodes[0];
	last_off = btrfs_item_offset_nr(leaf, slot + nr - 1);

	for (i = 0; i < nr; i++)
		dsize += btrfs_item_size_nr(leaf, slot + i);

	nritems = btrfs_header_nritems(leaf);

	if (slot + nr != nritems) {
		/*
		 * Items remain after the deleted range: slide their data
		 * up to fill the hole, fix up each surviving item's data
		 * offset, then close the gap in the item header array.
		 */
		int data_end = leaf_data_end(root, leaf);

		memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
			      data_end + dsize,
			      btrfs_leaf_data(leaf) + data_end,
			      last_off - data_end);

		for (i = slot + nr; i < nritems; i++) {
			u32 ioff;

			item = btrfs_item_nr(i);
			ioff = btrfs_token_item_offset(leaf, item, &token);
			btrfs_set_token_item_offset(leaf, item,
						    ioff + dsize, &token);
		}

		memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot),
			      btrfs_item_nr_offset(slot + nr),
			      sizeof(struct btrfs_item) *
			      (nritems - slot - nr));
	}
	btrfs_set_header_nritems(leaf, nritems - nr);
	nritems -= nr;

	/* delete the leaf if we've emptied it */
	if (nritems == 0) {
		if (leaf == root->node) {
			btrfs_set_header_level(leaf, 0);
		} else {
			btrfs_set_path_blocking(path);
			clean_tree_block(trans, root, leaf);
			btrfs_del_leaf(trans, root, path, leaf);
		}
	} else {
		int used = leaf_space_used(leaf, 0, nritems);
		if (slot == 0) {
			/*
			 * The leaf's first key changed; push the new low
			 * key up into the parent nodes.
			 */
			struct btrfs_disk_key disk_key;

			btrfs_item_key(leaf, &disk_key, 0);
			fixup_low_keys(root, path, &disk_key, 1);
		}

		/* delete the leaf if it is mostly empty */
		if (used < BTRFS_LEAF_DATA_SIZE(root) / 3) {
			/* push_leaf_left fixes the path.
			 * make sure the path still points to our leaf
			 * for possible call to del_ptr below
			 */
			slot = path->slots[1];
			extent_buffer_get(leaf);

			btrfs_set_path_blocking(path);
			wret = push_leaf_left(trans, root, path, 1, 1,
					      1, (u32)-1);
			/* -ENOSPC just means the neighbor had no room */
			if (wret < 0 && wret != -ENOSPC)
				ret = wret;

			if (path->nodes[0] == leaf &&
			    btrfs_header_nritems(leaf)) {
				wret = push_leaf_right(trans, root, path, 1,
						       1, 1, 0);
				if (wret < 0 && wret != -ENOSPC)
					ret = wret;
			}

			if (btrfs_header_nritems(leaf) == 0) {
				/* everything was pushed away; drop the leaf */
				path->slots[1] = slot;
				btrfs_del_leaf(trans, root, path, leaf);
				free_extent_buffer(leaf);
				ret = 0;
			} else {
				/* if we're still in the path, make sure
				 * we're dirty.  Otherwise, one of the
				 * push_leaf functions must have already
				 * dirtied this buffer
				 */
				if (path->nodes[0] == leaf)
					btrfs_mark_buffer_dirty(leaf);
				free_extent_buffer(leaf);
			}
		} else {
			btrfs_mark_buffer_dirty(leaf);
		}
	}
	return ret;
}
5051 | 5041 | ||
/*
 * search the tree again to find a leaf with lesser keys
 * returns 0 if it found something or 1 if there are no lesser leaves.
 * returns < 0 on io errors.
 *
 * This may release the path, and so you may lose any locks held at the
 * time you call it.
 */
int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
{
	struct btrfs_key key;
	struct btrfs_disk_key found_key;
	int ret;

	btrfs_item_key_to_cpu(path->nodes[0], &key, 0);

	/*
	 * Compute the largest possible key that is strictly smaller than
	 * the first key of the current leaf, borrowing from type and
	 * objectid as each field underflows.
	 */
	if (key.offset > 0) {
		key.offset--;
	} else if (key.type > 0) {
		key.type--;
		key.offset = (u64)-1;
	} else if (key.objectid > 0) {
		key.objectid--;
		key.type = (u8)-1;
		key.offset = (u64)-1;
	} else {
		/* already at (0, 0, 0): nothing can be smaller */
		return 1;
	}

	btrfs_release_path(path);
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return ret;
	btrfs_item_key(path->nodes[0], &found_key, 0);
	ret = comp_keys(&found_key, &key);
	/*
	 * We might have had an item with the previous key in the tree right
	 * before we released our path. And after we released our path, that
	 * item might have been pushed to the first slot (0) of the leaf we
	 * were holding due to a tree balance. Alternatively, an item with the
	 * previous key can exist as the only element of a leaf (big fat item).
	 * Therefore account for these 2 cases, so that our callers (like
	 * btrfs_previous_item) don't miss an existing item with a key matching
	 * the previous key we computed above.
	 */
	if (ret <= 0)
		return 0;
	return 1;
}
5101 | 5091 | ||
/*
 * A helper function to walk down the tree starting at min_key, and looking
 * for nodes or leaves that are have a minimum transaction id.
 * This is used by the btree defrag code, and tree logging
 *
 * This does not cow, but it does stuff the starting key it finds back
 * into min_key, so you can call btrfs_search_slot with cow=1 on the
 * key and get a writable path.
 *
 * This does lock as it descends, and path->keep_locks should be set
 * to 1 by the caller.
 *
 * This honors path->lowest_level to prevent descent past a given level
 * of the tree.
 *
 * min_trans indicates the oldest transaction that you are interested
 * in walking through.  Any nodes or leaves older than min_trans are
 * skipped over (without reading them).
 *
 * returns zero if something useful was found, < 0 on error and 1 if there
 * was nothing in the tree that matched the search criteria.
 */
int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
			 struct btrfs_path *path,
			 u64 min_trans)
{
	struct extent_buffer *cur;
	struct btrfs_key found_key;
	int slot;
	int sret;
	u32 nritems;
	int level;
	int ret = 1;
	/* forced on for the walk; restored to the caller's value on exit */
	int keep_locks = path->keep_locks;

	path->keep_locks = 1;
again:
	cur = btrfs_read_lock_root_node(root);
	level = btrfs_header_level(cur);
	WARN_ON(path->nodes[level]);
	path->nodes[level] = cur;
	path->locks[level] = BTRFS_READ_LOCK;

	if (btrfs_header_generation(cur) < min_trans) {
		/* the whole tree is older than min_trans */
		ret = 1;
		goto out;
	}
	while (1) {
		nritems = btrfs_header_nritems(cur);
		level = btrfs_header_level(cur);
		sret = bin_search(cur, min_key, level, &slot);

		/* at the lowest level, we're done, setup the path and exit */
		if (level == path->lowest_level) {
			if (slot >= nritems)
				goto find_next_key;
			ret = 0;
			path->slots[level] = slot;
			btrfs_item_key_to_cpu(cur, &found_key, slot);
			goto out;
		}
		if (sret && slot > 0)
			slot--;
		/*
		 * check this node pointer against the min_trans parameters.
		 * If it is too old, skip to the next one.
		 */
		while (slot < nritems) {
			u64 gen;

			gen = btrfs_node_ptr_generation(cur, slot);
			if (gen < min_trans) {
				slot++;
				continue;
			}
			break;
		}
find_next_key:
		/*
		 * we didn't find a candidate key in this node, walk forward
		 * and find another one
		 */
		if (slot >= nritems) {
			path->slots[level] = slot;
			btrfs_set_path_blocking(path);
			sret = btrfs_find_next_key(root, path, min_key, level,
						  min_trans);
			if (sret == 0) {
				/* restart the whole walk from the new key */
				btrfs_release_path(path);
				goto again;
			} else {
				goto out;
			}
		}
		/* save our key for returning back */
		btrfs_node_key_to_cpu(cur, &found_key, slot);
		path->slots[level] = slot;
		if (level == path->lowest_level) {
			ret = 0;
			goto out;
		}
		btrfs_set_path_blocking(path);
		cur = read_node_slot(root, cur, slot);
		BUG_ON(!cur); /* -ENOMEM */

		btrfs_tree_read_lock(cur);

		path->locks[level - 1] = BTRFS_READ_LOCK;
		path->nodes[level - 1] = cur;
		/*
		 * NOTE(review): the child is locked before the upper levels
		 * are dropped; the unlock/clear ordering here interacts with
		 * the fair rwlock behavior — confirm against the
		 * btrfs_clear_path_blocking lockup fix before changing it.
		 */
		unlock_up(path, level, 1, 0, NULL);
		btrfs_clear_path_blocking(path, NULL, 0);
	}
out:
	path->keep_locks = keep_locks;
	if (ret == 0) {
		btrfs_unlock_up_safe(path, path->lowest_level + 1);
		btrfs_set_path_blocking(path);
		memcpy(min_key, &found_key, sizeof(found_key));
	}
	return ret;
}
5223 | 5213 | ||
5224 | static void tree_move_down(struct btrfs_root *root, | 5214 | static void tree_move_down(struct btrfs_root *root, |
5225 | struct btrfs_path *path, | 5215 | struct btrfs_path *path, |
5226 | int *level, int root_level) | 5216 | int *level, int root_level) |
5227 | { | 5217 | { |
5228 | BUG_ON(*level == 0); | 5218 | BUG_ON(*level == 0); |
5229 | path->nodes[*level - 1] = read_node_slot(root, path->nodes[*level], | 5219 | path->nodes[*level - 1] = read_node_slot(root, path->nodes[*level], |
5230 | path->slots[*level]); | 5220 | path->slots[*level]); |
5231 | path->slots[*level - 1] = 0; | 5221 | path->slots[*level - 1] = 0; |
5232 | (*level)--; | 5222 | (*level)--; |
5233 | } | 5223 | } |
5234 | 5224 | ||
5235 | static int tree_move_next_or_upnext(struct btrfs_root *root, | 5225 | static int tree_move_next_or_upnext(struct btrfs_root *root, |
5236 | struct btrfs_path *path, | 5226 | struct btrfs_path *path, |
5237 | int *level, int root_level) | 5227 | int *level, int root_level) |
5238 | { | 5228 | { |
5239 | int ret = 0; | 5229 | int ret = 0; |
5240 | int nritems; | 5230 | int nritems; |
5241 | nritems = btrfs_header_nritems(path->nodes[*level]); | 5231 | nritems = btrfs_header_nritems(path->nodes[*level]); |
5242 | 5232 | ||
5243 | path->slots[*level]++; | 5233 | path->slots[*level]++; |
5244 | 5234 | ||
5245 | while (path->slots[*level] >= nritems) { | 5235 | while (path->slots[*level] >= nritems) { |
5246 | if (*level == root_level) | 5236 | if (*level == root_level) |
5247 | return -1; | 5237 | return -1; |
5248 | 5238 | ||
5249 | /* move upnext */ | 5239 | /* move upnext */ |
5250 | path->slots[*level] = 0; | 5240 | path->slots[*level] = 0; |
5251 | free_extent_buffer(path->nodes[*level]); | 5241 | free_extent_buffer(path->nodes[*level]); |
5252 | path->nodes[*level] = NULL; | 5242 | path->nodes[*level] = NULL; |
5253 | (*level)++; | 5243 | (*level)++; |
5254 | path->slots[*level]++; | 5244 | path->slots[*level]++; |
5255 | 5245 | ||
5256 | nritems = btrfs_header_nritems(path->nodes[*level]); | 5246 | nritems = btrfs_header_nritems(path->nodes[*level]); |
5257 | ret = 1; | 5247 | ret = 1; |
5258 | } | 5248 | } |
5259 | return ret; | 5249 | return ret; |
5260 | } | 5250 | } |
5261 | 5251 | ||
5262 | /* | 5252 | /* |
5263 | * Returns 1 if it had to move up and next. 0 is returned if it moved only next | 5253 | * Returns 1 if it had to move up and next. 0 is returned if it moved only next |
5264 | * or down. | 5254 | * or down. |
5265 | */ | 5255 | */ |
5266 | static int tree_advance(struct btrfs_root *root, | 5256 | static int tree_advance(struct btrfs_root *root, |
5267 | struct btrfs_path *path, | 5257 | struct btrfs_path *path, |
5268 | int *level, int root_level, | 5258 | int *level, int root_level, |
5269 | int allow_down, | 5259 | int allow_down, |
5270 | struct btrfs_key *key) | 5260 | struct btrfs_key *key) |
5271 | { | 5261 | { |
5272 | int ret; | 5262 | int ret; |
5273 | 5263 | ||
5274 | if (*level == 0 || !allow_down) { | 5264 | if (*level == 0 || !allow_down) { |
5275 | ret = tree_move_next_or_upnext(root, path, level, root_level); | 5265 | ret = tree_move_next_or_upnext(root, path, level, root_level); |
5276 | } else { | 5266 | } else { |
5277 | tree_move_down(root, path, level, root_level); | 5267 | tree_move_down(root, path, level, root_level); |
5278 | ret = 0; | 5268 | ret = 0; |
5279 | } | 5269 | } |
5280 | if (ret >= 0) { | 5270 | if (ret >= 0) { |
5281 | if (*level == 0) | 5271 | if (*level == 0) |
5282 | btrfs_item_key_to_cpu(path->nodes[*level], key, | 5272 | btrfs_item_key_to_cpu(path->nodes[*level], key, |
5283 | path->slots[*level]); | 5273 | path->slots[*level]); |
5284 | else | 5274 | else |
5285 | btrfs_node_key_to_cpu(path->nodes[*level], key, | 5275 | btrfs_node_key_to_cpu(path->nodes[*level], key, |
5286 | path->slots[*level]); | 5276 | path->slots[*level]); |
5287 | } | 5277 | } |
5288 | return ret; | 5278 | return ret; |
5289 | } | 5279 | } |
5290 | 5280 | ||
5291 | static int tree_compare_item(struct btrfs_root *left_root, | 5281 | static int tree_compare_item(struct btrfs_root *left_root, |
5292 | struct btrfs_path *left_path, | 5282 | struct btrfs_path *left_path, |
5293 | struct btrfs_path *right_path, | 5283 | struct btrfs_path *right_path, |
5294 | char *tmp_buf) | 5284 | char *tmp_buf) |
5295 | { | 5285 | { |
5296 | int cmp; | 5286 | int cmp; |
5297 | int len1, len2; | 5287 | int len1, len2; |
5298 | unsigned long off1, off2; | 5288 | unsigned long off1, off2; |
5299 | 5289 | ||
5300 | len1 = btrfs_item_size_nr(left_path->nodes[0], left_path->slots[0]); | 5290 | len1 = btrfs_item_size_nr(left_path->nodes[0], left_path->slots[0]); |
5301 | len2 = btrfs_item_size_nr(right_path->nodes[0], right_path->slots[0]); | 5291 | len2 = btrfs_item_size_nr(right_path->nodes[0], right_path->slots[0]); |
5302 | if (len1 != len2) | 5292 | if (len1 != len2) |
5303 | return 1; | 5293 | return 1; |
5304 | 5294 | ||
5305 | off1 = btrfs_item_ptr_offset(left_path->nodes[0], left_path->slots[0]); | 5295 | off1 = btrfs_item_ptr_offset(left_path->nodes[0], left_path->slots[0]); |
5306 | off2 = btrfs_item_ptr_offset(right_path->nodes[0], | 5296 | off2 = btrfs_item_ptr_offset(right_path->nodes[0], |
5307 | right_path->slots[0]); | 5297 | right_path->slots[0]); |
5308 | 5298 | ||
5309 | read_extent_buffer(left_path->nodes[0], tmp_buf, off1, len1); | 5299 | read_extent_buffer(left_path->nodes[0], tmp_buf, off1, len1); |
5310 | 5300 | ||
5311 | cmp = memcmp_extent_buffer(right_path->nodes[0], tmp_buf, off2, len1); | 5301 | cmp = memcmp_extent_buffer(right_path->nodes[0], tmp_buf, off2, len1); |
5312 | if (cmp) | 5302 | if (cmp) |
5313 | return 1; | 5303 | return 1; |
5314 | return 0; | 5304 | return 0; |
5315 | } | 5305 | } |
5316 | 5306 | ||
5317 | #define ADVANCE 1 | 5307 | #define ADVANCE 1 |
5318 | #define ADVANCE_ONLY_NEXT -1 | 5308 | #define ADVANCE_ONLY_NEXT -1 |
5319 | 5309 | ||
5320 | /* | 5310 | /* |
5321 | * This function compares two trees and calls the provided callback for | 5311 | * This function compares two trees and calls the provided callback for |
5322 | * every changed/new/deleted item it finds. | 5312 | * every changed/new/deleted item it finds. |
5323 | * If shared tree blocks are encountered, whole subtrees are skipped, making | 5313 | * If shared tree blocks are encountered, whole subtrees are skipped, making |
5324 | * the compare pretty fast on snapshotted subvolumes. | 5314 | * the compare pretty fast on snapshotted subvolumes. |
5325 | * | 5315 | * |
5326 | * This currently works on commit roots only. As commit roots are read only, | 5316 | * This currently works on commit roots only. As commit roots are read only, |
5327 | * we don't do any locking. The commit roots are protected with transactions. | 5317 | * we don't do any locking. The commit roots are protected with transactions. |
5328 | * Transactions are ended and rejoined when a commit is tried in between. | 5318 | * Transactions are ended and rejoined when a commit is tried in between. |
5329 | * | 5319 | * |
5330 | * This function checks for modifications done to the trees while comparing. | 5320 | * This function checks for modifications done to the trees while comparing. |
5331 | * If it detects a change, it aborts immediately. | 5321 | * If it detects a change, it aborts immediately. |
5332 | */ | 5322 | */ |
5333 | int btrfs_compare_trees(struct btrfs_root *left_root, | 5323 | int btrfs_compare_trees(struct btrfs_root *left_root, |
5334 | struct btrfs_root *right_root, | 5324 | struct btrfs_root *right_root, |
5335 | btrfs_changed_cb_t changed_cb, void *ctx) | 5325 | btrfs_changed_cb_t changed_cb, void *ctx) |
5336 | { | 5326 | { |
5337 | int ret; | 5327 | int ret; |
5338 | int cmp; | 5328 | int cmp; |
5339 | struct btrfs_path *left_path = NULL; | 5329 | struct btrfs_path *left_path = NULL; |
5340 | struct btrfs_path *right_path = NULL; | 5330 | struct btrfs_path *right_path = NULL; |
5341 | struct btrfs_key left_key; | 5331 | struct btrfs_key left_key; |
5342 | struct btrfs_key right_key; | 5332 | struct btrfs_key right_key; |
5343 | char *tmp_buf = NULL; | 5333 | char *tmp_buf = NULL; |
5344 | int left_root_level; | 5334 | int left_root_level; |
5345 | int right_root_level; | 5335 | int right_root_level; |
5346 | int left_level; | 5336 | int left_level; |
5347 | int right_level; | 5337 | int right_level; |
5348 | int left_end_reached; | 5338 | int left_end_reached; |
5349 | int right_end_reached; | 5339 | int right_end_reached; |
5350 | int advance_left; | 5340 | int advance_left; |
5351 | int advance_right; | 5341 | int advance_right; |
5352 | u64 left_blockptr; | 5342 | u64 left_blockptr; |
5353 | u64 right_blockptr; | 5343 | u64 right_blockptr; |
5354 | u64 left_gen; | 5344 | u64 left_gen; |
5355 | u64 right_gen; | 5345 | u64 right_gen; |
5356 | 5346 | ||
5357 | left_path = btrfs_alloc_path(); | 5347 | left_path = btrfs_alloc_path(); |
5358 | if (!left_path) { | 5348 | if (!left_path) { |
5359 | ret = -ENOMEM; | 5349 | ret = -ENOMEM; |
5360 | goto out; | 5350 | goto out; |
5361 | } | 5351 | } |
5362 | right_path = btrfs_alloc_path(); | 5352 | right_path = btrfs_alloc_path(); |
5363 | if (!right_path) { | 5353 | if (!right_path) { |
5364 | ret = -ENOMEM; | 5354 | ret = -ENOMEM; |
5365 | goto out; | 5355 | goto out; |
5366 | } | 5356 | } |
5367 | 5357 | ||
5368 | tmp_buf = kmalloc(left_root->nodesize, GFP_NOFS); | 5358 | tmp_buf = kmalloc(left_root->nodesize, GFP_NOFS); |
5369 | if (!tmp_buf) { | 5359 | if (!tmp_buf) { |
5370 | ret = -ENOMEM; | 5360 | ret = -ENOMEM; |
5371 | goto out; | 5361 | goto out; |
5372 | } | 5362 | } |
5373 | 5363 | ||
5374 | left_path->search_commit_root = 1; | 5364 | left_path->search_commit_root = 1; |
5375 | left_path->skip_locking = 1; | 5365 | left_path->skip_locking = 1; |
5376 | right_path->search_commit_root = 1; | 5366 | right_path->search_commit_root = 1; |
5377 | right_path->skip_locking = 1; | 5367 | right_path->skip_locking = 1; |
5378 | 5368 | ||
5379 | /* | 5369 | /* |
5380 | * Strategy: Go to the first items of both trees. Then do | 5370 | * Strategy: Go to the first items of both trees. Then do |
5381 | * | 5371 | * |
5382 | * If both trees are at level 0 | 5372 | * If both trees are at level 0 |
5383 | * Compare keys of current items | 5373 | * Compare keys of current items |
5384 | * If left < right treat left item as new, advance left tree | 5374 | * If left < right treat left item as new, advance left tree |
5385 | * and repeat | 5375 | * and repeat |
5386 | * If left > right treat right item as deleted, advance right tree | 5376 | * If left > right treat right item as deleted, advance right tree |
5387 | * and repeat | 5377 | * and repeat |
5388 | * If left == right do deep compare of items, treat as changed if | 5378 | * If left == right do deep compare of items, treat as changed if |
5389 | * needed, advance both trees and repeat | 5379 | * needed, advance both trees and repeat |
5390 | * If both trees are at the same level but not at level 0 | 5380 | * If both trees are at the same level but not at level 0 |
5391 | * Compare keys of current nodes/leafs | 5381 | * Compare keys of current nodes/leafs |
5392 | * If left < right advance left tree and repeat | 5382 | * If left < right advance left tree and repeat |
5393 | * If left > right advance right tree and repeat | 5383 | * If left > right advance right tree and repeat |
5394 | * If left == right compare blockptrs of the next nodes/leafs | 5384 | * If left == right compare blockptrs of the next nodes/leafs |
5395 | * If they match advance both trees but stay at the same level | 5385 | * If they match advance both trees but stay at the same level |
5396 | * and repeat | 5386 | * and repeat |
5397 | * If they don't match advance both trees while allowing to go | 5387 | * If they don't match advance both trees while allowing to go |
5398 | * deeper and repeat | 5388 | * deeper and repeat |
5399 | * If tree levels are different | 5389 | * If tree levels are different |
5400 | * Advance the tree that needs it and repeat | 5390 | * Advance the tree that needs it and repeat |
5401 | * | 5391 | * |
5402 | * Advancing a tree means: | 5392 | * Advancing a tree means: |
5403 | * If we are at level 0, try to go to the next slot. If that's not | 5393 | * If we are at level 0, try to go to the next slot. If that's not |
5404 | * possible, go one level up and repeat. Stop when we found a level | 5394 | * possible, go one level up and repeat. Stop when we found a level |
5405 | * where we could go to the next slot. We may at this point be on a | 5395 | * where we could go to the next slot. We may at this point be on a |
5406 | * node or a leaf. | 5396 | * node or a leaf. |
5407 | * | 5397 | * |
5408 | * If we are not at level 0 and not on shared tree blocks, go one | 5398 | * If we are not at level 0 and not on shared tree blocks, go one |
5409 | * level deeper. | 5399 | * level deeper. |
5410 | * | 5400 | * |
5411 | * If we are not at level 0 and on shared tree blocks, go one slot to | 5401 | * If we are not at level 0 and on shared tree blocks, go one slot to |
5412 | * the right if possible or go up and right. | 5402 | * the right if possible or go up and right. |
5413 | */ | 5403 | */ |
5414 | 5404 | ||
5415 | down_read(&left_root->fs_info->commit_root_sem); | 5405 | down_read(&left_root->fs_info->commit_root_sem); |
5416 | left_level = btrfs_header_level(left_root->commit_root); | 5406 | left_level = btrfs_header_level(left_root->commit_root); |
5417 | left_root_level = left_level; | 5407 | left_root_level = left_level; |
5418 | left_path->nodes[left_level] = left_root->commit_root; | 5408 | left_path->nodes[left_level] = left_root->commit_root; |
5419 | extent_buffer_get(left_path->nodes[left_level]); | 5409 | extent_buffer_get(left_path->nodes[left_level]); |
5420 | 5410 | ||
5421 | right_level = btrfs_header_level(right_root->commit_root); | 5411 | right_level = btrfs_header_level(right_root->commit_root); |
5422 | right_root_level = right_level; | 5412 | right_root_level = right_level; |
5423 | right_path->nodes[right_level] = right_root->commit_root; | 5413 | right_path->nodes[right_level] = right_root->commit_root; |
5424 | extent_buffer_get(right_path->nodes[right_level]); | 5414 | extent_buffer_get(right_path->nodes[right_level]); |
5425 | up_read(&left_root->fs_info->commit_root_sem); | 5415 | up_read(&left_root->fs_info->commit_root_sem); |
5426 | 5416 | ||
5427 | if (left_level == 0) | 5417 | if (left_level == 0) |
5428 | btrfs_item_key_to_cpu(left_path->nodes[left_level], | 5418 | btrfs_item_key_to_cpu(left_path->nodes[left_level], |
5429 | &left_key, left_path->slots[left_level]); | 5419 | &left_key, left_path->slots[left_level]); |
5430 | else | 5420 | else |
5431 | btrfs_node_key_to_cpu(left_path->nodes[left_level], | 5421 | btrfs_node_key_to_cpu(left_path->nodes[left_level], |
5432 | &left_key, left_path->slots[left_level]); | 5422 | &left_key, left_path->slots[left_level]); |
5433 | if (right_level == 0) | 5423 | if (right_level == 0) |
5434 | btrfs_item_key_to_cpu(right_path->nodes[right_level], | 5424 | btrfs_item_key_to_cpu(right_path->nodes[right_level], |
5435 | &right_key, right_path->slots[right_level]); | 5425 | &right_key, right_path->slots[right_level]); |
5436 | else | 5426 | else |
5437 | btrfs_node_key_to_cpu(right_path->nodes[right_level], | 5427 | btrfs_node_key_to_cpu(right_path->nodes[right_level], |
5438 | &right_key, right_path->slots[right_level]); | 5428 | &right_key, right_path->slots[right_level]); |
5439 | 5429 | ||
5440 | left_end_reached = right_end_reached = 0; | 5430 | left_end_reached = right_end_reached = 0; |
5441 | advance_left = advance_right = 0; | 5431 | advance_left = advance_right = 0; |
5442 | 5432 | ||
5443 | while (1) { | 5433 | while (1) { |
5444 | if (advance_left && !left_end_reached) { | 5434 | if (advance_left && !left_end_reached) { |
5445 | ret = tree_advance(left_root, left_path, &left_level, | 5435 | ret = tree_advance(left_root, left_path, &left_level, |
5446 | left_root_level, | 5436 | left_root_level, |
5447 | advance_left != ADVANCE_ONLY_NEXT, | 5437 | advance_left != ADVANCE_ONLY_NEXT, |
5448 | &left_key); | 5438 | &left_key); |
5449 | if (ret < 0) | 5439 | if (ret < 0) |
5450 | left_end_reached = ADVANCE; | 5440 | left_end_reached = ADVANCE; |
5451 | advance_left = 0; | 5441 | advance_left = 0; |
5452 | } | 5442 | } |
5453 | if (advance_right && !right_end_reached) { | 5443 | if (advance_right && !right_end_reached) { |
5454 | ret = tree_advance(right_root, right_path, &right_level, | 5444 | ret = tree_advance(right_root, right_path, &right_level, |
5455 | right_root_level, | 5445 | right_root_level, |
5456 | advance_right != ADVANCE_ONLY_NEXT, | 5446 | advance_right != ADVANCE_ONLY_NEXT, |
5457 | &right_key); | 5447 | &right_key); |
5458 | if (ret < 0) | 5448 | if (ret < 0) |
5459 | right_end_reached = ADVANCE; | 5449 | right_end_reached = ADVANCE; |
5460 | advance_right = 0; | 5450 | advance_right = 0; |
5461 | } | 5451 | } |
5462 | 5452 | ||
5463 | if (left_end_reached && right_end_reached) { | 5453 | if (left_end_reached && right_end_reached) { |
5464 | ret = 0; | 5454 | ret = 0; |
5465 | goto out; | 5455 | goto out; |
5466 | } else if (left_end_reached) { | 5456 | } else if (left_end_reached) { |
5467 | if (right_level == 0) { | 5457 | if (right_level == 0) { |
5468 | ret = changed_cb(left_root, right_root, | 5458 | ret = changed_cb(left_root, right_root, |
5469 | left_path, right_path, | 5459 | left_path, right_path, |
5470 | &right_key, | 5460 | &right_key, |
5471 | BTRFS_COMPARE_TREE_DELETED, | 5461 | BTRFS_COMPARE_TREE_DELETED, |
5472 | ctx); | 5462 | ctx); |
5473 | if (ret < 0) | 5463 | if (ret < 0) |
5474 | goto out; | 5464 | goto out; |
5475 | } | 5465 | } |
5476 | advance_right = ADVANCE; | 5466 | advance_right = ADVANCE; |
5477 | continue; | 5467 | continue; |
5478 | } else if (right_end_reached) { | 5468 | } else if (right_end_reached) { |
5479 | if (left_level == 0) { | 5469 | if (left_level == 0) { |
5480 | ret = changed_cb(left_root, right_root, | 5470 | ret = changed_cb(left_root, right_root, |
5481 | left_path, right_path, | 5471 | left_path, right_path, |
5482 | &left_key, | 5472 | &left_key, |
5483 | BTRFS_COMPARE_TREE_NEW, | 5473 | BTRFS_COMPARE_TREE_NEW, |
5484 | ctx); | 5474 | ctx); |
5485 | if (ret < 0) | 5475 | if (ret < 0) |
5486 | goto out; | 5476 | goto out; |
5487 | } | 5477 | } |
5488 | advance_left = ADVANCE; | 5478 | advance_left = ADVANCE; |
5489 | continue; | 5479 | continue; |
5490 | } | 5480 | } |
5491 | 5481 | ||
5492 | if (left_level == 0 && right_level == 0) { | 5482 | if (left_level == 0 && right_level == 0) { |
5493 | cmp = btrfs_comp_cpu_keys(&left_key, &right_key); | 5483 | cmp = btrfs_comp_cpu_keys(&left_key, &right_key); |
5494 | if (cmp < 0) { | 5484 | if (cmp < 0) { |
5495 | ret = changed_cb(left_root, right_root, | 5485 | ret = changed_cb(left_root, right_root, |
5496 | left_path, right_path, | 5486 | left_path, right_path, |
5497 | &left_key, | 5487 | &left_key, |
5498 | BTRFS_COMPARE_TREE_NEW, | 5488 | BTRFS_COMPARE_TREE_NEW, |
5499 | ctx); | 5489 | ctx); |
5500 | if (ret < 0) | 5490 | if (ret < 0) |
5501 | goto out; | 5491 | goto out; |
5502 | advance_left = ADVANCE; | 5492 | advance_left = ADVANCE; |
5503 | } else if (cmp > 0) { | 5493 | } else if (cmp > 0) { |
5504 | ret = changed_cb(left_root, right_root, | 5494 | ret = changed_cb(left_root, right_root, |
5505 | left_path, right_path, | 5495 | left_path, right_path, |
5506 | &right_key, | 5496 | &right_key, |
5507 | BTRFS_COMPARE_TREE_DELETED, | 5497 | BTRFS_COMPARE_TREE_DELETED, |
5508 | ctx); | 5498 | ctx); |
5509 | if (ret < 0) | 5499 | if (ret < 0) |
5510 | goto out; | 5500 | goto out; |
5511 | advance_right = ADVANCE; | 5501 | advance_right = ADVANCE; |
5512 | } else { | 5502 | } else { |
5513 | enum btrfs_compare_tree_result result; | 5503 | enum btrfs_compare_tree_result result; |
5514 | 5504 | ||
5515 | WARN_ON(!extent_buffer_uptodate(left_path->nodes[0])); | 5505 | WARN_ON(!extent_buffer_uptodate(left_path->nodes[0])); |
5516 | ret = tree_compare_item(left_root, left_path, | 5506 | ret = tree_compare_item(left_root, left_path, |
5517 | right_path, tmp_buf); | 5507 | right_path, tmp_buf); |
5518 | if (ret) | 5508 | if (ret) |
5519 | result = BTRFS_COMPARE_TREE_CHANGED; | 5509 | result = BTRFS_COMPARE_TREE_CHANGED; |
5520 | else | 5510 | else |
5521 | result = BTRFS_COMPARE_TREE_SAME; | 5511 | result = BTRFS_COMPARE_TREE_SAME; |
5522 | ret = changed_cb(left_root, right_root, | 5512 | ret = changed_cb(left_root, right_root, |
5523 | left_path, right_path, | 5513 | left_path, right_path, |
5524 | &left_key, result, ctx); | 5514 | &left_key, result, ctx); |
5525 | if (ret < 0) | 5515 | if (ret < 0) |
5526 | goto out; | 5516 | goto out; |
5527 | advance_left = ADVANCE; | 5517 | advance_left = ADVANCE; |
5528 | advance_right = ADVANCE; | 5518 | advance_right = ADVANCE; |
5529 | } | 5519 | } |
5530 | } else if (left_level == right_level) { | 5520 | } else if (left_level == right_level) { |
5531 | cmp = btrfs_comp_cpu_keys(&left_key, &right_key); | 5521 | cmp = btrfs_comp_cpu_keys(&left_key, &right_key); |
5532 | if (cmp < 0) { | 5522 | if (cmp < 0) { |
5533 | advance_left = ADVANCE; | 5523 | advance_left = ADVANCE; |
5534 | } else if (cmp > 0) { | 5524 | } else if (cmp > 0) { |
5535 | advance_right = ADVANCE; | 5525 | advance_right = ADVANCE; |
5536 | } else { | 5526 | } else { |
5537 | left_blockptr = btrfs_node_blockptr( | 5527 | left_blockptr = btrfs_node_blockptr( |
5538 | left_path->nodes[left_level], | 5528 | left_path->nodes[left_level], |
5539 | left_path->slots[left_level]); | 5529 | left_path->slots[left_level]); |
5540 | right_blockptr = btrfs_node_blockptr( | 5530 | right_blockptr = btrfs_node_blockptr( |
5541 | right_path->nodes[right_level], | 5531 | right_path->nodes[right_level], |
5542 | right_path->slots[right_level]); | 5532 | right_path->slots[right_level]); |
5543 | left_gen = btrfs_node_ptr_generation( | 5533 | left_gen = btrfs_node_ptr_generation( |
5544 | left_path->nodes[left_level], | 5534 | left_path->nodes[left_level], |
5545 | left_path->slots[left_level]); | 5535 | left_path->slots[left_level]); |
5546 | right_gen = btrfs_node_ptr_generation( | 5536 | right_gen = btrfs_node_ptr_generation( |
5547 | right_path->nodes[right_level], | 5537 | right_path->nodes[right_level], |
5548 | right_path->slots[right_level]); | 5538 | right_path->slots[right_level]); |
5549 | if (left_blockptr == right_blockptr && | 5539 | if (left_blockptr == right_blockptr && |
5550 | left_gen == right_gen) { | 5540 | left_gen == right_gen) { |
5551 | /* | 5541 | /* |
5552 | * As we're on a shared block, don't | 5542 | * As we're on a shared block, don't |
5553 | * allow to go deeper. | 5543 | * allow to go deeper. |
5554 | */ | 5544 | */ |
5555 | advance_left = ADVANCE_ONLY_NEXT; | 5545 | advance_left = ADVANCE_ONLY_NEXT; |
5556 | advance_right = ADVANCE_ONLY_NEXT; | 5546 | advance_right = ADVANCE_ONLY_NEXT; |
5557 | } else { | 5547 | } else { |
5558 | advance_left = ADVANCE; | 5548 | advance_left = ADVANCE; |
5559 | advance_right = ADVANCE; | 5549 | advance_right = ADVANCE; |
5560 | } | 5550 | } |
5561 | } | 5551 | } |
5562 | } else if (left_level < right_level) { | 5552 | } else if (left_level < right_level) { |
5563 | advance_right = ADVANCE; | 5553 | advance_right = ADVANCE; |
5564 | } else { | 5554 | } else { |
5565 | advance_left = ADVANCE; | 5555 | advance_left = ADVANCE; |
5566 | } | 5556 | } |
5567 | } | 5557 | } |
5568 | 5558 | ||
5569 | out: | 5559 | out: |
5570 | btrfs_free_path(left_path); | 5560 | btrfs_free_path(left_path); |
5571 | btrfs_free_path(right_path); | 5561 | btrfs_free_path(right_path); |
5572 | kfree(tmp_buf); | 5562 | kfree(tmp_buf); |
5573 | return ret; | 5563 | return ret; |
5574 | } | 5564 | } |
5575 | 5565 | ||
5576 | /* | 5566 | /* |
5577 | * this is similar to btrfs_next_leaf, but does not try to preserve | 5567 | * this is similar to btrfs_next_leaf, but does not try to preserve |
5578 | * and fixup the path. It looks for and returns the next key in the | 5568 | * and fixup the path. It looks for and returns the next key in the |
5579 | * tree based on the current path and the min_trans parameters. | 5569 | * tree based on the current path and the min_trans parameters. |
5580 | * | 5570 | * |
5581 | * 0 is returned if another key is found, < 0 if there are any errors | 5571 | * 0 is returned if another key is found, < 0 if there are any errors |
5582 | * and 1 is returned if there are no higher keys in the tree | 5572 | * and 1 is returned if there are no higher keys in the tree |
5583 | * | 5573 | * |
5584 | * path->keep_locks should be set to 1 on the search made before | 5574 | * path->keep_locks should be set to 1 on the search made before |
5585 | * calling this function. | 5575 | * calling this function. |
5586 | */ | 5576 | */ |
5587 | int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path, | 5577 | int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path, |
5588 | struct btrfs_key *key, int level, u64 min_trans) | 5578 | struct btrfs_key *key, int level, u64 min_trans) |
5589 | { | 5579 | { |
5590 | int slot; | 5580 | int slot; |
5591 | struct extent_buffer *c; | 5581 | struct extent_buffer *c; |
5592 | 5582 | ||
5593 | WARN_ON(!path->keep_locks); | 5583 | WARN_ON(!path->keep_locks); |
5594 | while (level < BTRFS_MAX_LEVEL) { | 5584 | while (level < BTRFS_MAX_LEVEL) { |
5595 | if (!path->nodes[level]) | 5585 | if (!path->nodes[level]) |
5596 | return 1; | 5586 | return 1; |
5597 | 5587 | ||
5598 | slot = path->slots[level] + 1; | 5588 | slot = path->slots[level] + 1; |
5599 | c = path->nodes[level]; | 5589 | c = path->nodes[level]; |
5600 | next: | 5590 | next: |
5601 | if (slot >= btrfs_header_nritems(c)) { | 5591 | if (slot >= btrfs_header_nritems(c)) { |
5602 | int ret; | 5592 | int ret; |
5603 | int orig_lowest; | 5593 | int orig_lowest; |
5604 | struct btrfs_key cur_key; | 5594 | struct btrfs_key cur_key; |
5605 | if (level + 1 >= BTRFS_MAX_LEVEL || | 5595 | if (level + 1 >= BTRFS_MAX_LEVEL || |
5606 | !path->nodes[level + 1]) | 5596 | !path->nodes[level + 1]) |
5607 | return 1; | 5597 | return 1; |
5608 | 5598 | ||
5609 | if (path->locks[level + 1]) { | 5599 | if (path->locks[level + 1]) { |
5610 | level++; | 5600 | level++; |
5611 | continue; | 5601 | continue; |
5612 | } | 5602 | } |
5613 | 5603 | ||
5614 | slot = btrfs_header_nritems(c) - 1; | 5604 | slot = btrfs_header_nritems(c) - 1; |
5615 | if (level == 0) | 5605 | if (level == 0) |
5616 | btrfs_item_key_to_cpu(c, &cur_key, slot); | 5606 | btrfs_item_key_to_cpu(c, &cur_key, slot); |
5617 | else | 5607 | else |
5618 | btrfs_node_key_to_cpu(c, &cur_key, slot); | 5608 | btrfs_node_key_to_cpu(c, &cur_key, slot); |
5619 | 5609 | ||
5620 | orig_lowest = path->lowest_level; | 5610 | orig_lowest = path->lowest_level; |
5621 | btrfs_release_path(path); | 5611 | btrfs_release_path(path); |
5622 | path->lowest_level = level; | 5612 | path->lowest_level = level; |
5623 | ret = btrfs_search_slot(NULL, root, &cur_key, path, | 5613 | ret = btrfs_search_slot(NULL, root, &cur_key, path, |
5624 | 0, 0); | 5614 | 0, 0); |
5625 | path->lowest_level = orig_lowest; | 5615 | path->lowest_level = orig_lowest; |
5626 | if (ret < 0) | 5616 | if (ret < 0) |
5627 | return ret; | 5617 | return ret; |
5628 | 5618 | ||
5629 | c = path->nodes[level]; | 5619 | c = path->nodes[level]; |
5630 | slot = path->slots[level]; | 5620 | slot = path->slots[level]; |
5631 | if (ret == 0) | 5621 | if (ret == 0) |
5632 | slot++; | 5622 | slot++; |
5633 | goto next; | 5623 | goto next; |
5634 | } | 5624 | } |
5635 | 5625 | ||
5636 | if (level == 0) | 5626 | if (level == 0) |
5637 | btrfs_item_key_to_cpu(c, key, slot); | 5627 | btrfs_item_key_to_cpu(c, key, slot); |
5638 | else { | 5628 | else { |
5639 | u64 gen = btrfs_node_ptr_generation(c, slot); | 5629 | u64 gen = btrfs_node_ptr_generation(c, slot); |
5640 | 5630 | ||
5641 | if (gen < min_trans) { | 5631 | if (gen < min_trans) { |
5642 | slot++; | 5632 | slot++; |
5643 | goto next; | 5633 | goto next; |
5644 | } | 5634 | } |
5645 | btrfs_node_key_to_cpu(c, key, slot); | 5635 | btrfs_node_key_to_cpu(c, key, slot); |
5646 | } | 5636 | } |
5647 | return 0; | 5637 | return 0; |
5648 | } | 5638 | } |
5649 | return 1; | 5639 | return 1; |
5650 | } | 5640 | } |
5651 | 5641 | ||
/*
 * search the tree again to find a leaf with greater keys
 * returns 0 if it found something or 1 if there are no greater leaves.
 * returns < 0 on io errors.
 */
int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
{
	/* time_seq == 0 means "current tree", not an old snapshot view */
	return btrfs_next_old_leaf(root, path, 0);
}
5661 | 5651 | ||
5662 | int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path, | 5652 | int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path, |
5663 | u64 time_seq) | 5653 | u64 time_seq) |
5664 | { | 5654 | { |
5665 | int slot; | 5655 | int slot; |
5666 | int level; | 5656 | int level; |
5667 | struct extent_buffer *c; | 5657 | struct extent_buffer *c; |
5668 | struct extent_buffer *next; | 5658 | struct extent_buffer *next; |
5669 | struct btrfs_key key; | 5659 | struct btrfs_key key; |
5670 | u32 nritems; | 5660 | u32 nritems; |
5671 | int ret; | 5661 | int ret; |
5672 | int old_spinning = path->leave_spinning; | 5662 | int old_spinning = path->leave_spinning; |
5673 | int next_rw_lock = 0; | 5663 | int next_rw_lock = 0; |
5674 | 5664 | ||
5675 | nritems = btrfs_header_nritems(path->nodes[0]); | 5665 | nritems = btrfs_header_nritems(path->nodes[0]); |
5676 | if (nritems == 0) | 5666 | if (nritems == 0) |
5677 | return 1; | 5667 | return 1; |
5678 | 5668 | ||
5679 | btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1); | 5669 | btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1); |
5680 | again: | 5670 | again: |
5681 | level = 1; | 5671 | level = 1; |
5682 | next = NULL; | 5672 | next = NULL; |
5683 | next_rw_lock = 0; | 5673 | next_rw_lock = 0; |
5684 | btrfs_release_path(path); | 5674 | btrfs_release_path(path); |
5685 | 5675 | ||
5686 | path->keep_locks = 1; | 5676 | path->keep_locks = 1; |
5687 | path->leave_spinning = 1; | 5677 | path->leave_spinning = 1; |
5688 | 5678 | ||
5689 | if (time_seq) | 5679 | if (time_seq) |
5690 | ret = btrfs_search_old_slot(root, &key, path, time_seq); | 5680 | ret = btrfs_search_old_slot(root, &key, path, time_seq); |
5691 | else | 5681 | else |
5692 | ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); | 5682 | ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); |
5693 | path->keep_locks = 0; | 5683 | path->keep_locks = 0; |
5694 | 5684 | ||
5695 | if (ret < 0) | 5685 | if (ret < 0) |
5696 | return ret; | 5686 | return ret; |
5697 | 5687 | ||
5698 | nritems = btrfs_header_nritems(path->nodes[0]); | 5688 | nritems = btrfs_header_nritems(path->nodes[0]); |
5699 | /* | 5689 | /* |
5700 | * by releasing the path above we dropped all our locks. A balance | 5690 | * by releasing the path above we dropped all our locks. A balance |
5701 | * could have added more items next to the key that used to be | 5691 | * could have added more items next to the key that used to be |
5702 | * at the very end of the block. So, check again here and | 5692 | * at the very end of the block. So, check again here and |
5703 | * advance the path if there are now more items available. | 5693 | * advance the path if there are now more items available. |
5704 | */ | 5694 | */ |
5705 | if (nritems > 0 && path->slots[0] < nritems - 1) { | 5695 | if (nritems > 0 && path->slots[0] < nritems - 1) { |
5706 | if (ret == 0) | 5696 | if (ret == 0) |
5707 | path->slots[0]++; | 5697 | path->slots[0]++; |
5708 | ret = 0; | 5698 | ret = 0; |
5709 | goto done; | 5699 | goto done; |
5710 | } | 5700 | } |
5711 | /* | 5701 | /* |
5712 | * So the above check misses one case: | 5702 | * So the above check misses one case: |
5713 | * - after releasing the path above, someone has removed the item that | 5703 | * - after releasing the path above, someone has removed the item that |
5714 | * used to be at the very end of the block, and balance between leafs | 5704 | * used to be at the very end of the block, and balance between leafs |
5715 | * gets another one with bigger key.offset to replace it. | 5705 | * gets another one with bigger key.offset to replace it. |
5716 | * | 5706 | * |
5717 | * This one should be returned as well, or we can get leaf corruption | 5707 | * This one should be returned as well, or we can get leaf corruption |
5718 | * later(esp. in __btrfs_drop_extents()). | 5708 | * later(esp. in __btrfs_drop_extents()). |
5719 | * | 5709 | * |
5720 | * And a bit more explanation about this check, | 5710 | * And a bit more explanation about this check, |
5721 | * with ret > 0, the key isn't found, the path points to the slot | 5711 | * with ret > 0, the key isn't found, the path points to the slot |
5722 | * where it should be inserted, so the path->slots[0] item must be the | 5712 | * where it should be inserted, so the path->slots[0] item must be the |
5723 | * bigger one. | 5713 | * bigger one. |
5724 | */ | 5714 | */ |
5725 | if (nritems > 0 && ret > 0 && path->slots[0] == nritems - 1) { | 5715 | if (nritems > 0 && ret > 0 && path->slots[0] == nritems - 1) { |
5726 | ret = 0; | 5716 | ret = 0; |
5727 | goto done; | 5717 | goto done; |
5728 | } | 5718 | } |
5729 | 5719 | ||
5730 | while (level < BTRFS_MAX_LEVEL) { | 5720 | while (level < BTRFS_MAX_LEVEL) { |
5731 | if (!path->nodes[level]) { | 5721 | if (!path->nodes[level]) { |
5732 | ret = 1; | 5722 | ret = 1; |
5733 | goto done; | 5723 | goto done; |
5734 | } | 5724 | } |
5735 | 5725 | ||
5736 | slot = path->slots[level] + 1; | 5726 | slot = path->slots[level] + 1; |
5737 | c = path->nodes[level]; | 5727 | c = path->nodes[level]; |
5738 | if (slot >= btrfs_header_nritems(c)) { | 5728 | if (slot >= btrfs_header_nritems(c)) { |
5739 | level++; | 5729 | level++; |
5740 | if (level == BTRFS_MAX_LEVEL) { | 5730 | if (level == BTRFS_MAX_LEVEL) { |
5741 | ret = 1; | 5731 | ret = 1; |
5742 | goto done; | 5732 | goto done; |
5743 | } | 5733 | } |
5744 | continue; | 5734 | continue; |
5745 | } | 5735 | } |
5746 | 5736 | ||
5747 | if (next) { | 5737 | if (next) { |
5748 | btrfs_tree_unlock_rw(next, next_rw_lock); | 5738 | btrfs_tree_unlock_rw(next, next_rw_lock); |
5749 | free_extent_buffer(next); | 5739 | free_extent_buffer(next); |
5750 | } | 5740 | } |
5751 | 5741 | ||
5752 | next = c; | 5742 | next = c; |
5753 | next_rw_lock = path->locks[level]; | 5743 | next_rw_lock = path->locks[level]; |
5754 | ret = read_block_for_search(NULL, root, path, &next, level, | 5744 | ret = read_block_for_search(NULL, root, path, &next, level, |
5755 | slot, &key, 0); | 5745 | slot, &key, 0); |
5756 | if (ret == -EAGAIN) | 5746 | if (ret == -EAGAIN) |
5757 | goto again; | 5747 | goto again; |
5758 | 5748 | ||
5759 | if (ret < 0) { | 5749 | if (ret < 0) { |
5760 | btrfs_release_path(path); | 5750 | btrfs_release_path(path); |
5761 | goto done; | 5751 | goto done; |
5762 | } | 5752 | } |
5763 | 5753 | ||
5764 | if (!path->skip_locking) { | 5754 | if (!path->skip_locking) { |
5765 | ret = btrfs_try_tree_read_lock(next); | 5755 | ret = btrfs_try_tree_read_lock(next); |
5766 | if (!ret && time_seq) { | 5756 | if (!ret && time_seq) { |
5767 | /* | 5757 | /* |
5768 | * If we don't get the lock, we may be racing | 5758 | * If we don't get the lock, we may be racing |
5769 | * with push_leaf_left, holding that lock while | 5759 | * with push_leaf_left, holding that lock while |
5770 | * itself waiting for the leaf we've currently | 5760 | * itself waiting for the leaf we've currently |
5771 | * locked. To solve this situation, we give up | 5761 | * locked. To solve this situation, we give up |
5772 | * on our lock and cycle. | 5762 | * on our lock and cycle. |
5773 | */ | 5763 | */ |
5774 | free_extent_buffer(next); | 5764 | free_extent_buffer(next); |
5775 | btrfs_release_path(path); | 5765 | btrfs_release_path(path); |
5776 | cond_resched(); | 5766 | cond_resched(); |
5777 | goto again; | 5767 | goto again; |
5778 | } | 5768 | } |
5779 | if (!ret) { | 5769 | if (!ret) { |
5780 | btrfs_set_path_blocking(path); | 5770 | btrfs_set_path_blocking(path); |
5781 | btrfs_tree_read_lock(next); | 5771 | btrfs_tree_read_lock(next); |
5782 | btrfs_clear_path_blocking(path, next, | 5772 | btrfs_clear_path_blocking(path, next, |
5783 | BTRFS_READ_LOCK); | 5773 | BTRFS_READ_LOCK); |
5784 | } | 5774 | } |
5785 | next_rw_lock = BTRFS_READ_LOCK; | 5775 | next_rw_lock = BTRFS_READ_LOCK; |
5786 | } | 5776 | } |
5787 | break; | 5777 | break; |
5788 | } | 5778 | } |
5789 | path->slots[level] = slot; | 5779 | path->slots[level] = slot; |
5790 | while (1) { | 5780 | while (1) { |
5791 | level--; | 5781 | level--; |
5792 | c = path->nodes[level]; | 5782 | c = path->nodes[level]; |
5793 | if (path->locks[level]) | 5783 | if (path->locks[level]) |
5794 | btrfs_tree_unlock_rw(c, path->locks[level]); | 5784 | btrfs_tree_unlock_rw(c, path->locks[level]); |
5795 | 5785 | ||
5796 | free_extent_buffer(c); | 5786 | free_extent_buffer(c); |
5797 | path->nodes[level] = next; | 5787 | path->nodes[level] = next; |
5798 | path->slots[level] = 0; | 5788 | path->slots[level] = 0; |
5799 | if (!path->skip_locking) | 5789 | if (!path->skip_locking) |
5800 | path->locks[level] = next_rw_lock; | 5790 | path->locks[level] = next_rw_lock; |
5801 | if (!level) | 5791 | if (!level) |
5802 | break; | 5792 | break; |
5803 | 5793 | ||
5804 | ret = read_block_for_search(NULL, root, path, &next, level, | 5794 | ret = read_block_for_search(NULL, root, path, &next, level, |
5805 | 0, &key, 0); | 5795 | 0, &key, 0); |
5806 | if (ret == -EAGAIN) | 5796 | if (ret == -EAGAIN) |
5807 | goto again; | 5797 | goto again; |
5808 | 5798 | ||
5809 | if (ret < 0) { | 5799 | if (ret < 0) { |
5810 | btrfs_release_path(path); | 5800 | btrfs_release_path(path); |
5811 | goto done; | 5801 | goto done; |
5812 | } | 5802 | } |
5813 | 5803 | ||
5814 | if (!path->skip_locking) { | 5804 | if (!path->skip_locking) { |
5815 | ret = btrfs_try_tree_read_lock(next); | 5805 | ret = btrfs_try_tree_read_lock(next); |
5816 | if (!ret) { | 5806 | if (!ret) { |
5817 | btrfs_set_path_blocking(path); | 5807 | btrfs_set_path_blocking(path); |
5818 | btrfs_tree_read_lock(next); | 5808 | btrfs_tree_read_lock(next); |
5819 | btrfs_clear_path_blocking(path, next, | 5809 | btrfs_clear_path_blocking(path, next, |
5820 | BTRFS_READ_LOCK); | 5810 | BTRFS_READ_LOCK); |
5821 | } | 5811 | } |
5822 | next_rw_lock = BTRFS_READ_LOCK; | 5812 | next_rw_lock = BTRFS_READ_LOCK; |
5823 | } | 5813 | } |
5824 | } | 5814 | } |
5825 | ret = 0; | 5815 | ret = 0; |
5826 | done: | 5816 | done: |
5827 | unlock_up(path, 0, 1, 0, NULL); | 5817 | unlock_up(path, 0, 1, 0, NULL); |
5828 | path->leave_spinning = old_spinning; | 5818 | path->leave_spinning = old_spinning; |
5829 | if (!old_spinning) | 5819 | if (!old_spinning) |
5830 | btrfs_set_path_blocking(path); | 5820 | btrfs_set_path_blocking(path); |
5831 | 5821 | ||
5832 | return ret; | 5822 | return ret; |
5833 | } | 5823 | } |
5834 | 5824 | ||
5835 | /* | 5825 | /* |
5836 | * this uses btrfs_prev_leaf to walk backwards in the tree, and keeps | 5826 | * this uses btrfs_prev_leaf to walk backwards in the tree, and keeps |
5837 | * searching until it gets past min_objectid or finds an item of 'type' | 5827 | * searching until it gets past min_objectid or finds an item of 'type' |
5838 | * | 5828 | * |
5839 | * returns 0 if something is found, 1 if nothing was found and < 0 on error | 5829 | * returns 0 if something is found, 1 if nothing was found and < 0 on error |
5840 | */ | 5830 | */ |
5841 | int btrfs_previous_item(struct btrfs_root *root, | 5831 | int btrfs_previous_item(struct btrfs_root *root, |
5842 | struct btrfs_path *path, u64 min_objectid, | 5832 | struct btrfs_path *path, u64 min_objectid, |
5843 | int type) | 5833 | int type) |
5844 | { | 5834 | { |
5845 | struct btrfs_key found_key; | 5835 | struct btrfs_key found_key; |
5846 | struct extent_buffer *leaf; | 5836 | struct extent_buffer *leaf; |
5847 | u32 nritems; | 5837 | u32 nritems; |
5848 | int ret; | 5838 | int ret; |
5849 | 5839 | ||
5850 | while (1) { | 5840 | while (1) { |
5851 | if (path->slots[0] == 0) { | 5841 | if (path->slots[0] == 0) { |
5852 | btrfs_set_path_blocking(path); | 5842 | btrfs_set_path_blocking(path); |
5853 | ret = btrfs_prev_leaf(root, path); | 5843 | ret = btrfs_prev_leaf(root, path); |
5854 | if (ret != 0) | 5844 | if (ret != 0) |
5855 | return ret; | 5845 | return ret; |
5856 | } else { | 5846 | } else { |
5857 | path->slots[0]--; | 5847 | path->slots[0]--; |
5858 | } | 5848 | } |
5859 | leaf = path->nodes[0]; | 5849 | leaf = path->nodes[0]; |
5860 | nritems = btrfs_header_nritems(leaf); | 5850 | nritems = btrfs_header_nritems(leaf); |
5861 | if (nritems == 0) | 5851 | if (nritems == 0) |
5862 | return 1; | 5852 | return 1; |
5863 | if (path->slots[0] == nritems) | 5853 | if (path->slots[0] == nritems) |
5864 | path->slots[0]--; | 5854 | path->slots[0]--; |
5865 | 5855 | ||
5866 | btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); | 5856 | btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); |
5867 | if (found_key.objectid < min_objectid) | 5857 | if (found_key.objectid < min_objectid) |
5868 | break; | 5858 | break; |
5869 | if (found_key.type == type) | 5859 | if (found_key.type == type) |
5870 | return 0; | 5860 | return 0; |
5871 | if (found_key.objectid == min_objectid && | 5861 | if (found_key.objectid == min_objectid && |
5872 | found_key.type < type) | 5862 | found_key.type < type) |
5873 | break; | 5863 | break; |
5874 | } | 5864 | } |
5875 | return 1; | 5865 | return 1; |
5876 | } | 5866 | } |
5877 | 5867 | ||
5878 | /* | 5868 | /* |
5879 | * search in extent tree to find a previous Metadata/Data extent item with | 5869 | * search in extent tree to find a previous Metadata/Data extent item with |
5880 | * min objecitd. | 5870 | * min objecitd. |
5881 | * | 5871 | * |
5882 | * returns 0 if something is found, 1 if nothing was found and < 0 on error | 5872 | * returns 0 if something is found, 1 if nothing was found and < 0 on error |
5883 | */ | 5873 | */ |
5884 | int btrfs_previous_extent_item(struct btrfs_root *root, | 5874 | int btrfs_previous_extent_item(struct btrfs_root *root, |
5885 | struct btrfs_path *path, u64 min_objectid) | 5875 | struct btrfs_path *path, u64 min_objectid) |
5886 | { | 5876 | { |
5887 | struct btrfs_key found_key; | 5877 | struct btrfs_key found_key; |
5888 | struct extent_buffer *leaf; | 5878 | struct extent_buffer *leaf; |
5889 | u32 nritems; | 5879 | u32 nritems; |
5890 | int ret; | 5880 | int ret; |
5891 | 5881 | ||
5892 | while (1) { | 5882 | while (1) { |
5893 | if (path->slots[0] == 0) { | 5883 | if (path->slots[0] == 0) { |
5894 | btrfs_set_path_blocking(path); | 5884 | btrfs_set_path_blocking(path); |
5895 | ret = btrfs_prev_leaf(root, path); | 5885 | ret = btrfs_prev_leaf(root, path); |
5896 | if (ret != 0) | 5886 | if (ret != 0) |
5897 | return ret; | 5887 | return ret; |
5898 | } else { | 5888 | } else { |
5899 | path->slots[0]--; | 5889 | path->slots[0]--; |
5900 | } | 5890 | } |
5901 | leaf = path->nodes[0]; | 5891 | leaf = path->nodes[0]; |
5902 | nritems = btrfs_header_nritems(leaf); | 5892 | nritems = btrfs_header_nritems(leaf); |
5903 | if (nritems == 0) | 5893 | if (nritems == 0) |
5904 | return 1; | 5894 | return 1; |
5905 | if (path->slots[0] == nritems) | 5895 | if (path->slots[0] == nritems) |
5906 | path->slots[0]--; | 5896 | path->slots[0]--; |
5907 | 5897 | ||
5908 | btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); | 5898 | btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); |
5909 | if (found_key.objectid < min_objectid) | 5899 | if (found_key.objectid < min_objectid) |
5910 | break; | 5900 | break; |
5911 | if (found_key.type == BTRFS_EXTENT_ITEM_KEY || | 5901 | if (found_key.type == BTRFS_EXTENT_ITEM_KEY || |
5912 | found_key.type == BTRFS_METADATA_ITEM_KEY) | 5902 | found_key.type == BTRFS_METADATA_ITEM_KEY) |
5913 | return 0; | 5903 | return 0; |
5914 | if (found_key.objectid == min_objectid && | 5904 | if (found_key.objectid == min_objectid && |
5915 | found_key.type < BTRFS_EXTENT_ITEM_KEY) | 5905 | found_key.type < BTRFS_EXTENT_ITEM_KEY) |
5916 | break; | 5906 | break; |
5917 | } | 5907 | } |
5918 | return 1; | 5908 | return 1; |
5919 | } | 5909 | } |
5920 | 5910 |
fs/btrfs/locking.c
1 | /* | 1 | /* |
2 | * Copyright (C) 2008 Oracle. All rights reserved. | 2 | * Copyright (C) 2008 Oracle. All rights reserved. |
3 | * | 3 | * |
4 | * This program is free software; you can redistribute it and/or | 4 | * This program is free software; you can redistribute it and/or |
5 | * modify it under the terms of the GNU General Public | 5 | * modify it under the terms of the GNU General Public |
6 | * License v2 as published by the Free Software Foundation. | 6 | * License v2 as published by the Free Software Foundation. |
7 | * | 7 | * |
8 | * This program is distributed in the hope that it will be useful, | 8 | * This program is distributed in the hope that it will be useful, |
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | 10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
11 | * General Public License for more details. | 11 | * General Public License for more details. |
12 | * | 12 | * |
13 | * You should have received a copy of the GNU General Public | 13 | * You should have received a copy of the GNU General Public |
14 | * License along with this program; if not, write to the | 14 | * License along with this program; if not, write to the |
15 | * Free Software Foundation, Inc., 59 Temple Place - Suite 330, | 15 | * Free Software Foundation, Inc., 59 Temple Place - Suite 330, |
16 | * Boston, MA 021110-1307, USA. | 16 | * Boston, MA 021110-1307, USA. |
17 | */ | 17 | */ |
18 | #include <linux/sched.h> | 18 | #include <linux/sched.h> |
19 | #include <linux/pagemap.h> | 19 | #include <linux/pagemap.h> |
20 | #include <linux/spinlock.h> | 20 | #include <linux/spinlock.h> |
21 | #include <linux/page-flags.h> | 21 | #include <linux/page-flags.h> |
22 | #include <asm/bug.h> | 22 | #include <asm/bug.h> |
23 | #include "ctree.h" | 23 | #include "ctree.h" |
24 | #include "extent_io.h" | 24 | #include "extent_io.h" |
25 | #include "locking.h" | 25 | #include "locking.h" |
26 | 26 | ||
27 | static void btrfs_assert_tree_read_locked(struct extent_buffer *eb); | 27 | static void btrfs_assert_tree_read_locked(struct extent_buffer *eb); |
28 | 28 | ||
/*
 * if we currently have a spinning reader or writer lock
 * (indicated by the rw flag, BTRFS_READ_LOCK or BTRFS_WRITE_LOCK)
 * this will bump the count of blocking holders and drop the
 * spinlock, so other threads can proceed while we block.
 */
void btrfs_set_lock_blocking_rw(struct extent_buffer *eb, int rw)
{
	/*
	 * no lock is required. The lock owner may change if
	 * we have a read lock, but it won't change to or away
	 * from us. If we have the write lock, we are the owner
	 * and it'll never change.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner)
		return;
	if (rw == BTRFS_WRITE_LOCK) {
		/* Only convert if we have not already gone blocking. */
		if (atomic_read(&eb->blocking_writers) == 0) {
			WARN_ON(atomic_read(&eb->spinning_writers) != 1);
			atomic_dec(&eb->spinning_writers);
			btrfs_assert_tree_locked(eb);
			/* Mark blocking before releasing the rwlock. */
			atomic_inc(&eb->blocking_writers);
			write_unlock(&eb->lock);
		}
	} else if (rw == BTRFS_READ_LOCK) {
		btrfs_assert_tree_read_locked(eb);
		atomic_inc(&eb->blocking_readers);
		WARN_ON(atomic_read(&eb->spinning_readers) == 0);
		atomic_dec(&eb->spinning_readers);
		read_unlock(&eb->lock);
	}
	return;
}
61 | 61 | ||
/*
 * if we currently have a blocking lock, take the spinlock
 * and drop our blocking count
 */
void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw)
{
	/*
	 * no lock is required. The lock owner may change if
	 * we have a read lock, but it won't change to or away
	 * from us. If we have the write lock, we are the owner
	 * and it'll never change.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner)
		return;

	if (rw == BTRFS_WRITE_LOCK_BLOCKING) {
		/* We must be the single blocking writer: us. */
		BUG_ON(atomic_read(&eb->blocking_writers) != 1);
		write_lock(&eb->lock);
		WARN_ON(atomic_read(&eb->spinning_writers));
		atomic_inc(&eb->spinning_writers);
		/* Wake anyone waiting for blocking_writers to reach zero. */
		if (atomic_dec_and_test(&eb->blocking_writers) &&
		    waitqueue_active(&eb->write_lock_wq))
			wake_up(&eb->write_lock_wq);
	} else if (rw == BTRFS_READ_LOCK_BLOCKING) {
		BUG_ON(atomic_read(&eb->blocking_readers) == 0);
		read_lock(&eb->lock);
		atomic_inc(&eb->spinning_readers);
		/* Wake writers waiting for all blocking readers to drain. */
		if (atomic_dec_and_test(&eb->blocking_readers) &&
		    waitqueue_active(&eb->read_lock_wq))
			wake_up(&eb->read_lock_wq);
	}
	return;
}
95 | 95 | ||
/*
 * take a spinning read lock. This will wait for any blocking
 * writers
 */
void btrfs_tree_read_lock(struct extent_buffer *eb)
{
again:
	/*
	 * If we already own the write lock, we may only recurse into a
	 * read lock after the write lock went blocking.
	 */
	BUG_ON(!atomic_read(&eb->blocking_writers) &&
	       current->pid == eb->lock_owner);

	read_lock(&eb->lock);
	if (atomic_read(&eb->blocking_writers) &&
	    current->pid == eb->lock_owner) {
		/*
		 * This extent is already write-locked by our thread. We allow
		 * an additional read lock to be added because it's for the same
		 * thread. btrfs_find_all_roots() depends on this as it may be
		 * called on a partly (write-)locked tree.
		 */
		BUG_ON(eb->lock_nested);
		eb->lock_nested = 1;
		read_unlock(&eb->lock);
		return;
	}
	if (atomic_read(&eb->blocking_writers)) {
		/* Don't hold the rwlock while sleeping on the writer. */
		read_unlock(&eb->lock);
		wait_event(eb->write_lock_wq,
			   atomic_read(&eb->blocking_writers) == 0);
		goto again;
	}
	atomic_inc(&eb->read_locks);
	atomic_inc(&eb->spinning_readers);
}
129 | 129 | ||
/*
 * take a spinning read lock.
 * returns 1 if we get the read lock and 0 if we don't
 * this won't wait for blocking writers
 */
int btrfs_tree_read_lock_atomic(struct extent_buffer *eb)
{
	if (atomic_read(&eb->blocking_writers))
		return 0;

	/*
	 * Unlike btrfs_try_tree_read_lock(), take the rwlock
	 * unconditionally; only blocking writers make us bail out.
	 */
	read_lock(&eb->lock);
	/* Recheck: a writer may have gone blocking while we acquired. */
	if (atomic_read(&eb->blocking_writers)) {
		read_unlock(&eb->lock);
		return 0;
	}
	atomic_inc(&eb->read_locks);
	atomic_inc(&eb->spinning_readers);
	return 1;
}
149 | |||
/*
 * returns 1 if we get the read lock and 0 if we don't
 * this won't wait for blocking writers
 */
int btrfs_try_tree_read_lock(struct extent_buffer *eb)
{
	if (atomic_read(&eb->blocking_writers))
		return 0;

	/* Pure trylock: never spin on the rwlock itself either. */
	if (!read_trylock(&eb->lock))
		return 0;

	/* Recheck under the lock: a writer may have gone blocking. */
	if (atomic_read(&eb->blocking_writers)) {
		read_unlock(&eb->lock);
		return 0;
	}
	atomic_inc(&eb->read_locks);
	atomic_inc(&eb->spinning_readers);
	return 1;
}
150 | 170 | ||
/*
 * returns 1 if we get the write lock and 0 if we don't
 * this won't wait for blocking writers or readers
 */
int btrfs_try_tree_write_lock(struct extent_buffer *eb)
{
	if (atomic_read(&eb->blocking_writers) ||
	    atomic_read(&eb->blocking_readers))
		return 0;

	/*
	 * Take the rwlock unconditionally (it is only ever held spinning,
	 * so this is short); "try" refers to blocking lock holders only.
	 */
	write_lock(&eb->lock);
	/* Recheck under the lock: a holder may have gone blocking. */
	if (atomic_read(&eb->blocking_writers) ||
	    atomic_read(&eb->blocking_readers)) {
		write_unlock(&eb->lock);
		return 0;
	}
	atomic_inc(&eb->write_locks);
	atomic_inc(&eb->spinning_writers);
	eb->lock_owner = current->pid;
	return 1;
}
174 | 192 | ||
/*
 * drop a spinning read lock
 */
void btrfs_tree_read_unlock(struct extent_buffer *eb)
{
	/*
	 * if we're nested, we have the write lock. No new locking
	 * is needed as long as we are the lock owner.
	 * The write unlock will do a barrier for us, and the lock_nested
	 * field only matters to the lock owner.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner) {
		eb->lock_nested = 0;
		return;
	}
	btrfs_assert_tree_read_locked(eb);
	WARN_ON(atomic_read(&eb->spinning_readers) == 0);
	atomic_dec(&eb->spinning_readers);
	atomic_dec(&eb->read_locks);
	read_unlock(&eb->lock);
}
196 | 214 | ||
/*
 * drop a blocking read lock
 */
void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb)
{
	/*
	 * if we're nested, we have the write lock. No new locking
	 * is needed as long as we are the lock owner.
	 * The write unlock will do a barrier for us, and the lock_nested
	 * field only matters to the lock owner.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner) {
		eb->lock_nested = 0;
		return;
	}
	btrfs_assert_tree_read_locked(eb);
	WARN_ON(atomic_read(&eb->blocking_readers) == 0);
	/* Wake writers waiting in btrfs_tree_lock() for readers to drain. */
	if (atomic_dec_and_test(&eb->blocking_readers) &&
	    waitqueue_active(&eb->read_lock_wq))
		wake_up(&eb->read_lock_wq);
	atomic_dec(&eb->read_locks);
}
219 | 237 | ||
/*
 * take a spinning write lock. This will wait for both
 * blocking readers or writers
 */
void btrfs_tree_lock(struct extent_buffer *eb)
{
again:
	/* Wait without the rwlock first, to avoid spinning on it. */
	wait_event(eb->read_lock_wq, atomic_read(&eb->blocking_readers) == 0);
	wait_event(eb->write_lock_wq, atomic_read(&eb->blocking_writers) == 0);
	write_lock(&eb->lock);
	/* Recheck under the lock; new blockers may have raced in. */
	if (atomic_read(&eb->blocking_readers)) {
		write_unlock(&eb->lock);
		wait_event(eb->read_lock_wq,
			   atomic_read(&eb->blocking_readers) == 0);
		goto again;
	}
	if (atomic_read(&eb->blocking_writers)) {
		write_unlock(&eb->lock);
		wait_event(eb->write_lock_wq,
			   atomic_read(&eb->blocking_writers) == 0);
		goto again;
	}
	WARN_ON(atomic_read(&eb->spinning_writers));
	atomic_inc(&eb->spinning_writers);
	atomic_inc(&eb->write_locks);
	eb->lock_owner = current->pid;
}
247 | 265 | ||
/*
 * drop a spinning or a blocking write lock.
 */
void btrfs_tree_unlock(struct extent_buffer *eb)
{
	int blockers = atomic_read(&eb->blocking_writers);

	/* At most one blocking writer can exist: us. */
	BUG_ON(blockers > 1);

	btrfs_assert_tree_locked(eb);
	eb->lock_owner = 0;
	atomic_dec(&eb->write_locks);

	if (blockers) {
		/* Blocking lock: the rwlock is not held, just drop counts. */
		WARN_ON(atomic_read(&eb->spinning_writers));
		atomic_dec(&eb->blocking_writers);
		smp_mb();	/* order the dec before the waitqueue check */
		if (waitqueue_active(&eb->write_lock_wq))
			wake_up(&eb->write_lock_wq);
	} else {
		WARN_ON(atomic_read(&eb->spinning_writers) != 1);
		atomic_dec(&eb->spinning_writers);
		write_unlock(&eb->lock);
	}
}
273 | 291 | ||
/* Sanity check: the caller must hold at least one write lock on @eb. */
void btrfs_assert_tree_locked(struct extent_buffer *eb)
{
	BUG_ON(!atomic_read(&eb->write_locks));
}
278 | 296 | ||
279 | static void btrfs_assert_tree_read_locked(struct extent_buffer *eb) | 297 | static void btrfs_assert_tree_read_locked(struct extent_buffer *eb) |
280 | { | 298 | { |
281 | BUG_ON(!atomic_read(&eb->read_locks)); | 299 | BUG_ON(!atomic_read(&eb->read_locks)); |
fs/btrfs/locking.h
1 | /* | 1 | /* |
2 | * Copyright (C) 2008 Oracle. All rights reserved. | 2 | * Copyright (C) 2008 Oracle. All rights reserved. |
3 | * | 3 | * |
4 | * This program is free software; you can redistribute it and/or | 4 | * This program is free software; you can redistribute it and/or |
5 | * modify it under the terms of the GNU General Public | 5 | * modify it under the terms of the GNU General Public |
6 | * License v2 as published by the Free Software Foundation. | 6 | * License v2 as published by the Free Software Foundation. |
7 | * | 7 | * |
8 | * This program is distributed in the hope that it will be useful, | 8 | * This program is distributed in the hope that it will be useful, |
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | 10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
11 | * General Public License for more details. | 11 | * General Public License for more details. |
12 | * | 12 | * |
13 | * You should have received a copy of the GNU General Public | 13 | * You should have received a copy of the GNU General Public |
14 | * License along with this program; if not, write to the | 14 | * License along with this program; if not, write to the |
15 | * Free Software Foundation, Inc., 59 Temple Place - Suite 330, | 15 | * Free Software Foundation, Inc., 59 Temple Place - Suite 330, |
16 | * Boston, MA 021110-1307, USA. | 16 | * Boston, MA 021110-1307, USA. |
17 | */ | 17 | */ |
18 | 18 | ||
19 | #ifndef __BTRFS_LOCKING_ | 19 | #ifndef __BTRFS_LOCKING_ |
20 | #define __BTRFS_LOCKING_ | 20 | #define __BTRFS_LOCKING_ |
21 | 21 | ||
22 | #define BTRFS_WRITE_LOCK 1 | 22 | #define BTRFS_WRITE_LOCK 1 |
23 | #define BTRFS_READ_LOCK 2 | 23 | #define BTRFS_READ_LOCK 2 |
24 | #define BTRFS_WRITE_LOCK_BLOCKING 3 | 24 | #define BTRFS_WRITE_LOCK_BLOCKING 3 |
25 | #define BTRFS_READ_LOCK_BLOCKING 4 | 25 | #define BTRFS_READ_LOCK_BLOCKING 4 |
26 | 26 | ||
27 | void btrfs_tree_lock(struct extent_buffer *eb); | 27 | void btrfs_tree_lock(struct extent_buffer *eb); |
28 | void btrfs_tree_unlock(struct extent_buffer *eb); | 28 | void btrfs_tree_unlock(struct extent_buffer *eb); |
29 | 29 | ||
30 | void btrfs_tree_read_lock(struct extent_buffer *eb); | 30 | void btrfs_tree_read_lock(struct extent_buffer *eb); |
31 | void btrfs_tree_read_unlock(struct extent_buffer *eb); | 31 | void btrfs_tree_read_unlock(struct extent_buffer *eb); |
32 | void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb); | 32 | void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb); |
33 | void btrfs_set_lock_blocking_rw(struct extent_buffer *eb, int rw); | 33 | void btrfs_set_lock_blocking_rw(struct extent_buffer *eb, int rw); |
34 | void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw); | 34 | void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw); |
35 | void btrfs_assert_tree_locked(struct extent_buffer *eb); | 35 | void btrfs_assert_tree_locked(struct extent_buffer *eb); |
36 | int btrfs_try_tree_read_lock(struct extent_buffer *eb); | 36 | int btrfs_try_tree_read_lock(struct extent_buffer *eb); |
37 | int btrfs_try_tree_write_lock(struct extent_buffer *eb); | 37 | int btrfs_try_tree_write_lock(struct extent_buffer *eb); |
38 | int btrfs_tree_read_lock_atomic(struct extent_buffer *eb); | ||
39 | |||
38 | 40 | ||
39 | static inline void btrfs_tree_unlock_rw(struct extent_buffer *eb, int rw) | 41 | static inline void btrfs_tree_unlock_rw(struct extent_buffer *eb, int rw) |
40 | { | 42 | { |
41 | if (rw == BTRFS_WRITE_LOCK || rw == BTRFS_WRITE_LOCK_BLOCKING) | 43 | if (rw == BTRFS_WRITE_LOCK || rw == BTRFS_WRITE_LOCK_BLOCKING) |
42 | btrfs_tree_unlock(eb); | 44 | btrfs_tree_unlock(eb); |
43 | else if (rw == BTRFS_READ_LOCK_BLOCKING) | 45 | else if (rw == BTRFS_READ_LOCK_BLOCKING) |
44 | btrfs_tree_read_unlock_blocking(eb); | 46 | btrfs_tree_read_unlock_blocking(eb); |
45 | else if (rw == BTRFS_READ_LOCK) | 47 | else if (rw == BTRFS_READ_LOCK) |
46 | btrfs_tree_read_unlock(eb); | 48 | btrfs_tree_read_unlock(eb); |
47 | else | 49 | else |
48 | BUG(); | 50 | BUG(); |
49 | } | 51 | } |
50 | 52 | ||
/* Convert our spinning write lock on @eb to its blocking form. */
static inline void btrfs_set_lock_blocking(struct extent_buffer *eb)
{
	btrfs_set_lock_blocking_rw(eb, BTRFS_WRITE_LOCK);
}
55 | 57 | ||
/* Convert our blocking write lock on @eb back to the spinning form. */
static inline void btrfs_clear_lock_blocking(struct extent_buffer *eb)
{
	btrfs_clear_lock_blocking_rw(eb, BTRFS_WRITE_LOCK_BLOCKING);
}
60 | #endif | 62 | #endif |
61 | 63 |