Commit ed8c4913da4951957bf8afc788522788881ff405

Authored by Josef Bacik
Committed by Chris Mason
1 parent 8ca15e05e6

Btrfs: make sure the backref walker catches all refs to our extent

Because we don't adjust the offset into the extent for compressed extents, we
will properly find both extents for this case

[extent a][extent b][rest of extent a]

but because we already added a ref for the front half we won't add the inode
information for the second half.  This causes us to leak that memory and to not
print out the other offset when we do logical-resolve.  So fix this by calling
ulist_add_merge and then adding our eie to the existing entry if there is one.
With this patch we get both offsets out of logical-resolve.  With this and the
other 2 patches I've sent we now pass btrfs/276 on my vm with compress-force=lzo
set.  Thanks,

Signed-off-by: Josef Bacik <jbacik@fusionio.com>
Signed-off-by: Chris Mason <chris.mason@fusionio.com>

Showing 1 changed file with 14 additions and 11 deletions Inline Diff

1 /* 1 /*
2 * Copyright (C) 2011 STRATO. All rights reserved. 2 * Copyright (C) 2011 STRATO. All rights reserved.
3 * 3 *
4 * This program is free software; you can redistribute it and/or 4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public 5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation. 6 * License v2 as published by the Free Software Foundation.
7 * 7 *
8 * This program is distributed in the hope that it will be useful, 8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of 9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details. 11 * General Public License for more details.
12 * 12 *
13 * You should have received a copy of the GNU General Public 13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the 14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330, 15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 021110-1307, USA. 16 * Boston, MA 021110-1307, USA.
17 */ 17 */
18 18
19 #include <linux/vmalloc.h> 19 #include <linux/vmalloc.h>
20 #include "ctree.h" 20 #include "ctree.h"
21 #include "disk-io.h" 21 #include "disk-io.h"
22 #include "backref.h" 22 #include "backref.h"
23 #include "ulist.h" 23 #include "ulist.h"
24 #include "transaction.h" 24 #include "transaction.h"
25 #include "delayed-ref.h" 25 #include "delayed-ref.h"
26 #include "locking.h" 26 #include "locking.h"
27 27
28 struct extent_inode_elem { 28 struct extent_inode_elem {
29 u64 inum; 29 u64 inum;
30 u64 offset; 30 u64 offset;
31 struct extent_inode_elem *next; 31 struct extent_inode_elem *next;
32 }; 32 };
33 33
34 static int check_extent_in_eb(struct btrfs_key *key, struct extent_buffer *eb, 34 static int check_extent_in_eb(struct btrfs_key *key, struct extent_buffer *eb,
35 struct btrfs_file_extent_item *fi, 35 struct btrfs_file_extent_item *fi,
36 u64 extent_item_pos, 36 u64 extent_item_pos,
37 struct extent_inode_elem **eie) 37 struct extent_inode_elem **eie)
38 { 38 {
39 u64 offset = 0; 39 u64 offset = 0;
40 struct extent_inode_elem *e; 40 struct extent_inode_elem *e;
41 41
42 if (!btrfs_file_extent_compression(eb, fi) && 42 if (!btrfs_file_extent_compression(eb, fi) &&
43 !btrfs_file_extent_encryption(eb, fi) && 43 !btrfs_file_extent_encryption(eb, fi) &&
44 !btrfs_file_extent_other_encoding(eb, fi)) { 44 !btrfs_file_extent_other_encoding(eb, fi)) {
45 u64 data_offset; 45 u64 data_offset;
46 u64 data_len; 46 u64 data_len;
47 47
48 data_offset = btrfs_file_extent_offset(eb, fi); 48 data_offset = btrfs_file_extent_offset(eb, fi);
49 data_len = btrfs_file_extent_num_bytes(eb, fi); 49 data_len = btrfs_file_extent_num_bytes(eb, fi);
50 50
51 if (extent_item_pos < data_offset || 51 if (extent_item_pos < data_offset ||
52 extent_item_pos >= data_offset + data_len) 52 extent_item_pos >= data_offset + data_len)
53 return 1; 53 return 1;
54 offset = extent_item_pos - data_offset; 54 offset = extent_item_pos - data_offset;
55 } 55 }
56 56
57 e = kmalloc(sizeof(*e), GFP_NOFS); 57 e = kmalloc(sizeof(*e), GFP_NOFS);
58 if (!e) 58 if (!e)
59 return -ENOMEM; 59 return -ENOMEM;
60 60
61 e->next = *eie; 61 e->next = *eie;
62 e->inum = key->objectid; 62 e->inum = key->objectid;
63 e->offset = key->offset + offset; 63 e->offset = key->offset + offset;
64 *eie = e; 64 *eie = e;
65 65
66 return 0; 66 return 0;
67 } 67 }
68 68
69 static int find_extent_in_eb(struct extent_buffer *eb, u64 wanted_disk_byte, 69 static int find_extent_in_eb(struct extent_buffer *eb, u64 wanted_disk_byte,
70 u64 extent_item_pos, 70 u64 extent_item_pos,
71 struct extent_inode_elem **eie) 71 struct extent_inode_elem **eie)
72 { 72 {
73 u64 disk_byte; 73 u64 disk_byte;
74 struct btrfs_key key; 74 struct btrfs_key key;
75 struct btrfs_file_extent_item *fi; 75 struct btrfs_file_extent_item *fi;
76 int slot; 76 int slot;
77 int nritems; 77 int nritems;
78 int extent_type; 78 int extent_type;
79 int ret; 79 int ret;
80 80
81 /* 81 /*
82 * from the shared data ref, we only have the leaf but we need 82 * from the shared data ref, we only have the leaf but we need
83 * the key. thus, we must look into all items and see that we 83 * the key. thus, we must look into all items and see that we
84 * find one (some) with a reference to our extent item. 84 * find one (some) with a reference to our extent item.
85 */ 85 */
86 nritems = btrfs_header_nritems(eb); 86 nritems = btrfs_header_nritems(eb);
87 for (slot = 0; slot < nritems; ++slot) { 87 for (slot = 0; slot < nritems; ++slot) {
88 btrfs_item_key_to_cpu(eb, &key, slot); 88 btrfs_item_key_to_cpu(eb, &key, slot);
89 if (key.type != BTRFS_EXTENT_DATA_KEY) 89 if (key.type != BTRFS_EXTENT_DATA_KEY)
90 continue; 90 continue;
91 fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item); 91 fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
92 extent_type = btrfs_file_extent_type(eb, fi); 92 extent_type = btrfs_file_extent_type(eb, fi);
93 if (extent_type == BTRFS_FILE_EXTENT_INLINE) 93 if (extent_type == BTRFS_FILE_EXTENT_INLINE)
94 continue; 94 continue;
95 /* don't skip BTRFS_FILE_EXTENT_PREALLOC, we can handle that */ 95 /* don't skip BTRFS_FILE_EXTENT_PREALLOC, we can handle that */
96 disk_byte = btrfs_file_extent_disk_bytenr(eb, fi); 96 disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
97 if (disk_byte != wanted_disk_byte) 97 if (disk_byte != wanted_disk_byte)
98 continue; 98 continue;
99 99
100 ret = check_extent_in_eb(&key, eb, fi, extent_item_pos, eie); 100 ret = check_extent_in_eb(&key, eb, fi, extent_item_pos, eie);
101 if (ret < 0) 101 if (ret < 0)
102 return ret; 102 return ret;
103 } 103 }
104 104
105 return 0; 105 return 0;
106 } 106 }
107 107
108 /* 108 /*
109 * this structure records all encountered refs on the way up to the root 109 * this structure records all encountered refs on the way up to the root
110 */ 110 */
111 struct __prelim_ref { 111 struct __prelim_ref {
112 struct list_head list; 112 struct list_head list;
113 u64 root_id; 113 u64 root_id;
114 struct btrfs_key key_for_search; 114 struct btrfs_key key_for_search;
115 int level; 115 int level;
116 int count; 116 int count;
117 struct extent_inode_elem *inode_list; 117 struct extent_inode_elem *inode_list;
118 u64 parent; 118 u64 parent;
119 u64 wanted_disk_byte; 119 u64 wanted_disk_byte;
120 }; 120 };
121 121
122 /* 122 /*
123 * the rules for all callers of this function are: 123 * the rules for all callers of this function are:
124 * - obtaining the parent is the goal 124 * - obtaining the parent is the goal
125 * - if you add a key, you must know that it is a correct key 125 * - if you add a key, you must know that it is a correct key
126 * - if you cannot add the parent or a correct key, then we will look into the 126 * - if you cannot add the parent or a correct key, then we will look into the
127 * block later to set a correct key 127 * block later to set a correct key
128 * 128 *
129 * delayed refs 129 * delayed refs
130 * ============ 130 * ============
131 * backref type | shared | indirect | shared | indirect 131 * backref type | shared | indirect | shared | indirect
132 * information | tree | tree | data | data 132 * information | tree | tree | data | data
133 * --------------------+--------+----------+--------+---------- 133 * --------------------+--------+----------+--------+----------
134 * parent logical | y | - | - | - 134 * parent logical | y | - | - | -
135 * key to resolve | - | y | y | y 135 * key to resolve | - | y | y | y
136 * tree block logical | - | - | - | - 136 * tree block logical | - | - | - | -
137 * root for resolving | y | y | y | y 137 * root for resolving | y | y | y | y
138 * 138 *
139 * - column 1: we've the parent -> done 139 * - column 1: we've the parent -> done
140 * - column 2, 3, 4: we use the key to find the parent 140 * - column 2, 3, 4: we use the key to find the parent
141 * 141 *
142 * on disk refs (inline or keyed) 142 * on disk refs (inline or keyed)
143 * ============================== 143 * ==============================
144 * backref type | shared | indirect | shared | indirect 144 * backref type | shared | indirect | shared | indirect
145 * information | tree | tree | data | data 145 * information | tree | tree | data | data
146 * --------------------+--------+----------+--------+---------- 146 * --------------------+--------+----------+--------+----------
147 * parent logical | y | - | y | - 147 * parent logical | y | - | y | -
148 * key to resolve | - | - | - | y 148 * key to resolve | - | - | - | y
149 * tree block logical | y | y | y | y 149 * tree block logical | y | y | y | y
150 * root for resolving | - | y | y | y 150 * root for resolving | - | y | y | y
151 * 151 *
152 * - column 1, 3: we've the parent -> done 152 * - column 1, 3: we've the parent -> done
153 * - column 2: we take the first key from the block to find the parent 153 * - column 2: we take the first key from the block to find the parent
154 * (see __add_missing_keys) 154 * (see __add_missing_keys)
155 * - column 4: we use the key to find the parent 155 * - column 4: we use the key to find the parent
156 * 156 *
157 * additional information that's available but not required to find the parent 157 * additional information that's available but not required to find the parent
158 * block might help in merging entries to gain some speed. 158 * block might help in merging entries to gain some speed.
159 */ 159 */
160 160
161 static int __add_prelim_ref(struct list_head *head, u64 root_id, 161 static int __add_prelim_ref(struct list_head *head, u64 root_id,
162 struct btrfs_key *key, int level, 162 struct btrfs_key *key, int level,
163 u64 parent, u64 wanted_disk_byte, int count) 163 u64 parent, u64 wanted_disk_byte, int count)
164 { 164 {
165 struct __prelim_ref *ref; 165 struct __prelim_ref *ref;
166 166
167 /* in case we're adding delayed refs, we're holding the refs spinlock */ 167 /* in case we're adding delayed refs, we're holding the refs spinlock */
168 ref = kmalloc(sizeof(*ref), GFP_ATOMIC); 168 ref = kmalloc(sizeof(*ref), GFP_ATOMIC);
169 if (!ref) 169 if (!ref)
170 return -ENOMEM; 170 return -ENOMEM;
171 171
172 ref->root_id = root_id; 172 ref->root_id = root_id;
173 if (key) 173 if (key)
174 ref->key_for_search = *key; 174 ref->key_for_search = *key;
175 else 175 else
176 memset(&ref->key_for_search, 0, sizeof(ref->key_for_search)); 176 memset(&ref->key_for_search, 0, sizeof(ref->key_for_search));
177 177
178 ref->inode_list = NULL; 178 ref->inode_list = NULL;
179 ref->level = level; 179 ref->level = level;
180 ref->count = count; 180 ref->count = count;
181 ref->parent = parent; 181 ref->parent = parent;
182 ref->wanted_disk_byte = wanted_disk_byte; 182 ref->wanted_disk_byte = wanted_disk_byte;
183 list_add_tail(&ref->list, head); 183 list_add_tail(&ref->list, head);
184 184
185 return 0; 185 return 0;
186 } 186 }
187 187
188 static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path, 188 static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
189 struct ulist *parents, int level, 189 struct ulist *parents, int level,
190 struct btrfs_key *key_for_search, u64 time_seq, 190 struct btrfs_key *key_for_search, u64 time_seq,
191 u64 wanted_disk_byte, 191 u64 wanted_disk_byte,
192 const u64 *extent_item_pos) 192 const u64 *extent_item_pos)
193 { 193 {
194 int ret = 0; 194 int ret = 0;
195 int slot; 195 int slot;
196 struct extent_buffer *eb; 196 struct extent_buffer *eb;
197 struct btrfs_key key; 197 struct btrfs_key key;
198 struct btrfs_file_extent_item *fi; 198 struct btrfs_file_extent_item *fi;
199 struct extent_inode_elem *eie = NULL; 199 struct extent_inode_elem *eie = NULL, *old = NULL;
200 u64 disk_byte; 200 u64 disk_byte;
201 201
202 if (level != 0) { 202 if (level != 0) {
203 eb = path->nodes[level]; 203 eb = path->nodes[level];
204 ret = ulist_add(parents, eb->start, 0, GFP_NOFS); 204 ret = ulist_add(parents, eb->start, 0, GFP_NOFS);
205 if (ret < 0) 205 if (ret < 0)
206 return ret; 206 return ret;
207 return 0; 207 return 0;
208 } 208 }
209 209
210 /* 210 /*
211 * We normally enter this function with the path already pointing to 211 * We normally enter this function with the path already pointing to
212 * the first item to check. But sometimes, we may enter it with 212 * the first item to check. But sometimes, we may enter it with
213 * slot==nritems. In that case, go to the next leaf before we continue. 213 * slot==nritems. In that case, go to the next leaf before we continue.
214 */ 214 */
215 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) 215 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0]))
216 ret = btrfs_next_old_leaf(root, path, time_seq); 216 ret = btrfs_next_old_leaf(root, path, time_seq);
217 217
218 while (!ret) { 218 while (!ret) {
219 eb = path->nodes[0]; 219 eb = path->nodes[0];
220 slot = path->slots[0]; 220 slot = path->slots[0];
221 221
222 btrfs_item_key_to_cpu(eb, &key, slot); 222 btrfs_item_key_to_cpu(eb, &key, slot);
223 223
224 if (key.objectid != key_for_search->objectid || 224 if (key.objectid != key_for_search->objectid ||
225 key.type != BTRFS_EXTENT_DATA_KEY) 225 key.type != BTRFS_EXTENT_DATA_KEY)
226 break; 226 break;
227 227
228 fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item); 228 fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
229 disk_byte = btrfs_file_extent_disk_bytenr(eb, fi); 229 disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
230 230
231 if (disk_byte == wanted_disk_byte) { 231 if (disk_byte == wanted_disk_byte) {
232 eie = NULL; 232 eie = NULL;
233 old = NULL;
233 if (extent_item_pos) { 234 if (extent_item_pos) {
234 ret = check_extent_in_eb(&key, eb, fi, 235 ret = check_extent_in_eb(&key, eb, fi,
235 *extent_item_pos, 236 *extent_item_pos,
236 &eie); 237 &eie);
237 if (ret < 0) 238 if (ret < 0)
238 break; 239 break;
239 } 240 }
240 if (!ret) { 241 if (ret > 0)
241 ret = ulist_add(parents, eb->start, 242 goto next;
242 (uintptr_t)eie, GFP_NOFS); 243 ret = ulist_add_merge(parents, eb->start,
243 if (ret < 0) 244 (uintptr_t)eie,
244 break; 245 (u64 *)&old, GFP_NOFS);
245 if (!extent_item_pos) { 246 if (ret < 0)
246 ret = btrfs_next_old_leaf(root, path, 247 break;
247 time_seq); 248 if (!ret && extent_item_pos) {
248 continue; 249 while (old->next)
249 } 250 old = old->next;
251 old->next = eie;
250 } 252 }
251 } 253 }
254 next:
252 ret = btrfs_next_old_item(root, path, time_seq); 255 ret = btrfs_next_old_item(root, path, time_seq);
253 } 256 }
254 257
255 if (ret > 0) 258 if (ret > 0)
256 ret = 0; 259 ret = 0;
257 return ret; 260 return ret;
258 } 261 }
259 262
260 /* 263 /*
261 * resolve an indirect backref in the form (root_id, key, level) 264 * resolve an indirect backref in the form (root_id, key, level)
262 * to a logical address 265 * to a logical address
263 */ 266 */
264 static int __resolve_indirect_ref(struct btrfs_fs_info *fs_info, 267 static int __resolve_indirect_ref(struct btrfs_fs_info *fs_info,
265 struct btrfs_path *path, u64 time_seq, 268 struct btrfs_path *path, u64 time_seq,
266 struct __prelim_ref *ref, 269 struct __prelim_ref *ref,
267 struct ulist *parents, 270 struct ulist *parents,
268 const u64 *extent_item_pos) 271 const u64 *extent_item_pos)
269 { 272 {
270 struct btrfs_root *root; 273 struct btrfs_root *root;
271 struct btrfs_key root_key; 274 struct btrfs_key root_key;
272 struct extent_buffer *eb; 275 struct extent_buffer *eb;
273 int ret = 0; 276 int ret = 0;
274 int root_level; 277 int root_level;
275 int level = ref->level; 278 int level = ref->level;
276 279
277 root_key.objectid = ref->root_id; 280 root_key.objectid = ref->root_id;
278 root_key.type = BTRFS_ROOT_ITEM_KEY; 281 root_key.type = BTRFS_ROOT_ITEM_KEY;
279 root_key.offset = (u64)-1; 282 root_key.offset = (u64)-1;
280 root = btrfs_read_fs_root_no_name(fs_info, &root_key); 283 root = btrfs_read_fs_root_no_name(fs_info, &root_key);
281 if (IS_ERR(root)) { 284 if (IS_ERR(root)) {
282 ret = PTR_ERR(root); 285 ret = PTR_ERR(root);
283 goto out; 286 goto out;
284 } 287 }
285 288
286 root_level = btrfs_old_root_level(root, time_seq); 289 root_level = btrfs_old_root_level(root, time_seq);
287 290
288 if (root_level + 1 == level) 291 if (root_level + 1 == level)
289 goto out; 292 goto out;
290 293
291 path->lowest_level = level; 294 path->lowest_level = level;
292 ret = btrfs_search_old_slot(root, &ref->key_for_search, path, time_seq); 295 ret = btrfs_search_old_slot(root, &ref->key_for_search, path, time_seq);
293 pr_debug("search slot in root %llu (level %d, ref count %d) returned " 296 pr_debug("search slot in root %llu (level %d, ref count %d) returned "
294 "%d for key (%llu %u %llu)\n", 297 "%d for key (%llu %u %llu)\n",
295 (unsigned long long)ref->root_id, level, ref->count, ret, 298 (unsigned long long)ref->root_id, level, ref->count, ret,
296 (unsigned long long)ref->key_for_search.objectid, 299 (unsigned long long)ref->key_for_search.objectid,
297 ref->key_for_search.type, 300 ref->key_for_search.type,
298 (unsigned long long)ref->key_for_search.offset); 301 (unsigned long long)ref->key_for_search.offset);
299 if (ret < 0) 302 if (ret < 0)
300 goto out; 303 goto out;
301 304
302 eb = path->nodes[level]; 305 eb = path->nodes[level];
303 while (!eb) { 306 while (!eb) {
304 if (!level) { 307 if (!level) {
305 WARN_ON(1); 308 WARN_ON(1);
306 ret = 1; 309 ret = 1;
307 goto out; 310 goto out;
308 } 311 }
309 level--; 312 level--;
310 eb = path->nodes[level]; 313 eb = path->nodes[level];
311 } 314 }
312 315
313 ret = add_all_parents(root, path, parents, level, &ref->key_for_search, 316 ret = add_all_parents(root, path, parents, level, &ref->key_for_search,
314 time_seq, ref->wanted_disk_byte, 317 time_seq, ref->wanted_disk_byte,
315 extent_item_pos); 318 extent_item_pos);
316 out: 319 out:
317 path->lowest_level = 0; 320 path->lowest_level = 0;
318 btrfs_release_path(path); 321 btrfs_release_path(path);
319 return ret; 322 return ret;
320 } 323 }
321 324
322 /* 325 /*
323 * resolve all indirect backrefs from the list 326 * resolve all indirect backrefs from the list
324 */ 327 */
325 static int __resolve_indirect_refs(struct btrfs_fs_info *fs_info, 328 static int __resolve_indirect_refs(struct btrfs_fs_info *fs_info,
326 struct btrfs_path *path, u64 time_seq, 329 struct btrfs_path *path, u64 time_seq,
327 struct list_head *head, 330 struct list_head *head,
328 const u64 *extent_item_pos) 331 const u64 *extent_item_pos)
329 { 332 {
330 int err; 333 int err;
331 int ret = 0; 334 int ret = 0;
332 struct __prelim_ref *ref; 335 struct __prelim_ref *ref;
333 struct __prelim_ref *ref_safe; 336 struct __prelim_ref *ref_safe;
334 struct __prelim_ref *new_ref; 337 struct __prelim_ref *new_ref;
335 struct ulist *parents; 338 struct ulist *parents;
336 struct ulist_node *node; 339 struct ulist_node *node;
337 struct ulist_iterator uiter; 340 struct ulist_iterator uiter;
338 341
339 parents = ulist_alloc(GFP_NOFS); 342 parents = ulist_alloc(GFP_NOFS);
340 if (!parents) 343 if (!parents)
341 return -ENOMEM; 344 return -ENOMEM;
342 345
343 /* 346 /*
344 * _safe allows us to insert directly after the current item without 347 * _safe allows us to insert directly after the current item without
345 * iterating over the newly inserted items. 348 * iterating over the newly inserted items.
346 * we're also allowed to re-assign ref during iteration. 349 * we're also allowed to re-assign ref during iteration.
347 */ 350 */
348 list_for_each_entry_safe(ref, ref_safe, head, list) { 351 list_for_each_entry_safe(ref, ref_safe, head, list) {
349 if (ref->parent) /* already direct */ 352 if (ref->parent) /* already direct */
350 continue; 353 continue;
351 if (ref->count == 0) 354 if (ref->count == 0)
352 continue; 355 continue;
353 err = __resolve_indirect_ref(fs_info, path, time_seq, ref, 356 err = __resolve_indirect_ref(fs_info, path, time_seq, ref,
354 parents, extent_item_pos); 357 parents, extent_item_pos);
355 if (err == -ENOMEM) 358 if (err == -ENOMEM)
356 goto out; 359 goto out;
357 if (err) 360 if (err)
358 continue; 361 continue;
359 362
360 /* we put the first parent into the ref at hand */ 363 /* we put the first parent into the ref at hand */
361 ULIST_ITER_INIT(&uiter); 364 ULIST_ITER_INIT(&uiter);
362 node = ulist_next(parents, &uiter); 365 node = ulist_next(parents, &uiter);
363 ref->parent = node ? node->val : 0; 366 ref->parent = node ? node->val : 0;
364 ref->inode_list = node ? 367 ref->inode_list = node ?
365 (struct extent_inode_elem *)(uintptr_t)node->aux : 0; 368 (struct extent_inode_elem *)(uintptr_t)node->aux : 0;
366 369
367 /* additional parents require new refs being added here */ 370 /* additional parents require new refs being added here */
368 while ((node = ulist_next(parents, &uiter))) { 371 while ((node = ulist_next(parents, &uiter))) {
369 new_ref = kmalloc(sizeof(*new_ref), GFP_NOFS); 372 new_ref = kmalloc(sizeof(*new_ref), GFP_NOFS);
370 if (!new_ref) { 373 if (!new_ref) {
371 ret = -ENOMEM; 374 ret = -ENOMEM;
372 goto out; 375 goto out;
373 } 376 }
374 memcpy(new_ref, ref, sizeof(*ref)); 377 memcpy(new_ref, ref, sizeof(*ref));
375 new_ref->parent = node->val; 378 new_ref->parent = node->val;
376 new_ref->inode_list = (struct extent_inode_elem *) 379 new_ref->inode_list = (struct extent_inode_elem *)
377 (uintptr_t)node->aux; 380 (uintptr_t)node->aux;
378 list_add(&new_ref->list, &ref->list); 381 list_add(&new_ref->list, &ref->list);
379 } 382 }
380 ulist_reinit(parents); 383 ulist_reinit(parents);
381 } 384 }
382 out: 385 out:
383 ulist_free(parents); 386 ulist_free(parents);
384 return ret; 387 return ret;
385 } 388 }
386 389
387 static inline int ref_for_same_block(struct __prelim_ref *ref1, 390 static inline int ref_for_same_block(struct __prelim_ref *ref1,
388 struct __prelim_ref *ref2) 391 struct __prelim_ref *ref2)
389 { 392 {
390 if (ref1->level != ref2->level) 393 if (ref1->level != ref2->level)
391 return 0; 394 return 0;
392 if (ref1->root_id != ref2->root_id) 395 if (ref1->root_id != ref2->root_id)
393 return 0; 396 return 0;
394 if (ref1->key_for_search.type != ref2->key_for_search.type) 397 if (ref1->key_for_search.type != ref2->key_for_search.type)
395 return 0; 398 return 0;
396 if (ref1->key_for_search.objectid != ref2->key_for_search.objectid) 399 if (ref1->key_for_search.objectid != ref2->key_for_search.objectid)
397 return 0; 400 return 0;
398 if (ref1->key_for_search.offset != ref2->key_for_search.offset) 401 if (ref1->key_for_search.offset != ref2->key_for_search.offset)
399 return 0; 402 return 0;
400 if (ref1->parent != ref2->parent) 403 if (ref1->parent != ref2->parent)
401 return 0; 404 return 0;
402 405
403 return 1; 406 return 1;
404 } 407 }
405 408
406 /* 409 /*
407 * read tree blocks and add keys where required. 410 * read tree blocks and add keys where required.
408 */ 411 */
409 static int __add_missing_keys(struct btrfs_fs_info *fs_info, 412 static int __add_missing_keys(struct btrfs_fs_info *fs_info,
410 struct list_head *head) 413 struct list_head *head)
411 { 414 {
412 struct list_head *pos; 415 struct list_head *pos;
413 struct extent_buffer *eb; 416 struct extent_buffer *eb;
414 417
415 list_for_each(pos, head) { 418 list_for_each(pos, head) {
416 struct __prelim_ref *ref; 419 struct __prelim_ref *ref;
417 ref = list_entry(pos, struct __prelim_ref, list); 420 ref = list_entry(pos, struct __prelim_ref, list);
418 421
419 if (ref->parent) 422 if (ref->parent)
420 continue; 423 continue;
421 if (ref->key_for_search.type) 424 if (ref->key_for_search.type)
422 continue; 425 continue;
423 BUG_ON(!ref->wanted_disk_byte); 426 BUG_ON(!ref->wanted_disk_byte);
424 eb = read_tree_block(fs_info->tree_root, ref->wanted_disk_byte, 427 eb = read_tree_block(fs_info->tree_root, ref->wanted_disk_byte,
425 fs_info->tree_root->leafsize, 0); 428 fs_info->tree_root->leafsize, 0);
426 if (!eb || !extent_buffer_uptodate(eb)) { 429 if (!eb || !extent_buffer_uptodate(eb)) {
427 free_extent_buffer(eb); 430 free_extent_buffer(eb);
428 return -EIO; 431 return -EIO;
429 } 432 }
430 btrfs_tree_read_lock(eb); 433 btrfs_tree_read_lock(eb);
431 if (btrfs_header_level(eb) == 0) 434 if (btrfs_header_level(eb) == 0)
432 btrfs_item_key_to_cpu(eb, &ref->key_for_search, 0); 435 btrfs_item_key_to_cpu(eb, &ref->key_for_search, 0);
433 else 436 else
434 btrfs_node_key_to_cpu(eb, &ref->key_for_search, 0); 437 btrfs_node_key_to_cpu(eb, &ref->key_for_search, 0);
435 btrfs_tree_read_unlock(eb); 438 btrfs_tree_read_unlock(eb);
436 free_extent_buffer(eb); 439 free_extent_buffer(eb);
437 } 440 }
438 return 0; 441 return 0;
439 } 442 }
440 443
441 /* 444 /*
442 * merge two lists of backrefs and adjust counts accordingly 445 * merge two lists of backrefs and adjust counts accordingly
443 * 446 *
444 * mode = 1: merge identical keys, if key is set 447 * mode = 1: merge identical keys, if key is set
445 * FIXME: if we add more keys in __add_prelim_ref, we can merge more here. 448 * FIXME: if we add more keys in __add_prelim_ref, we can merge more here.
446 * additionally, we could even add a key range for the blocks we 449 * additionally, we could even add a key range for the blocks we
447 * looked into to merge even more (-> replace unresolved refs by those 450 * looked into to merge even more (-> replace unresolved refs by those
448 * having a parent). 451 * having a parent).
449 * mode = 2: merge identical parents 452 * mode = 2: merge identical parents
450 */ 453 */
451 static void __merge_refs(struct list_head *head, int mode) 454 static void __merge_refs(struct list_head *head, int mode)
452 { 455 {
453 struct list_head *pos1; 456 struct list_head *pos1;
454 457
455 list_for_each(pos1, head) { 458 list_for_each(pos1, head) {
456 struct list_head *n2; 459 struct list_head *n2;
457 struct list_head *pos2; 460 struct list_head *pos2;
458 struct __prelim_ref *ref1; 461 struct __prelim_ref *ref1;
459 462
460 ref1 = list_entry(pos1, struct __prelim_ref, list); 463 ref1 = list_entry(pos1, struct __prelim_ref, list);
461 464
462 for (pos2 = pos1->next, n2 = pos2->next; pos2 != head; 465 for (pos2 = pos1->next, n2 = pos2->next; pos2 != head;
463 pos2 = n2, n2 = pos2->next) { 466 pos2 = n2, n2 = pos2->next) {
464 struct __prelim_ref *ref2; 467 struct __prelim_ref *ref2;
465 struct __prelim_ref *xchg; 468 struct __prelim_ref *xchg;
466 struct extent_inode_elem *eie; 469 struct extent_inode_elem *eie;
467 470
468 ref2 = list_entry(pos2, struct __prelim_ref, list); 471 ref2 = list_entry(pos2, struct __prelim_ref, list);
469 472
470 if (mode == 1) { 473 if (mode == 1) {
471 if (!ref_for_same_block(ref1, ref2)) 474 if (!ref_for_same_block(ref1, ref2))
472 continue; 475 continue;
473 if (!ref1->parent && ref2->parent) { 476 if (!ref1->parent && ref2->parent) {
474 xchg = ref1; 477 xchg = ref1;
475 ref1 = ref2; 478 ref1 = ref2;
476 ref2 = xchg; 479 ref2 = xchg;
477 } 480 }
478 } else { 481 } else {
479 if (ref1->parent != ref2->parent) 482 if (ref1->parent != ref2->parent)
480 continue; 483 continue;
481 } 484 }
482 485
483 eie = ref1->inode_list; 486 eie = ref1->inode_list;
484 while (eie && eie->next) 487 while (eie && eie->next)
485 eie = eie->next; 488 eie = eie->next;
486 if (eie) 489 if (eie)
487 eie->next = ref2->inode_list; 490 eie->next = ref2->inode_list;
488 else 491 else
489 ref1->inode_list = ref2->inode_list; 492 ref1->inode_list = ref2->inode_list;
490 ref1->count += ref2->count; 493 ref1->count += ref2->count;
491 494
492 list_del(&ref2->list); 495 list_del(&ref2->list);
493 kfree(ref2); 496 kfree(ref2);
494 } 497 }
495 498
496 } 499 }
497 } 500 }
498 501
499 /* 502 /*
500 * add all currently queued delayed refs from this head whose seq nr is 503 * add all currently queued delayed refs from this head whose seq nr is
501 * smaller or equal that seq to the list 504 * smaller or equal that seq to the list
502 */ 505 */
503 static int __add_delayed_refs(struct btrfs_delayed_ref_head *head, u64 seq, 506 static int __add_delayed_refs(struct btrfs_delayed_ref_head *head, u64 seq,
504 struct list_head *prefs) 507 struct list_head *prefs)
505 { 508 {
506 struct btrfs_delayed_extent_op *extent_op = head->extent_op; 509 struct btrfs_delayed_extent_op *extent_op = head->extent_op;
507 struct rb_node *n = &head->node.rb_node; 510 struct rb_node *n = &head->node.rb_node;
508 struct btrfs_key key; 511 struct btrfs_key key;
509 struct btrfs_key op_key = {0}; 512 struct btrfs_key op_key = {0};
510 int sgn; 513 int sgn;
511 int ret = 0; 514 int ret = 0;
512 515
513 if (extent_op && extent_op->update_key) 516 if (extent_op && extent_op->update_key)
514 btrfs_disk_key_to_cpu(&op_key, &extent_op->key); 517 btrfs_disk_key_to_cpu(&op_key, &extent_op->key);
515 518
516 while ((n = rb_prev(n))) { 519 while ((n = rb_prev(n))) {
517 struct btrfs_delayed_ref_node *node; 520 struct btrfs_delayed_ref_node *node;
518 node = rb_entry(n, struct btrfs_delayed_ref_node, 521 node = rb_entry(n, struct btrfs_delayed_ref_node,
519 rb_node); 522 rb_node);
520 if (node->bytenr != head->node.bytenr) 523 if (node->bytenr != head->node.bytenr)
521 break; 524 break;
522 WARN_ON(node->is_head); 525 WARN_ON(node->is_head);
523 526
524 if (node->seq > seq) 527 if (node->seq > seq)
525 continue; 528 continue;
526 529
527 switch (node->action) { 530 switch (node->action) {
528 case BTRFS_ADD_DELAYED_EXTENT: 531 case BTRFS_ADD_DELAYED_EXTENT:
529 case BTRFS_UPDATE_DELAYED_HEAD: 532 case BTRFS_UPDATE_DELAYED_HEAD:
530 WARN_ON(1); 533 WARN_ON(1);
531 continue; 534 continue;
532 case BTRFS_ADD_DELAYED_REF: 535 case BTRFS_ADD_DELAYED_REF:
533 sgn = 1; 536 sgn = 1;
534 break; 537 break;
535 case BTRFS_DROP_DELAYED_REF: 538 case BTRFS_DROP_DELAYED_REF:
536 sgn = -1; 539 sgn = -1;
537 break; 540 break;
538 default: 541 default:
539 BUG_ON(1); 542 BUG_ON(1);
540 } 543 }
541 switch (node->type) { 544 switch (node->type) {
542 case BTRFS_TREE_BLOCK_REF_KEY: { 545 case BTRFS_TREE_BLOCK_REF_KEY: {
543 struct btrfs_delayed_tree_ref *ref; 546 struct btrfs_delayed_tree_ref *ref;
544 547
545 ref = btrfs_delayed_node_to_tree_ref(node); 548 ref = btrfs_delayed_node_to_tree_ref(node);
546 ret = __add_prelim_ref(prefs, ref->root, &op_key, 549 ret = __add_prelim_ref(prefs, ref->root, &op_key,
547 ref->level + 1, 0, node->bytenr, 550 ref->level + 1, 0, node->bytenr,
548 node->ref_mod * sgn); 551 node->ref_mod * sgn);
549 break; 552 break;
550 } 553 }
551 case BTRFS_SHARED_BLOCK_REF_KEY: { 554 case BTRFS_SHARED_BLOCK_REF_KEY: {
552 struct btrfs_delayed_tree_ref *ref; 555 struct btrfs_delayed_tree_ref *ref;
553 556
554 ref = btrfs_delayed_node_to_tree_ref(node); 557 ref = btrfs_delayed_node_to_tree_ref(node);
555 ret = __add_prelim_ref(prefs, ref->root, NULL, 558 ret = __add_prelim_ref(prefs, ref->root, NULL,
556 ref->level + 1, ref->parent, 559 ref->level + 1, ref->parent,
557 node->bytenr, 560 node->bytenr,
558 node->ref_mod * sgn); 561 node->ref_mod * sgn);
559 break; 562 break;
560 } 563 }
561 case BTRFS_EXTENT_DATA_REF_KEY: { 564 case BTRFS_EXTENT_DATA_REF_KEY: {
562 struct btrfs_delayed_data_ref *ref; 565 struct btrfs_delayed_data_ref *ref;
563 ref = btrfs_delayed_node_to_data_ref(node); 566 ref = btrfs_delayed_node_to_data_ref(node);
564 567
565 key.objectid = ref->objectid; 568 key.objectid = ref->objectid;
566 key.type = BTRFS_EXTENT_DATA_KEY; 569 key.type = BTRFS_EXTENT_DATA_KEY;
567 key.offset = ref->offset; 570 key.offset = ref->offset;
568 ret = __add_prelim_ref(prefs, ref->root, &key, 0, 0, 571 ret = __add_prelim_ref(prefs, ref->root, &key, 0, 0,
569 node->bytenr, 572 node->bytenr,
570 node->ref_mod * sgn); 573 node->ref_mod * sgn);
571 break; 574 break;
572 } 575 }
573 case BTRFS_SHARED_DATA_REF_KEY: { 576 case BTRFS_SHARED_DATA_REF_KEY: {
574 struct btrfs_delayed_data_ref *ref; 577 struct btrfs_delayed_data_ref *ref;
575 578
576 ref = btrfs_delayed_node_to_data_ref(node); 579 ref = btrfs_delayed_node_to_data_ref(node);
577 580
578 key.objectid = ref->objectid; 581 key.objectid = ref->objectid;
579 key.type = BTRFS_EXTENT_DATA_KEY; 582 key.type = BTRFS_EXTENT_DATA_KEY;
580 key.offset = ref->offset; 583 key.offset = ref->offset;
581 ret = __add_prelim_ref(prefs, ref->root, &key, 0, 584 ret = __add_prelim_ref(prefs, ref->root, &key, 0,
582 ref->parent, node->bytenr, 585 ref->parent, node->bytenr,
583 node->ref_mod * sgn); 586 node->ref_mod * sgn);
584 break; 587 break;
585 } 588 }
586 default: 589 default:
587 WARN_ON(1); 590 WARN_ON(1);
588 } 591 }
589 if (ret) 592 if (ret)
590 return ret; 593 return ret;
591 } 594 }
592 595
593 return 0; 596 return 0;
594 } 597 }
595 598
596 /* 599 /*
597 * add all inline backrefs for bytenr to the list 600 * add all inline backrefs for bytenr to the list
598 */ 601 */
/*
 * Collect all inline backrefs stored directly inside the extent item that
 * @path currently points at, turning each one into a __prelim_ref on @prefs.
 *
 * For tree blocks the level is read out of the item (or, for skinny
 * METADATA_ITEM keys, out of the key offset) and reported back through
 * @info_level so the keyed-ref pass can reuse it.
 *
 * Returns 0 on success or the first error from __add_prelim_ref.
 */
static int __add_inline_refs(struct btrfs_fs_info *fs_info,
			     struct btrfs_path *path, u64 bytenr,
			     int *info_level, struct list_head *prefs)
{
	int ret = 0;
	int slot;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	unsigned long ptr;
	unsigned long end;
	struct btrfs_extent_item *ei;
	u64 flags;
	u64 item_size;

	/*
	 * enumerate all inline refs
	 */
	leaf = path->nodes[0];
	slot = path->slots[0];

	item_size = btrfs_item_size_nr(leaf, slot);
	/* an extent item must at least hold the fixed header */
	BUG_ON(item_size < sizeof(*ei));

	ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
	flags = btrfs_extent_flags(leaf, ei);
	btrfs_item_key_to_cpu(leaf, &found_key, slot);

	/* inline refs follow the extent item header up to the item end */
	ptr = (unsigned long)(ei + 1);
	end = (unsigned long)ei + item_size;

	if (found_key.type == BTRFS_EXTENT_ITEM_KEY &&
	    flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		/* non-skinny tree block: a tree_block_info precedes the refs */
		struct btrfs_tree_block_info *info;

		info = (struct btrfs_tree_block_info *)ptr;
		*info_level = btrfs_tree_block_level(leaf, info);
		ptr += sizeof(struct btrfs_tree_block_info);
		BUG_ON(ptr > end);
	} else if (found_key.type == BTRFS_METADATA_ITEM_KEY) {
		/* skinny metadata: the level is stored in the key offset */
		*info_level = found_key.offset;
	} else {
		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
	}

	/* walk the variable-sized inline refs until the item is exhausted */
	while (ptr < end) {
		struct btrfs_extent_inline_ref *iref;
		u64 offset;
		int type;

		iref = (struct btrfs_extent_inline_ref *)ptr;
		type = btrfs_extent_inline_ref_type(leaf, iref);
		offset = btrfs_extent_inline_ref_offset(leaf, iref);

		switch (type) {
		case BTRFS_SHARED_BLOCK_REF_KEY:
			/* offset is the parent block's bytenr */
			ret = __add_prelim_ref(prefs, 0, NULL,
						*info_level + 1, offset,
						bytenr, 1);
			break;
		case BTRFS_SHARED_DATA_REF_KEY: {
			struct btrfs_shared_data_ref *sdref;
			int count;

			sdref = (struct btrfs_shared_data_ref *)(iref + 1);
			count = btrfs_shared_data_ref_count(leaf, sdref);
			ret = __add_prelim_ref(prefs, 0, NULL, 0, offset,
					       bytenr, count);
			break;
		}
		case BTRFS_TREE_BLOCK_REF_KEY:
			/* offset is the owning root's objectid */
			ret = __add_prelim_ref(prefs, offset, NULL,
					       *info_level + 1, 0,
					       bytenr, 1);
			break;
		case BTRFS_EXTENT_DATA_REF_KEY: {
			struct btrfs_extent_data_ref *dref;
			int count;
			u64 root;

			/* the data ref shares its first field with iref->offset */
			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
			count = btrfs_extent_data_ref_count(leaf, dref);
			key.objectid = btrfs_extent_data_ref_objectid(leaf,
								      dref);
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = btrfs_extent_data_ref_offset(leaf, dref);
			root = btrfs_extent_data_ref_root(leaf, dref);
			ret = __add_prelim_ref(prefs, root, &key, 0, 0,
					       bytenr, count);
			break;
		}
		default:
			WARN_ON(1);
		}
		if (ret)
			return ret;
		ptr += btrfs_extent_inline_ref_size(type);
	}

	return 0;
}
700 703
701 /* 704 /*
702 * add all non-inline backrefs for bytenr to the list 705 * add all non-inline backrefs for bytenr to the list
703 */ 706 */
/*
 * Collect all non-inline (keyed) backrefs for @bytenr and add them as
 * __prelim_refs to @prefs.
 *
 * @path must point at the extent item for @bytenr; we then step forward
 * through the extent tree item by item as long as the objectid still
 * matches and the key type lies in the backref range.
 *
 * @info_level: tree-block level discovered by __add_inline_refs; used to
 * compute the level of parent refs here.
 *
 * Returns 0 on success, <0 on error.
 */
static int __add_keyed_refs(struct btrfs_fs_info *fs_info,
			    struct btrfs_path *path, u64 bytenr,
			    int info_level, struct list_head *prefs)
{
	struct btrfs_root *extent_root = fs_info->extent_root;
	int ret;
	int slot;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	while (1) {
		ret = btrfs_next_item(extent_root, path);
		if (ret < 0)
			break;
		if (ret) {
			/* ran off the end of the tree: not an error */
			ret = 0;
			break;
		}

		slot = path->slots[0];
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, slot);

		if (key.objectid != bytenr)
			break;
		/* skip items below the backref key range, stop above it */
		if (key.type < BTRFS_TREE_BLOCK_REF_KEY)
			continue;
		if (key.type > BTRFS_SHARED_DATA_REF_KEY)
			break;

		switch (key.type) {
		case BTRFS_SHARED_BLOCK_REF_KEY:
			/* key.offset is the parent block's bytenr */
			ret = __add_prelim_ref(prefs, 0, NULL,
						info_level + 1, key.offset,
						bytenr, 1);
			break;
		case BTRFS_SHARED_DATA_REF_KEY: {
			struct btrfs_shared_data_ref *sdref;
			int count;

			sdref = btrfs_item_ptr(leaf, slot,
					      struct btrfs_shared_data_ref);
			count = btrfs_shared_data_ref_count(leaf, sdref);
			ret = __add_prelim_ref(prefs, 0, NULL, 0, key.offset,
						bytenr, count);
			break;
		}
		case BTRFS_TREE_BLOCK_REF_KEY:
			/* key.offset is the owning root's objectid */
			ret = __add_prelim_ref(prefs, key.offset, NULL,
					       info_level + 1, 0,
					       bytenr, 1);
			break;
		case BTRFS_EXTENT_DATA_REF_KEY: {
			struct btrfs_extent_data_ref *dref;
			int count;
			u64 root;

			dref = btrfs_item_ptr(leaf, slot,
					      struct btrfs_extent_data_ref);
			count = btrfs_extent_data_ref_count(leaf, dref);
			key.objectid = btrfs_extent_data_ref_objectid(leaf,
								      dref);
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = btrfs_extent_data_ref_offset(leaf, dref);
			root = btrfs_extent_data_ref_root(leaf, dref);
			ret = __add_prelim_ref(prefs, root, &key, 0, 0,
					       bytenr, count);
			break;
		}
		default:
			WARN_ON(1);
		}
		if (ret)
			return ret;

	}

	return ret;
}
783 786
784 /* 787 /*
785 * this adds all existing backrefs (inline backrefs, backrefs and delayed 788 * this adds all existing backrefs (inline backrefs, backrefs and delayed
786 * refs) for the given bytenr to the refs list, merges duplicates and resolves 789 * refs) for the given bytenr to the refs list, merges duplicates and resolves
787 * indirect refs to their parent bytenr. 790 * indirect refs to their parent bytenr.
788 * When roots are found, they're added to the roots list 791 * When roots are found, they're added to the roots list
789 * 792 *
790 * FIXME some caching might speed things up 793 * FIXME some caching might speed things up
791 */ 794 */
static int find_parent_nodes(struct btrfs_trans_handle *trans,
			     struct btrfs_fs_info *fs_info, u64 bytenr,
			     u64 time_seq, struct ulist *refs,
			     struct ulist *roots, const u64 *extent_item_pos)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	struct btrfs_delayed_ref_root *delayed_refs = NULL;
	struct btrfs_delayed_ref_head *head;
	int info_level = 0;
	int ret;
	struct list_head prefs_delayed;	/* refs gathered from delayed-ref head */
	struct list_head prefs;		/* refs gathered from on-disk items */
	struct __prelim_ref *ref;

	INIT_LIST_HEAD(&prefs);
	INIT_LIST_HEAD(&prefs_delayed);

	/* search for the last possible key of @bytenr so we land just past it */
	key.objectid = bytenr;
	key.offset = (u64)-1;
	if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
		key.type = BTRFS_METADATA_ITEM_KEY;
	else
		key.type = BTRFS_EXTENT_ITEM_KEY;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	if (!trans)
		/* no transaction: resolve against the commit root */
		path->search_commit_root = 1;

	/*
	 * grab both a lock on the path and a lock on the delayed ref head.
	 * We need both to get a consistent picture of how the refs look
	 * at a specified point in time
	 */
again:
	head = NULL;

	ret = btrfs_search_slot(trans, fs_info->extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	/* (u64)-1 offset can never match exactly */
	BUG_ON(ret == 0);

	if (trans) {
		/*
		 * look if there are updates for this ref queued and lock the
		 * head
		 */
		delayed_refs = &trans->transaction->delayed_refs;
		spin_lock(&delayed_refs->lock);
		head = btrfs_find_delayed_ref_head(trans, bytenr);
		if (head) {
			if (!mutex_trylock(&head->mutex)) {
				atomic_inc(&head->node.refs);
				spin_unlock(&delayed_refs->lock);

				btrfs_release_path(path);

				/*
				 * Mutex was contended, block until it's
				 * released and try again
				 */
				mutex_lock(&head->mutex);
				mutex_unlock(&head->mutex);
				btrfs_put_delayed_ref(&head->node);
				goto again;
			}
			ret = __add_delayed_refs(head, time_seq,
						 &prefs_delayed);
			mutex_unlock(&head->mutex);
			if (ret) {
				spin_unlock(&delayed_refs->lock);
				goto out;
			}
		}
		spin_unlock(&delayed_refs->lock);
	}

	if (path->slots[0]) {
		struct extent_buffer *leaf;
		int slot;

		/* step back to the item at or before our search key */
		path->slots[0]--;
		leaf = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.objectid == bytenr &&
		    (key.type == BTRFS_EXTENT_ITEM_KEY ||
		     key.type == BTRFS_METADATA_ITEM_KEY)) {
			ret = __add_inline_refs(fs_info, path, bytenr,
						&info_level, &prefs);
			if (ret)
				goto out;
			ret = __add_keyed_refs(fs_info, path, bytenr,
					       info_level, &prefs);
			if (ret)
				goto out;
		}
	}
	btrfs_release_path(path);

	/* from here on, on-disk and delayed refs are treated uniformly */
	list_splice_init(&prefs_delayed, &prefs);

	ret = __add_missing_keys(fs_info, &prefs);
	if (ret)
		goto out;

	__merge_refs(&prefs, 1);

	ret = __resolve_indirect_refs(fs_info, path, time_seq, &prefs,
				      extent_item_pos);
	if (ret)
		goto out;

	__merge_refs(&prefs, 2);

	/* move the resolved refs into the result ulists */
	while (!list_empty(&prefs)) {
		ref = list_first_entry(&prefs, struct __prelim_ref, list);
		list_del(&ref->list);
		WARN_ON(ref->count < 0);
		if (ref->count && ref->root_id && ref->parent == 0) {
			/* no parent == root of tree */
			ret = ulist_add(roots, ref->root_id, 0, GFP_NOFS);
			if (ret < 0)
				goto out;
		}
		if (ref->count && ref->parent) {
			struct extent_inode_elem *eie = NULL;
			if (extent_item_pos && !ref->inode_list) {
				u32 bsz;
				struct extent_buffer *eb;
				bsz = btrfs_level_size(fs_info->extent_root,
							info_level);
				eb = read_tree_block(fs_info->extent_root,
							   ref->parent, bsz, 0);
				if (!eb || !extent_buffer_uptodate(eb)) {
					free_extent_buffer(eb);
					ret = -EIO;
					goto out;
				}
				ret = find_extent_in_eb(eb, bytenr,
							*extent_item_pos, &eie);
				ref->inode_list = eie;
				free_extent_buffer(eb);
			}
			/*
			 * merge rather than plain add: the same parent can be
			 * reached through several refs (e.g. a compressed
			 * extent split as [a][b][rest of a]); eie receives the
			 * aux value already stored for this parent, if any.
			 */
			ret = ulist_add_merge(refs, ref->parent,
					      (uintptr_t)ref->inode_list,
					      (u64 *)&eie, GFP_NOFS);
			if (ret < 0)
				goto out;
			if (!ret && extent_item_pos) {
				/*
				 * we've recorded that parent, so we must extend
				 * its inode list here
				 */
				BUG_ON(!eie);
				while (eie->next)
					eie = eie->next;
				eie->next = ref->inode_list;
			}
		}
		kfree(ref);
	}

out:
	btrfs_free_path(path);
	/* on error, drop whatever refs were not consumed above */
	while (!list_empty(&prefs)) {
		ref = list_first_entry(&prefs, struct __prelim_ref, list);
		list_del(&ref->list);
		kfree(ref);
	}
	while (!list_empty(&prefs_delayed)) {
		ref = list_first_entry(&prefs_delayed, struct __prelim_ref,
				       list);
		list_del(&ref->list);
		kfree(ref);
	}

	return ret;
}
973 976
974 static void free_leaf_list(struct ulist *blocks) 977 static void free_leaf_list(struct ulist *blocks)
975 { 978 {
976 struct ulist_node *node = NULL; 979 struct ulist_node *node = NULL;
977 struct extent_inode_elem *eie; 980 struct extent_inode_elem *eie;
978 struct extent_inode_elem *eie_next; 981 struct extent_inode_elem *eie_next;
979 struct ulist_iterator uiter; 982 struct ulist_iterator uiter;
980 983
981 ULIST_ITER_INIT(&uiter); 984 ULIST_ITER_INIT(&uiter);
982 while ((node = ulist_next(blocks, &uiter))) { 985 while ((node = ulist_next(blocks, &uiter))) {
983 if (!node->aux) 986 if (!node->aux)
984 continue; 987 continue;
985 eie = (struct extent_inode_elem *)(uintptr_t)node->aux; 988 eie = (struct extent_inode_elem *)(uintptr_t)node->aux;
986 for (; eie; eie = eie_next) { 989 for (; eie; eie = eie_next) {
987 eie_next = eie->next; 990 eie_next = eie->next;
988 kfree(eie); 991 kfree(eie);
989 } 992 }
990 node->aux = 0; 993 node->aux = 0;
991 } 994 }
992 995
993 ulist_free(blocks); 996 ulist_free(blocks);
994 } 997 }
995 998
996 /* 999 /*
997 * Finds all leafs with a reference to the specified combination of bytenr and 1000 * Finds all leafs with a reference to the specified combination of bytenr and
998 * offset. key_list_head will point to a list of corresponding keys (caller must 1001 * offset. key_list_head will point to a list of corresponding keys (caller must
999 * free each list element). The leafs will be stored in the leafs ulist, which 1002 * free each list element). The leafs will be stored in the leafs ulist, which
1000 * must be freed with ulist_free. 1003 * must be freed with ulist_free.
1001 * 1004 *
1002 * returns 0 on success, <0 on error 1005 * returns 0 on success, <0 on error
1003 */ 1006 */
1004 static int btrfs_find_all_leafs(struct btrfs_trans_handle *trans, 1007 static int btrfs_find_all_leafs(struct btrfs_trans_handle *trans,
1005 struct btrfs_fs_info *fs_info, u64 bytenr, 1008 struct btrfs_fs_info *fs_info, u64 bytenr,
1006 u64 time_seq, struct ulist **leafs, 1009 u64 time_seq, struct ulist **leafs,
1007 const u64 *extent_item_pos) 1010 const u64 *extent_item_pos)
1008 { 1011 {
1009 struct ulist *tmp; 1012 struct ulist *tmp;
1010 int ret; 1013 int ret;
1011 1014
1012 tmp = ulist_alloc(GFP_NOFS); 1015 tmp = ulist_alloc(GFP_NOFS);
1013 if (!tmp) 1016 if (!tmp)
1014 return -ENOMEM; 1017 return -ENOMEM;
1015 *leafs = ulist_alloc(GFP_NOFS); 1018 *leafs = ulist_alloc(GFP_NOFS);
1016 if (!*leafs) { 1019 if (!*leafs) {
1017 ulist_free(tmp); 1020 ulist_free(tmp);
1018 return -ENOMEM; 1021 return -ENOMEM;
1019 } 1022 }
1020 1023
1021 ret = find_parent_nodes(trans, fs_info, bytenr, 1024 ret = find_parent_nodes(trans, fs_info, bytenr,
1022 time_seq, *leafs, tmp, extent_item_pos); 1025 time_seq, *leafs, tmp, extent_item_pos);
1023 ulist_free(tmp); 1026 ulist_free(tmp);
1024 1027
1025 if (ret < 0 && ret != -ENOENT) { 1028 if (ret < 0 && ret != -ENOENT) {
1026 free_leaf_list(*leafs); 1029 free_leaf_list(*leafs);
1027 return ret; 1030 return ret;
1028 } 1031 }
1029 1032
1030 return 0; 1033 return 0;
1031 } 1034 }
1032 1035
1033 /* 1036 /*
1034 * walk all backrefs for a given extent to find all roots that reference this 1037 * walk all backrefs for a given extent to find all roots that reference this
1035 * extent. Walking a backref means finding all extents that reference this 1038 * extent. Walking a backref means finding all extents that reference this
1036 * extent and in turn walk the backrefs of those, too. Naturally this is a 1039 * extent and in turn walk the backrefs of those, too. Naturally this is a
1037 * recursive process, but here it is implemented in an iterative fashion: We 1040 * recursive process, but here it is implemented in an iterative fashion: We
1038 * find all referencing extents for the extent in question and put them on a 1041 * find all referencing extents for the extent in question and put them on a
1039 * list. In turn, we find all referencing extents for those, further appending 1042 * list. In turn, we find all referencing extents for those, further appending
1040 * to the list. The way we iterate the list allows adding more elements after 1043 * to the list. The way we iterate the list allows adding more elements after
1041 * the current while iterating. The process stops when we reach the end of the 1044 * the current while iterating. The process stops when we reach the end of the
1042 * list. Found roots are added to the roots list. 1045 * list. Found roots are added to the roots list.
1043 * 1046 *
1044 * returns 0 on success, < 0 on error. 1047 * returns 0 on success, < 0 on error.
1045 */ 1048 */
1046 int btrfs_find_all_roots(struct btrfs_trans_handle *trans, 1049 int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
1047 struct btrfs_fs_info *fs_info, u64 bytenr, 1050 struct btrfs_fs_info *fs_info, u64 bytenr,
1048 u64 time_seq, struct ulist **roots) 1051 u64 time_seq, struct ulist **roots)
1049 { 1052 {
1050 struct ulist *tmp; 1053 struct ulist *tmp;
1051 struct ulist_node *node = NULL; 1054 struct ulist_node *node = NULL;
1052 struct ulist_iterator uiter; 1055 struct ulist_iterator uiter;
1053 int ret; 1056 int ret;
1054 1057
1055 tmp = ulist_alloc(GFP_NOFS); 1058 tmp = ulist_alloc(GFP_NOFS);
1056 if (!tmp) 1059 if (!tmp)
1057 return -ENOMEM; 1060 return -ENOMEM;
1058 *roots = ulist_alloc(GFP_NOFS); 1061 *roots = ulist_alloc(GFP_NOFS);
1059 if (!*roots) { 1062 if (!*roots) {
1060 ulist_free(tmp); 1063 ulist_free(tmp);
1061 return -ENOMEM; 1064 return -ENOMEM;
1062 } 1065 }
1063 1066
1064 ULIST_ITER_INIT(&uiter); 1067 ULIST_ITER_INIT(&uiter);
1065 while (1) { 1068 while (1) {
1066 ret = find_parent_nodes(trans, fs_info, bytenr, 1069 ret = find_parent_nodes(trans, fs_info, bytenr,
1067 time_seq, tmp, *roots, NULL); 1070 time_seq, tmp, *roots, NULL);
1068 if (ret < 0 && ret != -ENOENT) { 1071 if (ret < 0 && ret != -ENOENT) {
1069 ulist_free(tmp); 1072 ulist_free(tmp);
1070 ulist_free(*roots); 1073 ulist_free(*roots);
1071 return ret; 1074 return ret;
1072 } 1075 }
1073 node = ulist_next(tmp, &uiter); 1076 node = ulist_next(tmp, &uiter);
1074 if (!node) 1077 if (!node)
1075 break; 1078 break;
1076 bytenr = node->val; 1079 bytenr = node->val;
1077 } 1080 }
1078 1081
1079 ulist_free(tmp); 1082 ulist_free(tmp);
1080 return 0; 1083 return 0;
1081 } 1084 }
1082 1085
1083 1086
1084 static int __inode_info(u64 inum, u64 ioff, u8 key_type, 1087 static int __inode_info(u64 inum, u64 ioff, u8 key_type,
1085 struct btrfs_root *fs_root, struct btrfs_path *path, 1088 struct btrfs_root *fs_root, struct btrfs_path *path,
1086 struct btrfs_key *found_key) 1089 struct btrfs_key *found_key)
1087 { 1090 {
1088 int ret; 1091 int ret;
1089 struct btrfs_key key; 1092 struct btrfs_key key;
1090 struct extent_buffer *eb; 1093 struct extent_buffer *eb;
1091 1094
1092 key.type = key_type; 1095 key.type = key_type;
1093 key.objectid = inum; 1096 key.objectid = inum;
1094 key.offset = ioff; 1097 key.offset = ioff;
1095 1098
1096 ret = btrfs_search_slot(NULL, fs_root, &key, path, 0, 0); 1099 ret = btrfs_search_slot(NULL, fs_root, &key, path, 0, 0);
1097 if (ret < 0) 1100 if (ret < 0)
1098 return ret; 1101 return ret;
1099 1102
1100 eb = path->nodes[0]; 1103 eb = path->nodes[0];
1101 if (ret && path->slots[0] >= btrfs_header_nritems(eb)) { 1104 if (ret && path->slots[0] >= btrfs_header_nritems(eb)) {
1102 ret = btrfs_next_leaf(fs_root, path); 1105 ret = btrfs_next_leaf(fs_root, path);
1103 if (ret) 1106 if (ret)
1104 return ret; 1107 return ret;
1105 eb = path->nodes[0]; 1108 eb = path->nodes[0];
1106 } 1109 }
1107 1110
1108 btrfs_item_key_to_cpu(eb, found_key, path->slots[0]); 1111 btrfs_item_key_to_cpu(eb, found_key, path->slots[0]);
1109 if (found_key->type != key.type || found_key->objectid != key.objectid) 1112 if (found_key->type != key.type || found_key->objectid != key.objectid)
1110 return 1; 1113 return 1;
1111 1114
1112 return 0; 1115 return 0;
1113 } 1116 }
1114 1117
1115 /* 1118 /*
1116 * this makes the path point to (inum INODE_ITEM ioff) 1119 * this makes the path point to (inum INODE_ITEM ioff)
1117 */ 1120 */
1118 int inode_item_info(u64 inum, u64 ioff, struct btrfs_root *fs_root, 1121 int inode_item_info(u64 inum, u64 ioff, struct btrfs_root *fs_root,
1119 struct btrfs_path *path) 1122 struct btrfs_path *path)
1120 { 1123 {
1121 struct btrfs_key key; 1124 struct btrfs_key key;
1122 return __inode_info(inum, ioff, BTRFS_INODE_ITEM_KEY, fs_root, path, 1125 return __inode_info(inum, ioff, BTRFS_INODE_ITEM_KEY, fs_root, path,
1123 &key); 1126 &key);
1124 } 1127 }
1125 1128
1126 static int inode_ref_info(u64 inum, u64 ioff, struct btrfs_root *fs_root, 1129 static int inode_ref_info(u64 inum, u64 ioff, struct btrfs_root *fs_root,
1127 struct btrfs_path *path, 1130 struct btrfs_path *path,
1128 struct btrfs_key *found_key) 1131 struct btrfs_key *found_key)
1129 { 1132 {
1130 return __inode_info(inum, ioff, BTRFS_INODE_REF_KEY, fs_root, path, 1133 return __inode_info(inum, ioff, BTRFS_INODE_REF_KEY, fs_root, path,
1131 found_key); 1134 found_key);
1132 } 1135 }
1133 1136
/*
 * btrfs_find_one_extref - find the first INODE_EXTREF item for an inode at
 * or after a given key offset.
 *
 * @root:		fs root to search in
 * @inode_objectid:	objectid (inode number) the extref must belong to
 * @start_off:		minimum key offset to accept
 * @path:		caller-provided path; on success points at the item
 * @ret_extref:		set to the extref item inside the leaf (valid only
 *			while the path is held)
 * @found_off:		if non-NULL, receives the key offset of the match,
 *			letting the caller resume the scan from offset+1
 *
 * Returns 0 on success, -ENOENT when no further extref exists for this
 * inode, or a negative error from the tree search.
 */
int btrfs_find_one_extref(struct btrfs_root *root, u64 inode_objectid,
			  u64 start_off, struct btrfs_path *path,
			  struct btrfs_inode_extref **ret_extref,
			  u64 *found_off)
{
	int ret, slot;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_inode_extref *extref;
	struct extent_buffer *leaf;
	unsigned long ptr;

	key.objectid = inode_objectid;
	btrfs_set_key_type(&key, BTRFS_INODE_EXTREF_KEY);
	key.offset = start_off;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return ret;

	while (1) {
		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			/*
			 * If the item at offset is not found,
			 * btrfs_search_slot will point us to the slot
			 * where it should be inserted. In our case
			 * that will be the slot directly before the
			 * next INODE_REF_KEY_V2 item. In the case
			 * that we're pointing to the last slot in a
			 * leaf, we must move one leaf over.
			 */
			ret = btrfs_next_leaf(root, path);
			if (ret) {
				/* ret > 0 means no more leaves: not found */
				if (ret >= 1)
					ret = -ENOENT;
				break;
			}
			continue;
		}

		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		/*
		 * Check that we're still looking at an extended ref key for
		 * this particular objectid. If we have different
		 * objectid or type then there are no more to be found
		 * in the tree and we can exit.
		 */
		ret = -ENOENT;
		if (found_key.objectid != inode_objectid)
			break;
		if (btrfs_key_type(&found_key) != BTRFS_INODE_EXTREF_KEY)
			break;

		/* match: hand the in-leaf extref and its key offset back */
		ret = 0;
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
		extref = (struct btrfs_inode_extref *)ptr;
		*ret_extref = extref;
		if (found_off)
			*found_off = found_key.offset;
		break;
	}

	return ret;
}
1201 1204
/*
 * this iterates to turn a name (from iref/extref) into a full filesystem path.
 * Elements of the path are separated by '/' and the path is guaranteed to be
 * 0-terminated. the path is only given within the current file system.
 * Therefore, it never starts with a '/'. the caller is responsible to provide
 * "size" bytes in "dest". the dest buffer will be filled backwards. finally,
 * the start point of the resulting string is returned. this pointer is within
 * dest, normally.
 * in case the path buffer would overflow, the pointer is decremented further
 * as if output was written to the buffer, though no more output is actually
 * generated. that way, the caller can determine how much space would be
 * required for the path to fit into the buffer. in that case, the returned
 * value will be smaller than dest. callers must check this!
 */
char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
			u32 name_len, unsigned long name_off,
			struct extent_buffer *eb_in, u64 parent,
			char *dest, u32 size)
{
	int slot;
	u64 next_inum;
	int ret;
	/* signed so it can go negative when the buffer would overflow */
	s64 bytes_left = ((s64)size) - 1;
	struct extent_buffer *eb = eb_in;
	struct btrfs_key found_key;
	int leave_spinning = path->leave_spinning;
	struct btrfs_inode_ref *iref;

	if (bytes_left >= 0)
		dest[bytes_left] = '\0';

	path->leave_spinning = 1;
	/* walk up the directory chain, writing each name right-to-left */
	while (1) {
		bytes_left -= name_len;
		if (bytes_left >= 0)
			read_extent_buffer(eb, dest + bytes_left,
					   name_off, name_len);
		/* drop the lock/ref we took on the previous iteration's leaf */
		if (eb != eb_in) {
			btrfs_tree_read_unlock_blocking(eb);
			free_extent_buffer(eb);
		}
		ret = inode_ref_info(parent, 0, fs_root, path, &found_key);
		if (ret > 0)
			ret = -ENOENT;
		if (ret)
			break;

		next_inum = found_key.offset;

		/* regular exit ahead */
		if (parent == next_inum)
			break;

		slot = path->slots[0];
		eb = path->nodes[0];
		/* make sure we can use eb after releasing the path */
		if (eb != eb_in) {
			atomic_inc(&eb->refs);
			btrfs_tree_read_lock(eb);
			btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
		}
		btrfs_release_path(path);
		iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);

		/* the name bytes follow the btrfs_inode_ref header in-leaf */
		name_len = btrfs_inode_ref_name_len(eb, iref);
		name_off = (unsigned long)(iref + 1);

		parent = next_inum;
		--bytes_left;
		if (bytes_left >= 0)
			dest[bytes_left] = '/';
	}

	btrfs_release_path(path);
	path->leave_spinning = leave_spinning;

	if (ret)
		return ERR_PTR(ret);

	return dest + bytes_left;
}
1283 1286
/*
 * this makes the path point to (logical EXTENT_ITEM *)
 * returns BTRFS_EXTENT_FLAG_DATA for data, BTRFS_EXTENT_FLAG_TREE_BLOCK for
 * tree blocks and <0 on error.
 */
int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
			struct btrfs_path *path, struct btrfs_key *found_key,
			u64 *flags_ret)
{
	int ret;
	u64 flags;
	u64 size = 0;
	u32 item_size;
	struct extent_buffer *eb;
	struct btrfs_extent_item *ei;
	struct btrfs_key key;

	/*
	 * With the skinny-metadata incompat feature, tree block extents are
	 * keyed as METADATA_ITEMs instead of EXTENT_ITEMs.
	 */
	if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
		key.type = BTRFS_METADATA_ITEM_KEY;
	else
		key.type = BTRFS_EXTENT_ITEM_KEY;
	key.objectid = logical;
	key.offset = (u64)-1;

	/* offset (u64)-1 never matches; the search lands past our item */
	ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
	if (ret < 0)
		return ret;
	/*
	 * Step back to the extent item that may cover 'logical'.
	 * NOTE(review): only BTRFS_EXTENT_ITEM_KEY is passed here, so on a
	 * skinny-metadata fs a preceding METADATA_ITEM may be skipped over -
	 * confirm against btrfs_previous_item()'s type filtering.
	 */
	ret = btrfs_previous_item(fs_info->extent_root, path,
					0, BTRFS_EXTENT_ITEM_KEY);
	if (ret < 0)
		return ret;

	btrfs_item_key_to_cpu(path->nodes[0], found_key, path->slots[0]);
	/*
	 * METADATA_ITEMs don't encode a byte size in the key offset; a tree
	 * block always spans one leafsize.
	 */
	if (found_key->type == BTRFS_METADATA_ITEM_KEY)
		size = fs_info->extent_root->leafsize;
	else if (found_key->type == BTRFS_EXTENT_ITEM_KEY)
		size = found_key->offset;

	/* verify that 'logical' falls inside [objectid, objectid + size) */
	if ((found_key->type != BTRFS_EXTENT_ITEM_KEY &&
	     found_key->type != BTRFS_METADATA_ITEM_KEY) ||
	    found_key->objectid > logical ||
	    found_key->objectid + size <= logical) {
		pr_debug("logical %llu is not within any extent\n",
			 (unsigned long long)logical);
		return -ENOENT;
	}

	eb = path->nodes[0];
	item_size = btrfs_item_size_nr(eb, path->slots[0]);
	BUG_ON(item_size < sizeof(*ei));

	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
	flags = btrfs_extent_flags(eb, ei);

	pr_debug("logical %llu is at position %llu within the extent (%llu "
		 "EXTENT_ITEM %llu) flags %#llx size %u\n",
		 (unsigned long long)logical,
		 (unsigned long long)(logical - found_key->objectid),
		 (unsigned long long)found_key->objectid,
		 (unsigned long long)found_key->offset,
		 (unsigned long long)flags, item_size);

	/* report exactly one of the two flag classes to the caller */
	WARN_ON(!flags_ret);
	if (flags_ret) {
		if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
			*flags_ret = BTRFS_EXTENT_FLAG_TREE_BLOCK;
		else if (flags & BTRFS_EXTENT_FLAG_DATA)
			*flags_ret = BTRFS_EXTENT_FLAG_DATA;
		else
			BUG_ON(1);
		return 0;
	}

	return -EIO;
}
1359 1362
/*
 * helper function to iterate extent inline refs. ptr must point to a 0 value
 * for the first call and may be modified. it is used to track state.
 * if more refs exist, 0 is returned and the next call to
 * __get_extent_inline_ref must pass the modified ptr parameter to get the
 * next ref. after the last ref was processed, 1 is returned.
 * returns <0 on error
 */
static int __get_extent_inline_ref(unsigned long *ptr, struct extent_buffer *eb,
				   struct btrfs_extent_item *ei, u32 item_size,
				   struct btrfs_extent_inline_ref **out_eiref,
				   int *out_type)
{
	unsigned long end;
	u64 flags;
	struct btrfs_tree_block_info *info;

	if (!*ptr) {
		/* first call */
		flags = btrfs_extent_flags(eb, ei);
		if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
			/*
			 * Tree block EXTENT_ITEMs carry a tree_block_info
			 * between the extent item and the inline refs.
			 * NOTE(review): skinny METADATA_ITEMs have no
			 * tree_block_info - confirm callers never pass one.
			 */
			info = (struct btrfs_tree_block_info *)(ei + 1);
			*out_eiref =
				(struct btrfs_extent_inline_ref *)(info + 1);
		} else {
			*out_eiref = (struct btrfs_extent_inline_ref *)(ei + 1);
		}
		*ptr = (unsigned long)*out_eiref;
		/* item ends before the first ref: no inline refs at all */
		if ((void *)*ptr >= (void *)ei + item_size)
			return -ENOENT;
	}

	/* decode the ref at *ptr and advance the cursor past it */
	end = (unsigned long)ei + item_size;
	*out_eiref = (struct btrfs_extent_inline_ref *)*ptr;
	*out_type = btrfs_extent_inline_ref_type(eb, *out_eiref);

	*ptr += btrfs_extent_inline_ref_size(*out_type);
	WARN_ON(*ptr > end);
	if (*ptr == end)
		return 1; /* last */

	return 0;
}
1403 1406
/*
 * reads the tree block backref for an extent. tree level and root are returned
 * through out_level and out_root. ptr must point to a 0 value for the first
 * call and may be modified (see __get_extent_inline_ref comment).
 * returns 0 if data was provided, 1 if there was no more data to provide or
 * <0 on error.
 */
int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
			    struct btrfs_extent_item *ei, u32 item_size,
			    u64 *out_root, u8 *out_level)
{
	int ret;
	int type;
	struct btrfs_tree_block_info *info;
	struct btrfs_extent_inline_ref *eiref;

	/* (unsigned long)-1 means the previous call consumed the last ref */
	if (*ptr == (unsigned long)-1)
		return 1;

	/* skip over non-tree-block inline refs */
	while (1) {
		ret = __get_extent_inline_ref(ptr, eb, ei, item_size,
					      &eiref, &type);
		if (ret < 0)
			return ret;

		if (type == BTRFS_TREE_BLOCK_REF_KEY ||
		    type == BTRFS_SHARED_BLOCK_REF_KEY)
			break;

		/* last ref was not a tree block ref: nothing to report */
		if (ret == 1)
			return 1;
	}

	/* we can treat both ref types equally here */
	/*
	 * NOTE(review): this assumes a btrfs_tree_block_info directly follows
	 * the extent item; skinny METADATA_ITEMs don't have one - confirm
	 * callers only pass classic EXTENT_ITEMs.
	 */
	info = (struct btrfs_tree_block_info *)(ei + 1);
	*out_root = btrfs_extent_inline_ref_offset(eb, eiref);
	*out_level = btrfs_tree_block_level(eb, info);

	/* mark the iteration finished so the next call returns 1 */
	if (ret == 1)
		*ptr = (unsigned long)-1;

	return 0;
}
1447 1450
1448 static int iterate_leaf_refs(struct extent_inode_elem *inode_list, 1451 static int iterate_leaf_refs(struct extent_inode_elem *inode_list,
1449 u64 root, u64 extent_item_objectid, 1452 u64 root, u64 extent_item_objectid,
1450 iterate_extent_inodes_t *iterate, void *ctx) 1453 iterate_extent_inodes_t *iterate, void *ctx)
1451 { 1454 {
1452 struct extent_inode_elem *eie; 1455 struct extent_inode_elem *eie;
1453 int ret = 0; 1456 int ret = 0;
1454 1457
1455 for (eie = inode_list; eie; eie = eie->next) { 1458 for (eie = inode_list; eie; eie = eie->next) {
1456 pr_debug("ref for %llu resolved, key (%llu EXTEND_DATA %llu), " 1459 pr_debug("ref for %llu resolved, key (%llu EXTEND_DATA %llu), "
1457 "root %llu\n", extent_item_objectid, 1460 "root %llu\n", extent_item_objectid,
1458 eie->inum, eie->offset, root); 1461 eie->inum, eie->offset, root);
1459 ret = iterate(eie->inum, eie->offset, root, ctx); 1462 ret = iterate(eie->inum, eie->offset, root, ctx);
1460 if (ret) { 1463 if (ret) {
1461 pr_debug("stopping iteration for %llu due to ret=%d\n", 1464 pr_debug("stopping iteration for %llu due to ret=%d\n",
1462 extent_item_objectid, ret); 1465 extent_item_objectid, ret);
1463 break; 1466 break;
1464 } 1467 }
1465 } 1468 }
1466 1469
1467 return ret; 1470 return ret;
1468 } 1471 }
1469 1472
/*
 * calls iterate() for every inode that references the extent identified by
 * the given parameters.
 * when the iterator function returns a non-zero value, iteration stops.
 */
int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
			  u64 extent_item_objectid, u64 extent_item_pos,
			  int search_commit_root,
			  iterate_extent_inodes_t *iterate, void *ctx)
{
	int ret;
	struct btrfs_trans_handle *trans = NULL;
	struct ulist *refs = NULL;
	struct ulist *roots = NULL;
	struct ulist_node *ref_node = NULL;
	struct ulist_node *root_node = NULL;
	struct seq_list tree_mod_seq_elem = {};
	struct ulist_iterator ref_uiter;
	struct ulist_iterator root_uiter;

	pr_debug("resolving all inodes for extent %llu\n",
			extent_item_objectid);

	/*
	 * When walking the live tree (as opposed to the commit root) we join
	 * a transaction and pin a tree-mod-log sequence so the backref walk
	 * sees a consistent view while delayed refs are in flight.
	 */
	if (!search_commit_root) {
		trans = btrfs_join_transaction(fs_info->extent_root);
		if (IS_ERR(trans))
			return PTR_ERR(trans);
		btrfs_get_tree_mod_seq(fs_info, &tree_mod_seq_elem);
	}

	/* collect all leaves referencing the extent; each ulist node's aux
	 * carries the extent_inode_elem list for that leaf */
	ret = btrfs_find_all_leafs(trans, fs_info, extent_item_objectid,
				   tree_mod_seq_elem.seq, &refs,
				   &extent_item_pos);
	if (ret)
		goto out;

	ULIST_ITER_INIT(&ref_uiter);
	while (!ret && (ref_node = ulist_next(refs, &ref_uiter))) {
		/* for each leaf, resolve every root that references it */
		ret = btrfs_find_all_roots(trans, fs_info, ref_node->val,
					   tree_mod_seq_elem.seq, &roots);
		if (ret)
			break;
		ULIST_ITER_INIT(&root_uiter);
		while (!ret && (root_node = ulist_next(roots, &root_uiter))) {
			pr_debug("root %llu references leaf %llu, data list "
				 "%#llx\n", root_node->val, ref_node->val,
				 (long long)ref_node->aux);
			ret = iterate_leaf_refs((struct extent_inode_elem *)
						(uintptr_t)ref_node->aux,
						root_node->val,
						extent_item_objectid,
						iterate, ctx);
		}
		ulist_free(roots);
	}

	/* frees the refs ulist together with the aux inode lists */
	free_leaf_list(refs);
out:
	if (!search_commit_root) {
		btrfs_put_tree_mod_seq(fs_info, &tree_mod_seq_elem);
		btrfs_end_transaction(trans, fs_info->extent_root);
	}

	return ret;
}
1535 1538
1536 int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info, 1539 int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info,
1537 struct btrfs_path *path, 1540 struct btrfs_path *path,
1538 iterate_extent_inodes_t *iterate, void *ctx) 1541 iterate_extent_inodes_t *iterate, void *ctx)
1539 { 1542 {
1540 int ret; 1543 int ret;
1541 u64 extent_item_pos; 1544 u64 extent_item_pos;
1542 u64 flags = 0; 1545 u64 flags = 0;
1543 struct btrfs_key found_key; 1546 struct btrfs_key found_key;
1544 int search_commit_root = path->search_commit_root; 1547 int search_commit_root = path->search_commit_root;
1545 1548
1546 ret = extent_from_logical(fs_info, logical, path, &found_key, &flags); 1549 ret = extent_from_logical(fs_info, logical, path, &found_key, &flags);
1547 btrfs_release_path(path); 1550 btrfs_release_path(path);
1548 if (ret < 0) 1551 if (ret < 0)
1549 return ret; 1552 return ret;
1550 if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) 1553 if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
1551 return -EINVAL; 1554 return -EINVAL;
1552 1555
1553 extent_item_pos = logical - found_key.objectid; 1556 extent_item_pos = logical - found_key.objectid;
1554 ret = iterate_extent_inodes(fs_info, found_key.objectid, 1557 ret = iterate_extent_inodes(fs_info, found_key.objectid,
1555 extent_item_pos, search_commit_root, 1558 extent_item_pos, search_commit_root,
1556 iterate, ctx); 1559 iterate, ctx);
1557 1560
1558 return ret; 1561 return ret;
1559 } 1562 }
1560 1563
1561 typedef int (iterate_irefs_t)(u64 parent, u32 name_len, unsigned long name_off, 1564 typedef int (iterate_irefs_t)(u64 parent, u32 name_len, unsigned long name_off,
1562 struct extent_buffer *eb, void *ctx); 1565 struct extent_buffer *eb, void *ctx);
1563 1566
/*
 * Walk all INODE_REF items of 'inum' and call 'iterate' once per contained
 * name.  Returns 0 if at least one ref was found (or the callback stopped
 * the walk with 0), -ENOENT if none exist, the callback's non-zero value,
 * or a negative search error.
 */
static int iterate_inode_refs(u64 inum, struct btrfs_root *fs_root,
			      struct btrfs_path *path,
			      iterate_irefs_t *iterate, void *ctx)
{
	int ret = 0;
	int slot;
	u32 cur;
	u32 len;
	u32 name_len;
	u64 parent = 0;
	int found = 0;
	struct extent_buffer *eb;
	struct btrfs_item *item;
	struct btrfs_inode_ref *iref;
	struct btrfs_key found_key;

	while (!ret) {
		path->leave_spinning = 1;
		/* resume after the previously visited parent dir (key offset) */
		ret = inode_ref_info(inum, parent ? parent+1 : 0, fs_root, path,
				     &found_key);
		if (ret < 0)
			break;
		if (ret) {
			ret = found ? 0 : -ENOENT;
			break;
		}
		++found;

		parent = found_key.offset;
		slot = path->slots[0];
		eb = path->nodes[0];
		/* make sure we can use eb after releasing the path */
		atomic_inc(&eb->refs);
		btrfs_tree_read_lock(eb);
		btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
		btrfs_release_path(path);

		item = btrfs_item_nr(eb, slot);
		iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);

		/* one INODE_REF item can pack several names back to back */
		for (cur = 0; cur < btrfs_item_size(eb, item); cur += len) {
			name_len = btrfs_inode_ref_name_len(eb, iref);
			/* path must be released before calling iterate()! */
			pr_debug("following ref at offset %u for inode %llu in "
				 "tree %llu\n", cur,
				 (unsigned long long)found_key.objectid,
				 (unsigned long long)fs_root->objectid);
			ret = iterate(parent, name_len,
				      (unsigned long)(iref + 1), eb, ctx);
			if (ret)
				break;
			len = sizeof(*iref) + name_len;
			iref = (struct btrfs_inode_ref *)((char *)iref + len);
		}
		btrfs_tree_read_unlock_blocking(eb);
		free_extent_buffer(eb);
	}

	btrfs_release_path(path);

	return ret;
}
1626 1629
1627 static int iterate_inode_extrefs(u64 inum, struct btrfs_root *fs_root, 1630 static int iterate_inode_extrefs(u64 inum, struct btrfs_root *fs_root,
1628 struct btrfs_path *path, 1631 struct btrfs_path *path,
1629 iterate_irefs_t *iterate, void *ctx) 1632 iterate_irefs_t *iterate, void *ctx)
1630 { 1633 {
1631 int ret; 1634 int ret;
1632 int slot; 1635 int slot;
1633 u64 offset = 0; 1636 u64 offset = 0;
1634 u64 parent; 1637 u64 parent;
1635 int found = 0; 1638 int found = 0;
1636 struct extent_buffer *eb; 1639 struct extent_buffer *eb;
1637 struct btrfs_inode_extref *extref; 1640 struct btrfs_inode_extref *extref;
1638 struct extent_buffer *leaf; 1641 struct extent_buffer *leaf;
1639 u32 item_size; 1642 u32 item_size;
1640 u32 cur_offset; 1643 u32 cur_offset;
1641 unsigned long ptr; 1644 unsigned long ptr;
1642 1645
1643 while (1) { 1646 while (1) {
1644 ret = btrfs_find_one_extref(fs_root, inum, offset, path, &extref, 1647 ret = btrfs_find_one_extref(fs_root, inum, offset, path, &extref,
1645 &offset); 1648 &offset);
1646 if (ret < 0) 1649 if (ret < 0)
1647 break; 1650 break;
1648 if (ret) { 1651 if (ret) {
1649 ret = found ? 0 : -ENOENT; 1652 ret = found ? 0 : -ENOENT;
1650 break; 1653 break;
1651 } 1654 }
1652 ++found; 1655 ++found;
1653 1656
1654 slot = path->slots[0]; 1657 slot = path->slots[0];
1655 eb = path->nodes[0]; 1658 eb = path->nodes[0];
1656 /* make sure we can use eb after releasing the path */ 1659 /* make sure we can use eb after releasing the path */
1657 atomic_inc(&eb->refs); 1660 atomic_inc(&eb->refs);
1658 1661
1659 btrfs_tree_read_lock(eb); 1662 btrfs_tree_read_lock(eb);
1660 btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK); 1663 btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
1661 btrfs_release_path(path); 1664 btrfs_release_path(path);
1662 1665
1663 leaf = path->nodes[0]; 1666 leaf = path->nodes[0];
1664 item_size = btrfs_item_size_nr(leaf, path->slots[0]); 1667 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1665 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]); 1668 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
1666 cur_offset = 0; 1669 cur_offset = 0;
1667 1670
1668 while (cur_offset < item_size) { 1671 while (cur_offset < item_size) {
1669 u32 name_len; 1672 u32 name_len;
1670 1673
1671 extref = (struct btrfs_inode_extref *)(ptr + cur_offset); 1674 extref = (struct btrfs_inode_extref *)(ptr + cur_offset);
1672 parent = btrfs_inode_extref_parent(eb, extref); 1675 parent = btrfs_inode_extref_parent(eb, extref);
1673 name_len = btrfs_inode_extref_name_len(eb, extref); 1676 name_len = btrfs_inode_extref_name_len(eb, extref);
1674 ret = iterate(parent, name_len, 1677 ret = iterate(parent, name_len,
1675 (unsigned long)&extref->name, eb, ctx); 1678 (unsigned long)&extref->name, eb, ctx);
1676 if (ret) 1679 if (ret)
1677 break; 1680 break;
1678 1681
1679 cur_offset += btrfs_inode_extref_name_len(leaf, extref); 1682 cur_offset += btrfs_inode_extref_name_len(leaf, extref);
1680 cur_offset += sizeof(*extref); 1683 cur_offset += sizeof(*extref);
1681 } 1684 }
1682 btrfs_tree_read_unlock_blocking(eb); 1685 btrfs_tree_read_unlock_blocking(eb);
1683 free_extent_buffer(eb); 1686 free_extent_buffer(eb);
1684 1687
1685 offset++; 1688 offset++;
1686 } 1689 }
1687 1690
1688 btrfs_release_path(path); 1691 btrfs_release_path(path);
1689 1692
1690 return ret; 1693 return ret;
1691 } 1694 }
1692 1695
1693 static int iterate_irefs(u64 inum, struct btrfs_root *fs_root, 1696 static int iterate_irefs(u64 inum, struct btrfs_root *fs_root,
1694 struct btrfs_path *path, iterate_irefs_t *iterate, 1697 struct btrfs_path *path, iterate_irefs_t *iterate,
1695 void *ctx) 1698 void *ctx)
1696 { 1699 {
1697 int ret; 1700 int ret;
1698 int found_refs = 0; 1701 int found_refs = 0;
1699 1702
1700 ret = iterate_inode_refs(inum, fs_root, path, iterate, ctx); 1703 ret = iterate_inode_refs(inum, fs_root, path, iterate, ctx);
1701 if (!ret) 1704 if (!ret)
1702 ++found_refs; 1705 ++found_refs;
1703 else if (ret != -ENOENT) 1706 else if (ret != -ENOENT)
1704 return ret; 1707 return ret;
1705 1708
1706 ret = iterate_inode_extrefs(inum, fs_root, path, iterate, ctx); 1709 ret = iterate_inode_extrefs(inum, fs_root, path, iterate, ctx);
1707 if (ret == -ENOENT && found_refs) 1710 if (ret == -ENOENT && found_refs)
1708 return 0; 1711 return 0;
1709 1712
1710 return ret; 1713 return ret;
1711 } 1714 }
1712 1715
1713 /* 1716 /*
1714 * returns 0 if the path could be dumped (probably truncated) 1717 * returns 0 if the path could be dumped (probably truncated)
1715 * returns <0 in case of an error 1718 * returns <0 in case of an error
1716 */ 1719 */
1717 static int inode_to_path(u64 inum, u32 name_len, unsigned long name_off, 1720 static int inode_to_path(u64 inum, u32 name_len, unsigned long name_off,
1718 struct extent_buffer *eb, void *ctx) 1721 struct extent_buffer *eb, void *ctx)
1719 { 1722 {
1720 struct inode_fs_paths *ipath = ctx; 1723 struct inode_fs_paths *ipath = ctx;
1721 char *fspath; 1724 char *fspath;
1722 char *fspath_min; 1725 char *fspath_min;
1723 int i = ipath->fspath->elem_cnt; 1726 int i = ipath->fspath->elem_cnt;
1724 const int s_ptr = sizeof(char *); 1727 const int s_ptr = sizeof(char *);
1725 u32 bytes_left; 1728 u32 bytes_left;
1726 1729
1727 bytes_left = ipath->fspath->bytes_left > s_ptr ? 1730 bytes_left = ipath->fspath->bytes_left > s_ptr ?
1728 ipath->fspath->bytes_left - s_ptr : 0; 1731 ipath->fspath->bytes_left - s_ptr : 0;
1729 1732
1730 fspath_min = (char *)ipath->fspath->val + (i + 1) * s_ptr; 1733 fspath_min = (char *)ipath->fspath->val + (i + 1) * s_ptr;
1731 fspath = btrfs_ref_to_path(ipath->fs_root, ipath->btrfs_path, name_len, 1734 fspath = btrfs_ref_to_path(ipath->fs_root, ipath->btrfs_path, name_len,
1732 name_off, eb, inum, fspath_min, bytes_left); 1735 name_off, eb, inum, fspath_min, bytes_left);
1733 if (IS_ERR(fspath)) 1736 if (IS_ERR(fspath))
1734 return PTR_ERR(fspath); 1737 return PTR_ERR(fspath);
1735 1738
1736 if (fspath > fspath_min) { 1739 if (fspath > fspath_min) {
1737 ipath->fspath->val[i] = (u64)(unsigned long)fspath; 1740 ipath->fspath->val[i] = (u64)(unsigned long)fspath;
1738 ++ipath->fspath->elem_cnt; 1741 ++ipath->fspath->elem_cnt;
1739 ipath->fspath->bytes_left = fspath - fspath_min; 1742 ipath->fspath->bytes_left = fspath - fspath_min;
1740 } else { 1743 } else {
1741 ++ipath->fspath->elem_missed; 1744 ++ipath->fspath->elem_missed;
1742 ipath->fspath->bytes_missing += fspath_min - fspath; 1745 ipath->fspath->bytes_missing += fspath_min - fspath;
1743 ipath->fspath->bytes_left = 0; 1746 ipath->fspath->bytes_left = 0;
1744 } 1747 }
1745 1748
1746 return 0; 1749 return 0;
1747 } 1750 }
1748 1751
/*
 * this dumps all file system paths to the inode into the ipath struct, provided
 * it has been created large enough. each path is zero-terminated and accessed
 * from ipath->fspath->val[i].
 * when it returns, there are ipath->fspath->elem_cnt number of paths available
 * in ipath->fspath->val[]. when the allocated space wasn't sufficient, the
 * number of missed paths is recorded in ipath->fspath->elem_missed, otherwise,
 * it's zero. ipath->fspath->bytes_missing holds the number of bytes that would
 * have been needed to return all paths.
 */
int paths_from_inode(u64 inum, struct inode_fs_paths *ipath)
{
	/* thin wrapper: walk all refs of inum, resolving each into a path */
	return iterate_irefs(inum, ipath->fs_root, ipath->btrfs_path,
			     inode_to_path, ipath);
}
1764 1767
1765 struct btrfs_data_container *init_data_container(u32 total_bytes) 1768 struct btrfs_data_container *init_data_container(u32 total_bytes)
1766 { 1769 {
1767 struct btrfs_data_container *data; 1770 struct btrfs_data_container *data;
1768 size_t alloc_bytes; 1771 size_t alloc_bytes;
1769 1772
1770 alloc_bytes = max_t(size_t, total_bytes, sizeof(*data)); 1773 alloc_bytes = max_t(size_t, total_bytes, sizeof(*data));
1771 data = vmalloc(alloc_bytes); 1774 data = vmalloc(alloc_bytes);
1772 if (!data) 1775 if (!data)
1773 return ERR_PTR(-ENOMEM); 1776 return ERR_PTR(-ENOMEM);
1774 1777
1775 if (total_bytes >= sizeof(*data)) { 1778 if (total_bytes >= sizeof(*data)) {
1776 data->bytes_left = total_bytes - sizeof(*data); 1779 data->bytes_left = total_bytes - sizeof(*data);
1777 data->bytes_missing = 0; 1780 data->bytes_missing = 0;
1778 } else { 1781 } else {
1779 data->bytes_missing = sizeof(*data) - total_bytes; 1782 data->bytes_missing = sizeof(*data) - total_bytes;
1780 data->bytes_left = 0; 1783 data->bytes_left = 0;
1781 } 1784 }
1782 1785
1783 data->elem_cnt = 0; 1786 data->elem_cnt = 0;
1784 data->elem_missed = 0; 1787 data->elem_missed = 0;
1785 1788
1786 return data; 1789 return data;
1787 } 1790 }
1788 1791
1789 /* 1792 /*
1790 * allocates space to return multiple file system paths for an inode. 1793 * allocates space to return multiple file system paths for an inode.
1791 * total_bytes to allocate are passed, note that space usable for actual path 1794 * total_bytes to allocate are passed, note that space usable for actual path
1792 * information will be total_bytes - sizeof(struct inode_fs_paths). 1795 * information will be total_bytes - sizeof(struct inode_fs_paths).
1793 * the returned pointer must be freed with free_ipath() in the end. 1796 * the returned pointer must be freed with free_ipath() in the end.
1794 */ 1797 */
1795 struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root, 1798 struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,
1796 struct btrfs_path *path) 1799 struct btrfs_path *path)
1797 { 1800 {
1798 struct inode_fs_paths *ifp; 1801 struct inode_fs_paths *ifp;
1799 struct btrfs_data_container *fspath; 1802 struct btrfs_data_container *fspath;
1800 1803
1801 fspath = init_data_container(total_bytes); 1804 fspath = init_data_container(total_bytes);
1802 if (IS_ERR(fspath)) 1805 if (IS_ERR(fspath))
1803 return (void *)fspath; 1806 return (void *)fspath;
1804 1807
1805 ifp = kmalloc(sizeof(*ifp), GFP_NOFS); 1808 ifp = kmalloc(sizeof(*ifp), GFP_NOFS);
1806 if (!ifp) { 1809 if (!ifp) {
1807 kfree(fspath); 1810 kfree(fspath);
1808 return ERR_PTR(-ENOMEM); 1811 return ERR_PTR(-ENOMEM);
1809 } 1812 }
1810 1813
1811 ifp->btrfs_path = path; 1814 ifp->btrfs_path = path;
1812 ifp->fspath = fspath; 1815 ifp->fspath = fspath;
1813 ifp->fs_root = fs_root; 1816 ifp->fs_root = fs_root;
1814 1817
1815 return ifp; 1818 return ifp;
1816 } 1819 }
1817 1820
/* Release an ipath allocated by init_ipath(); NULL is tolerated. */
void free_ipath(struct inode_fs_paths *ipath)
{
	if (!ipath)
		return;
	/* fspath comes from vmalloc() (init_data_container), hence vfree() */
	vfree(ipath->fspath);
	kfree(ipath);
}
1825 1828